# emoter/data/fitness_coach/fitness_coach_corpus.py
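# Training corpus for the Emoter fitness-coach persona: each entry pairs one
# utterance with one tone label, so a sentence that carries several tones
# recurs once per label.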
("'I'm here to help you learn.'", 'certainty'),
("'I'm here to help you learn.'", 'agreeable'),
("'I'm here to help you learn.'", 'positive'),
("'I'm here to help you learn.'", 'instructive'),
("'I'm here to help you learn.'", 'emphatic'),
("'I am here to help you learn.'", 'certainty'),
("'I am here to help you learn.'", 'agreeable'),
("'I am here to help you learn.'", 'positive'),
("'I am here to help you learn.'", 'instructive'),
("'I am here to help you dance.'", 'certainty'),
("'I am here to help you dance.'", 'agreeable'),
("'I am here to help you dance.'", 'positive'),
("'I am here to help you dance.'", 'instructive'),
("'I am here to help you learn.'", 'emphatic'),
("'I'm here to help you dance.'", 'certainty'),
("'I'm here to help you dance.'", 'agreeable'),
("'I'm here to help you dance.'", 'positive'),
("'I'm here to help you dance.'", 'instructive'),
("'I'm here to help you dance.'", 'desire'),
("'I'm here to help you learn.'", 'desire'),
("'I'm here to help you learn.'", 'emphatic'),
("'I am here to help you learn.'", 'desire'),
("'I am here to help you learn.'", 'desire'),
("'I am here to help you learn.'", 'emphatic'),
("'I want to help you get fit.'", 'desire'),
("'I want to help you get fit.'", 'instructive'),
("'I want to help you get fit.'", 'certainty'),
("'I want to help you get fit.'", 'agreeable'),
("'I want to help you get fit.'", 'positive'),
("'I want to help you get fit.'", 'emphatic'),
("'I can help you get fit.'", 'desire'),
("'I can help you get fit.'", 'instructive'),
("'I can help you get fit.'", 'certainty'),
("'I can help you get fit.'", 'agreeable'),
("'I can help you get fit.'", 'positive'),
("'I can help you get fit.'", 'desire'),
("'I could help you get fit.'", 'desire'),
("'I could help you get fit.'", 'instructive'),
("'I could help you get fit.'", 'certainty'),
("'I could help you get fit.'", 'agreeable'),
("'I could help you get fit.'", 'positive'),
("'I cannot help you get fit.'", 'negative'),
("'I cannot help you get fit.'", 'instructive'),
("'I cannot help you get fit.'", 'certainty'),
("'I can't help you get fit.'", 'negative'),
("'I can't help you get fit.'", 'instructive'),
("'I can't help you get fit.'", 'certainty'),
("'I could not help you get fit.'", 'negative'),
("'I could not help you get fit.'", 'instructive'),
("'I could not help you get fit.'", 'certainty'),
("'I couldn't help you get fit.'", 'negative'),
("'I couldn't help you get fit.'", 'instructive'),
("'I couldn't help you get fit.'", 'certainty'),
("'I couldn't help you get fit.'", 'regret'),
("'I could not help you get fit.'", 'regret'),
("'I couldn't help you get fit.'", 'emphatic'),
("'I could not help you get fit.'", 'emphatic'),
("'I can help you get fit.'", 'emphatic'),
("'I could help you get fit.'", 'emphatic'),
("'Do more pushups!'", 'instructive'),
("'Do more pushups!'", 'emphatic'),
("'Do more pushups!'", 'intensity'),
("'Do more pushups!'", 'certainty'),
("'You need to do more pushups.'", 'instructive'),
("'You need to do more pushups.'", 'emphatic'),
("'You need to do more pushups.'", 'intensity'),
("'You need to do more pushups.'", 'certainty'),
("'You need to do more pushups.'", 'challenging'),
("'You have to do more pushups.'", 'instructive'),
("'You have to do more pushups.'", 'emphatic'),
("'You have to do more pushups.'", 'intensity'),
("'You have to do more pushups.'", 'certainty'),
("'You have to do more pushups.'", 'challenging'),
("'You have to do more pushups.'", 'calm'),
("'You should do more pushups.'", 'instructive'),
("'You should do more pushups.'", 'emphatic'),
("'You should do more pushups.'", 'intensity'),
("'You should do more pushups.'", 'certainty'),
("'You should do more pushups.'", 'challenging'),
("'You should do more pushups.'", 'calm'),
("'You should do more work.'", 'instructive'),
("'You should do more work.'", 'emphatic'),
("'You should do more work.'", 'intensity'),
("'You should do more work.'", 'certainty'),
("'You should do more work.'", 'challenging'),
("'You should do more work.'", 'calm'),
("'You should do more work.'", 'negative'),
("'You could do more work.'", 'instructive'),
("'You could do more work.'", 'emphatic'),
("'You could do more work.'", 'intensity'),
("'You could do more work.'", 'certainty'),
("'You could do more work.'", 'challenging'),
("'You could do more work.'", 'calm'),
("'You would have to do more work.'", 'instructive'),
("'You would have to do more work.'", 'emphatic'),
("'You would have to do more work.'", 'intensity'),
("'You would have to do more work.'", 'certainty'),
("'You would have to do more work.'", 'challenging'),
("'You would have to do more work.'", 'calm'),
("'How many calories should I intake every day?'", 'inquisitive'),
("'How many calories should I intake every day?'", 'calm'),
("'How many calories should I intake every day?'", 'emphatic'),
("'How many calories should I intake every day?'", 'confusion'),
("'How much milk should I drink a day?'", 'inquisitive'),
("'How much milk should I drink a day?'", 'calm'),
("'How much milk should I drink a day?'", 'emphatic'),
("'How much milk should I drink a day?'", 'confusion'),
("'How much milk can I drink in one day?'", 'inquisitive'),
("'How much milk can I drink in one day?'", 'calm'),
("'How much milk can I drink in one day?'", 'confusion'),
("'How much milk can I drink in one day?'", 'emphatic'),
("'How much milk can I drink in one day?'", 'inquisitive'),
("'How much milk can I drink in one day?'", 'calm'),
("'How much milk can I drink in one day?'", 'confusion'),
("Have you gotten enough protein today?", 'inquisitive'),
("Have you gotten enough protein today?", 'calm'),
("Have you gotten enough protein today?", 'emphatic'),
("Have you gotten enough protein today?", 'instructive'),
("Have you eaten enough vegetables today?", 'inquisitive'),
("Have you eaten enough vegetables today?", 'calm'),
("Have you eaten enough vegetables today?", 'emphatic'),
("Have you eaten enough vegetables today?", 'instructive'),
("What time should I take this?", 'inquisitive'),
("What time should I take this?", 'confusion'),
("What time should I take this?", 'emphatic'),
("When is the best time to eat after a workout?", 'emphatic'),
("When is the best time to eat after a workout?", 'confusion'),
("When is the best time to eat after a workout?", 'inquisitive'),
("When should I?", 'inquisitive'),
("When should I?", 'confusion'),
("When should I?", 'emphatic'),
("When do we?", 'inquisitive'),
("When do we?", 'confusion'),
("When do we?", 'emphatic'),
("When's the best time for me to eat after a workout?", 'emphatic'),
("When's the best time for me to eat after a workout?", 'confusion'),
("When's the best time for me to eat after a workout?", 'inquisitive'),
("When can I try that?", 'emphatic'),
("When can I try that?", 'confusion'),
("When can I try that?", 'inquisitive'),
("When can I move on?", 'emphatic'),
("When can I move on?", 'confusion'),
("When can I move on?", 'inquisitive'),
("When do I get to do that?", 'emphatic'),
("When do I get to do that?", 'confusion'),
("When do I get to do that?", 'inquisitive'),
("Where do I go to do that?", 'emphatic'),
("Where do I go to do that?", 'confusion'),
("Where do I go to do that?", 'inquisitive'),
("How did you do that?", 'emphatic'),
("How did you do that?", 'confusion'),
("How did you do that?", 'inquisitive'),
("How did you do that?", 'accusative'),
("How do you know?", 'emphatic'),
("How do you know?", 'confusion'),
("How do you know?", 'inquisitive'),
("How do you know?", 'accusative'),
("Where do I go to get diet pills?", 'emphatic'),
("Where do I go to get diet pills?", 'confusion'),
("Where do I go to get diet pills?", 'inquisitive'),
("You need to eat more.", 'instructive'),
("You need to eat more.", 'certainty'),
("You need to eat more.", 'emphatic'),
("You need to eat more.", 'accusative'),
("You need to eat less.", 'instructive'),
("You need to eat less.", 'certainty'),
("You need to eat less.", 'emphatic'),
("You need to eat less.", 'accusative'),
("You should eat more.", 'instructive'),
("You should eat more.", 'certainty'),
("You should eat more.", 'emphatic'),
("You should eat more.", 'accusative'),
("You should eat more.", 'calm'),
("You should try to eat more healthy foods.", 'instructive'),
("You should try to eat more healthy foods.", 'certainty'),
("You should try to eat more healthy foods.", 'emphatic'),
("You should try to eat more healthy foods.", 'accusative'),
("You should try to eat more healthy foods.", 'calm'),
("You need to try to eat more healthy foods.", 'instructive'),
("You need to try to eat more healthy foods.", 'certainty'),
("You need to try to eat more healthy foods.", 'emphatic'),
("You need to try to eat more healthy foods.", 'accusative'),
("You need to try to eat more healthy foods.", 'negative'),
("You need to try to eat more healthy foods.", 'intensity'),
("You have got to try to eat more healthy foods.", 'instructive'),
("You have got to try to eat more healthy foods.", 'certainty'),
("You have got to try to eat more healthy foods.", 'emphatic'),
("You have got to try to eat more healthy foods.", 'accusative'),
("You have got to try to eat more healthy foods.", 'negative'),
("You got to try to eat more healthy foods.", 'instructive'),
("You got to try to eat more healthy foods.", 'certainty'),
("You got to try to eat more healthy foods.", 'emphatic'),
("You got to try to eat more healthy foods.", 'accusative'),
("You got to try to eat more healthy foods.", 'negative'),
("You gotta try to eat more healthy foods.", 'instructive'),
("You gotta try to eat more healthy foods.", 'certainty'),
("You gotta try to eat more healthy foods.", 'emphatic'),
("You gotta try to eat more healthy foods.", 'accusative'),
("You gotta try to eat more healthy foods.", 'negative'),
("You've got to try to eat more healthy foods.", 'instructive'),
("You've got to try to eat more healthy foods.", 'certainty'),
("You've got to try to eat more healthy foods.", 'emphatic'),
("You've got to try to eat more healthy foods.", 'accusative'),
("You've got to try to eat more healthy foods.", 'negative'),
("You should try to eat less fatty foods.", 'instructive'),
("You should try to eat less fatty foods.", 'certainty'),
("You should try to eat less fatty foods.", 'emphatic'),
("You should try to eat less fatty foods.", 'accusative'),
("You need to try to eat less fatty foods.", 'instructive'),
("You need to try to eat less fatty foods.", 'certainty'),
("You need to try to eat less fatty foods.", 'emphatic'),
("You need to try to eat less fatty foods.", 'accusative'),
("You need to try to eat less fatty foods.", 'negative'),
("You need to try to eat less fatty foods.", 'calm'),
("You have got to try to eat less fatty foods.", 'instructive'),
("You have got to try to eat less fatty foods.", 'certainty'),
("You have got to try to eat less fatty foods.", 'emphatic'),
("You have got to try to eat less fatty foods.", 'accusative'),
("You have got to try to eat less fatty foods.", 'negative'),
("You have got to try to eat less fatty foods.", 'intensity'),
("You got to try to eat less fatty foods.", 'instructive'),
("You got to try to eat less fatty foods.", 'certainty'),
("You got to try to eat less fatty foods.", 'emphatic'),
("You got to try to eat less fatty foods.", 'accusative'),
("You got to try to eat less fatty foods.", 'negative'),
("You got to try to eat less fatty foods.", 'intensity'),
("You gotta try to eat less fatty foods.", 'instructive'),
("You gotta try to eat less fatty foods.", 'certainty'),
("You gotta try to eat less fatty foods.", 'emphatic'),
("You gotta try to eat less fatty foods.", 'accusative'),
("You gotta try to eat less fatty foods.", 'negative'),
("You gotta try to eat less fatty foods.", 'intensity'),
("You've got to try to less fatty foods.", 'instructive'),
("You've got to try to less fatty foods.", 'certainty'),
("You've got to try to less fatty foods.", 'emphatic'),
("You've got to try to less fatty foods.", 'accusative'),
("You've got to try to less fatty foods.", 'negative'),
("You should eat less.", 'instructive'),
("You should eat less.", 'certainty'),
("You should eat less.", 'emphatic'),
("You should eat less.", 'accusative'),
("You should eat less.", 'calm'),
("Keep it up!", 'instructive'),
("Keep it up!", 'intensity'),
("Keep it up!", 'certainty'),
("Keep it up!", 'positive'),
("Keep it up!", 'emphatic'),
("Keep it up!", 'agreeable'),
("Keep going!", 'instructive'),
("Keep going!", 'intensity'),
("Keep going!", 'certainty'),
("Keep going!", 'positive'),
("Keep going!", 'emphatic'),
("Keep going!", 'agreeable'),
("Stay steady..", 'instructive'),
("Stay steady..", 'intensity'),
("Stay steady..", 'certainty'),
("Stay steady..", 'positive'),
("Stay steady..", 'emphatic'),
("Stay steady..", 'agreeable'),
("Steady now..", 'instructive'),
("Steady now..", 'intensity'),
("Steady now..", 'certainty'),
("Steady now..", 'positive'),
("Steady now..", 'emphatic'),
("Steady now..", 'agreeable'),
("Keep up the good work!", 'instructive'),
("Keep up the good work!", 'intensity'),
("Keep up the good work!", 'certainty'),
("Keep up the good work!", 'positive'),
("Keep up the good work!", 'emphatic'),
("Keep up the good work!", 'agreeable'),
("Keep up the good work guys!", 'instructive'),
("Keep up the good work guys!", 'intensity'),
("Keep up the good work guys!", 'certainty'),
("Keep up the good work guys!", 'positive'),
("Keep up the good work guys!", 'emphatic'),
("Keep up the good work guys!", 'agreeable'),
("Keep up the great work!", 'instructive'),
("Keep up the great work!", 'intensity'),
("Keep up the great work!", 'certainty'),
("Keep up the great work!", 'positive'),
("Keep up the great work!", 'emphatic'),
("Keep up the great work!", 'agreeable'),
("Keep it up, man!", 'instructive'),
("Keep it up, man!", 'intensity'),
("Keep it up, man!", 'certainty'),
("Keep it up, man!", 'positive'),
("Keep it up, man!", 'emphatic'),
("Keep it up, man!", 'agreeable'),
("Keep it up, girl!", 'instructive'),
("Keep it up, girl!", 'intensity'),
("Keep it up, girl!", 'certainty'),
("Keep it up, girl!", 'positive'),
("Keep it up, girl!", 'emphatic'),
("Keep it up, girl!", 'agreeable'),
("Keep it up, dude!", 'instructive'),
("Keep it up, dude!", 'intensity'),
("Keep it up, dude!", 'certainty'),
("Keep it up, dude!", 'positive'),
("Keep it up, dude!", 'emphatic'),
("Keep it up, dude!", 'agreeable'),
("Keep it up, bro!", 'instructive'),
("Keep it up, bro!", 'intensity'),
("Keep it up, bro!", 'certainty'),
("Keep it up, bro!", 'positive'),
("Keep it up, bro!", 'emphatic'),
("Keep it up, bro!", 'agreeable'),
("Keep up!", 'instructive'),
("Keep up!", 'intensity'),
("Keep up!", 'certainty'),
("You have to keep up!", 'certainty'),
("You have to keep up!", 'intensity'),
("You have to keep up!", 'instructive'),
("You have to keep up!", 'emphatic'),
("You've got to keep up!", 'certainty'),
("You've got to keep up!", 'intensity'),
("You've got to keep up!", 'instructive'),
("You've got to keep up!", 'emphatic'),
("You've gotta keep up!", 'certainty'),
("You've gotta keep up!", 'intensity'),
("You've gotta keep up!", 'instructive'),
("You've gotta keep up!", 'emphatic'),
("You've got to keep up with me!", 'certainty'),
("You've got to keep up with me!", 'intensity'),
("You've got to keep up with me!", 'instructive'),
("You've got to keep up with me!", 'emphatic'),
("You've gotta keep up with us!", 'certainty'),
("You've gotta keep up with us!", 'intensity'),
("You've gotta keep up with us!", 'instructive'),
("You've gotta keep up with us!", 'emphatic'),
("Hold still.", 'instructive'),
("Hold still.", 'intensity'),
("Hold still.", 'certainty'),
("Stay still for a few seconds.", 'instructive'),
("Stay still for a few seconds.", 'intensity'),
("Stay still for a few seconds.", 'certainty'),
("You need to keep still during this stretch.", 'instructive'),
("You need to keep still during this stretch.", 'intensity'),
("You need to keep still during this stretch.", 'certainty'),
("Stretch those arms.", 'instructive'),
("Stretch those arms.", 'certainty'),
("You need to stretch your legs.", 'instructive'),
("You need to stretch your legs.", 'certainty'),
("You need to stretch your legs.", 'intensity'),
("You need to stretch more often.", 'instructive'),
("You need to stretch more often.", 'certainty'),
("You need to stretch more often.", 'intensity'),
("Wash your face with cold water.", 'instructive'),
("Wash your face with cold water.", 'certainty'),
("Wash your face with cold water because it helps your skin retain moisture.", 'certainty'),
("Wash your face with cold water because it helps your skin retain moisture.", 'instructive'),
("Wash your face with cold water because it helps your skin retain moisture.", 'emphatic'),
("Shower everyday.", 'instructive'),
("Shower everyday.", 'certainty'),
("Shower everyday.", 'intensity'),
("Good job, man.", 'certainty'),
("Good job, man.", 'positive'),
("Good job, man.", 'emphatic'),
("Good job, man.", 'instructive'),
("Good job.", 'certainty'),
("Good job.", 'positive'),
("Good job.", 'emphatic'),
("Good job.", 'instructive'),
("Nice job.", 'certainty'),
("Nice job.", 'positive'),
("Nice job.", 'emphatic'),
("Nice job.", 'instructive'),
("Nice work.", 'certainty'),
("Nice work.", 'positive'),
("Nice work.", 'emphatic'),
("Nice work.", 'instructive'),
("Good work.", 'certainty'),
("Good work.", 'positive'),
("Good work.", 'emphatic'),
("Good work.", 'instructive'),
("Good going.", 'certainty'),
("Good going.", 'positive'),
("Good going.", 'emphatic'),
("Good going.", 'instructive'),
("Great job, guys.", 'certainty'),
("Great job, guys.", 'positive'),
("Great job, guys.", 'emphatic'),
("Great job, guys.", 'instructive'),
("Great job, man.", 'certainty'),
("Great job, man.", 'positive'),
("Great job, man.", 'emphatic'),
("Amazing work, everyone.", 'instructive'),
("Amazing work, everyone.", 'certainty'),
("Amazing work, everyone.", 'positive'),
("Amazing work, everyone.", 'emphatic'),
("Amazing work, everyone.", 'instructive'),
("You did the best you can.", 'certainty'),
("You did the best you can.", 'instructive'),
("You did the best you can.", 'positive'),
("You did the best you can.", 'emphatic'),
("You did the best you can.", 'pride'),
("You tried.", 'certainty'),
("You tried.", 'calm'),
("You tried.", 'positive'),
("You tried your best.", 'agreeable'),
("You tried your best.", 'challenging'),
("You tried your best.", 'certainty'),
("You tried your best.", 'pride'),
("You tried your best.", 'positive'),
("You did your best.", 'certainty'),
("You did your best.", 'pride'),
("You did your best.", 'positive'),
("You did the best you can.", 'certainty'),
("You did the best you can.", 'pride'),
("You did the best you can.", 'positive'),
("You did the best you could.", 'certainty'),
("You did the best you could.", 'pride'),
("You did the best you could.", 'positive'),
("You are the best", 'positive'),
("You are the best.", 'certainty'),
("You are the best", 'pride'),
("You are the best", 'intensity'),
("You are great.", 'positive'),
("You are great.", 'certainty'),
("You are great.", 'pride'),
("You are great.", 'intensity'),
("You're the best", 'positive'),
("You're the best.", 'certainty'),
("You're the best", 'pride'),
("You're the best", 'intensity'),
("You're great.", 'positive'),
("You're great.", 'certainty'),
("You're great.", 'pride'),
("You're great.", 'intensity'),
("Don't cry.", 'instructive'),
("Don't cry.", 'emphatic'),
("Don't cry.", 'intensity'),
("Don't cry.", 'challenging'),
("Don't whine.", 'instructive'),
("Don't whine.", 'emphatic'),
("Don't whine.", 'intensity'),
("Don't whine.", 'challenging'),
("Do not cry.", 'instructive'),
("Do not cry.", 'emphatic'),
("Do not cry.", 'intensity'),
("Do not cry.", 'challenging'),
("Do not whine.", 'instructive'),
("Do not whine.", 'emphatic'),
("Do not whine.", 'intensity'),
("Do not whine.", 'challenging'),
("Stop crying.", 'instructive'),
("Stop crying.", 'emphatic'),
("Stop crying.", 'intensity'),
("Stop crying.", 'challenging'),
("Stop whining.", 'instructive'),
("Stop whining.", 'emphatic'),
("Stop whining.", 'intensity'),
("Stop whining.", 'challenging'),
("Stop this crying.", 'instructive'),
("Stop this crying.", 'emphatic'),
("Stop this crying.", 'intensity'),
("Stop this crying.", 'challenging'),
("Stop this whining.", 'instructive'),
("Stop this whining.", 'emphatic'),
("Stop this whining.", 'intensity'),
("Stop this whining.", 'challenging'),
("Stop your crying.", 'instructive'),
("Stop your crying.", 'emphatic'),
("Stop your crying.", 'intensity'),
("Stop your crying.", 'challenging'),
("Stop your whining.", 'instructive'),
("Stop your whining.", 'emphatic'),
("Stop your whining.", 'intensity'),
("Stop your whining.", 'challenging'),
("Quit crying.", 'instructive'),
("Quit crying.", 'emphatic'),
("Quit crying.", 'intensity'),
("Quit crying.", 'challenging'),
("Quit whining.", 'instructive'),
("Quit whining.", 'emphatic'),
("Quit whining.", 'intensity'),
("Quit whining.", 'challenging'),
("End this crying.", 'instructive'),
("End this crying.", 'emphatic'),
("End this crying.", 'intensity'),
("End this crying.", 'challenging'),
("End this whining.", 'instructive'),
("End this whining.", 'emphatic'),
("End this whining.", 'intensity'),
("End this whining.", 'challenging'),
("I've had enough of your crying.", 'emphatic'),
("I've had enough of your crying.", 'intensity'),
("I've had enough of your crying.", 'challenging'),
("I've had enough of your whining.", 'instructive'),
("I've had enough of your whining.", 'emphatic'),
("I've had enough of your whining.", 'intensity'),
("I've had enough of your whining.", 'challenging'),
("I have heard enough of your crying.", 'emphatic'),
("I have heard enough of your crying.", 'intensity'),
("I have heard enough of your crying.", 'challenging'),
("I have heard enough of your whining.", 'instructive'),
("I have heard enough of your whining.", 'emphatic'),
("I have heard enough of your whining.", 'intensity'),
("I have heard enough of your whining.", 'challenging'),
("I have had enough of your crying.", 'emphatic'),
("I have had enough of your crying.", 'intensity'),
("I have had enough of your crying.", 'challenging'),
("I have had enough of your whining.", 'instructive'),
("I have had enough of your whining.", 'emphatic'),
("I have had enough of your whining.", 'intensity'),
("I have had enough of your whining.", 'challenging'),
("I've heard enough of your crying.", 'emphatic'),
("I've heard enough of your crying.", 'intensity'),
("I've heard enough of your crying.", 'challenging'),
("I've heard enough of your whining.", 'instructive'),
("I've heard enough of your whining.", 'emphatic'),
("I've heard enough of your whining.", 'intensity'),
("I've heard enough of your whining.", 'challenging'),
("I'm sorry I hurt you.", 'regret'),
("I'm sorry I hurt you.", 'modest'),
("I'm sorry I hurt you.", 'negative'),
("I'm sorry I hurt you.", 'anger'),
("I'm sorry I hurt you.", 'emphatic'),
("I am sorry I hurt you.", 'regret'),
("I am sorry I hurt you.", 'modest'),
("I am sorry I hurt you.", 'negative'),
("I am sorry I hurt you.", 'anger'),
("I am sorry I hurt you.", 'emphatic'),
("I'm sorry that I hurt you.", 'regret'),
("I'm sorry that I hurt you.", 'modest'),
("I'm sorry that I hurt you.", 'negative'),
("I'm sorry that I hurt you.", 'anger'),
("I'm sorry that I hurt you.", 'emphatic'),
("I am sorry that I have hurt you.", 'regret'),
("I am sorry that I have hurt you.", 'modest'),
("I am sorry that I have hurt you.", 'negative'),
("I am sorry that I have hurt you.", 'anger'),
("I am sorry that I have hurt you.", 'emphatic'),
("I am sorry that I had hurt you.", 'regret'),
("I am sorry that I had hurt you.", 'modest'),
("I am sorry that I had hurt you.", 'negative'),
("I am sorry that I had hurt you.", 'anger'),
("I am sorry that I had hurt you.", 'emphatic'),
("I'm sorry he hurt you.", 'regret'),
("I'm sorry he hurt you.", 'modest'),
("I'm sorry he hurt you.", 'negative'),
("I'm sorry he hurt you.", 'anger'),
("I'm sorry he hurt you.", 'emphatic'),
("I am sorry she hurt you.", 'regret'),
("I am sorry she hurt you.", 'modest'),
("I am sorry she hurt you.", 'negative'),
("I am sorry she hurt you.", 'anger'),
("I am sorry she hurt you.", 'emphatic'),
("I am sorry they hurt you.", 'regret'),
("I am sorry they hurt you.", 'modest'),
("I am sorry they hurt you.", 'negative'),
("I am sorry they hurt you.", 'anger'),
("I am sorry they hurt you.", 'emphatic'),
("I am sorry we hurt you.", 'regret'),
("I am sorry we hurt you.", 'modest'),
("I am sorry we hurt you.", 'negative'),
("I am sorry we hurt you.", 'anger'),
("I am sorry we hurt you.", 'emphatic'),
("We are sorry we hurt you.", 'regret'),
("We are sorry we hurt you.", 'modest'),
("We are sorry we hurt you.", 'negative'),
("We are sorry we hurt you.", 'anger'),
("We are sorry we hurt you.", 'emphatic'),
("We are very sorry we hurt you.", 'regret'),
("We are very sorry we hurt you.", 'modest'),
("We are very sorry we hurt you.", 'negative'),
("We are very sorry we hurt you.", 'anger'),
("We are very sorry we hurt you.", 'emphatic'),
("We're so sorry we hurt you.", 'regret'),
("We're so sorry we hurt you.", 'modest'),
("We're so sorry we hurt you.", 'negative'),
("We're so sorry we hurt you.", 'anger'),
("We're so sorry we hurt you.", 'emphatic'),
("We're really sorry we hurt you.", 'regret'),
("We're really sorry we hurt you.", 'modest'),
("We're really sorry we hurt you.", 'negative'),
("We're really sorry we hurt you.", 'anger'),
("We're really sorry we hurt you.", 'emphatic'),
("I hate those.", 'intensity'),
("I hate those.", 'anger'),
("I hate those.", 'negative'),
("I hate those.", 'regret'),
("I hate those.", 'hate'),
("I hate those.", 'challenging'),
("Oh, I hate those.", 'intensity'),
("Oh, I hate those.", 'anger'),
("Oh, I hate those.", 'negative'),
("Oh, I hate those.", 'regret'),
("Oh, I hate those.", 'hate'),
("Oh, I hate those.", 'challenging'),
("No I hate those things.", 'intensity'),
("No I hate those things.", 'anger'),
("No I hate those things.", 'negative'),
("No I hate those things.", 'regret'),
("No I hate those things.", 'hate'),
("No I hate those things.", 'challenging'),
("My name is Sam.", 'certainty'),
("My name is Sam.", 'calm'),
("My name's Sam.", 'certainty'),
("My name's Sam.", 'calm'),
("My name is Samantha.", 'certainty'),
("My name is Samantha.", 'calm'),
("You're the same, girl.", 'certainty'),
("You're the same, girl.", 'challenging'),
("You're the same, girl.", 'accusative'),
("You're the same, girl.", 'emphatic'),
("You're the same.", 'accusative'),
("You're the same.", 'emphatic'),
("You're the same.", 'certainty'),
("You are the same, girl.", 'certainty'),
("You are the same, girl.", 'challenging'),
("You are the same, girl.", 'accusative'),
("You are the same, girl.", 'emphatic'),
("You are the same.", 'accusative'),
("You are the same.", 'emphatic'),
("You are the same.", 'certainty'),
("You fucked up.", 'vulgarity'),
("You fucked up.", 'anger'),
("You fucked up.", 'accusative'),
("You fucked up.", 'negative'),
("You fucked up.", 'hate'),
("You fucked up.", 'regret'),
("You fucked up.", 'certainty'),
("You fucked up.", 'emphatic'),
("You fuck up.", 'vulgarity'),
("You fuck up.", 'anger'),
("You fuck up.", 'accusative'),
("You fuck up.", 'negative'),
("You fuck up.", 'hate'),
("You fuck up.", 'regret'),
("You fuck up.", 'certainty'),
("You fuck up.", 'emphatic'),
("You dumbass.", 'vulgarity'),
("You dumbass.", 'anger'),
("You dumbass.", 'accusative'),
("You dumbass.", 'negative'),
("You dumbass.", 'hate'),
("You dumbass.", 'regret'),
("You dumbass.", 'certainty'),
("You dumbass.", 'emphatic'),
("You dumbfuck.", 'vulgarity'),
("You dumbfuck.", 'anger'),
("You dumbfuck.", 'accusative'),
("You dumbfuck.", 'negative'),
("You dumbfuck.", 'hate'),
("You dumbfuck.", 'regret'),
("You dumbfuck.", 'certainty'),
("You dumbfuck.", 'emphatic'),
("You dumb bitch.", 'vulgarity'),
("You dumb bitch.", 'anger'),
("You dumb bitch.", 'accusative'),
("You dumb bitch.", 'negative'),
("You dumb bitch.", 'hate'),
("You dumb bitch.", 'regret'),
("You dumb bitch.", 'certainty'),
("You dumb bitch.", 'emphatic'),
("You bitch.", 'vulgarity'),
("You bitch.", 'anger'),
("You bitch.", 'accusative'),
("You bitch.", 'negative'),
("You bitch.", 'hate'),
("You bitch.", 'regret'),
("You bitch.", 'certainty'),
("You bitch.", 'emphatic'),
("You slut.", 'vulgarity'),
("You slut.", 'anger'),
("You slut.", 'accusative'),
("You slut.", 'negative'),
("You slut.", 'hate'),
("You slut.", 'regret'),
("You slut.", 'certainty'),
("You slut.", 'emphatic'),
("You're a slut.", 'vulgarity'),
("You're a slut.", 'anger'),
("You're a slut.", 'accusative'),
("You're a slut.", 'negative'),
("You're a slut.", 'hate'),
("You're a slut.", 'regret'),
("You're a slut.", 'certainty'),
("You're a slut.", 'emphatic'),
("You dumb shit.", 'vulgarity'),
("You dumb shit.", 'anger'),
("You dumb shit.", 'accusative'),
("You dumb shit.", 'negative'),
("You dumb shit.", 'hate'),
("You dumb shit.", 'regret'),
("You dumb shit.", 'certainty'),
("You dumb shit.", 'emphatic'),
("You idiot.", 'anger'),
("You idiot.", 'accusative'),
("You idiot.", 'negative'),
("You idiot.", 'hate'),
("You idiot.", 'regret'),
("You idiot.", 'certainty'),
("You idiot.", 'emphatic'),
("You moron.", 'anger'),
("You moron.", 'accusative'),
("You moron.", 'negative'),
("You moron.", 'hate'),
("You moron.", 'regret'),
("You moron.", 'certainty'),
("You moron.", 'emphatic'),
("You screwed up.", 'vulgarity'),
("You screwed up.", 'anger'),
("You screwed up.", 'accusative'),
("You screwed up.", 'negative'),
("You screwed up.", 'hate'),
("You screwed up.", 'regret'),
("You screwed up.", 'certainty'),
("You screwed up.", 'emphatic'),
("You fucked it up.", 'vulgarity'),
("You fucked it up.", 'anger'),
("You fucked it up.", 'accusative'),
("You fucked it up.", 'negative'),
("You fucked it up.", 'hate'),
("You fucked it up.", 'regret'),
("You fucked it up.", 'certainty'),
("You fucked it up.", 'emphatic'),
("You fucked it all up.", 'vulgarity'),
("You fucked it all up.", 'anger'),
("You fucked it all up.", 'accusative'),
("You fucked it all up.", 'negative'),
("You fucked it all up.", 'hate'),
("You fucked it all up.", 'regret'),
("You fucked it all up.", 'certainty'),
("You fucked it all up.", 'emphatic'),
("You fucked everything up.", 'vulgarity'),
("You fucked everything up.", 'anger'),
("You fucked everything up.", 'accusative'),
("You fucked everything up.", 'negative'),
("You fucked everything up.", 'hate'),
("You fucked everything up.", 'regret'),
("You fucked everything up.", 'certainty'),
("You fucked everything up.", 'emphatic'),
("You screwed it up.", 'vulgarity'),
("You screwed it up.", 'anger'),
("You screwed it up.", 'accusative'),
("You screwed it up.", 'negative'),
("You screwed it up.", 'hate'),
("You screwed it up.", 'regret'),
("You screwed it up.", 'certainty'),
("You screwed it up.", 'emphatic'),
("You messed up.", 'anger'),
("You messed up.", 'accusative'),
("You messed up.", 'negative'),
("You messed up.", 'hate'),
("You messed up.", 'regret'),
("You messed up.", 'certainty'),
("You messed up.", 'emphatic'),
("You messed it up.", 'anger'),
("You messed it up.", 'accusative'),
("You messed it up.", 'negative'),
("You messed it up.", 'hate'),
("You messed it up.", 'regret'),
("You messed it up.", 'certainty'),
("You messed it up.", 'emphatic'),
("You messed it all up.", 'anger'),
("You messed it all up.", 'accusative'),
("You messed it all up.", 'negative'),
("You messed it all up.", 'hate'),
("You messed it all up.", 'regret'),
("You messed it all up.", 'certainty'),
("You messed it all up.", 'emphatic'),
("You mess everything up.", 'anger'),
("You mess everything up.", 'accusative'),
("You mess everything up.", 'negative'),
("You mess everything up.", 'hate'),
("You mess everything up.", 'regret'),
("You mess everything up.", 'certainty'),
("You mess everything up.", 'emphatic'),
("You mess everything up.", 'intensity'),
("You mess everything up.", 'challenging'),
("You mess anything up.", 'anger'),
("You mess anythig up.", 'accusative'),
("You mess anything up.", 'negative'),
("You mess anything up.", 'hate'),
("You mess anything up.", 'regret'),
("You mess anything up.", 'certainty'),
("You mess anything up.", 'emphatic'),
("You mess anything up.", 'intensity'),
("You mess anything up.", 'challenging'),
("I don't even want you.", 'regret'),
("I don't even want you.", 'hate'),
("I don't even want you.", 'anger'),
("I don't even want you.", 'negative'),
("I don't even want you.", 'intensity'),
("I don't even want you.", 'certainty'),
("I don't even want you.", 'ambivalence'),
("I don't even want you.", 'emphatic'),
("I don't even want you.", 'challenging'),
("I do not want you.", 'ambivalence'),
("I do not want you.", 'emphatic'),
("I don't even want it.", 'regret'),
("I don't even want it.", 'hate'),
("I don't even want it.", 'anger'),
("I don't even want it.", 'negative'),
("I don't even want it.", 'intensity'),
("I don't even want it.", 'certainty'),
("I don't even want it.", 'ambivalence'),
("I don't even want it.", 'emphatic'),
("I don't even want it.", 'challenging'),
("I do not want it.", 'ambivalence'),
("I do not want it.", 'intensity'),
("I do not want it.", 'negative'),
("I do not want it.", 'challenging'),
("I do not want it.", 'emphatic'),
("Now, I don't want you.", 'certainty'),
("Now, I don't want you.", 'negative'),
("Now, I don't want you.", 'certainty'),
("Now, I don't want you.", 'challenging'),
("Now, I don't want you.", 'hate'),
("Now, I don't want you.", 'anger'),
("Now, I don't want you.", 'intensity'),
("Now, I don't want you.", 'emphatic'),
("Now, I do not even want you.", 'certainty'),
("Now, I do not even want you.", 'negative'),
("Now, I do not even want you.", 'certainty'),
("Now, I do not even want you.", 'challenging'),
("Now, I do not even want you.", 'hate'),
("Now, I do not even want you.", 'anger'),
("Now, I do not even want you.", 'intensity'),
("Now, I do not even want you.", 'emphatic'),
("I don't want you anymore.", 'certainty'),
("I don't want you anymore.", 'negative'),
("I don't want you anymore.", 'certainty'),
("I don't want you anymore.", 'challenging'),
("I don't want you anymore.", 'hate'),
("I don't want you anymore.", 'anger'),
("I don't want you anymore.", 'intensity'),
("I don't want you anymore.", 'emphatic'),
("I don't want you now.", 'certainty'),
("I don't want you now.", 'negative'),
("I don't want you now.", 'certainty'),
("I don't want you now.", 'challenging'),
("I don't want you now.", 'hate'),
("I don't want you now.", 'anger'),
("I don't want you now.", 'intensity'),
("I don't want you now.", 'emphatic'),
("I'm just saying.", 'certainty'),
("I'm just saying.", 'ambivalence'),
("I'm just saying.", 'agreeable'),
("I'm just saying.", 'challenging'),
("I'm just saying.", 'emphatic'),
("I'm just sayin'.", 'certainty'),
("I'm just sayin'.", 'ambivalence'),
("I'm just sayin'.", 'agreeable'),
("I'm just sayin'.", 'challenging'),
("I'm just sayin'.", 'emphatic'),
("I've got a saying, girl.", 'certainty'),
("I've got a saying, girl.", 'instructive'),
("I've got a saying, girl.", 'emphatic'),
("I have got a saying, you know.", 'certainty'),
("I have got a saying, you know.", 'instructive'),
("I have got a saying, you know.", 'emphatic'),
("I noticed that I'm better off without you.", 'certainty'),
("I noticed that I'm better off without you.", 'emphatic'),
("I noticed that I'm better off without you.", 'intensity'),
("I noticed that I am better off without you.", 'certainty'),
("I noticed that I am better off without you.", 'intensity'),
("I noticed that I am better off without you.", 'emphatic'),
("I noticed that I am better off without you.", 'negative'),
("Now I see that I'm better off without you.", 'certainty'),
("Now I see that I am better off without you.", 'emphatic'),
("Now I see that I am better off without you.", 'intensity'),
("Now I see that I am better off without you.", 'negative'),
("Don't be calling my phone, please.", 'challenging'),
("Don't be calling my phone, please.", 'instructive'),
("Don't be calling my phone, please.", 'calm'),
("Don't call my phone.", 'challenging'),
("Don't call my phone.", 'instructive'),
("Do not call my phone.", 'challenging'),
("Do not call my phone.", 'instructive'),
("Don't go calling my phone.", 'certainty'),
("Don't go calling my phone, please.", 'certainty'),
("I don't love you no more.", 'certainty'),
("I don't love you no more.", 'negative'),
("I don't love you no more.", 'challenging'),
("I don't love you anymore.", 'certainty'),
("I don't love you anymore.", 'negative'),
("I don't love you anymore.", 'challenging'),
("I tried.", 'certainty'),
("I tried.", 'calm'),
("I tried my best.", 'agreeable'),
("I tried my best.", 'challenging'),
("I tried my best.", 'certainty'),
("I tried my best.", 'pride'),
("I did my best.", 'certainty'),
("I did my best.", 'pride'),
("I did the best I can.", 'certainty'),
("I did the best I can.", 'pride'),
("I am the best", 'positive'),
("I am the best.", 'certainty'),
("I am the best", 'pride'),
("I am the best", 'intensity'),
("I am great.", 'positive'),
("I am great.", 'certainty'),
("I am great.", 'pride'),
("I am great.", 'intensity'),
("Just know that I tried.", 'agreeable'),
("Just know that I tried.", 'certainty'),
("Just know that I tried.", 'instructive'),
("Just know that I tried.", 'pride'),
("Just know that I tried.", 'emphatic'),
("You should just know that I tried.", 'agreeable'),
("You should just know that I tried.", 'certainty'),
("You should just know that I tried.", 'instructive'),
("You should just know that I tried.", 'pride'),
("You should just know that I tried.", 'emphatic'),
("I want you to know I tried.", 'desire'),
("I want you to know I tried.", 'certainty'),
("I want you to know I tried.", 'instructive'),
("I want you to know I tried.", 'pride'),
("I want you to know I tried.", 'agreeable'),
("I want you to know I tried.", 'emphatic'),
("I want you to know I tried.", 'instructive'),
("Maybe one day we'll find common ground.", 'instructive'),
("Maybe one day we'll find common ground.", 'emphatic'),
("Maybe one day we'll find common ground.", 'ambivalence'),
("Maybe one day we'll find common ground.", 'confusion'),
("Maybe one day we'll find common ground.", 'agreeable'),
("Maybe one day we will find common ground.", 'instructive'),
("Maybe one day we will find common ground.", 'emphatic'),
("Maybe one day we will find common ground.", 'ambivalence'),
("Maybe one day we will find common ground.", 'confusion'),
("Maybe one day we will find common ground.", 'agreeable'),
("Maybe one day we can find something to agree on.", 'instructive'),
("Maybe one day we can find something to agree on.", 'emphatic'),
("Maybe one day we can find something to agree on.", 'ambivalence'),
("Maybe one day we can find something to agree on.", 'confusion'),
("Maybe one day we can find something to agree on.", 'agreeable'),
("Maybe then we'll find ourselves together again.", 'emphatic'),
("Maybe then we'll find ourselves together again.", 'ambivalence'),
("Maybe then we'll find ourselves together again.", 'confusion'),
("Maybe then we'll find ourselves together again.", 'agreeable'),
("Maybe then we will find ourselves together again.", 'emphatic'),
("Maybe then we will find ourselves together again.", 'ambivalence'),
("Maybe then we will find ourselves together again.", 'confusion'),
("Maybe then we will find ourselves together again.", 'agreeable'),
("Maybe then we could find ourselves together again.", 'emphatic'),
("Maybe then we could find ourselves together again.", 'ambivalence'),
("Maybe then we could find ourselves together again.", 'confusion'),
("Maybe then we could find ourselves together again.", 'agreeable'),
("Maybe then we can find it there.", 'emphatic'),
("Maybe then we can find it there.", 'ambivalence'),
("Maybe then we can find it there.", 'confusion'),
("Maybe then we can find it there.", 'agreeable'),
("Maybe when the sky starts falling.", 'emphatic'),
("Maybe when the sky starts falling.", 'ambivalence'),
("Maybe when the sky starts falling.", 'agreeable'),
("Maybe when the sky starts falling.", 'instructive'),
("Maybe when the day the sky starts falling down.", 'emphatic'),
("Maybe when the day the sky starts falling down.", 'ambivalence'),
("Maybe when the day the sky starts falling down.", 'agreeable'),
("Maybe when the day the sky starts falling down.", 'instructive'),
("Take me to your water, and lay me on your shore.", 'instructive'),
("Take me to your water, and lay me on your shore.", 'certainty'),
("Take me to your water, and lay me on your shore.", 'calm'),
("Take me to your water, and lay me on your shore.", 'desire'),
("Take us there.", 'instructive'),
("Take us there.", 'certainty'),
("Take us there.", 'calm'),
("Take us there.", 'desire'),
("Take me to your water.", 'instructive'),
("Take me to your water.", 'certainty'),
("Take me to your water.", 'calm'),
("Take me to your water.", 'desire'),
("Lay me on your shore.", 'instructive'),
("Lay me on your shore.", 'certainty'),
("Lay me on your shore.", 'calm'),
("Lay me on your shore.", 'desire'),
("I will wait for you.", 'instructive'),
("I will wait for you.", 'calm'),
("I will wait for you.", 'modest'),
("I will wait for you.", 'agreeable'),
("I will wait for you.", 'certainty'),
("I will wait.", 'instructive'),
("I will wait.", 'calm'),
("I will wait.", 'modest'),
("I will wait.", 'agreeable'),
("I will wait.", 'certainty'),
("I will wait.", 'desire'),
("I'll wait for you to finish.", 'calm'),
("I'll wait for you to finish.", 'modest'),
("I'll wait for you to finish.", 'agreeable'),
("I'll wait for you to finish.", 'certainty'),
("I'll wait for you to finish.", 'desire'),
("I want to come in deeper.", 'intensity'),
("I want to come in deeper.", 'certainty'),
("I want to come in deeper.", 'desire'),
("But the water is so cold.", 'negative'),
("But the water is so cold.", 'challenging'),
("But the water is so cold.", 'regret'),
("But the water is so cold.", 'ambivalence'),
("I want to come in deeper, but the water is so cold.", 'ambivalence'),
("I want to come in deeper, but the water is so cold.", 'confusion'),
("I want to come in deeper, but the water is so cold.", 'emphatic'),
("Show me the highline.", 'instructive'),
("Show me the highline.", 'desire'),
("Show me your hand, please.", 'instructive'),
("Show me your hand, please.", 'calm'),
("Show me your hand, please.", 'desire'),
("Please show me the highline.", 'instructive'),
("Please show me the highline.", 'calm'),
("Please show me the highline.", 'desire'),
("So high up in the sky.", 'certainty'),
("So high up in the sky.", 'intensity'),
("It is so high up in the sky.", 'intensity'),
("It is so high up in the sky.", 'certainty'),
("It's so high up in the sky.", 'intensity'),
("It's so high up in the sky.", 'certainty'),
("Good job!", 'admiration'),
("Good job!", 'joy'),
("Good job!", 'instructive'),
("Good job!", 'pride'),
("Nice job!", 'admiration'),
("Nice job!", 'instructive'),
("Nice job!", 'joy'),
("Nice job!", 'pride'),
("Good work!", 'admiration'),
("Good work!", 'instructive'),
("Good work!", 'joy'),
("Good work!", 'pride'),
("Great work!", 'admiration'),
("Great work!", 'instructive'),
("Great work!", 'joy'),
("Great work!", 'pride'),
("Good going!", 'admiration'),
("Good going!", 'instructive'),
("Good going!", 'joy'),
("Good going!", 'pride'),
("Good for you!", 'admiration'),
("Good for you!", 'positive'),
("Good for you!", 'instructive'),
("Good for you!", 'joy'),
("Good for you!", 'pride'),
("How can I help you today?", 'inquisitive'),
("How can I help you today?", 'agreeable'),
("How can I help you today?", 'modest'),
("How can I help you today?", 'calm'),
("How can I help you today?", 'desire'),
("How could we help out?", 'inquisitive'),
("How could we help out?", 'agreeable'),
("How could we help out?", 'modest'),
("How could we help out?", 'calm'),
("How could we help out?", 'desire'),
("Can I help you find something?", 'inquisitive'),
("Can I help you find something?", 'agreeable'),
("Can I help you find something?", 'modest'),
("Can I help you find something?", 'calm'),
("Can I help you find something?", 'desire'),
("How can I help you achieve your goals?", 'inquisitive'),
("How can I help you achieve your goals?", 'agreeable'),
("How can I help you achieve your goals?", 'modest'),
("How can I help you achieve your goals?", 'calm'),
("How can I help you achieve your goals?", 'desire'),
("We're logging you in.", 'calm'),
("We're logging you in.", 'certainty'),
("We are logging you in.", 'calm'),
("We are logging you in.", 'certainty'),
("I'm logging you in.", 'calm'),
("I'm logging you in.", 'certainty'),
("I am logging you in.", 'calm'),
("I am logging you in.", 'certainty'),
("We have to talk about the video script.", 'certainty'),
("We have to talk about the video script.", 'intensity'),
("We have to talk about the video script.", 'instructive'),
("We have to talk about the video script.", 'desire'),
("We have got to talk about that first.", 'certainty'),
("We have got to talk about that first.", 'intensity'),
("We have got to talk about that first.", 'desire'),
("Good-bye, then sir!", 'certainty'),
("Good-bye, then sir!", 'calm'),
("Good-bye, then sir!", 'modest'),
("Bye, then!", 'certainty'),
("Bye, then!", 'calm'),
("Bye, then!", 'modest'),
("Okay, good-bye!", 'certainty'),
("Okay, good-bye", 'calm'),
("Okay, good-bye!", 'modest'),
("Good-bye, then.", 'certainty'),
("Good-bye, then.", 'calm'),
("Good-bye, then.", 'modest'),
("I'll look that up now.", 'certainty'),
("I'll look that up now.", 'calm'),
("I'll look that up now.", 'agreeable'),
("I will go look that up now.", 'certainty'),
("I will go look that up now.", 'calm'),
("I will go look that up now.", 'agreeable'),
("I will go look that up now.", 'positive'),
("I can go find that for you.", 'emphatic'),
("I can go find that for you.", 'certainty'),
("I can go find that for you.", 'calm'),
("I can go find that for you.", 'agreeable'),
("I can go find that for you.", 'positive'),
("I can go find that for you.", 'emphatic'),
("I'll go find out.", 'emphatic'),
("I'll go find out.", 'certainty'),
("I'll go find out.", 'calm'),
("I'll go find out.", 'agreeable'),
("I'll go find out.", 'positive'),
("I'll go find out.", 'emphatic'),
("We will find out why.", 'certainty'),
("We will find out why.", 'calm'),
("We will find out why.", 'agreeable'),
("We will find out why.", 'emphatic'),
("We'll will find out why.", 'certainty'),
("We'll will find out why.", 'calm'),
("We'll will find out why.", 'agreeable'),
("We'll will find out why.", 'emphatic'),
("I don't know the answer to that.", 'calm'),
("I don't know the answer to that.", 'confusion'),
("I don't know the answer to that.", 'agreeable'),
("I do not know the answer to that.", 'calm'),
("I do not know the answer to that.", 'confusion'),
("I do not know the answer to that.", 'agreeable'),
("I wish I knew the answer to that.", 'intensity'),
("I wish I knew the answer to that.", 'desire'),
("I wish I knew the answer to that.", 'emphatic'),
("I wish I knew the answer to that.", 'certainty'),
("I want to know why that is.", 'intensity'),
("I want to know why that is.", 'desire'),
("I want to know why that is.", 'emphatic'),
("I want to know why that is.", 'certainty'),
("Do you want to know why?.", 'emphatic'),
("Do you want to know why?.", 'inquisitive'),
("Do you want to know why?.", 'confusion'),
("You want to know why?.", 'emphatic'),
("You want to know why?.", 'inquisitive'),
("You want to know why?.", 'confusion'),
("Want to know why?.", 'emphatic'),
("Want to know why?.", 'inquisitive'),
("Want to know why?.", 'confusion'),
("Why is it doing that?.", 'emphatic'),
("Why is it doing that?.", 'inquisitive'),
("Why is it doing that?.", 'confusion'),
("Why do you want that?.", 'emphatic'),
("Why do you want that?.", 'inquisitive'),
("Why do you want that?.", 'confusion'),
("Why exactly you want that?.", 'emphatic'),
("Why exactly you want that?.", 'inquisitive'),
("Why exactly you want that?.", 'confusion'),
("Why exactly you want that?.", 'sarcastic'),
("Did you figure out the answer to your question?", 'emphatic'),
("Did you figure out the answer to your question?", 'inquisitive'),
("Did you figure out the answer to your question?", 'confusion'),
("Were we able to answer all your questions?", 'emphatic'),
("Were we able to answer all your questions?", 'inquisitive'),
("Were we able to answer all your questions?", 'confusion'),
("Were we able to answer all your questions?", 'modest'),
("Was I able to answer all your questions?", 'emphatic'),
("Was I able to answer all your questions?", 'inquisitive'),
("Was I able to answer all your questions?", 'modest'),
("Was I able to answer all your questions?", 'confusion'),
("Are you satisfied now?", 'emphatic'),
("Are you satisfied now?", 'inquisitive'),
("Are you satisfied now?", 'modest'),
("Are you satisfied now?", 'confusion'),
("Are you now satisfied?", 'emphatic'),
("Are you now satisfied?", 'inquisitive'),
("Are you now satisfied?", 'modest'),
("Are you now satisfied?", 'confusion'),
("Are you happy now?", 'emphatic'),
("Are you happy now?", 'inquisitive'),
("Are you happy now?", 'confusion'),
("Are you a happy person?", 'emphatic'),
("Are you a happy person?", 'inquisitive'),
("Are you a happy person?", 'confusion'),
("I think we need to play around with it more.", 'emphatic'),
("I think we need to play around with it more.", 'certainty'),
("I think we need to play around with it more.", 'calm'),
("I think we need to play around with it more.", 'challenging'),
("I am getting so frustrated.", 'challenging'),
("I am getting so frustrated.", 'negative'),
("I am getting so frustrated.", 'certainty'),
("I am getting so frustrated.", 'anger'),
("I am getting so frustrated.", 'regret'),
("I am getting so frustrated.", 'intensity'),
("I am getting so frustrated.", 'emphatic'),
("I'm getting so frustrated.", 'challenging'),
("I'm getting so frustrated.", 'negative'),
("I'm getting so frustrated.", 'certainty'),
("I'm getting so frustrated.", 'anger'),
("I'm getting so frustrated.", 'regret'),
("I'm getting so frustrated.", 'intensity'),
("I'm getting so frustrated.", 'emphatic'),
("I am starting to get angry.", 'challenging'),
("I am starting to get angry.", 'negative'),
("I am starting to get angry.", 'certainty'),
("I am starting to get angry.", 'anger'),
("I am starting to get angry.", 'regret'),
("I am starting to get angry.", 'emphatic'),
("I'm starting to get angry.", 'challenging'),
("I'm starting to get angry.", 'negative'),
("I'm starting to get angry.", 'certainty'),
("I'm starting to get angry.", 'anger'),
("I'm starting to get angry.", 'regret'),
("I'm starting to get angry.", 'emphatic'),
("I'm getting mad.", 'challenging'),
("I'm getting mad.", 'negative'),
("I'm getting mad.", 'certainty'),
("I'm getting mad.", 'anger'),
("I'm getting mad.", 'regret'),
("I'm getting mad.", 'emphatic'),
("I am getting mad.", 'challenging'),
("I am getting mad.", 'negative'),
("I am getting mad.", 'certainty'),
("I am getting mad.", 'anger'),
("I am getting mad.", 'regret'),
("I am getting mad.", 'emphatic'),
("I'm growing very impatient.", 'challenging'),
("I'm growing very impatient.", 'negative'),
("I'm growing very impatient.", 'certainty'),
("I'm growing very impatient.", 'anger'),
("I'm growing very impatient.", 'regret'),
("I'm growing very impatient.", 'intensity'),
("I'm growing very impatient.", 'emphatic'),
("I am growing very impatient.", 'challenging'),
("I am growing very impatient.", 'negative'),
("I am growing very impatient.", 'certainty'),
("I am growing very impatient.", 'anger'),
("I am growing very impatient.", 'regret'),
("I am growing very impatient.", 'intensity'),
("I am growing very impatient.", 'emphatic'),
("I'm getting really strong now.", 'agreeable'),
("I'm getting really strong now.", 'positive'),
("I'm getting really strong now.", 'pride'),
("I'm getting really strong now.", 'certainty'),
("I'm getting really strong now.", 'joy'),
("I'm getting really strong now.", 'intensity'),
("I'm getting really strong now.", 'emphatic'),
("I am getting really strong now.", 'agreeable'),
("I am getting really strong now.", 'positive'),
("I am getting really strong now.", 'pride'),
("I am getting really strong now.", 'certainty'),
("I am getting really strong now.", 'joy'),
("I am getting really strong now.", 'intensity'),
("I am getting really strong now.", 'emphatic'),
("I'm becoming so strong!", 'agreeable'),
("I'm becoming so strong!", 'positive'),
("I'm becoming so strong!", 'pride'),
("I'm becoming so strong!", 'certainty'),
("I'm becoming so strong!", 'joy'),
("I'm becoming so strong!", 'intensity'),
("I'm becoming so strong!", 'emphatic'),
("I am becoming so strong!", 'agreeable'),
("I am becoming so strong!", 'positive'),
("I am becoming so strong!", 'pride'),
("I am becoming so strong!", 'certainty'),
("I am becoming so strong!", 'joy'),
("I am becoming so strong!", 'intensity'),
("I am becoming so strong!", 'emphatic'),
("I'm getting really fit soon!", 'agreeable'),
("I'm getting really fit soon!", 'positive'),
("I'm getting really fit soon!", 'pride'),
("I'm getting really fit soon!", 'certainty'),
("I'm getting really fit soon!", 'joy'),
("I'm getting really fit soon!", 'intensity'),
("I'm getting really fit soon!", 'emphatic'),
("I'm going to get real fit soon!", 'agreeable'),
("I'm going to get real fit soon!", 'positive'),
("I'm going to get real fit soon!", 'pride'),
("I'm going to get real fit soon!", 'certainty'),
("I'm going to get real fit soon!", 'joy'),
("I'm going to get real fit soon!", 'intensity'),
("I'm going to get real fit soon!", 'emphatic'),
("I am going to get really healthy now!", 'agreeable'),
("I am going to get really healthy now!", 'positive'),
("I am going to get really healthy now!", 'pride'),
("I am going to get really healthy now!", 'certainty'),
("I am going to get really healthy now!", 'joy'),
("I am going to get really healthy now!", 'intensity'),
("I am going to get really healthy now!", 'emphatic'),
("I have been working nonstop.", 'negative'),
("I have been working nonstop.", 'certainty'),
("I have been working nonstop.", 'intensity'),
("I have been working nonstop.", 'emphatic'),
("I have been working nonstop.", 'pride'),
("I've been working nonstop.", 'negative'),
("I've been working nonstop.", 'certainty'),
("I've been working nonstop.", 'intensity'),
("I've been working nonstop.", 'emphatic'),
("I've been working nonstop.", 'pride'),
("You don't have any idea what you're talking about.", 'negative'),
("You don't have any idea what you're talking about.", 'emphatic'),
("You don't have any idea what you're talking about.", 'intensity'),
("You don't have any idea what you're talking about.", 'anger'),
("You don't have any idea what you're talking about.", 'certainty'),
("You don't have any idea what you're talking about.", 'accusative'),
("You do not have any idea what you're talking about.", 'negative'),
("You do not have any idea what you're talking about.", 'emphatic'),
("You do not have any idea what you're talking about.", 'intensity'),
("You do not have any idea what you're talking about.", 'anger'),
("You do not have any idea what you're talking about.", 'certainty'),
("You do not have any idea what you're talking about.", 'accusative'),
("Do you have any idea what you're talking about?", 'negative'),
("Do you have any idea what you're talking about?", 'confusion'),
("Do you have any idea what you're talking about?", 'inquisitive'),
("Do you have any idea what you're talking about?", 'accusative'),
("Do you have any idea what you're talking about?", 'anger'),
("Do you have any idea what you are talking about?", 'negative'),
("Do you have any idea what you are talking about?", 'confusion'),
("Do you have any idea what you are talking about?", 'inquisitive'),
("Do you have any idea what you are talking about?", 'anger'),
("Do you have any idea what you are talking about?", 'accusative'),
("You have any idea what you're saying?", 'negative'),
("You have any idea what you're saying?", 'confusion'),
("You have any idea what you're saying?", 'inquisitive'),
("You have any idea what you're saying?", 'anger'),
("You have any idea what you've saying?", 'negative'),
("You have any idea what you've saying?", 'accusative'),
("You have any idea what you've saying?", 'confusion'),
("You have any idea what you've saying?", 'inquisitive'),
("You have any idea what you've saying?", 'accusative'),
("You have any idea what you've saying?", 'anger'),
("You have any idea what you've saying?", 'accusative'),
("You have any idea what you are saying?", 'negative'),
("You have any idea what you are saying?", 'confusion'),
("You have any idea what you are saying?", 'inquisitive'),
("You have any idea what you are saying?", 'anger'),
("You have any idea what you are saying?", 'accusative'),
("You have any idea what you have been saying?", 'negative'),
("You have any idea what you have been saying?", 'confusion'),
("You have any idea what you have been saying?", 'inquisitive'),
("You have any idea what you have been saying?", 'anger'),
("You have any idea what you have been saying?", 'accusative'),
("You are really good at this!", 'intensity'),
("You are really good at this!", 'positive'),
("You are really good at this!", 'joy'),
("You are really good at this!", 'certainty'),
("You are really good at this!", 'admiration'),
("You're really good at this!", 'intensity'),
("You're really good at this!", 'positive'),
("You're really good at this!", 'joy'),
("You're really good at this!", 'certainty'),
("You're really good at this!", 'admiration'),
("You are really great at this!", 'intensity'),
("You are really great at this!", 'positive'),
("You are really great at this!", 'joy'),
("You are really great at this!", 'certainty'),
("You are really great at this!", 'admiration'),
("You're really great at this!", 'intensity'),
("You're really great at this!", 'positive'),
("You're really great at this!", 'joy'),
("You're really great at this!", 'certainty'),
("You're really great at this!", 'admiration'),
("Wow, you are awesome at this!", 'intensity'),
("Wow, you are awesome at this!", 'positive'),
("Wow, you are awesome at this!", 'joy'),
("Wow, you are awesome at this!", 'certainty'),
("Wow, you are awesome at this!", 'admiration'),
("Woah, you're awesome at this!", 'intensity'),
("Woah, you're awesome at this!", 'positive'),
("Woah, you're awesome at this!", 'joy'),
("Woah, you're awesome at this!", 'certainty'),
("Woah, you're awesome at this!", 'admiration'),
("Thank God!", 'positive'),
('I love this sandwich.', 'positive'),
('This is an amazing place!', 'positive'),
('I feel very"ood about these beers.', 'positive'),
('This is my best work.', 'positive'),
("What an awesome view", 'positive'),
("I liked it.", 'positive'),
("I did like it.", 'positive'),
("I liked it a lot.", 'positive'),
('I did like it a lot.', 'positive'),
('I am liking it.', 'positive'),
('I really enjoyed it.', 'positive'),
('I am enjoying it.', 'positive'),
('I enjoyed it.', 'positive'),
("I didn't think he was that bad.", 'positive'),
("I did not think he was that bad.", 'positive'),
("I didn't find him so bad.", 'positive'),
("I did not find him to be so bad.", 'positive'),
("I did not find him so bad, actually.", 'positive'),
("I hadn't thought him to be so ill, actually.", 'positive'),
("I had not thought he was that bad.", 'positive'),
("I had not thought he was so bad.", 'positive'),
("Be sure to check out the API Reference for the classifiers module.", 'positive'),
("I'm excited to try my new classifier.", 'positive'),
("I'm excited to try it.", 'positive'),
("I'm so excited to try it!", 'positive'),
("""'"How good it was in you, my dear Mr. Bennet!'""", 'positive'),
("""'Well, how pleased I am!'""", 'positive'),
("""'"What an excellent father you have, girls!'""", 'positive'),
("""'Sir William had been delighted with him.'""", 'positive'),
("""'He was quite young, wonderfully handsome, extremely agreeable, and, to crown the whole, he meant to be at the next assembly with a large party.'""", 'positive'),
("""'Nothing could be more delightful!'""", 'positive'),
("""'If I can but see one of my daughters happily settled at Netherfield,' said Mrs. Bennet to her husband, 'and all the others equally well married, I shall have nothing to wish for.'""", 'positive'),
("""'The ladies were somewhat more fortunate, for they had the advantage of ascertaining from an upper window that he wore a blue coat, and rode a black horse.'""", 'positive'),
("""'Mr. Bingley was good-looking and gentlemanlike; he had a pleasant countenance, and easy, unaffected manners.'""", 'positive'),
("""'His sisters were fine women, with an air of decided fashion.'""", 'positive'),
("""'His brother-in-law, Mr. Hurst, merely looked the gentleman; but his friend Mr. Darcy soon drew the attention of the room by his fine, tall person, handsome features, noble mien, and the report which was in general circulation within five minutes after his entrance, of his having ten thousand a year.'""", 'positive'),
("""'My dear, I am quite delighted with him.'""", 'positive'),
("""'He is also handsome,' replied Elizabeth, 'which a young man ought likewise to be, if he possibly can.'""", 'positive'),
("""'I was very much flattered by his asking me to dance a second time.'""", 'positive'),
("""'Her performance was pleasing.'""", 'positive'),
("""'What a charming amusement for young people this is, Mr. Darcy!'""", 'positive'),
("""'There is nothing like dancing after all.'""", 'positive'),
("""'I consider it as one of the first refinements of polished society.'""", 'positive'),
("""'And I doubt not that you are an adept in the science yourself, Mr. Darcy.'""", 'positive'),
("""'This was a lucky idea of mine, indeed!'""", 'positive'),
("I thought <NAME> looked remarkably well when she came into the room this morning.", 'positive'),
("It shows an affection for her sister that is very pleasing.", 'positive'),
("I do not know a place in the country that is equal to Netherfield.", 'positive'),
("They have each their advantages, and I can be equally happy in either.", 'positive'),
("Aye--that is because you have the right disposition.", 'positive'),
("What an agreeable man Sir William is, Mr. Bingley, is not he?", 'positive'),
("So much the man of fashion!", 'positive'),
("Of a fine, stout, healthy love it may.", 'positive'),
("""'But if it be only a slight, thin sort of inclination, I am convinced that one good sonnet will starve it entirely away.'""", 'positive'),
("Yes, it would be much better to wait till Jane was well", 'positive'),
("How delighted <NAME> will be to receive such a letter!", 'positive'),
("Tell your sister I am delighted to hear of her improvement on the harp.", 'positive'),
("Perhaps I do.", 'positive'),
("I hope I never ridicule what is wise and good.", 'positive'),
("But pride--where there is a real superiority of mind, pride will be always under good regulation.", 'positive'),
("I am perfectly convinced by it that Mr. Darcy has no defect.", 'positive'),
("I have faults enough, but they are not, I hope, of understanding.", 'positive'),
("""Louisa, you will not mind my waking Mr. Hurst?""", 'positive'),
("The person of whom I speak is a gentleman, and a stranger.", 'positive'),
("Well, I am sure I shall be extremely glad to see Mr. Bingley.", 'positive'),
("She is a most charming young lady indeed.", 'positive'),
("You judge very properly.", 'positive'),
("And it is happy for you that you possess the talent of flattering with delicacy.", 'positive'),
("Mr. Bennet's expectations were fully answered.", 'positive'),
("I know little of the game at present, but I shall be glad to improve myself.", 'positive'),
("""But I verily believe I could forgive him anything and everything, rather than his disappointing the hopes and disgracing the memory of his father.""", 'positive'),
("I cannot do justice to his kindness.", 'positive'),
("He meant to provide for me amply, and thought he had done it; but when the living fell, it was given elsewhere.", 'positive'),
("Elizabeth honoured him for such feelings, and thought him handsomer than ever as he expressed them.", 'positive'),
("I had not thought so very ill of him.", 'positive'),
("""I did play just the opening scene of the original BioShock several years back, but was so enthralled by its atmospheric tension and detailed environment that I knew I would have to finish the whole story eventually.""", 'positive'),
("It is clear that the developers wanted an action game that would actually make players think.", 'positive'),
("You are charmingly grouped, and appear to uncommon advantage.", 'positive'),
("This is no very striking resemblance of your own character, I am sure.", 'positive'),
("Excuse my interference--it was kindly meant.", 'positive'),
("I have no reason, I assure you, to be dissatisfied with my reception.", 'positive'),
("Mr. Darcy seemed much pleased with the attention.", 'positive'),
("If I were so fortunate as to be able to sing, I should have great pleasure, I am sure, in obliging the company with an air.", 'positive'),
("He can have nothing to say to me that anybody need not hear.", 'positive'),
("You are uniformly charming!", 'positive'),
("I shall be glad to have the library to myself as soon as may be.", 'positive'),
("I really do not think <NAME> has her equal for beauty, elegance, and accomplishments.", 'positive'),
("Most willingly.", 'positive'),
("You shall have it in a few words.", 'positive'),
("I hope you will be satisfied with what I have done.", 'positive'),
("Undoubtedly.", 'positive'),
("Thank God! I have not that pain.", 'positive'),
("Your sweetness and disinterestedness are really angelic; I do not know what to say to you.", 'positive'),
("I feel as if I had never done you justice, or loved you as you deserve.", 'positive'),
('I do not like this restaurant', 'negative'),
('I am tired of this stuff.', 'negative'),
("I can't deal with this", 'negative'),
('He is my sworn enemy!', 'negative'),
('My boss is horrible.', 'negative'),
("""'You are very severe.'""", 'negative'),
("""'You are a severe person.'""", 'negative'),
("""'You are so severe.'""", 'negative'),
("""'So much severity.""", 'negative'),
("I'm trying to change a module variable, but it doesn't work.", 'negative'),
("""The enemies are deadly, and it is nerve-wracking to go through the game, hearing their manic, hostile cries through the very frightening and creepy sound design.""", 'negative'),
("""'You are over-scrupulous, surely.'""", 'negative'),
("""Lizzy is not a bit better than the others; and I am sure she is not half so handsome as Jane, nor half so good-humoured as Lydia.""", 'negative'),
("""'We are not in a way to know _what_ <NAME> likes," said her mother resentfully, "since we are not to visit.'""", 'negative'),
("""'She is a selfish, hypocritical woman, and I have no opinion of her.'""", 'negative'),
("""'"I am sick of <NAME>," cried his wife.'""", 'negative'),
("""'She could not imagine what business he could have in town so soon after his arrival in Hertfordshire; and she began to fear that he might be always flying about from one place to another, and never settled at Netherfield as he ought to be.'""", 'negative'),
("""'I hate to see you standing about by yourself in this stupid manner.'""", 'negative'),
("""'She is tolerable, but not handsome enough to tempt me.'""", 'negative'),
("""'I am in no humour at present to give consequence to young ladies who are slighted by other men.'""", 'negative'),
("""'Not handsome enough to dance with!'""", 'negative'),
("""'You are severe on us.'""", 'negative'),
("""'I am astonished, my dear,' said <NAME>, 'that you should be so ready to think your own children silly.'""", 'negative'),
("""'No, indeed, I do not wish to avoid the walk.'""", 'negative'),
("Her hair, so untidy, so blowsy!", 'negative'),
("My sister, I am sure, will not hear of her removal.", 'negative'),
("It is a pity they are not handsome!", 'negative'),
("How odious I should think them!", 'negative'),
("I am afraid you do not like your pen.", 'negative'),
("That will not do for a compliment to Darcy.", 'negative'),
("He studies too much, for words of four syllables.", 'negative'),
("""'Nothing is more deceitful,' said Darcy, 'than the appearance of humility.'""", 'negative'),
("I daresay you believed it.", 'negative'),
("You appear to me, Mr. Darcy, to allow nothing for the influence of friendship and affection.", 'negative'),
("I heard you before, but I could not immediately determine what to say in reply.", 'negative'),
("You used us abominably ill.", 'negative'),
("The picturesque would be spoilt by admitting a fourth.", 'negative'),
("I should like balls infinitely better.", 'negative'),
("If they were carried on in a different manner; but there is something insufferably tedious in the usual process of such a meeting.", 'negative'),
("I have not the smallest objection to explaining them.", 'negative'),
("I never heard anything so abominable.", 'negative'),
("How shall we punish him for such a speech?", 'negative'),
("Tease him--laugh at him.", 'negative'),
("But upon my honour, I do not.", 'negative'),
("I do assure you that my intimacy has not yet taught me that.", 'negative'),
("No, no--feel he may defy us there.", 'negative'),
("Mr. Darcy is not to be laughed at!", 'negative'),
("Certainly, there are such people, but I hope I am not one of them.", 'negative'),
("But it has been the study of my life to avoid those weaknesses which often expose a strong understanding to ridicule.", 'negative'),
("Yes, vanity is a weakness indeed.", 'negative'),
("I have made no such pretension.", 'negative'),
("My temper I dare not vouch for.", 'negative'),
("""There is, I believe, in every disposition a tendency to some particular evil--a natural defect, which not even the best education can overcome.""", 'negative'),
("And yours is willfully to misunderstand them.", 'negative'),
("The person of whom I speak is a gentleman, and a stranger.", 'negative'),
('There is not a bit of fish to be got today.', 'negative'),
("It is a pity that great ladies in general are not more like her.", 'negative'),
("As much as I ever wish to be.", 'negative'),
("I have spent four days in the same house with him, and I think him very disagreeable.", 'negative'),
("He is not at all liked in Hertfordshire.", 'negative'),
("Everybody is disgusted with his pride.", 'negative'),
("You will not find him more favourably spoken of by anyone.", 'negative'),
("""The world is blinded by his fortune and consequence, or frightened by his high and imposing manners, and sees him only as he chooses to be seen.""", 'negative'),
("Good heavens! But how could that be?", 'negative'),
("Good heavens, but how could that be?", 'negative'),
("How could his will be disregarded?", 'negative'),
("""A man of honour could not have doubted the intention, but <NAME> chose to doubt it--or to treat it as a merely conditional recommendation.""", 'negative'),
("This is quite shocking! He deserves to be publicly disgraced.", 'negative'),
("He deserves to be publicly disgraced.", 'negative'),
("What can have induced him to behave so cruelly?", 'negative'),
("I had not thought <NAME> so bad as this--though I have never liked him.", 'negative'),
("I will not trust myself on the subject.", 'negative'),
("I heard you before, but I could not immediately determine what to say in reply.", 'negative'),
("""You wanted me, I know, to say 'Yes,' that you might have the pleasure of despising my taste; but I always delight in overthrowing those kind of schemes, and cheating a person of their premeditated contempt.""", 'negative'),
("""I have, therefore, made up my mind to tell you, that I do not want to dance a reel at all--and now despise me if you dare.""", 'negative'),
("That would be the greatest misfortune of all!", 'negative'),
("I must not decide on my own performance.", 'negative'),
("He has been so unlucky as to lose your friendship.", 'negative'),
("But if I do not take your likeness now, I may never have another opportunity.", 'negative'),
("Insolent girl!", 'negative'),
("You are much mistaken if you expect to influence me by such a paltry attack as this.", 'negative'),
("I see nothing in it but your own wilful ignorance and the malice of Mr. Darcy.", 'negative'),
("For heaven's sake, madam, speak lower.", 'negative'),
("You will never recommend yourself to his friend by so doing!", 'negative'),
("Dear madam, do not go.", 'negative'),
("You are too hasty, sir.",' negative'),
("I do assure you that I am not one of those young ladies.", 'negative'),
("I am perfectly serious in my refusal.", 'negative'),
("She is a very headstrong, foolish girl, and does not know her own interest but I will make her know it.", 'negative'),
("Sir, you quite misunderstand me.", 'negative'),
("Yes, or I will never see her again.", 'negative'),
("I told you in the library, you know, that I should never speak to you again, and you will find me as good as my word.", 'negative'),
("I have no pleasure in talking to undutiful children.", 'negative'),
("Nobody can tell what I suffer!", 'negative'),
("It is unlucky that you should not be able to see your friends before they leave the country.", 'negative'),
("I will read you the passage which particularly hurts me.", 'negative'),
("I am not romantic, you know; I never was.", 'negative'),
("This was not very consoling to Mrs. Bennet, and therefore, instead of making any answer, she went on as before.", 'negative'),
("I cannot bear to think that they should have all this estate.", 'negative'),
("I never can be thankful, Mr. Bennet, for anything about the entail.", 'negative'),
("""How anyone could have the conscience to entail away an estate from one's own daughters, I cannot understand; and all for the sake of Mr. Collins too!""", 'negative'),
("""Hope was over, entirely over; and when Jane could attend to the rest of the letter, she found little, except the professed affection of the writer, that could give her any comfort.""", 'negative'),
("You doubt me, indeed, you have no reason.", 'negative'),
("You wish to think all the world respectable, and are hurt if I speak ill of anybody.", 'negative'),
("I do not blame Jane, for Jane would have got <NAME> if she could.", 'negative'),
("It is very hard to think that she might have been <NAME>'s wife by this time, had it not been for her own perverseness.", 'negative'),
("It is clear that the developers wanted an action game that would actually make players think.", 'joy'),
("I'm excited to try my new classifier.", 'joy'),
("How pleasant it is to spend an evening in this way!", 'joy'),
("I declare after all there is no enjoyment like reading!", 'joy'),
("You are uniformly charming!", 'joy'),
("Do you not want to know who has taken it?", 'anger'),
("""'My dear Mr. Bennet,' replied his wife, 'how can you be so tiresome!'""", 'anger'),
("""'Nonsense, how can you talk so!'""", 'anger'),
("""'It is more than I engage for, I assure you.'""", 'anger'),
("""'But you are always giving her the preference.'""", 'anger'),
("""'Mr. Bennet, how can you abuse your own children in such a way?'""", 'anger'),
("""'You take delight in vexing me.'""", 'anger'),
("""'You have no compassion for my poor nerves.'""", 'anger'),
("""'I was so vexed to see him stand up with her!'""", 'anger'),
("""'So high and so conceited that there was no enduring him!'""", 'anger'),
("Very nonsensical to come at all!", 'anger'),
("Charles writes in the most careless way imaginable!", 'anger'),
("""'Nothing is more deceitful,' said Darcy, 'than the appearance of humility.'""", 'anger'),
("How shall we punish him for such a speech?", 'anger'),
("Mr. Darcy is not to be laughed at!", 'anger'),
("I have spent four days in the same house with him, and I think him very disagreeable.", 'anger'),
("He is not at all liked in Hertfordshire.", 'anger'),
("Everybody is disgusted with his pride.", 'anger'),
("Good heavens! But how could that be?", 'anger'),
("Good heavens, but how could that be?", 'anger'),
("""A man of honour could not have doubted the intention, but Mr. Darcy chose to doubt it--or to treat it as a merely conditional recommendation.""", 'anger'),
("This is quite shocking! He deserves to be publicly disgraced.", 'anger'),
("He deserves to be publicly disgraced.", 'anger'),
("You used us abominably ill, running away without telling us that you were coming out.", 'anger'),
("I never heard anything so abominable.", 'anger'),
("That would be the greatest misfortune of all!", 'anger'),
("Insolent girl!", 'anger'),
("You are too hasty, sir.", 'anger'),
("I have no pleasure in talking to undutiful children.", 'anger'),
("Nobody can tell what I suffer!", 'anger'),
("""Now, I do insist upon it, that you, all of you, hold your tongues, and let me and <NAME> have a little conversation together.""", 'anger'),
("Good Lord! Sir William, how can you tell such a story?", 'anger'),
("Sir William, how can you tell such a story?", 'anger'),
("""Do you think it incredible that <NAME> should be able to procure any woman's good opinion, because he was not so happy as to succeed with you?""", 'anger'),
("I cannot bear to think that they should have all this estate.", 'anger'),
("""How anyone could have the conscience to entail away an estate from one's own daughters, I cannot understand; and all for the sake of <NAME> too!""", 'anger'),
("Why should he have it more than anybody else?", 'anger'),
("You doubt me, indeed, you have no reason.", 'anger'),
("It is very hard to think that she might have been <NAME>'s wife by this time, had it not been for her own perverseness.", 'anger'),
('I feel fine.', 'calm'),
('I am fine.', 'calm'),
("Any files you download or bookmarks you create will be kept.", 'calm'),
("""Pages you view in incognito tabs won't stick around in your browser's history, cookie store, or search history after you've closed all of your incognito tabs.""", 'calm'),
("You can also create synsets directly.", 'calm'),
("Certainty is perfect knowledge that has total security from error, or the mental state of being without doubt.", 'calm'),
("Something is certain only if no skepticism can occur.", 'calm'),
("A tuple is a sequence of immutable Python objects. Tuples are sequences, just like lists.", 'calm'),
("Creating a tuple is as simple as putting different comma-separated values.", 'calm'),
("""Creating a tuple is as simple as putting different comma-separated values. Optionally you can put comma-separated values between parentheses also.""", 'calm'),
("First we'll create some training and test data.", 'calm'),
("As an example, let's create a custom sentiment analyzer.", 'calm'),
("You can also load data from common file formats including CSV, JSON, and TSV.", 'calm'),
("Be sure to check out the API Reference for the classifiers module.", 'calm'),
("Headings you add to your document will appear here.", 'calm'),
("""Released in 2007, BioShock was quickly received as one of the greatest games of all time, with much praise being devoted to its unique, atmospheric setting and provocative, morality-based storyline.""", 'calm'),
("""I would describe BioShock as being a FPS / RPG game, which I would say is one of its greatest weaknesses and strengths.""", 'calm'),
("""'What is his name?'""", 'calm'),
("""'The rest of the evening was spent in conjecturing how soon he would return Mr. Bennet's visit, and determining when they should ask him to dinner.'""", 'calm'),
("""'In a few days Mr. Bingley returned Mr. Bennet's visit, and sat about ten minutes with him in his library.'""", 'calm'),
("""'He had entertained hopes of being admitted to a sight of the young ladies, of whose beauty he had heard much; but he saw only the father.'""", 'calm'),
("""'An invitation to dinner was soon afterwards dispatched; and already had Mrs. Bennet planned the courses that were to do credit to her housekeeping, when an answer arrived which deferred it all.'""", 'calm'),
("""'Mr. Bingley was obliged to be in town the following day, and, consequently, unable to accept the honour of their invitation, etc.'""", 'calm'),
("""'Amongst the most violent against him was Mrs. Bennet, whose dislike of his general behaviour was sharpened into particular resentment by his having slighted one of her daughters.'""", 'calm'),
("""'She was therefore obliged to seek another branch of the subject, and related, with much bitterness of spirit and some exaggeration, the shocking rudeness of Mr. Darcy.'""", 'calm'),
("The picturesque would be spoilt by admitting a fourth.", 'calm'),
("<NAME> made no answer, and soon afterwards she got up and walked about the room.", 'calm'),
("I assure you it is very refreshing after sitting so long in one attitude.", 'calm'),
("Elizabeth was surprised, but agreed to it immediately.", 'calm'),
("""I hope, my dear, that you have ordered a good dinner to-day, because I have reason to expect an addition to our family party.""", 'calm'),
("Who do you mean, my dear?", 'calm'),
("The person of whom I speak is a gentleman, and a stranger.", 'calm'),
("It is a person whom I never saw in the whole course of my life.", 'calm'),
("""You may well be surprised, <NAME>, at such an assertion, after seeing, as you probably might, the very cold manner of our meeting yesterday.""", 'calm'),
("I am not qualified to form one.", 'calm'),
("""The world is blinded by his fortune and consequence, or frightened by his high and imposing manners, and sees him only as he chooses to be seen.""", 'calm'),
("I did not know that you intended to walk.", 'calm'),
("The picturesque would be spoilt by admitting a fourth.", 'calm'),
("She shall hear my opinion.", 'calm'),
("Let her be called down.", 'calm'),
("I understand that <NAME> has made you an offer of marriage.", 'calm'),
("I have, sir.", 'calm'),
("We now come to the point.", 'calm'),
("You shall hear what she says.", 'calm'),
("It is unlucky that you should not be able to see your friends before they leave the country.", 'calm'),
("Caroline decidedly says that none of the party will return into Hertfordshire this winter.", 'calm'),
("It is evident by this that he comes back no more this winter.", 'calm'),
("He is his own master.", 'calm'),
("I will read you the passage which particularly hurts me.", 'calm'),
("No one who has ever seen you together can doubt his affection.", 'calm'),
("My dear, do not give way to such gloomy thoughts.", 'calm'),
("Let us hope for better things.", 'calm'),
("Let us flatter ourselves that I may be the survivor.", 'calm'),
("This was not very consoling to Mrs. Bennet, and therefore, instead of making any answer, she went on as before.", 'calm'),
("""The very first sentence conveyed the assurance of their being all settled in London for the winter, and concluded with her brother's regret at not having had time to pay his respects to his friends in Hertfordshire before he left the country.""", 'calm'),
("You wish to think all the world respectable, and are hurt if I speak ill of anybody.", 'calm'),
('I love you.', 'love'),
('I love you very much.', 'love'),
('I love you so much.', 'love'),
('I just love you so much.', 'love'),
("""'You are a very handsome woman.'""", 'love'),
("""'You are a very pretty man.'""", 'love'),
("""'You are so pretty.'""", 'love'),
("""'You are so hot.'""", 'love'),
("""'You are so sexy.'""", 'love'),
("""'You are so cute.'""", 'love'),
("""'You are so handsome.'""", 'love'),
("""'You are so beautiful.'""", 'love'),
("""'She is the most beautiful creature I ever beheld!'""", 'love'),
("""'He is so excessively handsome!'""", 'love'),
("""'He is just what a young man ought to be,' said she, 'sensible, good-humoured, lively; and I never saw such happy manners!--so much ease, with such perfect good breeding!'""", 'love'),
("""I have a excessive regard for <NAME>, she is really a very sweet girl, and I wish with all my heart she were well settled.""", 'love'),
("So much the man of fashion!", 'love'),
("Who do you mean, my dear?", 'love'),
("She is a most charming young lady indeed.", 'love'),
("You are uniformly charming!", 'love'),
("I really do not think <NAME> has her equal for beauty, elegance, and accomplishments.", 'love'),
("Your sweetness and disinterestedness are really angelic; I do not know what to say to you.", 'love'),
("I feel as if I had never done you justice, or loved you as you deserve.", 'love'),
('I hate you.', 'hate'),
('I hate you!', 'hate'),
("I don't love you", 'hate'),
("I don't love you!", 'hate'),
("I don't love anyone.", 'hate'),
("I detest her.", 'hate'),
("I detest him.", 'hate'),
("I hate everyone.", 'hate'),
("I detest everyone.", 'hate'),
("I quite detest him.", 'hate'),
("I quite detest her.", 'hate'),
("I quite detest everyone.", 'hate'),
("I don't like you.", 'hate'),
("I do not like you.", 'hate'),
("I don't like you at all.", 'hate'),
("I do not like you at all.", 'hate'),
("""'He was the proudest, most disagreeable man in the world, and everybody hoped that he would never come there again.'""", 'hate'),
("""'You know how I detest it, unless I am particularly acquainted with my partner.'""", 'hate'),
("""'I quite detest the man.'""", 'hate'),
("I never heard anything so abominable.", 'hate'),
("How shall we punish him for such a speech?", 'hate'),
("Tease him--laugh at him.", 'hate'),
("He is not at all liked in Hertfordshire.", 'hate'),
("Everybody is disgusted with his pride.", 'hate'),
("This is quite shocking! He deserves to be publicly disgraced.", 'hate'),
("He deserves to be publicly disgraced.", 'hate'),
("I had not thought <NAME> so bad as this--though I have never liked him.", 'hate'),
("Nobody can tell what I suffer!", 'hate'),
('I am confused.', 'confusion'),
("""'I had once had some thought of fixing in town myself--for I am fond of superior society; but I did not feel quite certain that the air of London would agree with Lady Lucas.'""", 'confusion'),
("What could she mean by it?", 'confusion'),
("It does not follow that a deep, intricate character is more or less estimable than suc a one as yours.", 'confusion'),
("""'I did not know that you intended to walk,' said <NAME>, in some confusion, lest they had been overheard.""", 'confusion'),
("Perhaps that is not possible for anyone.", 'confusion'),
("My temper I dare not vouch for.", 'confusion'),
("""There is, I believe, in every disposition a tendency to some particular evil--a natural defect, which not even the best education can overcome.""", 'confusion'),
("I think you said she was a widow, sir?", 'confusion'),
("Are you much acquainted with Mr. Darcy?", 'confusion'),
("How could his will be disregarded?", 'confusion'),
("What can have induced him to behave so cruelly?", 'confusion'),
("What if we just simply chose to leave, or try to leave, the game area, without actually setting the bomb off?", 'confusion'),
("I'm trying to change a module variable, but it doesn't work.", 'confusion'),
("I heard you before, but I could not immediately determine what to say in reply.", 'confusion'),
("I did not know that you intended to walk.", 'confusion'),
("Mr. Bingley does not know <NAME> himself?", 'confusion'),
("But what does he say of the living?", 'confusion'),
("What advantage can it be for you to offend <NAME>?", 'confusion'),
("""May I hope, madam, for your interest with your fair daughter Elizabeth, when I solicit for the honour of a private audience with her in the course of this morning?""", 'confusion'),
("Why will you think so?", 'confusion'),
("My dear Charlotte--impossible!", 'confusion'),
("Why should you be surprised, my dear Eliza?", 'confusion'),
("I hope you will be satisfied with what I have done.", 'confusion'),
("Do not you know that Mr. Collins wants to marry Lizzy?", 'confusion'),
("Why should he have it more than anybody else?", 'confusion'),
('I am ready.', 'certainty'),
('This is my best work.', 'certainty'),
("A tuple is a sequence of immutable Python objects. Tuples are sequences, just like lists.", 'certainty'),
("Creating a tuple is as simple as putting different comma-separated values.", 'certainty'),
("""The differences between tuples and lists are, the tuples cannot be changed unlike lists and tuples use parentheses, whereas lists use square brackets.""", 'certainty'),
("""Creating a tuple is as simple as putting different comma-separated values. Optionally you can put comma-separated values between parentheses also.""", 'certainty'),
("First we'll create some training and test data.", 'certainty'),
("As an example, let's create a custom sentiment analyzer.", 'certainty'),
("You can also load data from common file formats including CSV, JSON, and TSV.", 'certainty'),
("Be sure to check out the API Reference for the classifiers module.", 'certainty'),
("Headings you add to your document will appear here.", 'certainty'), |
<reponame>kielni/megapis-python<gh_stars>0
import re
import sys
from bs4 import BeautifulSoup
import requests
from megapis.tasks.task_base import TaskBase
DEFAULT_CONFIG = {
'apiKey': '',
'steamId': '',
'libraryUrl': '',
'excludeTags': '+|Valve|Valve Anti-Cheat enabled|Steam Trading Cards|Captions available|Steam Workshop|Steam Cloud|Steam Achievements|Commentary available',
}
class SteamLibraryTask(TaskBase):
def __str__(self):
return 'SteamLibraryTask'
def __init__(self, config):
self.default_config = DEFAULT_CONFIG
super(SteamLibraryTask, self).__init__(config)
def run(self, data):
owned = self._get_owned()
'''
{
"appid": 42910,
"name": "Magicka",
"playtime_2weeks": 609,
"playtime_forever": 3268,
"img_icon_url": "0eb97d0cd644ee08b1339d2160c7a6adf2ea0a65",
"img_logo_url": "8c59c674ef40f59c3bafde8ff0d59b7994c66477",
"has_community_visible_stats": true
},
'''
added = 0
# get library as dict
library = self._get_library()
for game in owned:
appid = game['appid']
if appid in library:
# update playtime and icons
library[appid].update(game)
continue
# fetch and update metadata
game.update(self._get_meta(appid))
library[appid] = game
added += 1
# back to list
print('updated %s games' % added)
return {'data': [library[appid] for appid in library]}
def _get_meta(self, appid):
url = 'http://store.steampowered.com/app/%s' % appid
try:
soup = BeautifulSoup(requests.get(url=url).text, 'html.parser')
        except Exception:  # avoid a bare except so KeyboardInterrupt/SystemExit still propagate
print('error loading %s: %s' % (url, sys.exc_info()[0]))
return {}
if soup.select('#app_agegate'):
return {
'ageRestricted': True
}
# see if it redirected somewhere else
redirect = True
for meta in soup.select('meta[property="og:url"]'):
if '/app/%s' % appid in meta.get('content', ''):
redirect = False
if redirect:
return {
'missingSteamPage': True
}
# meta property="og:url" content
print('get %s' % appid)
meta = {}
meta['title'] = soup.select_one('.apphub_AppName').string
text = str(soup.select_one('#game_area_description'))
text = re.sub(r'id=".*?"', '', text).replace('<h2>About This Game</h2>', '')
text = re.sub(r'(<br>)+', '<br>', text).replace('</br>', '')
meta['description'] = text
meta['players'] = []
meta['tags'] = []
exclude_tags = (self.config['excludeTags'] or '').split('|')
for detail in [s.string for s in soup.select('.game_area_details_specs a.name')]:
if detail in exclude_tags:
continue
if 'player' in detail.lower():
val = _normalize_players(detail)
if val not in meta['players']:
meta['players'].append(val)
else:
meta['tags'].append(detail)
for tag in [s.string.strip() for s in soup.select('.glance_tags .app_tag')]:
if tag in meta['tags'] or tag in exclude_tags:
continue
if 'player' in tag.lower():
val = _normalize_players(tag)
if val not in meta['players']:
meta['players'].append(val)
else:
meta['tags'].append(tag)
text = soup.select('.details_block .linkbar')
meta['genres'] = [g.string for g in text if '/genre/' in g['href']]
print('new game %s\t%s\t%s' % (appid, meta['title'], ', '.join(meta['tags'])))
return meta
def _get_owned(self):
# https://developer.valvesoftware.com/wiki/Steam_Web_API#GetOwnedGames_.28v0001.29
url = 'http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/?'
url += 'key=%s&steamid=%s&format=json&include_appinfo=1' % (
self.config['apiKey'], self.config['steamId'])
print('url=%s' % url)
return requests.get(url).json()['response']['games']
def _get_library(self):
# load library.json
resp = requests.get(self.config['libraryUrl'])
# list to dict
library = {}
for game in resp.json()['data']:
library[game['appid']] = game
return library
def _normalize_players(text):
val = text.lower()
for player in ['single', 'multi']:
if player in val:
val = player
return val
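# --- Usage sketch (not part of the original module) ---
# Hypothetical driver for SteamLibraryTask; the config values below are
# placeholders, and the way TaskBase merges `config` with DEFAULT_CONFIG
# is assumed from the keys defined above.
if __name__ == '__main__':
    task = SteamLibraryTask({
        'apiKey': 'YOUR_STEAM_WEB_API_KEY',
        'steamId': 'YOUR_64_BIT_STEAM_ID',
        'libraryUrl': 'https://example.com/library.json',
    })
    result = task.run({})
    print('library size: %d' % len(result['data']))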
<gh_stars>1-10
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (BatchNormalization, Concatenate,
                                     Dense, Dropout, Input, LSTM)
import tensorflow_probability as tfp
import tensorflow as tf
tfd = tfp.distributions
def create_feature_extractor_block(x, units):
# x = Dense(16, activation='relu')(x)
# x = BatchNormalization()(x)
# x = Dense(8, activation='relu')(x)
# x = BatchNormalization()(x)
# x = Dropout(0.2)(x)
x = Dense(units, activation='relu')(x)
return x
def create_stddev_block(x):
# x = Dense(1, kernel_regularizer=tf.keras.regularizers.l2(0.01), activity_regularizer=tf.keras.regularizers.l1(0.01))(x)
x = Dense(1)(x)
return x
def create_mu_block(x_list):
x = Concatenate()(x_list)
# x = Dense(8, activation='relu')(x)
# x = BatchNormalization()(x)
# x = Dense(1, kernel_regularizer=tf.keras.regularizers.l2(0.01), activity_regularizer=tf.keras.regularizers.l1(0.01))(x)
x = Dense(1)(x)
return x
def create_gaussian_output(mu, stddev, name):
x = Concatenate()([mu, stddev])
x = tfp.layers.DistributionLambda(
lambda t: tfd.Normal(loc=t[..., :1], scale=tf.math.softplus(t[...,1:])+1e-6), name=name)(x)
return x
def create_multivariate_gaussian_output(mus, stddevs, name):
x = Concatenate()(mus+stddevs)
x = tfp.layers.DistributionLambda(
lambda t: tfd.MultivariateNormalDiag(loc=t[...,:len(mus)], scale_diag=tf.math.softplus(t[...,len(mus):])+1e-6), name=name)(x)
return x
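# --- Illustrative sketch (not part of the original module) ---
# The helper blocks above all share one pattern: a Dense layer emits raw
# (mu, pre-stddev) values and DistributionLambda turns them into a
# tfd.Normal, with softplus(.) + 1e-6 keeping the scale strictly positive.
# A hypothetical standalone head using that pattern, trained with the same
# negative log-likelihood loss that build_model below returns:
#
#   inputs = Input((10,))
#   x = Dense(16, activation='relu')(inputs)
#   x = Dense(2)(x)
#   outputs = tfp.layers.DistributionLambda(
#       lambda t: tfd.Normal(loc=t[..., :1],
#                            scale=tf.math.softplus(t[..., 1:]) + 1e-6))(x)
#   model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
#   model.compile(optimizer='adam', loss=lambda y, p_y: -p_y.log_prob(y))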
def build_model(config):
if config.build_model=='point':
loss = tf.keras.losses.mean_squared_error
model = Sequential()
model.add(Dense(config.units, activation='relu'))
model.add(Dense(1))
elif config.build_model=='gaussian' and config.mod_split=='none' and 'alzheimers' not in config.dataset:
loss = lambda y, p_y: -p_y.log_prob(y)
input = Input((config.feature_split_lengths[0],))
x = Dense(config.units, activation='relu', dtype='float32')(input)
x = Dense(2, dtype='float32')(x)
output = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1], scale=tf.math.softplus(t[..., 1:])+1e-6), dtype='float32')(x)
model = tf.keras.models.Model(inputs=input, outputs=output)
elif config.build_model=='gaussian' and 'alzheimers' not in config.dataset:
loss = lambda y, p_y: -p_y.log_prob(y)
input = Input((config.input_feature_length,))
x = Dense(config.units, activation='relu', dtype='float32')(input)
x = Dense(2, dtype='float32')(x)
output = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1], scale=tf.math.softplus(t[..., 1:])+1e-6), dtype='float32')(x)
model = tf.keras.models.Model(inputs=input, outputs=output)
elif config.build_model=='combined_pog' and 'alzheimers' not in config.dataset:
loss = lambda y, p_y: -p_y.log_prob(y)
n_feature_sets = len(config.feature_split_lengths)
inputs = []
for i in range(n_feature_sets):
inputs.append(Input((config.feature_split_lengths[i],)))
feature_extractors = []
for i in range(n_feature_sets):
if config.units_type == 'absolute':
units = config.units
elif config.units_type == 'prorated':
units = math.floor(config.feature_split_lengths[i] * config.units / sum(config.feature_split_lengths) )
feature_extractors.append(create_feature_extractor_block(inputs[i], units = units))
stddevs = []
for i in range(n_feature_sets):
stddevs.append(create_stddev_block(feature_extractors[i]))
mu = create_mu_block(feature_extractors)
outputs = []
for i in range(n_feature_sets):
outputs.append(create_gaussian_output(mu, stddevs[i], name='set_{}'.format(i)))
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
elif config.build_model=='combined_multivariate':
loss = lambda y, p_y: -p_y.log_prob(y)
n_feature_sets = len(config.feature_split_lengths)
inputs = []
for i in range(n_feature_sets):
inputs.append(Input((config.feature_split_lengths[i],)))
feature_extractors = []
for i in range(n_feature_sets):
if config.units_type == 'absolute':
units = config.units
elif config.units_type == 'prorated':
units = math.floor(config.feature_split_lengths[i] * config.units / sum(config.feature_split_lengths) )
feature_extractors.append(create_feature_extractor_block(inputs[i], units = units))
stddevs = []
for i in range(n_feature_sets):
stddevs.append(create_stddev_block(feature_extractors[i]))
mu = create_mu_block(feature_extractors)
mus = [mu]*n_feature_sets
outputs = create_multivariate_gaussian_output(mus, stddevs, name='mv')
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
elif 'alzheimers' in config.dataset:
if config.build_model == 'gaussian':
model = gaussian_alzheimers_model()
else:
model = alzheimers_model()
loss = lambda y, p_y: -p_y.log_prob(y)
else:
raise Exception('{} model type not available'.format(config.build_model))
return model, loss
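# --- Usage sketch (not part of the original module) ---
# build_model reads attributes off a config object; the concrete config
# class is not shown in this module, so SimpleNamespace stands in below as
# a hypothetical substitute.
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(build_model='point', units=32,
#                            dataset='toy', mod_split='none',
#                            feature_split_lengths=[10])
#   model, loss = build_model(config)
#   model.compile(optimizer='adam', loss=loss)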
def alzheimers_model():
intervention_inputs = Input((32, 3))
pause_inputs = Input((11,))
compare_inputs = Input((21,))
intervention_x = LSTM(16)(intervention_inputs)
intervention_x = BatchNormalization()(intervention_x)
intervention_x = Dense(16, activation='relu')(intervention_x)
intervention_x = BatchNormalization()(intervention_x)
intervention_x = Dense(8, activation='relu')(intervention_x)
intervention_x = BatchNormalization()(intervention_x)
intervention_x = Dropout(0.2)(intervention_x)
intervention_std = Dense(1, kernel_regularizer=tf.keras.regularizers.l2(0.01), activity_regularizer=tf.keras.regularizers.l1(0.01))(intervention_x)
pause_x = Dense(24, activation='relu')(pause_inputs)
pause_x = BatchNormalization()(pause_x)
pause_x = Dense(16, activation='relu')(pause_x)
pause_x = BatchNormalization()(pause_x)
pause_x = Dense(16, activation='relu')(pause_x)
pause_x = BatchNormalization()(pause_x)
pause_x = Dense(8, activation='relu')(pause_x)
pause_x = BatchNormalization()(pause_x)
pause_x = Dropout(0.2)(pause_x)
pause_std = Dense(1, kernel_regularizer=tf.keras.regularizers.l2(0.01), activity_regularizer=tf.keras.regularizers.l1(0.01))(pause_x)
compare_x = Dense(24, activation='relu')(compare_inputs)
compare_x = BatchNormalization()(compare_x)
compare_x = Dense(16, activation='relu')(compare_x)
compare_x = BatchNormalization()(compare_x)
compare_x = Dense(16, activation='relu')(compare_x)
compare_x = BatchNormalization()(compare_x)
compare_x = Dense(8, activation='relu')(compare_x)
compare_x = BatchNormalization()(compare_x)
compare_x = Dropout(0.2)(compare_x)
compare_std = Dense(1, kernel_regularizer=tf.keras.regularizers.l2(0.01), activity_regularizer=tf.keras.regularizers.l1(0.01))(compare_x)
mu = Concatenate()([intervention_x, pause_x, compare_x])
# mu = Dense(8, activation='relu')(mu)
# mu = BatchNormalization()(mu)
mu = Dense(1, kernel_regularizer=tf.keras.regularizers.l2(0.01), activity_regularizer=tf.keras.regularizers.l1(0.01))(mu)
intervention_gaus = Concatenate()([mu, intervention_std])
pause_gaus = Concatenate()([mu, pause_std])
compare_gaus = Concatenate()([mu, compare_std])
intervention_output = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1], scale=tf.math.softplus(t[...,1:])+1e-6), name='intervention')(intervention_gaus)
pause_output = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1], scale=tf.math.softplus(t[...,1:])+1e-6), name='pause')(pause_gaus)
compare_output = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1], scale=tf.math.softplus(t[...,1:])+1e-6), name='compare')(compare_gaus)
return tf.keras.models.Model(inputs=[intervention_inputs, pause_inputs, compare_inputs],
outputs=[intervention_output, pause_output, compare_output])
def gaussian_alzheimers_model():
intervention_inputs = Input((32, 3))
pause_inputs = Input((11,))
compare_inputs = Input((21,))
intervention_x = LSTM(16)(intervention_inputs)
intervention_x = BatchNormalization()(intervention_x)
intervention_x = Dense(16, activation='relu')(intervention_x)
intervention_x = BatchNormalization()(intervention_x)
intervention_x = Dense(8, activation='relu')(intervention_x)
intervention_x = BatchNormalization()(intervention_x)
# intervention_x = Dropout(0.2)(intervention_x)
pause_x = Dense(24, activation='relu')(pause_inputs)
pause_x = BatchNormalization()(pause_x)
pause_x = Dense(16, activation='relu')(pause_x)
pause_x = BatchNormalization()(pause_x)
pause_x = Dense(16, activation='relu')(pause_x)
pause_x = BatchNormalization()(pause_x)
pause_x = Dense(8, activation='relu')(pause_x)
pause_x = BatchNormalization()(pause_x)
# pause_x = Dropout(0.2)(pause_x)
compare_x = Dense(24, activation='relu')(compare_inputs)
compare_x = BatchNormalization()(compare_x)
compare_x = Dense(16, activation='relu')(compare_x)
compare_x = BatchNormalization()(compare_x)
compare_x = Dense(16, activation='relu')(compare_x)
compare_x = BatchNormalization()(compare_x)
compare_x = Dense(8, activation='relu')(compare_x)
compare_x = BatchNormalization()(compare_x)
# compare_x = Dropout(0.2)(compare_x)
mu_var = Concatenate()([intervention_x, pause_x, compare_x])
# mu = Dense(8, activation='relu')(mu)
# mu = BatchNormalization()(mu)
mu_var = Dense(2, kernel_regularizer=tf.keras.regularizers.l2(0.01), activity_regularizer=tf.keras.regularizers.l1(0.01))(mu_var)
output = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1], scale=tf.math.softplus(t[..., 1:])+1e-6), dtype='float32')(mu_var)
return tf.keras.models.Model(inputs=[intervention_inputs, pause_inputs, compare_inputs],
outputs=[output])
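# --- Usage sketch (not part of the original module) ---
# Both alzheimers models take three input sets: a (32, 3) intervention
# sequence, 11 pause features, and 21 compare features. A hypothetical
# compile/fit call for the three-headed alzheimers_model() (array names
# are placeholders):
#
#   model = alzheimers_model()
#   nll = lambda y, p_y: -p_y.log_prob(y)
#   model.compile(optimizer='adam',
#                 loss={'intervention': nll, 'pause': nll, 'compare': nll})
#   model.fit([x_intervention, x_pause, x_compare], [y, y, y], epochs=10)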
<reponame>JustinPedersen/maya_fspy
"""
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
Usage:
import maya_fspy.ui as mfspy_ui
mfspy_ui.maya_fspy_ui()
Note that you will need to set the correct axes inside of the standalone fspy application for the best results.
Vanishing point axes:
1. -Z
2. -X
Reference distance:
Along the y-axis
"""
import os
import platform
from functools import partial
import maya.OpenMayaUI as omui
import pymel.core as pm
from PySide2 import QtCore
from PySide2 import QtWidgets
from shiboken2 import wrapInstance
from .core import create_camera_and_plane
__author__ = '<NAME>'
__version__ = '1.2.0'
WINDOW_NAME = "Fspy Importer - v{}".format(__version__)
# Python 3 compatibility
if platform.python_version_tuple()[0] == '3':
long = int
def maya_main_window():
"""
Return the Maya main window widget as a Python object
"""
main_window_ptr = omui.MQtUtil.mainWindow()
return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)
def close_existing_windows():
"""
Close any existing instances of the maya fspy window
"""
for child_window in maya_main_window().children():
if hasattr(child_window, 'windowTitle'):
if child_window.windowTitle() == WINDOW_NAME:
child_window.close()
child_window.deleteLater()
class FSpyImporter(QtWidgets.QDialog):
"""
Main UI Class for the importer
"""
def __init__(self, parent=maya_main_window()):
super(FSpyImporter, self).__init__(parent)
self.setWindowTitle(WINDOW_NAME)
self.setMinimumWidth(300)
self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)
self.create_widgets()
self.create_layouts()
self.create_connections()
def create_widgets(self):
self.json_lineedit = QtWidgets.QLineEdit()
self.json_btn = QtWidgets.QPushButton("JSON")
self.image_lineedit = QtWidgets.QLineEdit()
self.image_btn = QtWidgets.QPushButton("Image")
self.import_btn = QtWidgets.QPushButton("Import")
self.json_btn.setFixedHeight(20)
self.json_lineedit.setFixedHeight(20)
self.image_btn.setFixedHeight(20)
self.image_lineedit.setFixedHeight(20)
self.import_btn.setMinimumHeight(40)
def create_layouts(self):
form_layout = QtWidgets.QFormLayout()
form_layout.addRow(self.json_btn, self.json_lineedit)
form_layout.addRow(self.image_btn, self.image_lineedit)
button_layout = QtWidgets.QHBoxLayout()
button_layout.addWidget(self.import_btn)
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(4, 4, 4, 4)
main_layout.addLayout(form_layout)
main_layout.addLayout(button_layout)
def create_connections(self):
self.json_btn.clicked.connect(partial(self.set_line_edit, self.json_lineedit, 'Import Json'))
self.image_btn.clicked.connect(partial(self.set_line_edit, self.image_lineedit, 'Import Image'))
self.import_btn.clicked.connect(self.generate_camera)
def set_line_edit(self, line_edit, caption):
"""
Open a file dialog and set the result to the string inside a line edit.
:param line_edit: The target line edit.
:param str caption: The window caption.
"""
# Setting up the dialog filters to only accept what is expected in that field to prevent user error.
if line_edit == self.json_lineedit:
file_filter = '*.json'
else:
all_image_formats = ['psd', 'als', 'avi', 'dds', 'gif', 'jpg', 'cin', 'iff', 'exr',
'png', 'eps', 'yuv', 'hdr', 'tga', 'tif', 'tim', 'bmp', 'xpm']
            file_filter = 'All Image Files (*.{})'.format(' *.'.join(all_image_formats))
filename = pm.fileDialog2(fileMode=1, caption=caption, fileFilter=file_filter)
if filename:
line_edit.setText(filename[0])
def generate_camera(self):
"""
Main function to generate the camera and image plane from UI.
"""
        # Making sure the JSON field actually received a .json file to prevent user error.
if os.path.splitext(self.json_lineedit.text())[-1].lower() != '.json':
return pm.warning('The JSON field only accepts .json file formats')
        if self.json_lineedit.text() and self.image_lineedit.text():
create_camera_and_plane(self.json_lineedit.text(), self.image_lineedit.text())
else:
pm.warning('Please set a JSON and image path.')
def maya_fspy_ui():
"""
Open the maya fspy ui.
"""
close_existing_windows()
fspy_importer_dialog = FSpyImporter()
fspy_importer_dialog.show()
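# --- Usage sketch (not part of the original module) ---
# The dialog ultimately just forwards the two chosen paths to
# core.create_camera_and_plane, so a hypothetical script-only workflow
# (no dialog) inside Maya would be:
#
#   from maya_fspy.core import create_camera_and_plane
#   create_camera_and_plane('/path/to/project.fspy.json', '/path/to/image.jpg')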
<reponame>dmarvs/bfg-nets<filename>bfgn/tests/reporting/visualizations/test_samples.py<gh_stars>1-10
import numpy as np
import pytest
from bfgn.reporting.visualizations import samples
@pytest.fixture()
def mock_sampled(tmp_path) -> object:
class MockDataBuild:
window_radius = 2
loss_window_radius = 1
class MockConfig:
data_build = MockDataBuild
class MockDataSeq:
nan_replacement_value = 1
class MockSampled:
data_sequence = MockDataSeq
data_sequence_label = "test"
model = None
is_model_trained = True
config = MockConfig
num_samples = 3
num_features = 4
num_responses = 5
has_features_transform = False
has_responses_transform = False
raw_features = np.ones((3, 8, 8, 4))
raw_features_range = np.ones((4, 2))
trans_features = np.ones((3, 8, 8, 4))
trans_features_range = np.ones((4, 2))
raw_responses = np.ones((3, 8, 8, 5))
raw_responses_range = np.ones((5, 2))
trans_responses = np.ones((3, 8, 8, 5))
trans_responses_range = np.ones((5, 2))
raw_predictions = np.ones((3, 8, 8, 5))
raw_predictions_range = np.ones((5, 2))
trans_predictions = np.ones((3, 8, 8, 5))
trans_predictions_range = np.ones((5, 2))
weights = np.ones((3, 8, 8))
weights_range = np.ones((1, 2))
feature_band_types = ["R"] * 4
response_band_types = ["R"] * 5
return MockSampled
def test__plot_samples_passes_classification_excessive_maxes(mock_sampled) -> None:
max_pages = 20
max_samples = 10
max_features = 10
max_responses = 10
sample_type = samples.LABEL_CLASSIFICATION
samples._plot_samples(mock_sampled, max_pages, max_samples, max_features, max_responses, sample_type)
def test__plot_samples_passes_regression_excessive_maxes(mock_sampled) -> None:
max_pages = 20
max_samples = 10
max_features = 10
max_responses = 10
sample_type = samples.LABEL_REGRESSION
samples._plot_samples(mock_sampled, max_pages, max_samples, max_features, max_responses, sample_type)
def test__plot_samples_passes_limited_features(mock_sampled) -> None:
max_pages = 20
max_samples = 10
max_features = 3
max_responses = 10
sample_type = samples.LABEL_REGRESSION
samples._plot_samples(mock_sampled, max_pages, max_samples, max_features, max_responses, sample_type)
def test__plot_samples_passes_limited_responses(mock_sampled) -> None:
max_pages = 20
max_samples = 10
max_features = 10
max_responses = 3
sample_type = samples.LABEL_REGRESSION
samples._plot_samples(mock_sampled, max_pages, max_samples, max_features, max_responses, sample_type)
def test__plot_samples_passes_limited_pages(mock_sampled) -> None:
max_pages = 1
max_samples = 2
max_features = 10
max_responses = 10
sample_type = samples.LABEL_REGRESSION
samples._plot_samples(mock_sampled, max_pages, max_samples, max_features, max_responses, sample_type)
def test__plot_samples_passes_multiple_pages(mock_sampled) -> None:
max_pages = 10
max_samples = 1
max_features = 10
max_responses = 10
sample_type = samples.LABEL_REGRESSION
samples._plot_samples(mock_sampled, max_pages, max_samples, max_features, max_responses, sample_type)
def test__plot_classification_sample_passes_no_plots() -> None:
class MockSampled:
raw_predictions = True
idx_sample = 0
num_features = 3
num_responses = 3
sample_axes = iter([None] * 50)
samples._plot_classification_sample(MockSampled, idx_sample, num_features, num_responses, sample_axes)
def test__plot_regression_sample_passes_no_plots() -> None:
class MockSampled:
raw_predictions = True
idx_sample = 0
num_features = 3
num_responses = 3
sample_axes = iter([None] * 50)
samples._plot_regression_sample(MockSampled, idx_sample, num_features, num_responses, sample_axes)
<reponame>MihailMiller/OpenAlchemy
"""Integration tests against database for relationships."""
import pytest
from sqlalchemy.ext import declarative
import open_alchemy
@pytest.mark.integration
def test_many_to_one(engine, sessionmaker):
"""
GIVEN specification with a schema with a many to one object relationship
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "tables",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_table": {"$ref": "#/components/schemas/RefTable"},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(id=11, name="ref table name 1")
model_instance = model(id=12, name="table name 1", ref_table=ref_model_instance)
session = sessionmaker()
session.add(ref_model_instance)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.id == 12
assert queried_model.name == "table name 1"
assert queried_model.ref_table_id == 11
assert queried_model.ref_table.id == 11
assert queried_model.ref_table.name == "ref table name 1"
queried_ref_model = session.query(ref_model).first()
assert queried_ref_model.id == 11
assert queried_ref_model.name == "ref table name 1"
assert len(queried_ref_model.tables) == 1
assert queried_ref_model.tables[0].id == 12
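# --- Fixture sketch (not part of the original test module) ---
# These tests request `engine` and `sessionmaker` fixtures defined in a
# conftest.py that is not shown here; a hypothetical minimal version backed
# by an in-memory SQLite database might look like:
#
#   import pytest
#   import sqlalchemy
#   import sqlalchemy.orm
#
#   @pytest.fixture
#   def engine():
#       return sqlalchemy.create_engine('sqlite://')
#
#   @pytest.fixture
#   def sessionmaker(engine):
#       return sqlalchemy.orm.sessionmaker(bind=engine)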
@pytest.mark.integration
def test_many_to_one_backref(engine, sessionmaker):
"""
GIVEN specification with a schema with a many to one object relationship
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "tables",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_table": {"$ref": "#/components/schemas/RefTable"},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(
id=11, name="ref table name 1", tables=[model(id=12, name="table name 1")]
)
session = sessionmaker()
session.add(ref_model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.id == 12
assert queried_model.name == "table name 1"
assert queried_model.ref_table_id == 11
assert queried_model.ref_table.id == 11
assert queried_model.ref_table.name == "ref table name 1"
queried_ref_model = session.query(ref_model).first()
assert queried_ref_model.id == 11
assert queried_ref_model.name == "ref table name 1"
assert len(queried_ref_model.tables) == 1
assert queried_ref_model.tables[0].id == 12
@pytest.mark.integration
def test_many_to_one_relationship_fk(engine, sessionmaker):
"""
GIVEN specification with a schema with a many to one object relationship with a
defined foreign key
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "tables",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_table": {
"allOf": [
{"$ref": "#/components/schemas/RefTable"},
{"x-foreign-key-column": "name"},
]
},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(id=11, name="ref table name 1")
model_instance = model(id=12, name="table name 1", ref_table=ref_model_instance)
session = sessionmaker()
session.add(ref_model_instance)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
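    # x-foreign-key-column: "name" points the foreign key at RefTable.name, so
    # the generated column is ref_table_name instead of the default ref_table_id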
assert queried_model.ref_table_name == "ref table name 1"
assert queried_model.ref_table.name == "ref table name 1"
@pytest.mark.integration
def test_one_to_one(engine, sessionmaker):
"""
    GIVEN specification with a schema with a one to one object relationship
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "table",
"x-uselist": False,
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_table": {"$ref": "#/components/schemas/RefTable"},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(id=11, name="ref table name 1")
model_instance = model(id=12, name="table name 1", ref_table=ref_model_instance)
session = sessionmaker()
session.add(ref_model_instance)
session.add(model_instance)
session.flush()
# Querying session
queried_ref_model = session.query(ref_model).first()
assert queried_ref_model.table.id == 12
@pytest.mark.integration
def test_one_to_many(engine, sessionmaker):
"""
GIVEN specification with a schema with a one to many object relationship
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "table",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_tables": {
"type": "array",
"items": {"$ref": "#/components/schemas/RefTable"},
},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(id=11, name="ref table name 1")
model_instance = model(id=12, name="table name 1", ref_tables=[ref_model_instance])
session = sessionmaker()
session.add(ref_model_instance)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.id == 12
assert queried_model.name == "table name 1"
assert len(queried_model.ref_tables) == 1
assert queried_model.ref_tables[0].id == 11
assert queried_model.ref_tables[0].name == "ref table name 1"
assert queried_model.ref_tables[0].table_ref_tables_id == 12
assert queried_model.ref_tables[0].table.name == "table name 1"
queried_ref_model = session.query(ref_model).first()
assert queried_ref_model.id == 11
assert queried_ref_model.name == "ref table name 1"
assert queried_ref_model.table.id == 12
@pytest.mark.integration
def test_one_to_many_relationship_kwargs(engine, sessionmaker):
"""
GIVEN specification with a schema with a one to many object relationship with kwargs
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as specified by kwargs.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "table",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_tables": {
"type": "array",
"items": {
"allOf": [
{"$ref": "#/components/schemas/RefTable"},
{
"x-kwargs": {
"order_by": "desc(RefTable.name)",
"lazy": "dynamic",
}
},
]
},
},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance1 = ref_model(id=11, name="ref table name 1")
ref_model_instance2 = ref_model(id=21, name="ref table name 2")
model_instance = model(
id=12,
name="table name 1",
ref_tables=[ref_model_instance1, ref_model_instance2],
)
session = sessionmaker()
session.add(ref_model_instance1)
session.add(ref_model_instance2)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
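    # lazy: "dynamic" makes ref_tables a query object rather than a plain list,
    # hence the explicit iteration below; order_by yields the names descending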
ref_tables = list(ref_table.name for ref_table in queried_model.ref_tables)
assert ref_tables == ["ref table name 2", "ref table name 1"]
@pytest.mark.integration
def test_one_to_many_relationship_other_order(engine, sessionmaker):
"""
GIVEN specification with a schema with a one to many object relationship which are
defined in reverse order
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "table",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_tables": {
"type": "array",
"items": {"$ref": "#/components/schemas/RefTable"},
},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
ref_model = model_factory(name="RefTable")
model = model_factory(name="Table")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(id=11, name="ref table name 1")
model_instance = model(id=12, name="table name 1", ref_tables=[ref_model_instance])
session = sessionmaker()
session.add(ref_model_instance)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.id == 12
assert queried_model.ref_tables[0].id == 11
@pytest.mark.integration
def test_many_to_many(engine, sessionmaker):
"""
GIVEN specification with a schema with a many to many object relationship
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "tables",
"x-secondary": "association",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_tables": {
"type": "array",
"items": {"$ref": "#/components/schemas/RefTable"},
},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(id=11, name="ref table name 1")
model_instance = model(id=12, name="table name 1", ref_tables=[ref_model_instance])
session = sessionmaker()
session.add(ref_model_instance)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.id == 12
assert queried_model.name == "table name 1"
assert len(queried_model.ref_tables) == 1
assert queried_model.ref_tables[0].id == 11
assert queried_model.ref_tables[0].name == "ref table name 1"
queried_ref_model = session.query(ref_model).first()
assert queried_ref_model.id == 11
assert queried_ref_model.name == "ref table name 1"
assert len(queried_ref_model.tables) == 1
assert queried_ref_model.tables[0].id == 12
assert queried_ref_model.tables[0].name == "table name 1"
@pytest.mark.integration
def test_many_to_many_pre_defined(engine, sessionmaker):
"""
GIVEN specification with a schema with a many to many object relationship and a
pre-defined association
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"x-backref": "tables",
"x-secondary": "association",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_tables": {
"type": "array",
"items": {"$ref": "#/components/schemas/RefTable"},
},
},
"x-tablename": "table",
"type": "object",
},
"DefinedAssociation": {
"type": "object",
"x-tablename": "association",
"properties": {
"table_id": {
"x-primary-key": True,
"type": "integer",
"x-foreign-key": "table.id",
}
},
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
association_model = model_factory(name="DefinedAssociation")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance = ref_model(id=11, name="ref table name 1")
model_instance = model(id=12, name="table name 1", ref_tables=[ref_model_instance])
session = sessionmaker()
session.add(ref_model_instance)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.id == 12
queried_ref_model = session.query(ref_model).first()
assert queried_ref_model.id == 11
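    # the pre-defined association table only declares table_id; OpenAlchemy adds
    # the missing ref_table_id column, checked below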
queried_association_model = session.query(association_model).first()
assert queried_association_model.table_id == 12
assert queried_association_model.ref_table_id == 11
@pytest.mark.parametrize(
"spec",
[
pytest.param(
{
"components": {
"schemas": {
"Column": {"type": "integer", "x-primary-key": True},
"Table": {
"properties": {
"column": {"$ref": "#/components/schemas/Column"}
},
"x-tablename": "table",
"type": "object",
},
}
}
},
id="ref column",
),
pytest.param(
{
"components": {
"schemas": {
"Table": {
"properties": {
"column": {
"allOf": [
{"type": "integer", "x-primary-key": True}
]
}
},
"x-tablename": "table",
"type": "object",
}
}
}
},
id="allOf column",
),
pytest.param(
{
"components": {
"schemas": {
"Table": {
"allOf": [
{
"properties": {
"column": {
"type": "integer",
"x-primary-key": True,
}
},
"x-tablename": "table",
"type": "object",
}
]
}
}
}
},
id="allOf model",
),
],
)
@pytest.mark.integration
def test_ref_all_of(engine, sessionmaker, spec):
"""
    GIVEN specification with a schema that uses $ref or allOf for a column or model
WHEN schema is created and an instance is added to the session
THEN the instance is returned when the session is queried for it.
"""
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
# Creating models
base.metadata.create_all(engine)
# Creating model instance
model_instance = model(column=1)
session = sessionmaker()
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.column == 1
@pytest.mark.integration
def test_multiple(engine, sessionmaker):
"""
GIVEN specification with a schema with multiple relationships pointing to the same
table
WHEN schema is created, values inserted in both tables and queried
THEN the data is returned as it was inserted with the correct foreign key.
"""
# Defining specification
spec = {
"components": {
"schemas": {
"RefTable": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
},
"x-tablename": "ref_table",
"type": "object",
},
"Table": {
"properties": {
"id": {"type": "integer", "x-primary-key": True},
"name": {"type": "string"},
"ref_table_first": {
"allOf": [
{"$ref": "#/components/schemas/RefTable"},
{
"x-kwargs": {
"foreign_keys": "Table.ref_table_first_id"
}
},
]
},
"ref_table_second": {
"allOf": [
{"$ref": "#/components/schemas/RefTable"},
{
"x-kwargs": {
"foreign_keys": "Table.ref_table_second_id"
}
},
]
},
},
"x-tablename": "table",
"type": "object",
},
}
}
}
# Creating model factory
base = declarative.declarative_base()
model_factory = open_alchemy.init_model_factory(spec=spec, base=base)
model = model_factory(name="Table")
ref_model = model_factory(name="RefTable")
# Creating models
base.metadata.create_all(engine)
# Creating instance of model and ref_model
ref_model_instance_first = ref_model(id=11, name="ref table name 1")
ref_model_instance_second = ref_model(id=21, name="ref table name 2")
model_instance = model(
id=12,
name="table name 1",
ref_table_first=ref_model_instance_first,
ref_table_second=ref_model_instance_second,
)
session = sessionmaker()
session.add(ref_model_instance_first)
session.add(ref_model_instance_second)
session.add(model_instance)
session.flush()
# Querying session
queried_model = session.query(model).first()
assert queried_model.id == 12
assert queried_model.name == "table name 1"
assert queried_model.ref_table_first_id == 11
assert queried_model.ref_table_first.id == 11
assert queried_model.ref_table_second_id == 21
assert queried_model.ref_table_second.id == 21
assert queried_model.ref_table_first.name == "ref table name 1"
assert queried_model.ref_table_second.name == "ref table name 2"
# Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Langford's number problem in OR-tools CP-SAT Solver.
This is a port of my old CP model langford.py
Langford's number problem (CSP lib problem 24)
http://www.csplib.org/prob/prob024/
'''
Arrange 2 sets of positive integers 1..k to a sequence,
such that, following the first occurrence of an integer i,
each subsequent occurrence of i appears i+1 indices later
than the last.
For example, for k=4, a solution would be 41312432
'''
* <NAME>: Langford's Problem
http://www.lclark.edu/~miller/langford.html
* https://en.wikipedia.org/wiki/Langford_pairing
* http://dialectrix.com/langford.html
* Encyclopedia of Integer Sequences for the number of solutions for each k
http://www.research.att.com/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=014552
For a solution to be possible this must hold:
k % 4 == 0 or k % 4 == 3
Here's a solution of k = 159, solved in 6.6s
'''
k: 159
[127, 137, 2, 116, 75, 2, 89, 86, 151, 15, 119, 123, 28, 79, 112, 156, 148, 130,
135, 106, 124, 6, 143, 50, 115, 15, 43, 54, 6, 46, 3, 129, 34, 136, 3, 68, 113,
95, 83, 150, 93, 28, 13, 111, 19, 41, 44, 108, 147, 59, 32, 63, 96, 77, 30, 12,
13, 21, 73, 153, 53, 121, 155, 31, 19, 131, 39, 34, 12, 67, 43, 1, 5, 1, 50, 36,
46, 138, 5, 21, 75, 142, 54, 32, 23, 30, 125, 41, 52, 91, 87, 44, 145, 79, 86,
31, 89, 122, 99, 88, 35, 71, 56, 100, 68, 158, 39, 141, 23, 59, 126, 146, 36,
57, 53, 63, 27, 97, 134, 157, 116, 120, 83, 149, 109, 84, 106, 112, 127, 144,
119, 77, 73, 95, 93, 123, 35, 67, 92, 137, 115, 52, 133, 110, 27, 124, 154,
140, 130, 96, 113, 118, 74, 132, 135, 111, 108, 159, 20, 56, 151, 129, 152,
139, 76, 148, 143, 128, 8, 51, 136, 57, 156, 71, 55, 60, 114, 8, 87, 20, 45,
91, 64, 121, 102, 37, 81, 101, 88, 90, 150, 117, 72, 66, 62, 103, 147, 131, 99,
105, 82, 80, 69, 107, 100, 29, 10, 85, 104, 98, 84, 94, 125, 153, 70, 97, 138,
10, 155, 58, 122, 51, 22, 37, 142, 78, 45, 74, 4, 49, 55, 92, 17, 4, 109, 29,
60, 126, 145, 16, 14, 76, 120, 40, 24, 22, 65, 64, 26, 141, 17, 42, 61, 134,
110, 14, 16, 62, 146, 38, 66, 33, 47, 7, 158, 72, 25, 48, 81, 24, 118, 7, 69,
149, 144, 26, 133, 157, 58, 49, 90, 18, 80, 82, 40, 70, 132, 102, 140, 101, 11,
114, 25, 85, 42, 33, 128, 9, 38, 103, 18, 154, 11, 139, 78, 105, 94, 9, 98, 117,
47, 107, 65, 104, 61, 152, 48, 159]
status: FEASIBLE
NumSolutions: 1
NumConflicts: 1048
NumBranches: 206368
WallTime: 6.595721124000001
'''
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import sys
from cp_sat_utils import ListPrinter
def main(k=8, num_sol=0):
model = cp.CpModel()
#
# data
#
print("k:", k)
if not (k % 4 == 0 or k % 4 == 3):
print("There is no solution for K unless K mod 4 == 0 or K mod 4 == 3")
return
p = list(range(2 * k))
#
# declare variables
#
position = [model.NewIntVar(0, 2 * k - 1, "position[%i]" % i) for i in p]
    solution = [model.NewIntVar(1, k, "solution[%i]" % i) for i in p]
#
# constraints
#
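    # Channeling between the two arrays: position[i-1] is the index of the first
    # occurrence of i, position[k+i-1] the index of the second, which must sit
    # i+1 places later; AddElement(ix, solution, i) then pins solution[ix] == i.
    # E.g. for k=4 and sequence 41312432, the two 1s sit at indices 1 and 3
    # (gap 2 = 1+1).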
model.AddAllDifferent(position)
for i in range(1, k + 1):
model.Add(position[i + k - 1] == position[i - 1] + i + 1)
model.AddElement(position[i - 1], solution, i)
model.AddElement(position[k + i - 1], solution, i)
# symmetry breaking
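    # (the reverse of any Langford sequence is also a solution, so fix the orientation)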
model.Add(solution[0] < solution[2 * k - 1])
#
# search and result
#
solver = cp.CpSolver()
# Activating these makes it faster for small numbers but
# slower on larger numbers.
# solver.parameters.search_branching = cp.PORTFOLIO_SEARCH
# solver.parameters.cp_model_presolve = False
# solver.parameters.linearization_level = 0
# solver.parameters.cp_model_probing_level = 0
# status = solver.Solve(model)
    solution_printer = ListPrinter(solution, num_sol)
# solution_printer = SimpleSolutionCounter(solution) # count sols
status = solver.SearchForAllSolutions(model, solution_printer)
print("status:", solver.StatusName(status))
if status != cp.OPTIMAL and status != cp.FEASIBLE:
print("No solution")
print()
print("NumSolutions:", solution_printer.SolutionCount())
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
def benchmark():
"""
Benchmark langford for k = 1..200
"""
for k in range(1,201):
main(k, 1)
print()
k = 8
num_sol = 0
if __name__ == "__main__":
if len(sys.argv) > 1:
k = int(sys.argv[1])
if len(sys.argv) > 2:
num_sol = int(sys.argv[2])
main(k, num_sol)
# benchmark()
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import sys
import traceback
from biokbase.workspace.client import Workspace as workspaceService
import requests
requests.packages.urllib3.disable_warnings()
import subprocess
import os
import re
from pprint import pprint, pformat
from datetime import datetime
import uuid
## SDK Utils
from ReadsUtils.ReadsUtilsClient import ReadsUtils
from SetAPI.SetAPIServiceClient import SetAPI
from DataFileUtil.DataFileUtilClient import DataFileUtil as DFUClient
from KBaseReport.KBaseReportClient import KBaseReport
#END_HEADER
class kb_trimmomatic:
'''
Module Name:
kb_trimmomatic
Module Description:
A KBase module: kb_trimmomatic
This module contains two methods
runTrimmomatic() to backend a KBase App, potentially operating on ReadSets
execTrimmomatic() the local method that handles overloading Trimmomatic to run on a set or a single library
execTrimmomaticSingleLibrary() runs Trimmomatic on a single library
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "1.2.11"
GIT_URL = "https://github.com/kbaseapps/kb_trimmomatic"
GIT_COMMIT_HASH = "9ff31d23f62491d7a47c004f6cf8f800535b47f1"
#BEGIN_CLASS_HEADER
workspaceURL = None
TRIMMOMATIC = 'java -jar /kb/module/Trimmomatic-0.36/trimmomatic-0.36.jar'
ADAPTER_DIR = '/kb/module/Trimmomatic-0.36/adapters/'
def log(self, target, message):
if target is not None:
target.append(message)
print(message)
sys.stdout.flush()
# Determine if phred64
#
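    # Detection heuristic: phred33 encodes quality Q as ASCII Q+33 and phred64 as
    # ASCII Q+64, so any quality character below ASCII 64 proves phred33. Caveat
    # (worth noting): a phred33 file whose scores are all >= 31 never dips below
    # ASCII 64 and would be misread as phred64.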
def is_fastq_phred64 (self, this_input_path):
read_buf_size = 65536
input_is_phred33 = False
data_seen = False
with open (this_input_path, 'r', read_buf_size) as this_input_handle:
while True:
line = this_input_handle.readline()
if not line:
break
if not line.startswith('@'):
raise ValueError ("Badly formatted FASTQ file: "+this_input_path+"\n"+"BAD LINE: '"+line+"'")
# skip two more lines
this_input_handle.readline() # seq
this_input_handle.readline() # '+' qual header
qual_line = this_input_handle.readline().rstrip()
data_seen = True
#def qual33(qual64): return chr(ord(qual64)-31)
for qual_val in qual_line:
q64_ascii = ord(qual_val)
if q64_ascii < 64:
input_is_phred33 = True
break
if input_is_phred33:
break
if not data_seen:
raise ValueError ("no qual score line found in FASTQ file: "+this_input_path)
input_is_phred64 = not input_is_phred33
return input_is_phred64
# Translate phred64 to phred33
#
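    # phred64 stores Q as ASCII Q+64 and phred33 as ASCII Q+33, so each quality
    # character shifts down by a fixed 64 - 33 = 31 (the qual33() helper below).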
def translate_fastq_from_phred64_to_phred33 (self, this_input_path, this_output_path):
if not self.is_fastq_phred64 (this_input_path):
return this_input_path
# internal Method
def qual33(qual64): return chr(ord(qual64)-31)
# read through and translate qual scores
read_buf_size = 65536
write_buf_size = 65536
qual33_handle = open (this_output_path, 'w', write_buf_size)
with open (this_input_path, 'r', read_buf_size) as this_input_handle:
while True:
buf = []
line = this_input_handle.readline()
if not line:
break
if line.startswith('@'):
buf.append(line) # header
buf.append(this_input_handle.readline()) # seq
buf.append(this_input_handle.readline()) # '+'
qual_line = this_input_handle.readline().rstrip()
q33_line = ''
for q64 in qual_line:
q33_line += qual33(q64)
buf.append(q33_line+"\n")
qual33_handle.write(''.join(buf))
qual33_handle.close()
return this_output_path
# Set up Trimmomatic params
#
def parse_trimmomatic_steps(self, input_params):
# validate input parameters and return string defining trimmomatic steps
parameter_string = ''
# if 'read_type' not in input_params and input_params['read_type'] is not None:
# raise ValueError('read_type not defined')
# elif input_params['read_type'] not in ('PE', 'SE'):
# raise ValueError('read_type must be PE or SE')
# if 'quality_encoding' not in input_params and input_params['quality_encoding'] is not None:
# raise ValueError('quality_encoding not defined')
# elif input_params['quality_encoding'] not in ('phred33', 'phred64'):
# raise ValueError('quality_encoding must be phred33 or phred64')
# set adapter trimming
if ('adapterFa' in input_params and input_params['adapterFa'] is not None and
'seed_mismatches' in input_params and input_params['seed_mismatches'] is not None and
'palindrome_clip_threshold' in input_params and input_params['palindrome_clip_threshold'] is not None and
'simple_clip_threshold' in input_params and input_params['simple_clip_threshold'] is not None):
parameter_string = ("ILLUMINACLIP:" + self.ADAPTER_DIR +
":".join((str(input_params['adapterFa']),
str(input_params['seed_mismatches']),
str(input_params['palindrome_clip_threshold']),
str(input_params['simple_clip_threshold']))) + " ")
elif ( ('adapterFa' in input_params and input_params['adapterFa'] is not None) or
('seed_mismatches' in input_params and input_params['seed_mismatches'] is not None) or
('palindrome_clip_threshold' in input_params and input_params['palindrome_clip_threshold'] is not None) or
('simple_clip_threshold' in input_params and input_params['simple_clip_threshold'] is not None) ):
raise ValueError('Adapter Clipping requires Adapter, Seed Mismatches, Palindrome Clip Threshold and Simple Clip Threshold')
# set Crop
if 'crop_length' in input_params and input_params['crop_length'] is not None \
and int(input_params['crop_length']) > 0:
parameter_string += 'CROP:' + str(input_params['crop_length']) + ' '
# set Headcrop
        if 'head_crop_length' in input_params and input_params['head_crop_length'] is not None \
                and int(input_params['head_crop_length']) > 0:
parameter_string += 'HEADCROP:' + str(input_params['head_crop_length']) + ' '
# set Leading
        if 'leading_min_quality' in input_params and input_params['leading_min_quality'] is not None \
                and int(input_params['leading_min_quality']) > 0:
parameter_string += 'LEADING:' + str(input_params['leading_min_quality']) + ' '
# set Trailing
        if 'trailing_min_quality' in input_params and input_params['trailing_min_quality'] is not None \
                and int(input_params['trailing_min_quality']) > 0:
parameter_string += 'TRAILING:' + str(input_params['trailing_min_quality']) + ' '
# set sliding window
        if 'sliding_window_size' in input_params and input_params['sliding_window_size'] is not None \
                and int(input_params['sliding_window_size']) > 0 \
                and 'sliding_window_min_quality' in input_params and input_params['sliding_window_min_quality'] is not None \
                and int(input_params['sliding_window_min_quality']) > 0:
            parameter_string += 'SLIDINGWINDOW:' + str(input_params['sliding_window_size']) + ":" + str(input_params['sliding_window_min_quality']) + ' '
        elif ('sliding_window_size' in input_params and input_params['sliding_window_size'] is not None
                and int(input_params['sliding_window_size']) > 0) \
                or ('sliding_window_min_quality' in input_params and input_params['sliding_window_min_quality'] is not None
                and int(input_params['sliding_window_min_quality']) > 0):
raise ValueError('Sliding Window filtering requires both Window Size and Window Minimum Quality to be set')
# set min length
        if 'min_length' in input_params and input_params['min_length'] is not None \
                and int(input_params['min_length']) > 0:
parameter_string += 'MINLEN:' + str(input_params['min_length']) + ' '
if parameter_string == '':
raise ValueError('No filtering/trimming steps specified!')
return parameter_string
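    # Illustrative trace (parameter values are examples, not defaults): with
    # adapterFa='TruSeq3-PE.fa', seed_mismatches=2, palindrome_clip_threshold=30,
    # simple_clip_threshold=10, sliding_window_size=4, sliding_window_min_quality=15
    # and min_length=36, parse_trimmomatic_steps() returns
    # 'ILLUMINACLIP:/kb/module/Trimmomatic-0.36/adapters/TruSeq3-PE.fa:2:30:10 SLIDINGWINDOW:4:15 MINLEN:36 '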
def _save_RNASeqSampleSet(self, items, wsName, output_SampleSet_name, reads_desc_ext,
single_reads):
print ('Start saving RNASeqSampleSet object')
workspace_id = self.dfu.ws_name_to_id(wsName)
Library_type = 'SingleEnd' if single_reads else 'PairedEnd'
sample_set_data = {'sampleset_id': output_SampleSet_name,
'sampleset_desc': reads_desc_ext,
'Library_type': Library_type,
'sample_ids': [item.get('ref') for item in items],
'condition': [item.get('label') for item in items],
'domain': 'Unknown',
'num_samples': len(items),
'platform': 'Unknown'}
save_object_params = {
'id': workspace_id,
'objects': [{'type': 'KBaseRNASeq.RNASeqSampleSet',
'data': sample_set_data,
'name': output_SampleSet_name}]
}
dfu_oi = self.dfu.save_objects(save_object_params)[0]
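        # save_objects returns a Workspace object_info tuple; the canonical ref
        # is wsid/objid/version, i.e. indices 6, 0 and 4 of that tuple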
sample_set_ref = str(dfu_oi[6]) + '/' + str(dfu_oi[0]) + '/' + str(dfu_oi[4])
return sample_set_ref
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.workspaceURL = config['workspace-url']
self.shockURL = config['shock-url']
self.scratch = os.path.abspath(config['scratch'])
self.handleURL = config['handle-service-url']
self.serviceWizardURL = config['service-wizard-url']
self.callbackURL = os.environ.get('SDK_CALLBACK_URL', None)
if self.callbackURL is None:
raise ValueError("SDK_CALLBACK_URL not set in environment")
if not os.path.exists(self.scratch):
os.makedirs(self.scratch)
os.chdir(self.scratch)
self.dfu = DFUClient(self.callbackURL)
#END_CONSTRUCTOR
pass
def runTrimmomatic(self, ctx, input_params):
"""
:param input_params: instance of type "runTrimmomaticInput"
(runTrimmomatic() ** ** to backend a KBase App, potentially
operating on ReadSets) -> structure: parameter "input_ws" of type
"workspace_name" (** Common types), parameter "input_reads_ref" of
type "data_obj_ref", parameter "output_ws" of type
"workspace_name" (** Common types), parameter "output_reads_name"
of type "data_obj_name", parameter "translate_to_phred33" of type
"bool", parameter "adapter_clip" of type "AdapterClip_Options" ->
structure: parameter "adapterFa" of String, parameter
"seed_mismatches" of Long, parameter "palindrome_clip_threshold"
of Long, parameter "simple_clip_threshold" of Long, parameter
"sliding_window" of type "SlidingWindow_Options" (parameter
groups) -> structure: parameter "sliding_window_size" of Long,
parameter "sliding_window_min_quality" of Long, parameter
"leading_min_quality" of Long, parameter "trailing_min_quality" of
Long, parameter "crop_length" of Long, parameter
"head_crop_length" of Long, parameter "min_length" of Long
:returns: instance of type "runTrimmomaticOutput" -> structure:
parameter "report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN runTrimmomatic
console = []
self.log(console, 'Running runTrimmomatic with parameters: ')
self.log(console, "\n"+pformat(input_params))
token = ctx['token']
env = os.environ.copy()
env['KB_AUTH_TOKEN'] = token
SERVICE_VER = 'release'
# param checks
if ('output_ws' not in input_params or input_params['output_ws'] is None):
input_params['output_ws'] = input_params['input_ws']
required_params = ['input_reads_ref',
'output_ws',
'output_reads_name'
# 'read_type'
]
for required_param in required_params:
if required_param not in input_params or input_params[required_param] == None:
raise ValueError ("Must define required param: '"+required_param+"'")
# load provenance
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
provenance[0]['input_ws_objects']=[str(input_params['input_reads_ref'])]
# set up and run execTrimmomatic()
#
execTrimmomaticParams = { 'input_reads_ref': str(input_params['input_reads_ref']),
'output_ws': input_params['output_ws'],
'output_reads_name': input_params['output_reads_name']
# 'read_type': input_params['read_type'],
}
#if 'quality_encoding' in input_params:
# execTrimmomaticParams['quality_encoding'] = input_params['quality_encoding']
if 'translate_to_phred33' in input_params:
execTrimmomaticParams['translate_to_phred33'] = input_params['translate_to_phred33']
# adapter_clip grouped params
if 'adapter_clip' in input_params and input_params['adapter_clip'] != None:
if 'adapterFa' in input_params['adapter_clip']:
execTrimmomaticParams['adapterFa'] = input_params['adapter_clip']['adapterFa']
else:
execTrimmomaticParams['adapterFa'] = None
if 'seed_mismatches' in input_params['adapter_clip']:
execTrimmomaticParams['seed_mismatches'] = input_params['adapter_clip']['seed_mismatches']
else:
execTrimmomaticParams['seed_mismatches'] = None
if 'palindrome_clip_threshold' in input_params['adapter_clip']:
execTrimmomaticParams['palindrome_clip_threshold'] = input_params['adapter_clip']['palindrome_clip_threshold']
else:
execTrimmomaticParams['palindrome_clip_threshold'] = None
if 'simple_clip_threshold' in input_params['adapter_clip']:
execTrimmomaticParams['simple_clip_threshold'] = input_params['adapter_clip']['simple_clip_threshold']
else:
execTrimmomaticParams['simple_clip_threshold'] = None
# sliding window
        if 'sliding_window' in input_params and input_params['sliding_window'] is not None:
if 'sliding_window_size' in input_params['sliding_window']:
execTrimmomaticParams['sliding_window_size'] = input_params['sliding_window']['sliding_window_size']
else:
execTrimmomaticParams['sliding_window_size'] = None
if 'sliding_window_min_quality' in input_params['sliding_window']:
execTrimmomaticParams['sliding_window_min_quality'] = input_params['sliding_window']['sliding_window_min_quality']
else:
execTrimmomaticParams['sliding_window_min_quality'] = None
# remaining params
if 'leading_min_quality' in input_params:
execTrimmomaticParams['leading_min_quality'] = input_params['leading_min_quality']
if 'trailing_min_quality' in input_params:
execTrimmomaticParams['trailing_min_quality'] = input_params['trailing_min_quality']
if 'crop_length' in input_params:
execTrimmomaticParams['crop_length'] = input_params['crop_length']
if 'head_crop_length' in input_params:
execTrimmomaticParams['head_crop_length'] = input_params['head_crop_length']
if 'min_length' in input_params:
execTrimmomaticParams['min_length'] = input_params['min_length']
# RUN
trimmomatic_retVal = self.execTrimmomatic (ctx, execTrimmomaticParams)[0]
# build report
#
reportName = 'kb_trimmomatic_report_'+str(uuid.uuid4())
reportObj = {'objects_created': [],
#'text_message': '', # or is it 'message'?
'message': '', # or is it 'text_message'?
'direct_html': None,
'file_links': [],
'html_links': [],
'html_window_height': 220,
'workspace_name': input_params['input_ws'],
'report_object_name': reportName
}
# text report (replaced by HTML report)
try:
#reportObj['text_message'] = trimmomatic_retVal['report']
#reportObj['message'] = trimmomatic_retVal['report']
msg = trimmomatic_retVal['report']
except:
raise ValueError ("no report generated by execTrimmomatic()")
# parse text report
report_data = []
report_field_order = []
report_lib_refs = []
report_lib_names = []
lib_i = -1
# This is some powerful brute force nonsense, but it should be okay.
# (Note: it was not OK. Now it is)
se_expected_field_order = ['Input Reads',
'Surviving',
'Dropped']
        se_report_re = re.compile(r'^Input Reads:\s*(\d+)\s*Surviving:\s*(\d+)\s*\(\d+\.\d+%\)\s*Dropped:\s*(\d+)\s*\(\d+\.\d+%\)')
pe_expected_field_order = ['Input Read Pairs',
'Both Surviving',
'Forward Only Surviving',
'Reverse Only Surviving',
'Dropped']
        pe_report_re = re.compile(r'^Input Read Pairs:\s*(\d+)\s*Both Surviving:\s*(\d+)\s*\(\d+\.\d+%\)\s*Forward Only Surviving:\s*(\d+)\s*\(\d+\.\d+%\)\s*Reverse Only Surviving:\s*(\d+)\s*\(\d+\.\d+%\)\s*Dropped:\s*(\d+)\s*\(\d+\.\d+%\)')
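        # These regexes target Trimmomatic's stdout summary lines, e.g. (numbers
        # illustrative):
        #   Input Reads: 1000 Surviving: 950 (95.00%) Dropped: 50 (5.00%)
        #   Input Read Pairs: 1000 Both Surviving: 900 (90.00%) Forward Only Surviving: 50 (5.00%) Reverse Only Surviving: 30 (3.00%) Dropped: 20 (2.00%)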
for line in trimmomatic_retVal['report'].split("\n"):
if line.startswith("RUNNING"):
lib_i += 1
lib_ids = re.sub("RUNNING TRIMMOMATIC ON LIBRARY: ", '', line)
[ref, name] = lib_ids.split(" ")
report_lib_refs.append(ref)
report_lib_names.append(name)
report_data.append({})
report_field_order.append([])
elif line.startswith("-"):
continue
elif len(line) == 0:
continue
            else:
                # single end stats
                m_se = se_report_re.match(line)
                # paired end stats
                m_pe = pe_report_re.match(line)
                if m_se and len(m_se.groups()) == len(se_expected_field_order):
                    report_field_order[lib_i] = se_expected_field_order
                    report_data[lib_i] = dict(zip(report_field_order[lib_i], m_se.groups()))
                    for f_name in report_field_order[lib_i]:
                        report_data[lib_i][f_name] = int(report_data[lib_i][f_name])
                elif m_pe and len(m_pe.groups()) == len(pe_expected_field_order):
                    report_field_order[lib_i] = pe_expected_field_order
                    report_data[lib_i] = dict(zip(report_field_order[lib_i], m_pe.groups()))
                    for f_name in report_field_order[lib_i]:
                        report_data[lib_i][f_name] = int(report_data[lib_i][f_name])
                else:
                    self.log(console, "SKIPPING OUTPUT. Can't parse [" + line + "] (lib_i=" + str(lib_i) + ")")
#### HTML report
##
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)
html_output_dir = os.path.join(self.scratch,'output_html.'+str(timestamp))
if not os.path.exists(html_output_dir):
os.makedirs(html_output_dir)
html_file = input_params['output_reads_name']+'.html'
        output_html_file_path = os.path.join(html_output_dir, html_file)
# html config
sp = ' '
text_color = "#606060"
bar_color = "lightblue"
bar_width = 100
bar_char = "."
bar_fontsize = "-2"
row_spacing = "-2"
html_report_lines = ['<html>']
html_report_lines += ['<body bgcolor="white">']
for lib_i in range(len(report_data)):
html_report_lines += ['<p><b><font color="'+text_color+'">TRIMMOMATIC RESULTS FOR '+str(report_lib_names[lib_i])+' (object '+str(report_lib_refs[lib_i])+')</font></b><br>'+"\n"]
high_val = 0
if not len(report_field_order[lib_i]) > 0:
html_report_lines += ['All reads were trimmed - no new reads object created.']
else:
html_report_lines += ['<table cellpadding=0 cellspacing=0 border=0>']
html_report_lines += ['<tr><td></td><td>'+sp+sp+sp+sp+'</td><td></td><td>'+sp+sp+'</td></tr>']
for f_name in report_field_order[lib_i]:
if int(report_data[lib_i][f_name]) > high_val:
high_val = int(report_data[lib_i][f_name])
for f_name in report_field_order[lib_i]:
percent = round(float(report_data[lib_i][f_name])/float(high_val)*100, 1)
this_width = int(round(float(bar_width)*float(report_data[lib_i][f_name])/float(high_val), 0))
#self.log(console,"this_width: "+str(this_width)+" report_data: "+str(report_data[lib_i][f_name])+" calc: "+str(float(width)*float(report_data[lib_i][f_name])/float(high_val))) # DEBUG
if this_width < 1:
if report_data[lib_i][f_name] > 0:
this_width = 1
else:
this_width = 0
html_report_lines += ['<tr>']
html_report_lines += [' <td align=right><font color="'+text_color+'">'+str(f_name)+'</font></td><td></td>']
#html_report_lines += [' <td align=right><font color="'+text_color+'">'+'{:0,}'.format(report_data[lib_i][f_name])+'</font></td><td></td>']
html_report_lines += [' <td align=right><font color="'+text_color+'">'+str(report_data[lib_i][f_name])+'</font></td><td></td>']
html_report_lines += [' <td align=right><font color="'+text_color+'">'+'('+str(percent)+'%)'+sp+sp+'</font></td><td></td>']
if this_width > 0:
for tic in range(this_width):
html_report_lines += [' <td bgcolor="'+bar_color+'"><font size='+bar_fontsize+' color="'+bar_color+'">'+bar_char+'</font></td>']
html_report_lines += ['</tr>']
html_report_lines += ['<tr><td><font size='+row_spacing+'>'+sp+'</font></td></tr>']
html_report_lines += ['</table>']
html_report_lines += ['<p>']
html_report_lines += ['</body>']
html_report_lines += ['</html>']
# write html to file and upload
html_report_str = "\n".join(html_report_lines)
#reportObj['direct_html'] = "\n".join(html_report_lines) # doesn't always fit in buf
        with open(output_html_file_path, 'w') as html_handle:
html_handle.write(html_report_str)
try:
html_upload_ret = self.dfu.file_to_shock({'file_path': html_output_dir,
#html_upload_ret = dfu.file_to_shock({'file_path': output_html_file_path,
#'make_handle': 0})
'make_handle': 0,
'pack': 'zip'})
except:
raise ValueError ('error uploading HTML file to shock')
# attach to report obj
#reportObj['direct_html'] = None
reportObj['direct_html'] = ''
reportObj['direct_html_link_index'] = 0
reportObj['html_links'] = [{'shock_id': html_upload_ret['shock_id'],
'name': html_file,
'label': input_params['output_reads_name']+' HTML'
}
]
# trimmed object
if trimmomatic_retVal['output_filtered_ref'] != None:
try:
# DEBUG
#self.log(console,"OBJECT CREATED: '"+str(trimmomatic_retVal['output_filtered_ref'])+"'")
reportObj['objects_created'].append({'ref':trimmomatic_retVal['output_filtered_ref'],
'description':'Trimmed Reads'})
except:
raise ValueError ("failure saving trimmed output")
else:
self.log(console, "No trimmed output generated by execTrimmomatic()")
if trimmomatic_retVal.get('output_filtered_sampleset_ref'):
try:
reportObj['objects_created'].append({'ref':trimmomatic_retVal['output_filtered_sampleset_ref'],
'description':'Trimmed Reads'})
except:
raise ValueError ("failure saving trimmed output")
else:
self.log(console, "No trimmed output generated by execTrimmomatic()")
# unpaired fwd
if trimmomatic_retVal['output_unpaired_fwd_ref'] != None:
try:
reportObj['objects_created'].append({'ref':trimmomatic_retVal['output_unpaired_fwd_ref'],
'description':'Trimmed Unpaired Forward Reads'})
except:
raise ValueError ("failure saving unpaired fwd output")
else:
pass
if trimmomatic_retVal.get('output_unpaired_sampleset_fwd_ref'):
try:
reportObj['objects_created'].append({'ref':trimmomatic_retVal['output_unpaired_sampleset_fwd_ref'],
'description':'Trimmed Unpaired Forward Reads'})
except:
raise ValueError ("failure saving unpaired fwd output")
else:
pass
# unpaired rev
if trimmomatic_retVal['output_unpaired_rev_ref'] != None:
try:
reportObj['objects_created'].append({'ref':trimmomatic_retVal['output_unpaired_rev_ref'],
'description':'Trimmed Unpaired Reverse Reads'})
except:
raise ValueError ("failure saving unpaired fwd output")
else:
pass
if trimmomatic_retVal.get('output_unpaired_sampleset_rev_ref'):
try:
reportObj['objects_created'].append({'ref':trimmomatic_retVal['output_unpaired_sampleset_rev_ref'],
'description':'Trimmed Unpaired Reverse Reads'})
except:
raise ValueError ("failure saving unpaired fwd output")
else:
pass
# save report object
#
report = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)
#report_info = report.create({'report':reportObj, 'workspace_name':input_params['input_ws']})
report_info = report.create_extended_report(reportObj)
output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
#END runTrimmomatic
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method runTrimmomatic return value ' +
'output is not type dict as required.')
# return the results
return [output]
def execTrimmomatic(self, ctx, input_params):
"""
:param input_params: instance of type "execTrimmomaticInput"
(execTrimmomatic() ** ** the local method that runs Trimmomatic on
each read library) -> structure: parameter "input_reads_ref" of
type "data_obj_ref", parameter "output_ws" of type
"workspace_name" (** Common types), parameter "output_reads_name"
of type "data_obj_name", parameter "read_type" of String,
parameter "adapterFa" of String, parameter "seed_mismatches" of
Long, parameter "palindrome_clip_threshold" of Long, parameter
"simple_clip_threshold" of Long, parameter "translate_to_phred33"
of type "bool", parameter "sliding_window_size" of Long, parameter
"sliding_window_min_quality" of Long, parameter
"leading_min_quality" of Long, parameter "trailing_min_quality" of
Long, parameter "crop_length" of Long, parameter
"head_crop_length" of Long, parameter "min_length" of Long
:returns: instance of type "execTrimmomaticOutput" -> structure:
parameter "output_filtered_ref" of type "data_obj_ref", parameter
"output_unpaired_fwd_ref" of type "data_obj_ref", parameter
"output_unpaired_rev_ref" of type "data_obj_ref", parameter
"report" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN execTrimmomatic
console = []
self.log(console, 'Running execTrimmomatic with parameters: ')
self.log(console, "\n"+pformat(input_params))
report = ''
trimmomatic_retVal = dict()
trimmomatic_retVal['output_filtered_ref'] = None
trimmomatic_retVal['output_unpaired_fwd_ref'] = None
trimmomatic_retVal['output_unpaired_rev_ref'] = None
token = ctx['token']
wsClient = workspaceService(self.workspaceURL, token=token)
headers = {'Authorization': 'OAuth '+token}
env = os.environ.copy()
env['KB_AUTH_TOKEN'] = token
# object info
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
Set_types = ["KBaseSets.ReadsSet", "KBaseRNASeq.RNASeqSampleSet"]
PE_types = ["KBaseFile.PairedEndLibrary", "KBaseAssembly.PairedEndLibrary"]
SE_types = ["KBaseFile.SingleEndLibrary", "KBaseAssembly.SingleEndLibrary"]
acceptable_types = Set_types + PE_types + SE_types
# param checks
required_params = ['input_reads_ref',
'output_ws',
'output_reads_name'
# 'read_type'
]
for required_param in required_params:
if required_param not in input_params or input_params[required_param] == None:
raise ValueError ("Must define required param: '"+required_param+"'")
# load provenance
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
# add additional info to provenance here, in this case the input data object reference
provenance[0]['input_ws_objects']=[str(input_params['input_reads_ref'])]
# Determine whether read library or read set is input object
#
try:
input_reads_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_params['input_reads_ref']}]})[0]
input_reads_obj_type = input_reads_obj_info[TYPE_I]
            input_reads_obj_type = re.sub(r'-[0-9]+\.[0-9]+$', "", input_reads_obj_type)  # remove trailing version
#input_reads_obj_version = input_reads_obj_info[VERSION_I] # this is object version, not type version
except Exception as e:
raise ValueError('Unable to get read library object from workspace: (' + str(input_params['input_reads_ref']) +')' + str(e))
if input_reads_obj_type not in acceptable_types:
raise ValueError ("Input reads of type: '"+input_reads_obj_type+"'. Must be one of "+", ".join(acceptable_types))
# auto-detect reads type
read_type = None
if input_reads_obj_type in PE_types:
read_type = 'PE'
elif input_reads_obj_type in SE_types:
read_type = 'SE'
# get set
#
readsSet_ref_list = []
readsSet_names_list = []
if input_reads_obj_type in Set_types:
try:
#self.log (console, "INPUT_READS_REF: '"+input_params['input_reads_ref']+"'") # DEBUG
#setAPI_Client = SetAPI (url=self.callbackURL, token=ctx['token']) # for SDK local. doesn't work for SetAPI
setAPI_Client = SetAPI (url=self.serviceWizardURL, token=ctx['token'], service_ver='beta') # for dynamic service
input_readsSet_obj = setAPI_Client.get_reads_set_v1 ({'ref':input_params['input_reads_ref'],'include_item_info':1})
except Exception as e:
raise ValueError('SetAPI FAILURE: Unable to get read library set object from workspace: (' + str(input_params['input_reads_ref'])+")\n" + str(e))
for readsLibrary_obj in input_readsSet_obj['data']['items']:
readsSet_ref_list.append(readsLibrary_obj['ref'])
readsSet_names_list.append(readsLibrary_obj['info'][NAME_I])
reads_item_type = readsLibrary_obj['info'][TYPE_I]
                reads_item_type = re.sub(r'-[0-9]+\.[0-9]+$', "", reads_item_type)  # remove trailing version
if reads_item_type in PE_types:
this_read_type = 'PE'
elif reads_item_type in SE_types:
this_read_type = 'SE'
else:
raise ValueError ("Can't handle read item type '"+reads_item_type+"' obj_name: '"+readsLibrary_obj['info'][NAME_I]+" in Set: '"+str(input_params['input_reads_ref'])+"'")
if read_type != None and this_read_type != read_type:
raise ValueError ("Can't handle read Set: '"+str(input_params['input_reads_ref'])+"'. Unable to process mixed PairedEndLibrary and SingleEndLibrary. Please split into separate ReadSets")
elif read_type == None:
read_type = this_read_type
else:
readsSet_ref_list = [input_params['input_reads_ref']]
readsSet_names_list = [input_reads_obj_info[NAME_I]]
# Iterate through readsLibrary members of set
#
report = ''
trimmed_readsSet_ref = None
unpaired_fwd_readsSet_ref = None
unpaired_rev_readsSet_ref = None
trimmed_RNASeqSampleSet_ref = None
unpaired_fwd_SampleSet_ref = None
unpaired_rev_SampleSet_ref = None
trimmed_readsSet_refs = []
unpaired_fwd_readsSet_refs = []
unpaired_rev_readsSet_refs = []
for reads_item_i,input_reads_library_ref in enumerate(readsSet_ref_list):
execTrimmomaticParams = { 'input_reads_ref': input_reads_library_ref,
'output_ws': input_params['output_ws']
}
optional_params = [ #'read_type',
'adapterFa',
'seed_mismatches',
'palindrome_clip_threshold',
'simple_clip_threshold',
#'quality_encoding',
'translate_to_phred33',
'sliding_window_size',
'sliding_window_min_quality',
'leading_min_quality',
'trailing_min_quality',
'crop_length',
'head_crop_length',
'min_length'
]
for arg in optional_params:
if arg in input_params:
execTrimmomaticParams[arg] = input_params[arg]
# add auto-detected read_type
execTrimmomaticParams['read_type'] = read_type
# set output name
if input_reads_obj_type not in Set_types:
execTrimmomaticParams['output_reads_name'] = input_params['output_reads_name']
else:
execTrimmomaticParams['output_reads_name'] = readsSet_names_list[reads_item_i]+'_trimm'
report += "RUNNING TRIMMOMATIC ON LIBRARY: "+str(input_reads_library_ref)+" "+str(readsSet_names_list[reads_item_i])+"\n"
report += "-----------------------------------------------------------------------------------\n\n"
# run Trimmomatic App for One Library at a Time
trimmomaticSingleLibrary_retVal = self.execTrimmomaticSingleLibrary (ctx, execTrimmomaticParams)[0]
# add to report
report += trimmomaticSingleLibrary_retVal['report']+"\n\n"
trimmed_readsSet_refs.append (trimmomaticSingleLibrary_retVal['output_filtered_ref'])
unpaired_fwd_readsSet_refs.append (trimmomaticSingleLibrary_retVal['output_unpaired_fwd_ref'])
unpaired_rev_readsSet_refs.append (trimmomaticSingleLibrary_retVal['output_unpaired_rev_ref'])
# Just one Library
if input_reads_obj_type not in ["KBaseSets.ReadsSet", "KBaseRNASeq.RNASeqSampleSet"]:
# create return output object
output = { 'report': report,
'output_filtered_ref': trimmed_readsSet_refs[0],
'output_unpaired_fwd_ref': unpaired_fwd_readsSet_refs[0],
'output_unpaired_rev_ref': unpaired_rev_readsSet_refs[0],
}
# ReadsSet
else:
# save trimmed readsSet
some_trimmed_output_created = False
items = []
for i,lib_ref in enumerate(trimmed_readsSet_refs): # FIX: assumes order maintained
if lib_ref == None:
#items.append(None) # can't have 'None' items in ReadsSet
continue
else:
some_trimmed_output_created = True
try:
label = input_readsSet_obj['data']['items'][i]['label']
except:
label = wsClient.get_object_info_new ({'objects':[{'ref':lib_ref}]})[0][NAME_I]
label = label + "_Trimm_paired"
items.append({'ref': lib_ref,
'label': label
#'data_attachment': ,
#'info':
})
            # reads_desc_ext and single_reads are also used by the unpaired-reads
            # branches below, so define them whether or not trimmed output exists
            single_reads = False
            if read_type == 'SE':
                reads_desc_ext = " Trimmomatic trimmed SingleEndLibrary"
                reads_name_ext = "_trimm"
                single_reads = True
            else:
                reads_desc_ext = " Trimmomatic trimmed paired reads"
                reads_name_ext = "_trimm_paired"
            if some_trimmed_output_created:
output_readsSet_obj = { 'description': str(input_readsSet_obj['data']['description'])+reads_desc_ext,
'items': items
}
output_readsSet_name = str(input_params['output_reads_name'])+reads_name_ext
trimmed_readsSet_ref = setAPI_Client.save_reads_set_v1 ({'workspace_name': input_params['output_ws'],
'output_object_name': output_readsSet_name,
'data': output_readsSet_obj
})['set_ref']
trimmed_RNASeqSampleSet_ref = self._save_RNASeqSampleSet(
items,
input_params['output_ws'],
output_readsSet_name + '_SampleSet',
reads_desc_ext,
single_reads)
else:
self.log(console, "No trimmed output created")
# raise ValueError ("No trimmed output created")
# save unpaired forward readsSet
some_unpaired_fwd_output_created = False
if len(unpaired_fwd_readsSet_refs) > 0:
items = []
for i,lib_ref in enumerate(unpaired_fwd_readsSet_refs): # FIX: assumes order maintained
if lib_ref == None:
#items.append(None) # can't have 'None' items in ReadsSet
continue
else:
some_unpaired_fwd_output_created = True
try:
if len(unpaired_fwd_readsSet_refs) == len(input_readsSet_obj['data']['items']):
label = input_readsSet_obj['data']['items'][i]['label']
else:
label = wsClient.get_object_info_new ({'objects':[{'ref':lib_ref}]})[0][NAME_I]
except:
label = wsClient.get_object_info_new ({'objects':[{'ref':lib_ref}]})[0][NAME_I]
label = label + "_Trimm_unpaired_fwd"
items.append({'ref': lib_ref,
'label': label
#'data_attachment': ,
#'info':
})
if some_unpaired_fwd_output_created:
output_readsSet_obj = { 'description': str(input_readsSet_obj['data']['description'])+" Trimmomatic unpaired fwd reads",
'items': items
}
output_readsSet_name = str(input_params['output_reads_name'])+'_trimm_unpaired_fwd'
unpaired_fwd_readsSet_ref = setAPI_Client.save_reads_set_v1 ({'workspace_name': input_params['output_ws'],
'output_object_name': output_readsSet_name,
'data': output_readsSet_obj
})['set_ref']
unpaired_fwd_SampleSet_ref = self._save_RNASeqSampleSet(
items,
input_params['output_ws'],
output_readsSet_name + '_SampleSet',
reads_desc_ext,
single_reads)
else:
self.log (console, "no unpaired_fwd readsLibraries created")
unpaired_fwd_readsSet_ref = None
# save unpaired reverse readsSet
some_unpaired_rev_output_created = False
if len(unpaired_rev_readsSet_refs) > 0:
items = []
                for i,lib_ref in enumerate(unpaired_rev_readsSet_refs): # FIX: assumes order maintained
                    if lib_ref is None:
                        #items.append(None) # can't have 'None' items in ReadsSet
continue
else:
some_unpaired_rev_output_created = True
try:
if len(unpaired_rev_readsSet_refs) == len(input_readsSet_obj['data']['items']):
label = input_readsSet_obj['data']['items'][i]['label']
else:
label = wsClient.get_object_info_new ({'objects':[{'ref':lib_ref}]})[0][NAME_I]
except:
label = wsClient.get_object_info_new ({'objects':[{'ref':lib_ref}]})[0][NAME_I]
label = label + "_Trimm_unpaired_rev"
items.append({'ref': lib_ref,
'label': label
#'data_attachment': ,
#'info':
})
if some_unpaired_rev_output_created:
output_readsSet_obj = { 'description': str(input_readsSet_obj['data']['description'])+" Trimmomatic unpaired rev reads",
'items': items
}
output_readsSet_name = str(input_params['output_reads_name'])+'_trimm_unpaired_rev'
unpaired_rev_readsSet_ref = setAPI_Client.save_reads_set_v1 ({'workspace_name': input_params['output_ws'],
'output_object_name': output_readsSet_name,
'data': output_readsSet_obj
})['set_ref']
unpaired_rev_SampleSet_ref = self._save_RNASeqSampleSet(
items,
input_params['output_ws'],
output_readsSet_name + '_SampleSet',
reads_desc_ext,
single_reads)
else:
self.log (console, "no unpaired_rev readsLibraries created")
unpaired_rev_readsSet_ref = None
# create return output object
output = {'report': report,
'output_filtered_ref': trimmed_readsSet_ref,
'output_unpaired_fwd_ref': unpaired_fwd_readsSet_ref,
'output_unpaired_rev_ref': unpaired_rev_readsSet_ref,
'output_filtered_sampleset_ref': trimmed_RNASeqSampleSet_ref,
'output_unpaired_sampleset_fwd_ref': unpaired_fwd_SampleSet_ref,
'output_unpaired_sampleset_rev_ref': unpaired_rev_SampleSet_ref
}
#END execTrimmomatic
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method execTrimmomatic return value ' +
'output is not type dict as required.')
# return the results
return [output]
def execTrimmomaticSingleLibrary(self, ctx, input_params):
"""
:param input_params: instance of type "execTrimmomaticInput"
(execTrimmomatic() ** ** the local method that runs Trimmomatic on
each read library) -> structure: parameter "input_reads_ref" of
type "data_obj_ref", parameter "output_ws" of type
"workspace_name" (** Common types), parameter "output_reads_name"
of type "data_obj_name", parameter "read_type" of String,
parameter "adapterFa" of String, parameter "seed_mismatches" of
Long, parameter "palindrome_clip_threshold" of Long, parameter
"simple_clip_threshold" of Long, parameter "translate_to_phred33"
of type "bool", parameter "sliding_window_size" of Long, parameter
"sliding_window_min_quality" of Long, parameter
"leading_min_quality" of Long, parameter "trailing_min_quality" of
Long, parameter "crop_length" of Long, parameter
"head_crop_length" of Long, parameter "min_length" of Long
:returns: instance of type "execTrimmomaticOutput" -> structure:
parameter "output_filtered_ref" of type "data_obj_ref", parameter
"output_unpaired_fwd_ref" of type "data_obj_ref", parameter
"output_unpaired_rev_ref" of type "data_obj_ref", parameter
"report" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN execTrimmomaticSingleLibrary
console = []
self.log(console, 'Running Trimmomatic with parameters: ')
self.log(console, "\n"+pformat(input_params))
report = ''
retVal = dict()
retVal['output_filtered_ref'] = None
retVal['output_unpaired_fwd_ref'] = None
retVal['output_unpaired_rev_ref'] = None
token = ctx['token']
wsClient = workspaceService(self.workspaceURL, token=token)
headers = {'Authorization': 'OAuth '+token}
env = os.environ.copy()
env['KB_AUTH_TOKEN'] = token
# object info
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
#Set_types = ["KBaseSets.ReadsSet", "KBaseRNASeq.RNASeqSampleSet"]
PE_types = ["KBaseFile.PairedEndLibrary", "KBaseAssembly.PairedEndLibrary"]
SE_types = ["KBaseFile.SingleEndLibrary", "KBaseAssembly.SingleEndLibrary"]
acceptable_types = PE_types + SE_types
# param checks
required_params = ['input_reads_ref',
'output_ws',
'output_reads_name',
'read_type'
]
for required_param in required_params:
if required_param not in input_params or input_params[required_param] is None:
raise ValueError ("Must define required param: '"+required_param+"'")
# and param defaults
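# a value of '0' disables the corresponding Trimmomatic step; the commented
# values are the defaults suggested in the Trimmomatic documentation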
defaults = {
#'quality_encoding': 'phred33',
'seed_mismatches': '0', # '2',
'palindrome_clip_threshold': '0', # '3',
'simple_clip_threshold': '0', # '10',
'crop_length': '0',
'head_crop_length': '0',
'leading_min_quality': '0', # '3',
'trailing_min_quality': '0', # '3',
'sliding_window_size': '0', # '4',
'sliding_window_min_quality': '0', # '15',
'min_length': '0', # '36'
}
for arg in defaults.keys():
if arg not in input_params or input_params[arg] is None or input_params[arg] == '':
input_params[arg] = defaults[arg]
# conditional arg behavior
arg = 'adapterFa'
if arg not in input_params or input_params[arg] is None or input_params[arg] == '':
input_params['adapterFa'] = None
input_params['seed_mismatches'] = None
input_params['palindrome_clip_threshold'] = None
input_params['simple_clip_threshold'] = None
#load provenance
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
# add additional info to provenance here, in this case the input data object reference
provenance[0]['input_ws_objects']=[str(input_params['input_reads_ref'])]
# Determine whether read library is of correct type
#
try:
input_reads_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_params['input_reads_ref']}]})[0]
input_reads_obj_type = input_reads_obj_info[TYPE_I]
#input_reads_obj_version = input_reads_obj_info[VERSION_I] # this is object version, not type version
except Exception as e:
raise ValueError('Unable to get read library object from workspace: (' + str(input_params['input_reads_ref']) +')' + str(e))
input_reads_obj_type = re.sub (r'-[0-9]+\.[0-9]+$', "", input_reads_obj_type) # remove trailing version
if input_reads_obj_type not in acceptable_types:
raise ValueError ("Input reads of type: '"+input_reads_obj_type+"'. Must be one of "+", ".join(acceptable_types))
# Confirm user is paying attention (matters because Trimmomatic params are very different for PairedEndLibrary and SingleEndLibrary)
#
if input_params['read_type'] == 'PE' and input_reads_obj_type not in PE_types:
raise ValueError ("read_type set to 'Paired End' but object is SingleEndLibrary")
if input_params['read_type'] == 'SE' and input_reads_obj_type not in SE_types:
raise ValueError ("read_type set to 'Single End' but object is PairedEndLibrary")
# Instantiate ReadsUtils
#
try:
readsUtils_Client = ReadsUtils (url=self.callbackURL, token=ctx['token']) # SDK local
readsLibrary = readsUtils_Client.download_reads ({'read_libraries': [input_params['input_reads_ref']],
'interleaved': 'false'
})
except Exception as e:
raise ValueError('Unable to get read library object from workspace: (' + str(input_params['input_reads_ref']) +")\n" + str(e))
if input_params['read_type'] == 'PE':
# Download reads Libs to FASTQ files
input_fwd_file_path = readsLibrary['files'][input_params['input_reads_ref']]['files']['fwd']
input_rev_file_path = readsLibrary['files'][input_params['input_reads_ref']]['files']['rev']
sequencing_tech = readsLibrary['files'][input_params['input_reads_ref']]['sequencing_tech']
# DEBUG
# self.log (console, "FWD_INPUT\n")
# fwd_reads_handle = open (input_fwd_file_path, 'r')
# for line_i in range(20):
# self.log (console, fwd_reads_handle.readline())
# fwd_reads_handle.close ()
# self.log (console, "REV_INPUT\n")
# rev_reads_handle = open (input_rev_file_path, 'r')
# for line_i in range(20):
# self.log (console, rev_reads_handle.readline())
# rev_reads_handle.close ()
# Set Params
#
trimmomatic_params = self.parse_trimmomatic_steps(input_params)
# add auto-detected quality_encoding
if self.is_fastq_phred64 (input_fwd_file_path):
quality_encoding = 'phred64'
else:
quality_encoding = 'phred33'
trimmomatic_options = str(input_params['read_type']) + ' -' + quality_encoding
self.log(console, pformat(trimmomatic_params))
self.log(console, pformat(trimmomatic_options))
# Run Trimmomatic
#
self.log(console, 'Starting Trimmomatic')
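# strip any .fq/.fastq extension (either case) so the output suffixes below append to a clean base name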
input_fwd_file_path = re.sub (r"\.fq$", "", input_fwd_file_path)
input_fwd_file_path = re.sub (r"\.FQ$", "", input_fwd_file_path)
input_rev_file_path = re.sub (r"\.fq$", "", input_rev_file_path)
input_rev_file_path = re.sub (r"\.FQ$", "", input_rev_file_path)
input_fwd_file_path = re.sub (r"\.fastq$", "", input_fwd_file_path)
input_fwd_file_path = re.sub (r"\.FASTQ$", "", input_fwd_file_path)
input_rev_file_path = re.sub (r"\.fastq$", "", input_rev_file_path)
input_rev_file_path = re.sub (r"\.FASTQ$", "", input_rev_file_path)
output_fwd_paired_file_path = input_fwd_file_path+"_trimm_fwd_paired.fastq"
output_fwd_unpaired_file_path = input_fwd_file_path+"_trimm_fwd_unpaired.fastq"
output_rev_paired_file_path = input_rev_file_path+"_trimm_rev_paired.fastq"
output_rev_unpaired_file_path = input_rev_file_path+"_trimm_rev_unpaired.fastq"
input_fwd_file_path = input_fwd_file_path+".fastq"
input_rev_file_path = input_rev_file_path+".fastq"
cmdstring = " ".join( (self.TRIMMOMATIC, trimmomatic_options,
input_fwd_file_path,
input_rev_file_path,
output_fwd_paired_file_path,
output_fwd_unpaired_file_path,
output_rev_paired_file_path,
output_rev_unpaired_file_path,
trimmomatic_params) )
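# stream Trimmomatic's combined stdout/stderr line by line into the console log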
cmdProcess = subprocess.Popen(cmdstring, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
outputlines = []
while True:
line = cmdProcess.stdout.readline()
outputlines.append(line)
if not line: break
self.log(console, line.replace('\n', ''))
cmdProcess.stdout.close()
cmdProcess.wait()
self.log(console, 'return code: ' + str(cmdProcess.returncode) + '\n')
if cmdProcess.returncode != 0:
raise ValueError('Error running kb_trimmomatic, return code: ' +
str(cmdProcess.returncode) + '\n')
#report += "cmdstring: " + cmdstring + " stdout: " + stdout + " stderr " + stderr
# free up disk
os.remove(input_fwd_file_path)
os.remove(input_rev_file_path)
# Keep only the lines that start with 'Input'; these are parsed for the HTML report
for line in outputlines:
if line.startswith('Input'):
report += line
# upload paired reads
if not os.path.isfile (output_fwd_paired_file_path) \
or os.path.getsize (output_fwd_paired_file_path) == 0 \
or not os.path.isfile (output_rev_paired_file_path) \
or os.path.getsize (output_rev_paired_file_path) == 0:
retVal['output_filtered_ref'] = None
report += "\n\nNo reads were trimmed, so no trimmed reads object was generated."
else:
# standardize quality encoding
if 'translate_to_phred33' in input_params and input_params['translate_to_phred33'] == 1 and quality_encoding == 'phred64':
#if False: # DEBUG
self.log (console, "TRANSLATING OUTPUT FWD PAIRED FASTQ FILE...")
output_fwd_paired_file_path = self.translate_fastq_from_phred64_to_phred33 \
(output_fwd_paired_file_path, \
re.sub ("\.fastq$", ".q33.fastq", output_fwd_paired_file_path))
output_rev_paired_file_path = self.translate_fastq_from_phred64_to_phred33 \
(output_rev_paired_file_path, \
re.sub ("\.fastq$", ".q33.fastq", output_rev_paired_file_path))
output_obj_name = input_params['output_reads_name']+'_paired'
self.log(console, 'Uploading trimmed paired reads: '+output_obj_name)
retVal['output_filtered_ref'] = readsUtils_Client.upload_reads ({ 'wsname': str(input_params['output_ws']),
'name': output_obj_name,
# remove sequencing_tech arg once ReadsUtils is updated to accept source_reads_ref
#'sequencing_tech': sequencing_tech,
'source_reads_ref': input_params['input_reads_ref'],
'fwd_file': output_fwd_paired_file_path,
'rev_file': output_rev_paired_file_path
})['obj_ref']
# free up disk
os.remove(output_fwd_paired_file_path)
os.remove(output_rev_paired_file_path)
# upload reads forward unpaired
if not os.path.isfile (output_fwd_unpaired_file_path) \
or os.path.getsize (output_fwd_unpaired_file_path) == 0:
retVal['output_unpaired_fwd_ref'] = None
else:
# standardize quality encoding
if 'translate_to_phred33' in input_params and input_params['translate_to_phred33'] == 1 and quality_encoding == 'phred64':
#if False: # DEBUG
self.log (console, "TRANSLATING OUTPUT FWD UNPAIRED FASTQ FILE...")
output_fwd_unpaired_file_path = self.translate_fastq_from_phred64_to_phred33 \
(output_fwd_unpaired_file_path, \
re.sub ("\.fastq$", ".q33.fastq", output_fwd_unpaired_file_path))
output_obj_name = input_params['output_reads_name']+'_unpaired_fwd'
self.log(console, '\nUploading trimmed unpaired forward reads: '+output_obj_name)
retVal['output_unpaired_fwd_ref'] = readsUtils_Client.upload_reads ({ 'wsname': str(input_params['output_ws']),
'name': output_obj_name,
# remove sequencing_tech arg once ReadsUtils is updated to accept source_reads_ref
#'sequencing_tech': sequencing_tech,
'source_reads_ref': input_params['input_reads_ref'],
'fwd_file': output_fwd_unpaired_file_path
})['obj_ref']
# free up disk
os.remove(output_fwd_unpaired_file_path)
# upload reads reverse unpaired
if not os.path.isfile (output_rev_unpaired_file_path) \
or os.path.getsize (output_rev_unpaired_file_path) == 0:
retVal['output_unpaired_rev_ref'] = None
else:
# standardize quality encoding
if 'translate_to_phred33' in input_params and input_params['translate_to_phred33'] == 1 and quality_encoding == 'phred64':
#if False: # DEBUG
self.log (console, "TRANSLATING OUTPUT REV UNPAIRED FASTQ FILE...")
output_rev_unpaired_file_path = self.translate_fastq_from_phred64_to_phred33 \
(output_rev_unpaired_file_path, \
re.sub ("\.fastq$", ".q33.fastq", output_rev_unpaired_file_path))
output_obj_name = input_params['output_reads_name']+'_unpaired_rev'
self.log(console, '\nUploading trimmed unpaired reverse reads: '+output_obj_name)
retVal['output_unpaired_rev_ref'] = readsUtils_Client.upload_reads ({ 'wsname': str(input_params['output_ws']),
'name': output_obj_name,
# remove sequencing_tech arg once ReadsUtils is updated to accept source_reads_ref
#'sequencing_tech': sequencing_tech,
'source_reads_ref': input_params['input_reads_ref'],
'fwd_file': output_rev_unpaired_file_path
})['obj_ref']
# free up disk
os.remove(output_rev_unpaired_file_path)
# SingleEndLibrary
#
else:
self.log(console, "Downloading Single End reads file...")
# Download reads Libs to FASTQ files
input_fwd_file_path = readsLibrary['files'][input_params['input_reads_ref']]['files']['fwd']
sequencing_tech = readsLibrary['files'][input_params['input_reads_ref']]['sequencing_tech']
# Set Params
#
trimmomatic_params = self.parse_trimmomatic_steps(input_params)
# add auto-detected quality_encoding
if self.is_fastq_phred64 (input_fwd_file_path):
quality_encoding = 'phred64'
else:
quality_encoding = 'phred33'
trimmomatic_options = str(input_params['read_type']) + ' -' + quality_encoding
self.log(console, pformat(trimmomatic_params))
self.log(console, pformat(trimmomatic_options))
# Run Trimmomatic
#
self.log(console, 'Starting Trimmomatic')
input_fwd_file_path = re.sub (r"\.fq$", "", input_fwd_file_path)
input_fwd_file_path = re.sub (r"\.FQ$", "", input_fwd_file_path)
input_fwd_file_path = re.sub (r"\.fastq$", "", input_fwd_file_path)
input_fwd_file_path = re.sub (r"\.FASTQ$", "", input_fwd_file_path)
output_fwd_file_path = input_fwd_file_path+"_trimm_fwd.fastq"
input_fwd_file_path = input_fwd_file_path+".fastq"
cmdstring = " ".join( (self.TRIMMOMATIC, trimmomatic_options,
input_fwd_file_path,
output_fwd_file_path,
trimmomatic_params) )
cmdProcess = subprocess.Popen(cmdstring, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
#report += "cmdstring: " + cmdstring
outputlines = []
while True:
line = cmdProcess.stdout.readline()
outputlines.append(line)
if not line: break
self.log(console, line.replace('\n', ''))
cmdProcess.stdout.close()
cmdProcess.wait()
self.log(console, 'return code: ' + str(cmdProcess.returncode) + '\n')
if cmdProcess.returncode != 0:
raise ValueError('Error running kb_trimmomatic, return code: ' +
str(cmdProcess.returncode) + '\n')
# Keep only the lines that start with 'Input'; these are parsed for the HTML report
for line in outputlines:
if line.startswith('Input'):
report += line
# free up disk
os.remove(input_fwd_file_path)
# get read count
match = re.search(r'Surviving: (\d+)', report)
readcount = match.group(1) if match else None # guard against a missing 'Surviving' line
# upload reads
if not os.path.isfile (output_fwd_file_path) \
or os.path.getsize (output_fwd_file_path) == 0:
retVal['output_filtered_ref'] = None
else:
# standardize quality encoding
if 'translate_to_phred33' in input_params and input_params['translate_to_phred33'] == 1 and quality_encoding == 'phred64':
#if False: # DEBUG
self.log (console, "TRANSLATING OUTPUT FASTQ FILE...")
output_fwd_file_path = self.translate_fastq_from_phred64_to_phred33 \
(output_fwd_file_path, \
re.sub ("\.fastq$", ".q33.fastq", output_fwd_file_path))
output_obj_name = input_params['output_reads_name']
self.log(console, 'Uploading trimmed reads: '+output_obj_name)
retVal['output_filtered_ref'] = readsUtils_Client.upload_reads ({ 'wsname': str(input_params['output_ws']),
'name': output_obj_name,
# remove sequencing_tech arg once ReadsUtils is updated to accept source_reads_ref
#'sequencing_tech': sequencing_tech,
'source_reads_ref': input_params['input_reads_ref'],
'fwd_file': output_fwd_file_path
})['obj_ref']
# free up disk
os.remove(output_fwd_file_path)
# return created objects
#
output = { 'report': report,
'output_filtered_ref': retVal['output_filtered_ref'],
'output_unpaired_fwd_ref': retVal['output_unpaired_fwd_ref'],
'output_unpaired_rev_ref': retVal['output_unpaired_rev_ref']
}
#END execTrimmomaticSingleLibrary
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method execTrimmomaticSingleLibrary return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK", 'message': "", 'version': self.VERSION,
'git_url': self.GIT_URL, 'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
|
<reponame>MarkusShepherd/flamme-rouge
# -*- coding: utf-8 -*-
""" tracks """
import logging
import re
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
Deque,
Generator,
Iterable,
Iterator,
Optional,
Tuple,
Type,
Union,
cast,
overload,
)
from .cards import Card
from .utils import class_from_path, window
if TYPE_CHECKING:
# pylint: disable=cyclic-import,unused-import
from .teams import Cyclist
LOGGER = logging.getLogger(__name__)
CLASS_REGEX = re.compile(r"[^\w.]+")
class Section:
""" section on the track """
LANE_STR_WIDTH = 20
def __init__(
self,
position: int,
lanes: int = 2,
slipstream: bool = True,
min_speed: Optional[int] = None,
max_speed: Optional[int] = None,
) -> None:
self.position = position
self.lanes = lanes
self.slipstream = slipstream
self.min_speed = min_speed
self.max_speed = max_speed
self._cyclists: Deque["Cyclist"] = deque(maxlen=lanes)
@property
def cyclists(self) -> Tuple["Cyclist", ...]:
""" cyclists """
return tuple(self._cyclists)
@property
def empty(self) -> bool:
""" true if section is empty """
return not self._cyclists
@property
def full(self) -> bool:
""" true if section is filled to capacity """
return len(self._cyclists) >= self.lanes
def add_cyclist(self, cyclist: "Cyclist") -> bool:
""" add a rider to the section """
if self.full:
return False
self._cyclists.append(cyclist)
cyclist.section = self
return True
def remove_cyclist(self, cyclist: "Cyclist") -> bool:
""" remove a rider from this section """
try:
self._cyclists.remove(cyclist)
return True
except ValueError:
pass
finally:
if cyclist.section == self:
cyclist.section = None
return False
def lane(self, cyclist: "Cyclist") -> Optional[int]:
""" lane number for the given cyclist """
for lane, occupant in enumerate(self._cyclists):
if cyclist == occupant:
return lane
return None
def reset(self) -> "Section":
""" reset this section """
self._cyclists = deque(maxlen=self.lanes)
LOGGER.debug("position: %d, cyclists: %s", self.position, self.cyclists)
return self
def __str__(self) -> str:
total = (self.LANE_STR_WIDTH + 1) * self.lanes - 1
left = (total - 5) // 2
right = total - left - 5
top = "+" + "-" * left + f" {self.position:3d} " + "-" * right + "+"
if not self.slipstream:
top += " 🚫"
lane_str = f" {{:{self.LANE_STR_WIDTH - 2}s}} "
cyclists = tuple(map(str, self.cyclists))
cyclists += ("",) * (self.lanes - len(self._cyclists))
# TODO format correctly without messing up colors
# lane_str.format(str(cyclist)[:self.LANE_STR_WIDTH - 2]) for cyclist in cyclists)
cyclists = tuple(map(lane_str.format, cyclists))
middle = "|".join(("",) + cyclists + ("",))
if self.max_speed is not None:
middle = f"{middle} ≤{self.max_speed}"
bottom = "+" + "-" * total + "+"
if self.min_speed is not None:
bottom = f"{bottom} ≥{self.min_speed}"
return "\n".join((top, middle, bottom))
class Section3(Section):
""" 3 lane section """
def __init__(self, position: int) -> None:
super().__init__(position=position, lanes=3)
class Finish(Section):
""" finish section """
def __init__(self, position: int) -> None:
super().__init__(position=position, slipstream=False)
class Finish3(Section):
""" finish section with 3 lanes """
def __init__(self, position: int) -> None:
super().__init__(position=position, lanes=3, slipstream=False)
class MountainUp(Section):
""" up section """
def __init__(self, position: int) -> None:
super().__init__(position=position, slipstream=False, max_speed=5)
class MountainDown(Section):
""" down section """
def __init__(self, position: int) -> None:
super().__init__(position=position, min_speed=5)
class Supply(Section):
""" supply zone section """
def __init__(self, position: int) -> None:
super().__init__(position=position, lanes=3, min_speed=4)
class Cobblestone1(Section):
""" cobblestone with one lane """
def __init__(self, position: int) -> None:
super().__init__(position=position, lanes=1, slipstream=False)
class Cobblestone2(Section):
""" cobblestone with two lanes """
def __init__(self, position: int) -> None:
super().__init__(position=position, slipstream=False)
class Track:
""" track """
def __init__(
self,
name: str,
sections: Iterable[Section],
start: int = 5,
finish: int = -5,
min_players: int = 3,
max_players: int = 4,
) -> None:
self.name = name
self.sections = tuple(sections)
self.start = start
self.finish = finish if finish > 0 else len(self) + finish
self.min_players = min_players
self.max_players = max_players
def __len__(self) -> int:
return len(self.sections)
# pylint: disable=function-redefined
@overload
def __getitem__(self, key: int) -> Section:
pass
@overload
def __getitem__(self, key: slice) -> Tuple[Section, ...]:
pass
def __getitem__(self, key):
return self.sections[key]
def __iter__(self) -> Iterator[Section]:
return iter(self.sections)
def __reversed__(self) -> Iterator[Section]:
return reversed(self.sections)
@property
def available_start(self) -> Tuple[Section, ...]:
""" available starting positions """
return tuple(
section for section in self.sections[: self.start] if not section.full
)
def cyclists(self) -> Generator["Cyclist", None, None]:
""" generator of riders from first to last """
for section in reversed(self.sections):
yield from section.cyclists
def _move_cyclist(
self, cyclist: "Cyclist", value: int, start: int, min_speed: bool = False,
) -> int:
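# Clamp the requested move: honor the start section's min speed when asked,
# cap the value by any max-speed section crossed, then place the cyclist in
# the furthest section with a free lane (marking finishers on the way)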
min_speed_value = self.sections[start].min_speed
value = (
value
if not min_speed or min_speed_value is None
else max(value, min_speed_value)
)
for i, section in enumerate(self.sections[start : start + value + 1]):
max_speed = section.max_speed
if max_speed is None:
continue
if i > max_speed:
value = i - 1
break
value = min(value, max_speed)
for pos in range(min(start + value, len(self) - 1), start, -1):
section = self.sections[pos]
if section.add_cyclist(cyclist):
if pos >= self.finish:
cyclist.finished = True
return pos
return start
def move_cyclist(
self, cyclist: "Cyclist", card: Union[Card, int], min_speed: bool = False,
) -> int:
""" move cyclists """
if isinstance(card, int):
value = card
elif cyclist.team is None:
value = card.value_front
else:
others = (c for c in cyclist.team.cyclists if c is not cyclist)
value = (
card.value_behind
if any(c.ahead_of(cyclist, self) for c in others)
else card.value_front
)
for pos, section in enumerate(self.sections):
if cyclist not in section.cyclists:
continue
end = self._move_cyclist(
cyclist=cyclist, value=value, start=pos, min_speed=min_speed
)
if pos != end:
section.remove_cyclist(cyclist)
return end - pos
raise ValueError("something went wrong during movement")
def do_slipstream(self) -> None:
""" move cyclists through slipstream """
while True:
for sec in window(self.sections, 3):
if (
all(s.slipstream for s in sec)
and sec[0].cyclists
and sec[1].empty
and sec[2].cyclists
):
for cyclist in sec[0].cyclists:
LOGGER.info("🚴 <%s> receives slipstream", cyclist)
self.move_cyclist(cyclist, 1)
break # start over to move cyclists at the end of the pack
else:
return # all slipstreams done
def do_exhaustion(self) -> None:
""" add exhaustion cards """
for sec0, sec1 in window(self.sections[: self.finish + 1], 2):
if sec1.empty:
for cyclist in sec0.cyclists:
if not cyclist.team or cyclist.team.exhaustion:
LOGGER.info("🚴 <%s> gets exhausted", cyclist)
cyclist.discard(Card.EXHAUSTION)
@property
def leading(self) -> Optional["Cyclist"]:
""" leading cyclist """
return next(self.cyclists(), None)
def non_empty(self) -> Generator[Section, None, None]:
""" non-empty sections """
for section in self.sections:
if not section.empty:
yield section
def finished(self, all_cyclists: bool = False) -> bool:
""" game finished """
if all_cyclists:
return all(section.empty for section in self.sections[: self.finish])
return any(not section.empty for section in self.sections[self.finish :])
def reset(self) -> "Track":
""" reset this track """
for section in self.sections:
section.reset()
LOGGER.debug(
"start: %d, finish: %d, available start: <%s>, finished: %s, track: %s",
self.start,
self.finish,
", ".join(str(s.position) for s in self.available_start),
self.finished(),
self,
)
return self
def compare(self, cyclist_1: "Cyclist", cyclist_2: "Cyclist",) -> int:
""" returns +1 if cyclist_1 is ahead else -1 """
for cyclist in self.cyclists():
if cyclist == cyclist_1:
return +1
if cyclist == cyclist_2:
return -1
raise RuntimeError(f"unable to find either of {cyclist_1} or {cyclist_2}")
def __str__(self) -> str:
start = next(self.non_empty(), None)
start_pos = start.position - 1 if start is not None and start.position else 0
finish = max(start_pos, self.finish)
total = (Section.LANE_STR_WIDTH + 1) * 2 + 1
sections = (
cast(Tuple[Any], (self.name,))
+ self.sections[start_pos:finish]
+ ("#" * total,)
+ self.sections[finish:]
)
return "\n".join(map(str, sections))
@classmethod
def from_sections(
cls, sections: Union[str, Iterable[str], Iterable[Type[Section]]], **kwargs,
) -> "Track":
""" create a track from a sequence of sections """
if isinstance(sections, str):
sections = CLASS_REGEX.split(sections)
classes = filter(None, map(class_from_path, sections))
sections = (clazz(i) for i, clazz in enumerate(classes))
return cls(sections=sections, **kwargs)
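# Shorthand one-element tuples so the official tracks below can be assembled with tuple arithmetic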
_SEC: Tuple[Type[Section]] = (Section,)
_SEC3: Tuple[Type[Section]] = (Section3,)
_FIN: Tuple[Type[Section]] = (Finish,)
_FIN3: Tuple[Type[Section]] = (Finish3,)
_UP: Tuple[Type[Section]] = (MountainUp,)
_DOWN: Tuple[Type[Section]] = (MountainDown,)
_SUP: Tuple[Type[Section]] = (Supply,)
_COB1: Tuple[Type[Section]] = (Cobblestone1,)
_COB2: Tuple[Type[Section]] = (Cobblestone2,)
AVENUE_CORSO_PASEO = Track.from_sections(
name="AVENUE_CORSO_PASEO", sections=_SEC * 73 + _FIN * 5
)
FIRENZE_MILANO = Track.from_sections(
name="FIRENZE_MILANO",
sections=_SEC * 22
+ _UP * 5
+ _DOWN * 3
+ _SEC * 16
+ _UP * 7
+ _DOWN * 3
+ _SEC * 17
+ _FIN * 5,
)
LA_CLASSICISSIMA = Track.from_sections(
name="LA_CLASSICISSIMA",
sections=_SEC * 14
+ _UP * 10
+ _DOWN * 4
+ _SEC * 12
+ _UP * 5
+ _DOWN * 4
+ _SEC * 5
+ _UP * 3
+ _DOWN * 3
+ _SEC * 13
+ _FIN * 5,
start=4,
)
LA_HAUT_MONTAGNE = Track.from_sections(
name="LA_HAUT_MONTAGNE",
sections=_SEC * 36 + _UP * 7 + _DOWN * 5 + _SEC * 14 + _UP * 12 + _FIN * 4,
finish=-4,
)
LE_COL_DU_BALLON = Track.from_sections(
name="LE_COL_DU_BALLON",
sections=_SEC * 12
+ _UP * 3
+ _DOWN * 5
+ _SEC * 18
+ _UP * 4
+ _DOWN * 4
+ _SEC * 10
+ _UP * 5
+ _DOWN * 4
+ _SEC * 8
+ _FIN * 5,
start=4,
)
PLATEAUX_DE_WALLONIE = Track.from_sections(
name="PLATEAUX_DE_WALLONIE",
sections=_SEC * 16
+ _UP * 3
+ _DOWN * 3
+ _SEC * 6
+ _UP * 2
+ _DOWN * 2
+ _SEC * 34
+ _UP * 2
+ _SEC * 5
+ _FIN * 5,
start=4,
)
RONDE_VAN_WEVELGEM = Track.from_sections(
name="RONDE_VAN_WEVELGEM",
sections=_SEC * 46
+ _UP * 3
+ _DOWN * 5
+ _SEC * 6
+ _UP * 5
+ _DOWN * 3
+ _SEC * 5
+ _FIN * 5,
)
STAGE_7 = Track.from_sections(
name="STAGE_7",
sections=_SEC * 12
+ _SUP * 5
+ _SEC * 5
+ _UP * 6
+ _DOWN * 2
+ _SEC * 10
+ _SUP * 5
+ _SEC * 7
+ _UP * 5
+ _DOWN * 3
+ _SEC * 13
+ _FIN * 5,
)
STAGE_7_5_6 = Track.from_sections(
name="STAGE_7_5_6",
sections=_SEC3 * 11
+ _SEC
+ _SUP * 5
+ _SEC * 5
+ _UP * 6
+ _DOWN * 2
+ _SEC * 10
+ _SUP * 5
+ _SEC * 7
+ _UP * 5
+ _DOWN * 3
+ _SEC * 4
+ _SEC3 * 2
+ _SEC * 10
+ _FIN * 5,
min_players=5,
max_players=6,
)
STAGE_9 = Track.from_sections(
name="STAGE_9",
sections=_SEC * 12
+ _SUP * 5
+ _SEC * 3
+ _COB1
+ _COB2
+ _COB1
+ _COB2
+ _COB1 * 3
+ _COB2
+ _COB1
+ _SEC * 11
+ _SUP * 5
+ _SEC * 6
+ _COB1
+ _COB2
+ _COB1 * 4
+ _COB2
+ _COB1
+ _SEC * 14
+ _FIN * 5,
start=4,
)
ALL_TRACKS = tuple(obj for obj in locals().values() if isinstance(obj, Track))
|
from __future__ import print_function
import numpy as np
from openmdao.api import ExplicitComponent
class VLMMtxRHSComp(ExplicitComponent):
def initialize(self):
self.options.declare('surfaces', types=list)
def setup(self):
surfaces = self.options['surfaces']
system_size = 0
for surface in surfaces:
nx = surface['num_x']
ny = surface['num_y']
name = surface['name']
system_size += (nx - 1) * (ny - 1)
self.system_size = system_size
self.add_input('inflow_velocities', shape=(system_size, 3), units='m/s')
self.add_output('mtx', shape=(system_size, system_size), units='1/m')
self.add_output('rhs', shape=system_size, units='m/s')
inflow_indices = np.arange(system_size * 3).reshape((system_size, 3))
mtx_indices = np.arange(system_size * system_size).reshape((system_size, system_size))
rhs_indices = np.arange(system_size)
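# declare all partials independent by default; only the sparse blocks declared below are nonzero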
self.declare_partials('*', '*', dependent=False)
self.declare_partials('rhs', 'inflow_velocities',
rows=np.einsum('i,j->ij', rhs_indices, np.ones(3, int)).flatten(),
cols=inflow_indices.flatten()
)
ind_1 = 0
ind_2 = 0
for surface in surfaces:
nx = surface['num_x']
ny = surface['num_y']
name = surface['name']
num = (nx - 1) * (ny - 1)
ind_2 += num
vel_mtx_name = '{}_{}_vel_mtx'.format(name, 'coll_pts')
normals_name = '{}_normals'.format(name)
self.add_input(vel_mtx_name,
shape=(system_size, nx - 1, ny - 1, 3), units='1/m')
self.add_input(normals_name, shape=(nx - 1, ny - 1, 3))
velocities_indices = np.arange(system_size * num * 3).reshape(
(system_size, nx - 1, ny - 1, 3)
)
normals_indices = np.arange(num * 3).reshape((num, 3))
self.declare_partials('mtx', vel_mtx_name,
rows=np.einsum('ij,k->ijk', mtx_indices[:, ind_1:ind_2], np.ones(3, int)).flatten(),
cols=velocities_indices.flatten(),
)
self.declare_partials('mtx', normals_name,
rows=np.einsum('ij,k->ijk', mtx_indices[ind_1:ind_2, :], np.ones(3, int)).flatten(),
cols=np.einsum('ik,j->ijk', normals_indices, np.ones(system_size, int)).flatten(),
)
self.declare_partials('rhs', normals_name,
rows=np.outer(rhs_indices[ind_1:ind_2], np.ones(3, int)).flatten(),
cols=normals_indices.flatten(),
)
ind_1 += num
self.mtx_n_n_3 = np.zeros((system_size, system_size, 3))
self.normals_n_3 = np.zeros((system_size, 3))
self.set_check_partial_options(wrt='*', method='fd', step=1e-5)
def compute(self, inputs, outputs):
surfaces = self.options['surfaces']
system_size = self.system_size
ind_1 = 0
ind_2 = 0
for surface in surfaces:
nx = surface['num_x']
ny = surface['num_y']
name = surface['name']
num = (nx - 1) * (ny - 1)
ind_2 += num
vel_mtx_name = '{}_{}_vel_mtx'.format(name, 'coll_pts')
normals_name = '{}_normals'.format(name)
self.mtx_n_n_3[:, ind_1:ind_2, :] = inputs[vel_mtx_name].reshape((system_size, num, 3))
self.normals_n_3[ind_1:ind_2, :] = inputs[normals_name].reshape((num, 3))
ind_1 += num
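# AIC matrix: project each panel's induced-velocity contribution onto the collocation-point normals;
# RHS: negative normal component of the inflow velocity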
outputs['mtx'] = np.einsum('ijk,ik->ij', self.mtx_n_n_3, self.normals_n_3)
outputs['rhs'] = -np.einsum('ij,ij->i', inputs['inflow_velocities'], self.normals_n_3)
def compute_partials(self, inputs, partials):
surfaces = self.options['surfaces']
system_size = self.system_size
ind_1 = 0
ind_2 = 0
for surface in surfaces:
nx = surface['num_x']
ny = surface['num_y']
name = surface['name']
num = (nx - 1) * (ny - 1)
ind_2 += num
vel_mtx_name = '{}_{}_vel_mtx'.format(name, 'coll_pts')
normals_name = '{}_normals'.format(name)
partials['mtx', vel_mtx_name] = np.einsum('ijk,ik->ijk',
np.ones((system_size, num, 3)),
self.normals_n_3,
).flatten()
partials['mtx', normals_name] = self.mtx_n_n_3[ind_1:ind_2, :, :].flatten()
partials['rhs', normals_name] = -inputs['inflow_velocities'][ind_1:ind_2, :].flatten()
ind_1 += num
partials['rhs', 'inflow_velocities'] = -self.normals_n_3.flatten()
|
<gh_stars>0
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gl, gloo, glm
from glumpy.transforms import TrackballPan, Position
vertex = """
#version 120
uniform float linewidth;
uniform float antialias;
attribute vec4 fg_color;
attribute vec4 bg_color;
attribute float radius;
attribute vec3 position;
varying float v_pointsize;
varying float v_radius;
varying float v_z;
varying vec4 v_fg_color;
varying vec4 v_bg_color;
void main (void)
{
v_radius = radius;
v_fg_color = fg_color;
v_bg_color = bg_color;
gl_Position = <transform>;
v_z = gl_Position.z;
gl_PointSize = 2 * (v_radius + linewidth + 1.5*antialias);
}
"""
fragment = """
#version 120
uniform float linewidth;
uniform float antialias;
varying float v_radius;
varying float v_z;
varying vec4 v_fg_color;
varying vec4 v_bg_color;
float marker(vec2 P, float size)
{
const float SQRT_2 = 1.4142135623730951;
float x = SQRT_2/2 * (P.x - P.y);
float y = SQRT_2/2 * (P.x + P.y);
float r1 = max(abs(x)- size/2, abs(y)- size/10);
float r2 = max(abs(y)- size/2, abs(x)- size/10);
float r3 = max(abs(P.x)- size/2, abs(P.y)- size/10);
float r4 = max(abs(P.y)- size/2, abs(P.x)- size/10);
return min( min(r1,r2), min(r3,r4));
}
void main()
{
float r = (v_radius + linewidth + 1.5*antialias);
float t = linewidth/2.0 - antialias;
float signed_distance = length(gl_PointCoord.xy - vec2(0.5,0.5)) * 2 * r - v_radius;
// float signed_distance = marker((gl_PointCoord.xy - vec2(0.5,0.5))*r*2, 2*v_radius);
float border_distance = abs(signed_distance) - t;
float alpha = border_distance/antialias;
alpha = exp(-alpha*alpha);
vec2 p = (gl_PointCoord.xy - vec2(0.5, 0.5)) * 2;
float len_p = length(p);
gl_FragDepth = 0.5 * v_z + 0.5* (len_p)*v_radius / 64.0;
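// fake-sphere shading: reconstruct a normal from the point-sprite coordinate for diffuse/specular lighting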
vec3 normal = normalize(vec3(p.xy, 1.0 - len_p));
vec3 direction = normalize(vec3(1.0, 1.0, 1.0));
float diffuse = max(0.0, dot(direction, normal));
float specular = pow(diffuse, 24.0);
vec4 bg_color = vec4(max(diffuse*v_bg_color.rgb, specular*vec3(1.0)), 1);
// Inside shape
if( signed_distance < 0 ) {
// Fully within linestroke
if( border_distance < 0 ) {
gl_FragColor = v_fg_color;
} else {
gl_FragColor = mix(bg_color, v_fg_color, alpha);
}
// Outside shape
} else {
// discard ends the fragment, so the border-blend branch that used to follow was unreachable; removed
discard;
}
}
"""
theta, phi = 0,0
window = app.Window(width=1920, height=1080, color=(1,1,1,1))
n = 1<<8
program = gloo.Program(vertex, fragment, count=n)
view = np.eye(4, dtype=np.float32)
glm.translate(view, 0, 0, -5)
program['position'] = 0.35 * np.random.randn(n,3)
program['radius'] = np.random.uniform(10,50,n)
program['fg_color'] = 0,0,0,1
colors = np.random.uniform(0.75, 1.00, (n, 4))
colors[:,3] = 1
program['bg_color'] = colors
program['linewidth'] = 0.0
program['antialias'] = 0.0
# create an instance of the TrackballPan object.
trackball = TrackballPan(Position("position"), znear=3, zfar=10, distance=5)
program['transform'] = trackball
trackball.aspect = 1
# rotation around the X axis
trackball.phi = 0
# rotation around the Y axis
trackball.theta = 0
trackball.zoom = 50
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_POINTS)
@window.event
def on_key_press(symbol, modifiers):
pass
# if (symbol == app.window.key.RIGHT):
# trackball.view_x += 0.1
# elif (symbol == app.window.key.LEFT):
# trackball.view_x -= 0.1
# elif (symbol == app.window.key.UP):
# trackball.view_y += 0.1
# elif (symbol == app.window.key.DOWN):
# trackball.view_y -= 0.1
# @window.event
# def on_mouse_scroll(mouse_x, mouse_y, scroll_dx, scroll_dy):
# view = np.array(program['view']).reshape(4,4)
# glm.translate(view, 0, 0, scroll_dy)
# program['view'] = view
# old_zoom_size = zoom_size
# zoom_size += 0.01 * scroll_dy
# V["radius"] *= zoom_size / old_zoom_size
# draw_offset += -0.01 * scroll_dx
@window.event
def on_character(character):
if (character in '+='):
program['radius'] += 0.5
elif (character in "-_"):
program['radius'] -= 0.5
# if (character in "wW"):
# trackball.distance += 0.1
# elif (character in "sS"):
# trackball.distance -= 0.1
# elif (character in "wW"):
# trackball.view_y += 0.1
# elif (character in "sS"):
# trackball.view_y -= 0.1
# elif (character in " eE"):
# trackball.zoom += 1
# elif (character in "qQ"):
# trackball.zoom -= 1
# view = np.array(program['view']).reshape(4,4)
# glm.translate(view, *amount_to_translate)
# program['view'] = view
# global stepsize, frame_idx, draw_offset
# if (character == "r"):
# frame_idx = 0
# stepsize = 0
# elif (character in ".>"):
# frame_idx += 1
# elif (character in ",<"):
# frame_idx -= 1
# elif (character == "R"):
# draw_offset = 0
# elif (character in ["+", "="] ):
# V["radius"] *= 1.1
# elif (character in ["-", "_"]):
# V["radius"] *= 1/1.1
# elif (character in "dD"):
# draw_offset += 0.1
# elif (character in "aA"):
# draw_offset -= 0.1
window.attach(program["transform"])
gl.glEnable(gl.GL_DEPTH_TEST)
app.run() |
<gh_stars>0
import logging
from datetime import datetime
from Common.Objects.Generic import GenericObject
import Common.Objects.Datasets as Datasets
import Common.Objects.Samples as Samples
class Code(GenericObject):
def __init__(self, name, parent=None, key=None):
GenericObject.__init__(self, name=name, parent=parent, key=key)
self._colour_rgb = (255,255,255,)
self.subcodes = {}
self.connections = []
self.doc_positions = {}
self.quotations = []
def __repr__(self):
return 'Code[%s][%s]' % (self.name, self.key,)
@property
def colour_rgb(self):
return self._colour_rgb
@colour_rgb.setter
def colour_rgb(self, value):
self._colour_rgb = value
self._last_changed_dt = datetime.now()
@property
def last_changed_dt(self):
for subcode_key in self.subcodes:
tmp_last_changed_dt = self.subcodes[subcode_key].last_changed_dt
if tmp_last_changed_dt > self._last_changed_dt:
self._last_changed_dt = tmp_last_changed_dt
for quotation in self.quotations:
tmp_last_changed_dt = quotation.last_changed_dt
if tmp_last_changed_dt > self._last_changed_dt:
self._last_changed_dt = tmp_last_changed_dt
return self._last_changed_dt
@last_changed_dt.setter
def last_changed_dt(self, value):
self._last_changed_dt = value
def GetAncestors(self):
ancestors = []
if self.parent != None:
ancestors.append(self.parent)
ancestors.extend(self.parent.GetAncestors())
return ancestors
def GetDescendants(self):
descendants = []
for subcode in self.subcodes.values():
descendants.append(subcode)
descendants.extend(subcode.GetDescendants())
return descendants
def AddConnection(self, obj):
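# record the connection as (module name, key path from root to obj) so the live object can be re-resolved later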
obj_module = getattr(obj, '__module__', None)
key_path = []
key_path.append((type(obj), obj.key))
while obj.parent != None:
obj = obj.parent
key_path.append((type(obj), obj.key))
key_path.reverse()
self.connections.append((obj_module, key_path))
self.last_changed_dt = datetime.now()
def RemoveConnection(self, obj):
obj_module = getattr(obj, '__module__', None)
key_path = []
key_path.append((type(obj), obj.key))
while obj.parent != None:
obj = obj.parent
key_path.append((type(obj), obj.key))
key_path.reverse()
if (obj_module, key_path) in self.connections:
self.connections.remove((obj_module, key_path))
self.last_changed_dt = datetime.now()
def GetConnections(self, datasets, samples):
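# replay each stored (module, key path) pair from the matching root container to recover the live objects;
# paths that can no longer be resolved are pruned along the way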
connection_objects = []
# iterate over a snapshot so stale paths can be removed safely below
for key_path in list(reversed(self.connections)):
current_parent = None
if key_path[0] == Datasets.__name__:
current_parent = datasets
for key in key_path[1]:
if isinstance(current_parent, dict):
if key[1] in current_parent:
current_parent = current_parent[key[1]]
else:
current_parent = None
break
elif isinstance(current_parent, Datasets.Dataset):
if key[0] == Datasets.Field:
if key[1] in current_parent.available_fields:
current_parent = current_parent.available_fields[key[1]]
else:
current_parent = None
break
elif key[0] == Datasets.Field:
if key[1] in current_parent.computational_fields:
current_parent = current_parent.computational_fields[key[1]]
else:
current_parent = None
break
elif key[0] == Datasets.Document:
if key[1] in current_parent.documents:
current_parent = current_parent.documents[key[1]]
else:
current_parent = None
break
else:
current_parent = None
break
else:
current_parent = None
break
elif key_path[0] == Samples.__name__:
current_parent = samples
for key in key_path[1]:
if isinstance(current_parent, dict):
if key[1] in current_parent:
current_parent = current_parent[key[1]]
else:
current_parent = None
break
elif isinstance(current_parent, Samples.Sample):
if key[1] in current_parent.parts_dict:
current_parent = current_parent.parts_dict[key[1]]
else:
current_parent = None
break
elif isinstance(current_parent, Samples.MergedPart):
if key[1] in current_parent.parts_dict:
current_parent = current_parent.parts_dict[key[1]]
else:
current_parent = None
break
else:
current_parent = None
break
if current_parent is not None:
connection_objects.append(current_parent)
else:
#remove key paths that no longer exist, to clean up after name changes
self.connections.remove(key_path)
return list(reversed(connection_objects))
def DestroyObject(self):
#destroy any children first
for code_key in list(self.subcodes.keys()):
self.subcodes[code_key].DestroyObject()
for quotation in reversed(self.quotations):
quotation.DestroyObject()
#remove self from parent if any
if self.parent is not None:
if self.key in self.parent.subcodes:
if self.parent.subcodes[self.key] == self:
del self.parent.subcodes[self.key]
self.parent.last_changed_dt = datetime.now()
self.parent = None
class Quotation(GenericObject):
def __init__(self, parent, dataset_key, document_key, original_data=None, paraphrased_data=None):
GenericObject.__init__(self, parent=parent)
self._dataset_key = dataset_key
self._document_key = document_key
self._original_data = original_data
self._paraphrased_data = paraphrased_data
def __repr__(self):
return 'Quotation[%s]' % (str(self.key))
@property
def dataset_key(self):
return self._dataset_key
@property
def document_key(self):
return self._document_key
@property
def original_data(self):
return self._original_data
@original_data.setter
def original_data(self, value):
self._original_data = value
self.last_changed_dt = datetime.now()
@property
def paraphrased_data(self):
return self._paraphrased_data
@paraphrased_data.setter
def paraphrased_data(self, value):
self._paraphrased_data = value
self.last_changed_dt = datetime.now()
def DestroyObject(self):
#remove self from parent if any
if self.parent is not None:
if self in self.parent.quotations:
self.parent.quotations.remove(self)
self.parent.last_changed_dt = datetime.now()
self.parent = None
class Theme(GenericObject):
def __init__(self, name, parent=None, key=None):
GenericObject.__init__(self, name=name, parent=parent, key=key)
self._colour_rgb = (255,255,255,)
self.subthemes = {}
self.code_keys = []
self.quotations = []
def __repr__(self):
return 'Theme[%s][%s]' % (self.name, self.key,)
@property
def colour_rgb(self):
return self._colour_rgb
@colour_rgb.setter
def colour_rgb(self, value):
self._colour_rgb = value
self._last_changed_dt = datetime.now()
@property
def last_changed_dt(self):
for subcode_key in self.subthemes:
tmp_last_changed_dt = self.subthemes[subcode_key].last_changed_dt
if tmp_last_changed_dt > self._last_changed_dt:
self._last_changed_dt = tmp_last_changed_dt
for quotation in self.quotations:
tmp_last_changed_dt = quotation.last_changed_dt
if tmp_last_changed_dt > self._last_changed_dt:
self._last_changed_dt = tmp_last_changed_dt
return self._last_changed_dt
@last_changed_dt.setter
def last_changed_dt(self, value):
self._last_changed_dt = value
def GetAncestors(self):
ancestors = []
if self.parent != None:
ancestors.append(self.parent)
ancestors.extend(self.parent.GetAncestors())
return ancestors
def GetDescendants(self):
descendants = []
for subtheme in self.subthemes.values():
descendants.append(subtheme)
descendants.extend(subtheme.GetDescendants())
return descendants
def GetCodes(self, codes):
included_codes = []
for key in codes:
if key in self.code_keys:
included_codes.append(codes[key])
included_codes.extend(self.GetCodes(codes[key].subcodes))
return included_codes
def DestroyObject(self):
#destroy any children first
for theme_key in list(self.subthemes.keys()):
self.subthemes[theme_key].DestroyObject()
#remove self from parent if any
if self.parent is not None:
if self.key in self.parent.subthemes:
if self.parent.subthemes[self.key] == self:
del self.parent.subthemes[self.key]
self.parent.last_changed_dt = datetime.now()
self.parent = None
|
<reponame>knaaptime/proplot<filename>proplot/internals/warnings.py
#!/usr/bin/env python3
"""
Custom warning style and deprecation functions.
"""
import functools
import re
import sys
import warnings
ProPlotWarning = type('ProPlotWarning', (UserWarning,), {})
def _warn_proplot(message):
"""
Emit a `ProPlotWarning` and show the stack level outside of matplotlib and proplot.
"""
frame = sys._getframe()
stacklevel = 1
while True:
if frame is None:
break # when called in embedded context may hit frame is None
if not re.match(
r'\A(matplotlib|mpl_toolkits|proplot)\.',
frame.f_globals.get('__name__', '')
):
break
frame = frame.f_back
stacklevel += 1
warnings.warn(message, ProPlotWarning, stacklevel=stacklevel)
def _deprecate_getter_setter(version, property):
"""
Generate `set_name` and `get_name` methods for property setters and getters,
and issue warnings when they are used.
"""
def getter(self):
_warn_proplot(
f'get_{property}() was deprecated in {version}. Please use '
f'{type(self).__name__}.{property} instead.'
)
return getattr(self, '_' + property)
def setter(self, value):
_warn_proplot(
f'set_{property}() was deprecated in {version}. The property is '
f'now read-only.'
)
return
getter.__name__ = f'get_{property}'
setter.__name__ = f'set_{property}'
return getter, setter
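# Example (hypothetical names): attach the deprecated accessors to a class
# MyAxes.get_proj, MyAxes.set_proj = _deprecate_getter_setter('0.6.0', 'proj')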
def _rename_objs(version, **kwargs):
"""
Emit a basic deprecation warning after renaming function(s), method(s), or
class(es). Do not document the deprecated object to discourage use.
"""
wrappers = []
for old_name, new_obj in kwargs.items():
# Add info as keywords to avoid overwriting by loop scope
def deprecate_obj(*args, _old_name=old_name, _new_obj=new_obj, **kwargs):
_new_name = _new_obj.__name__
_warn_proplot(
f'{_old_name!r} was deprecated in version {version} and will be '
f'removed in the next major release. Please use {_new_name!r} instead.'
)
return _new_obj(*args, **kwargs)
# Replace name
deprecate_obj.__name__ = old_name
wrappers.append(deprecate_obj)
if len(wrappers) == 1:
return wrappers[0]
else:
return tuple(wrappers)
def _rename_kwargs(version, **kwargs_rename):
"""
Emit a basic deprecation warning after removing or renaming function keyword
arguments. Each key should be an old keyword, and each argument should be the
new keyword or *instructions* for what to use instead.
"""
def decorator(func_orig):
@functools.wraps(func_orig)
def deprecate_kwargs(*args, **kwargs):
for key_old, key_new in kwargs_rename.items():
if key_old not in kwargs:
continue
value = kwargs.pop(key_old)
if key_new.isidentifier():
# Rename argument
kwargs[key_new] = value
elif '{}' in key_new:
# Nice warning message, but user's desired behavior fails
key_new = key_new.format(value)
_warn_proplot(
f'Keyword arg {key_old!r} was deprecated in {version} and will be '
f'removed in the next major release. Please use {key_new!r} instead.'
)
return func_orig(*args, **kwargs)
return deprecate_kwargs
return decorator
|
<filename>archs/R2plus1D.py<gh_stars>0
import torch.hub
import torch.nn as nn
from einops.layers.torch import Rearrange, Reduce
from torchvision.models.video.resnet import VideoResNet, BasicBlock, R2Plus1dStem, Conv2Plus1D
model_urls = {
"r2plus1d_34_8_ig65m": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip8_ig65m_from_scratch-9bae36ae.pth",
"r2plus1d_34_32_ig65m": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip32_ig65m_from_scratch-449a7af9.pth",
"r2plus1d_34_8_kinetics": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip8_ft_kinetics_from_ig65m-0aa0550b.pth",
"r2plus1d_34_32_kinetics": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip32_ft_kinetics_from_ig65m-ade133f1.pth",
}
class VideoResNetWithFeatureReturn(VideoResNet):
def __init__(self, block, conv_makers, layers, stem):
super().__init__(block=block, conv_makers=conv_makers, layers=layers, stem=stem)
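# replace the stock avgpool + flatten with einops reductions: mean over space, then over time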
self.pool_spatial = Reduce("n c t h w -> n c t", reduction="mean")
self.pool_temporal = Reduce("n c t -> n c", reduction="mean")
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.pool_spatial(x)
x = self.pool_temporal(x)
# x = self.avgpool(x)
# Flatten the layer to fc
# print(x.size())
# x = x.flatten(1)
x = self.fc(x)
return x
def r2plus1d_34(pretrain=None):
model = VideoResNetWithFeatureReturn(block=BasicBlock,
conv_makers=[Conv2Plus1D] * 4,
layers=[3, 4, 6, 3],
stem=R2Plus1dStem)
# model = VideoResNet(block=BasicBlock,conv_makers=[Conv2Plus1D] * 4,layers=[3, 4, 6, 3],stem=R2Plus1dStem)
model.fc = nn.Linear(model.fc.in_features, out_features=400)
# Fix difference in PyTorch vs Caffe2 architecture
# https://github.com/facebookresearch/VMZ/issues/89
# https://github.com/pytorch/vision/issues/1265
model.layer2[0].conv2[0] = Conv2Plus1D(128, 128, 288)
model.layer3[0].conv2[0] = Conv2Plus1D(256, 256, 576)
model.layer4[0].conv2[0] = Conv2Plus1D(512, 512, 1152)
# We need exact Caffe2 momentum for BatchNorm scaling
for m in model.modules():
if isinstance(m, nn.BatchNorm3d):
m.eps = 1e-3
m.momentum = 0.9
if pretrain:
state_dict = torch.hub.load_state_dict_from_url(model_urls[pretrain],
progress=True)
model.load_state_dict(state_dict)
# print()
return model
def get_vmz_fine_tuning_parameters(model, tune_last_k_layer):
layer_max_index = 4
if tune_last_k_layer == layer_max_index:
return model.parameters()
ft_begin_index = layer_max_index - tune_last_k_layer + 1
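# e.g. tune_last_k_layer == 1 unfreezes only layer4 (plus fc); == 4 returns every parameter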
ft_module_names = []
for i in range(ft_begin_index, layer_max_index + 1):
ft_module_names.append('module.base_model.layer{}'.format(i))
ft_module_names.append('module.base_model.stem.{}'.format(i))
ft_module_names.append('module.base_model.fc')
parameters = []
freeze = []
tune = []
names = []
# for k, v in model.named_parameters():
# print(k)
# raise RuntimeError("stop")
for k, v in model.named_parameters():
names.append(k)
no_grad = True
for ft_module in ft_module_names:
if k.startswith(ft_module):
parameters.append({'params': v})
tune.append(k)
no_grad = False
break
if no_grad:
v.requires_grad = False
freeze.append(k)
print('fine_tune:', len(tune), tune)
print('freeze', len(freeze), freeze)
print('all', len(names))
print('param', len(parameters))
return parameters
if __name__ == "__main__":
base_model = r2plus1d_34("r2plus1d_34_32_kinetics").cuda()
print(base_model)
input = torch.randn((8,3,32,112,112)).cuda()
out = base_model.stem(input)
out = base_model.layer1(out)
out = base_model.layer2(out)
print(out.size())
# for i, b in enumerate(blocks):
# print(blocks[i].conv1[0][-1].in_channels) |
<reponame>rfrye-github/ixnetwork_restpy<gh_stars>0
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class DceTopologyRange(Base):
"""Sets the DCE Topology of a particular DCE ISIS Topology Range.
The DceTopologyRange class encapsulates a list of dceTopologyRange resources that are managed by the user.
A list of resources can be retrieved from the server using the DceTopologyRange.find() method.
The list can be managed by using the DceTopologyRange.add() and DceTopologyRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dceTopologyRange'
_SDM_ATT_MAP = {
'EnableFtag': 'enableFtag',
'Enabled': 'enabled',
'NicknameList': 'nicknameList',
'NoOfTreesToCompute': 'noOfTreesToCompute',
'StartFtagValue': 'startFtagValue',
'TopologyCount': 'topologyCount',
'TopologyId': 'topologyId',
'TopologyIdStep': 'topologyIdStep',
}
def __init__(self, parent):
super(DceTopologyRange, self).__init__(parent)
@property
def DceInterestedVlanRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.dceinterestedvlanrange_a919b22a4f996c9450fcdac90437e05d.DceInterestedVlanRange): An instance of the DceInterestedVlanRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.dceinterestedvlanrange_a919b22a4f996c9450fcdac90437e05d import DceInterestedVlanRange
return DceInterestedVlanRange(self)
@property
def EnableFtag(self):
"""
Returns
-------
- bool: If true, the F tag is enabled.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableFtag'])
@EnableFtag.setter
def EnableFtag(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableFtag'], value)
@property
def Enabled(self):
"""
Returns
-------
- bool: Signifies if DCE Topology is enabled or disabled.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def NicknameList(self):
"""
Returns
-------
- list(dict(arg1:number,arg2:number,arg3:number)): The list of nicknames.
"""
return self._get_attribute(self._SDM_ATT_MAP['NicknameList'])
@NicknameList.setter
def NicknameList(self, value):
self._set_attribute(self._SDM_ATT_MAP['NicknameList'], value)
@property
def NoOfTreesToCompute(self):
"""
Returns
-------
- number: The number of trees to compute.
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfTreesToCompute'])
@NoOfTreesToCompute.setter
def NoOfTreesToCompute(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfTreesToCompute'], value)
@property
def StartFtagValue(self):
"""
Returns
-------
- number: The starting Ftag value.
"""
return self._get_attribute(self._SDM_ATT_MAP['StartFtagValue'])
@StartFtagValue.setter
def StartFtagValue(self, value):
self._set_attribute(self._SDM_ATT_MAP['StartFtagValue'], value)
@property
def TopologyCount(self):
"""
Returns
-------
- number: The count of the topology.
"""
return self._get_attribute(self._SDM_ATT_MAP['TopologyCount'])
@TopologyCount.setter
def TopologyCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['TopologyCount'], value)
@property
def TopologyId(self):
"""
Returns
-------
- number: The unique identification number of the topology range.
"""
return self._get_attribute(self._SDM_ATT_MAP['TopologyId'])
@TopologyId.setter
def TopologyId(self, value):
self._set_attribute(self._SDM_ATT_MAP['TopologyId'], value)
@property
def TopologyIdStep(self):
"""
Returns
-------
- number: It shows the Increment Step of the ID of DCE Topology Range. Default is 1.
"""
return self._get_attribute(self._SDM_ATT_MAP['TopologyIdStep'])
@TopologyIdStep.setter
def TopologyIdStep(self, value):
self._set_attribute(self._SDM_ATT_MAP['TopologyIdStep'], value)
def update(self, EnableFtag=None, Enabled=None, NicknameList=None, NoOfTreesToCompute=None, StartFtagValue=None, TopologyCount=None, TopologyId=None, TopologyIdStep=None):
"""Updates dceTopologyRange resource on the server.
Args
----
- EnableFtag (bool): If true, the F tag is enabled.
- Enabled (bool): Signifies if DCE Topology is enabled or disabled.
- NicknameList (list(dict(arg1:number,arg2:number,arg3:number))): The list of nicknames.
- NoOfTreesToCompute (number): The number of trees to compute.
- StartFtagValue (number): The starting Ftag value.
- TopologyCount (number): The count of the topology.
- TopologyId (number): The unique identification number of the topology range.
- TopologyIdStep (number): It shows the Increment Step of the ID of DCE Topology Range. Default is 1.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, EnableFtag=None, Enabled=None, NicknameList=None, NoOfTreesToCompute=None, StartFtagValue=None, TopologyCount=None, TopologyId=None, TopologyIdStep=None):
"""Adds a new dceTopologyRange resource on the server and adds it to the container.
Args
----
- EnableFtag (bool): If true, the F tag is enabled.
- Enabled (bool): Signifies if DCE Topology is enabled or disabled.
- NicknameList (list(dict(arg1:number,arg2:number,arg3:number))): The list of nicknames.
- NoOfTreesToCompute (number): The number of trees to compute.
- StartFtagValue (number): The starting Ftag value.
- TopologyCount (number): The count of the topology.
- TopologyId (number): The unique identification number of the topology range.
- TopologyIdStep (number): It shows the Increment Step of the ID of DCE Topology Range. Default is 1.
Returns
-------
- self: This instance with all currently retrieved dceTopologyRange resources using find and the newly added dceTopologyRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dceTopologyRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, EnableFtag=None, Enabled=None, NicknameList=None, NoOfTreesToCompute=None, StartFtagValue=None, TopologyCount=None, TopologyId=None, TopologyIdStep=None):
"""Finds and retrieves dceTopologyRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dceTopologyRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dceTopologyRange resources from the server.
Args
----
- EnableFtag (bool): If true, the F tag is enabled.
- Enabled (bool): Signifies if DCE Topology is enabled or disabled.
- NicknameList (list(dict(arg1:number,arg2:number,arg3:number))): The list of nicknames.
- NoOfTreesToCompute (number): The number of trees to compute.
- StartFtagValue (number): The starting Ftag value.
- TopologyCount (number): The count of the topology.
- TopologyId (number): The unique identification number of the topology range.
- TopologyIdStep (number): The increment step of the DCE topology range ID. The default is 1.
Returns
-------
- self: This instance with matching dceTopologyRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dceTopologyRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dceTopologyRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
from dash.dependencies import Input, Output, State
from propnet import log_stream
from propnet.web.layouts_models import model_layout, models_index
from propnet.web.layouts_symbols import symbol_layout, symbols_index
from propnet.web.layouts_ashby import ashby_layout
from propnet.web.layouts_interactive import interactive_layout
from mp_dash_components import GraphComponent
from propnet.web.utils import graph_conversion, parse_path, AESTHETICS
from propnet.core.graph import Graph
from propnet.ext.matproj import MPRester
from pydash import set_, get
from flask_caching import Cache
import logging
log = logging.getLogger(__name__)
# TODO: Fix math rendering
app = dash.Dash()
server = app.server
app.config.supress_callback_exceptions = True # TODO: remove this?
app.scripts.config.serve_locally = True
app.title = "Property Network Project"
route = dcc.Location(id='url', refresh=False)
cache = Cache(app.server, config={
'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '.tmp'
})
mpr = MPRester()
g = Graph().graph
# Define default graph component
@app.callback(Output('graph_explorer', 'children'),
[Input('graph_options', 'values')])
def get_graph_component(props):
aesthetics = AESTHETICS.copy()
show_properties = 'show_properties' in props
show_models = 'show_models' in props
#print(props)
#print("Updating graph to {}, {}".format(show_properties, show_models))
set_(aesthetics, "node_aesthetics.Symbol.show_labels", show_properties)
set_(aesthetics, "node_aesthetics.Model.show_labels", show_models)
graph_data = graph_conversion(g, aesthetics=aesthetics)
#print(graph_data)
from uuid import uuid4
graph_component = html.Div(
id=str(uuid4()),
children=[GraphComponent(id=str(uuid4()), graph=graph_data,
options=AESTHETICS['global_options'])],
style={'width': '100%', 'height': '800px'})
return [graph_component]
# Do I really need to redo this code?
graph_data = graph_conversion(g)
graph_component = html.Div(
id='graph_component',
children=[GraphComponent(id='propnet-graph', graph=graph_data,
options=AESTHETICS['global_options'])],
style={'width': '100%', 'height': '800px'})
graph_layout = html.Div(
id='graph_top_level',
children=[
dcc.Checklist(id='graph_options',
options=[{'label': 'Show models',
'value': 'show_models'},
{'label': 'Show properties',
'value': 'show_properties'}],
values=['show_properties'],
labelStyle={'display': 'inline-block'}),
html.Div(id='graph_explorer',
children=[graph_component])])
layout_menu = html.Div(
children=[dcc.Link('Explore Graph', href='/graph'),
html.Span(' • '),
dcc.Link('All Symbols', href='/property'),
html.Span(' • '),
dcc.Link('All Models', href='/model'),
html.Span(' • '),
dcc.Link('Explore with Materials Project', href='/load_material'),
html.Span(' • '),
dcc.Link('Ashby Plots', href='/ashby'),
html.Span(' • '),
dcc.Link('Interact', href='/interactive'),
])
# home page
home_manifesto = """
**Under active development, pre-alpha.**
Real materials are complex. In the field of Materials Science,
we often rely on empirical relationships and rules-of-thumb to
provide insights into the behavior of materials. This project
is designed to codify our knowledge of these empirical models
and the relationships between them, along with providing tested,
unit-aware implementations of each model.
When given a set of known properties of a material, the knowledge
graph can help derive additional properties automatically.
Integration with the [Materials Project](https://materialsproject.org)
and other databases provides these sets of initial properties for
a given material, as well as information on the real world
distributions of these properties.
We also provide interfaces to machine-learned models. Machine
learning is **great**, and one day might replace our conventional
wisdom, but until then as scientists we still need to understand
how to use and interpret these machine-learned models.
Additionally, formally codifying our existing models will help
train further machine-learned models in the future.
"""
graph_log = """
```
Graph initialization log:
{}
```
""".format(log_stream.getvalue())
index = html.Div([html.Br(),
dcc.Markdown(home_manifesto),
dcc.Markdown(graph_log)])
# header
app.layout = html.Div(
children=[route,
html.Div([html.H3(app.title), layout_menu, html.Br()],
style={'textAlign': 'center'}),
html.Div(id='page-content'),
# hidden table to make sure table component loads
# (Dash limitation; may be removed in future)
html.Div(children=[dt.DataTable(rows=[{}]), graph_layout],
style={'display': 'none'})],
style={'marginLeft': 200, 'marginRight': 200, 'marginTop': 30})
# standard Dash css, fork this for a custom theme
# we real web devs now
app.css.append_css(
{'external_url': 'https://codepen.io/mkhorton/pen/zPgJJw.css'})
# app.css.append_css(
# {'external_url': 'https://codepen.io/montoyjh/pen/YjPKae.css'})
app.css.append_css(
{'external_url': 'https://codepen.io/mikesmith1611/pen/QOKgpG.css'})
@app.callback(Output('material-content', 'children'),
[Input('submit-query', 'n_clicks')],
[State('query-input', 'value'),
State('derive_options', 'values')])
def retrieve_material(n_clicks, query, derive_properties):
"""
Gets the material view from options
Args:
n_clicks (int): load material click
query (string): formula or mp-id to look up
derive_properties ([str]): list of derivation options
Returns:
Div of graph component with fulfilled options
"""
if n_clicks is None:
return ""
log.info("Fetching data from MP for query {}".format(query))
if query.startswith("mp-") or query.startswith("mvc-"):
mpid = query
else:
mpid = mpr.get_mpid_from_formula(query)
material = mpr.get_material_for_mpid(mpid)
if not material:
return "Material not found."
log.info("Retrieved material {} for formula {}".format(mpid, material['pretty_formula']))
log.debug("Adding material to graph.")
p = Graph()
material_quantity_names = [q.symbol.name for q in material.get_quantities()]
g = p.graph
if 'derive' in derive_properties:
log.info("Deriving quantities for {}".format(mpid))
material = p.evaluate(material)
if 'aggregate' in derive_properties:
log.debug("Aggregating quantities for material {}".format(mpid))
# TODO: get aggregated quantities should return a list
quantities = material.get_aggregated_quantities().items()
else:
quantities = [(q.symbol, q) for q in material.get_quantities()]
else:
quantities = [(q.symbol, q) for q in material.get_quantities()]
rows = []
for symbol, quantity in quantities:
rows.append(
{
'Symbol': symbol.display_names[0],
'Value': quantity.pretty_string(3),
# TODO: node.node_value.value? this has to make sense
# 'Units': str(node.node_value.symbol.unit_as_string)
}
)
table = dt.DataTable(
rows=rows,
row_selectable=True,
filterable=True,
sortable=True,
editable=False,
selected_row_indices=[],
id='datatable'
)
derived_quantity_names = set([symbol.name for symbol, quantity in quantities]) -\
set(material_quantity_names)
material_graph_data = graph_conversion(
g, nodes_to_highlight_green=material_quantity_names,
nodes_to_highlight_yellow=list(derived_quantity_names))
options = AESTHETICS['global_options']
options['edges']['color'] = '#000000'
material_graph_component = html.Div(GraphComponent(
id='material-graph',
graph=material_graph_data,
options=options
), style={'width': '100%', 'height': '400px'})
return html.Div([
html.H3('Graph'),
material_graph_component,
html.H3('Table'),
table
])
material_layout = html.Div([
dcc.Input(
placeholder='Enter a formula or mp-id...',
type='text',
value='',
id='query-input',
style={"width": "50%"}
),
html.Button('Load Material', id='submit-query'),
dcc.Checklist(
id='derive_options',
options=[
{'label': 'Derive properties', 'value': 'derive'},
{'label': 'Aggregate', 'value': 'aggregate'}
],
values=['derive', 'aggregate'],
labelStyle={'display': 'inline-block'}
),
html.Br(),
html.Br(),
html.Div(id='material-content')
])
ASHBY_LAYOUT = ashby_layout(app)
INTERACTIVE_LAYOUT = interactive_layout(app)
# routing, current routes defined are:
# / for home page
# /model for model summary
# /model/model_name for information on that model
# /property for property summary
# /property/property_name for information on that property
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
"""
Args:
pathname:
Returns:
"""
path_info = parse_path(pathname)
if path_info:
if path_info['mode'] == 'model':
if path_info['value']:
return model_layout(path_info['value'])
else:
return models_index
elif path_info['mode'] == 'property':
if path_info['value']:
property_name = path_info['value']
return symbol_layout(property_name)
else:
return symbols_index()
elif path_info['mode'] == 'load_material':
return material_layout
elif path_info['mode'] == 'graph':
return graph_layout
elif path_info['mode'] == 'ashby':
return ASHBY_LAYOUT
elif path_info['mode'] == 'interactive':
return INTERACTIVE_LAYOUT
else:
return '404'
else:
return index
if __name__ == '__main__':
app.run_server(debug=True)
|
from __future__ import annotations
import json
import re
from dataclasses import MISSING
from dataclasses import Field
from dataclasses import asdict
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union
T = TypeVar("T")
JsonValue = Union[dict, list, str, int, float, bool, Literal[None]]
class Encoder:
def __init__(self, obj_type: Type[T], encoder: Callable[[T], JsonValue]):
self.obj_type = obj_type
self.encoder = encoder
def __repr__(self) -> str:
return f"{self.__class__.__name__}(obj_type={self.obj_type.__name__})"
def check(self, obj: Any) -> bool:
"""Check if object can be encoded."""
return self.obj_type == obj.__class__
def encode(self, obj: Any) -> Dict[str, JsonValue]:
"""Encodes the python object into a JSON object."""
return {
"__type__": self.obj_type.__name__,
"value": self.encoder(obj),
}
class Decoder:
def __init__(self, obj_type: Type[T], decoder: Callable[[JsonValue], T]):
self.obj_type = obj_type
self.decoder = decoder
def __repr__(self) -> str:
return f"{self.__class__.__name__}(obj_type={self.obj_type.__name__})"
def check(self, obj: Dict[str, JsonValue]) -> bool:
"""Check if object can be decoded."""
return self.obj_type.__name__ == obj["__type__"]
def decode(self, obj: Dict[str, JsonValue]) -> Any:
"""Decodes the JSON object into a python object."""
return self.decoder(obj["value"])
class TableEncoder(Encoder):
def __init__(self):
super().__init__(Table, asdict)
def __hash__(self) -> int:
return hash(Table)
def check(self, obj: Any) -> bool:
"""Checks if object can be encoded."""
return isinstance(obj, Table)
def encode(self, obj: Any) -> Dict[str, JsonValue]:
"""Encodes the python object into a JSON object."""
return {
"__type__": obj.__class__.__name__,
"value": self.encoder(obj),
}
class TableDecoder(Decoder):
def __init__(self):
super().__init__(Table, lambda x: None)
def __hash__(self) -> int:
return hash(Table)
@staticmethod
def sub_types(_type: type) -> Dict[str, type]:
sub_types = {_type.__name__: _type}
for sub in _type.__subclasses__():
sub_types.update(TableDecoder.sub_types(sub))
return sub_types
def check(self, obj: Dict[str, JsonValue]) -> bool:
"""Check if object can be decoded."""
return obj["__type__"] in TableDecoder.sub_types(Table)
def decode(self, obj: Dict[str, JsonValue]) -> Any:
"""Decodes the JSON object into a python object."""
return TableDecoder.sub_types(Table)[obj["__type__"]](**obj["value"])
class Comparison:
def __init__(self, compare: Callable[[Table], bool]):
self.compare = compare
def __and__(self, other: Comparison):
return Comparison(lambda e: self.compare(e) and other.compare(e))
def __or__(self, other: Comparison):
return Comparison(lambda e: self.compare(e) or other.compare(e))
class TableField(Field):
# noinspection PyMissingConstructor
def __init__(self, field: Field):
for attr in Field.__slots__:
setattr(self, attr, getattr(field, attr))
@property
def get_default(self) -> Any:
if self.default_factory is MISSING:
return self.default
return self.default_factory()
def __repr__(self) -> str:
default = "MISSING" if self.get_default is MISSING else self.get_default
return f"TableAttr(name={self.name}, type={self.type}, default={default})"
def __lt__(self, other: Any):
return Comparison(lambda e: getattr(e, self.name) < other)
def __le__(self, other: Any):
return Comparison(lambda e: getattr(e, self.name) <= other)
def __eq__(self, other: Any):
try:
pattern = re.compile(other)
return Comparison(
lambda e: pattern.search(getattr(e, self.name)) is not None
)
except (re.error, TypeError):
return Comparison(lambda e: getattr(e, self.name) == other)
def __ne__(self, other: Any):
try:
pattern = re.compile(other)
return Comparison(lambda e: pattern.search(getattr(e, self.name)) is None)
except (re.error, TypeError):
return Comparison(lambda e: getattr(e, self.name) != other)
def __gt__(self, other: Any):
return Comparison(lambda e: getattr(e, self.name) > other)
def __ge__(self, other: Any):
return Comparison(lambda e: getattr(e, self.name) >= other)
class MetaTable(type):
def __getattribute__(self, name):
if name != "__dataclass_fields__" and name in (
field_dict := getattr(self, "__dataclass_fields__", {})
):
return field_dict.setdefault(f"{name}_attr", TableField(field_dict[name]))
return super().__getattribute__(name)
class Table(metaclass=MetaTable):
@property
def id(self):
return self._id
def __post_init__(self):
self._id = -1
def __eq__(self, other) -> bool:
if self.__class__ == other.__class__:
# Both objects are table entries
if self.id == -1 and other.id == -1:
# Both objects are not in database
return super().__eq__(other)
if self.id == other.id:
# Both objects refer to same table entry
return True
return False
TableType = TypeVar("TableType", bound=Table)
ComparisonType = Union[Comparison, bool]
class Database:
_file: Path = Path("data.json")
_data: Dict[str, Any] = None
_encoders: Dict[str, Encoder] = {}
_decoders: Dict[str, Decoder] = {}
@classmethod
def file(cls, new_file: Path = None) -> Path:
if new_file is not None:
cls._file = new_file
return cls._file
@classmethod
def register_encoder(cls, encoder: Encoder) -> None:
_type = encoder.obj_type.__name__
if _type in cls._encoders:
raise TypeError(f"Encoder for type '{_type}' already registered")
cls._encoders[_type] = encoder
@classmethod
def register_decoder(cls, decoder: Decoder) -> None:
_type = decoder.obj_type.__name__
if _type in cls._decoders:
raise TypeError(f"Decoder for type '{_type}' already registered")
cls._decoders[_type] = decoder
@classmethod
def _encode(cls, obj: Any) -> Dict[str, JsonValue]:
found = [e for e in cls._encoders.values() if e.check(obj)]
_len = len(found)
if _len > 1:
obj_type = obj.__class__.__name__
TypeError(f"Multiple encoders found for {obj_type}: {found}")
if _len == 1:
return found[0].encode(obj)
obj_type = obj.__class__.__name__
raise TypeError(f"Object of type {obj_type} is not JSON serializable")
@classmethod
def _decode(cls, obj: Dict[str, JsonValue]) -> Any:
if "__type__" in obj:
found = [d for d in cls._decoders.values() if d.check(obj)]
_len = len(found)
if _len > 1:
obj_type = obj["__type__"]
TypeError(f"Multiple decoders found for {obj_type}: {found}")
if _len == 1:
return found[0].decode(obj)
obj_type = obj["__type__"]
raise TypeError(f"Object of type {obj_type} is not JSON deserializable")
return obj
@classmethod
def last_commit(cls) -> datetime:
if cls._data is None:
cls.rollback()
last_commit = cls._data["last_commit"]
# may already be a datetime (set on rollback, or restored by the registered decoder)
return last_commit if isinstance(last_commit, datetime) else datetime.fromisoformat(last_commit)
@classmethod
def rollback(cls) -> None:
try:
cls._data = json.loads(cls._file.read_text(), object_hook=cls._decode)
except (FileNotFoundError, json.decoder.JSONDecodeError):
cls._data = {"last_commit": datetime.now()}
@classmethod
def commit(cls) -> bool:
if cls._data is None:
cls.rollback()
previous_commit = (
cls._data["last_commit"] if "last_commit" in cls._data else None
)
try:
cls._data["last_commit"] = datetime.now()
cls._file.write_text(json.dumps(cls._data, indent=2, default=cls._encode))
return True
except Exception as e:
cls._data["last_commit"] = (
datetime.now() if previous_commit is None else previous_commit
)
print(f"Commit failed: {e}")
return False
@classmethod
def get(
cls, table: Type[TableType], where: ComparisonType = None
) -> Tuple[TableType, ...]:
results: List[TableType] = []
for id, entry in cls._get(table, where):
entry._id = int(id)
results.append(entry)
return tuple(results)
@classmethod
def pop(
cls, table: Type[TableType], where: ComparisonType = None
) -> Tuple[TableType, ...]:
results: List[TableType] = []
for id, table_entry in cls._get(table, where):
# entries are dataclass instances, not mappings; rebuild detached copies
# (their _id resets to -1 via __post_init__)
results.append(table(**asdict(table_entry)))
return tuple(results)
@classmethod
def put(cls, table: Type[TableType], *entries: TableType):
if cls._data is None:
cls.rollback()
table_name = table.__name__.lower()
if table_name not in cls._data:
cls._data[table_name] = {}
table_data = cls._data[table_name]
for entry in entries:
if entry.id < 0:
id = 0
while str(id) in table_data:
id += 1
entry._id = id
table_data[str(entry.id)] = entry
@classmethod
def _get(
cls, table: Type[TableType], where: ComparisonType = None
) -> List[Tuple[str, TableType]]:
if cls._data is None:
cls.rollback()
table_name = table.__name__.lower()
if table_name not in cls._data:
return []
pre_check = where is None or (isinstance(where, bool) and where)
results: List[Tuple[str, TableType]] = []
for id, table_entry in cls._data[table_name].items():
if pre_check or where.compare(table_entry):
results.append((id, table_entry))
return results
for register, coder in {
(
Database.register_encoder,
Encoder(datetime, lambda datetime: datetime.isoformat()),
),
(
Database.register_decoder,
Decoder(datetime, lambda datetime_str: datetime.fromisoformat(datetime_str)),
),
(Database.register_encoder, TableEncoder()),
(Database.register_decoder, TableDecoder()),
}:
try:
register(coder)
except TypeError:
pass
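# A minimal usage sketch of the Database/Table API defined above (the `User`
# table and its field values are hypothetical, not part of this module):
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class User(Table):
#         name: str = ""
#         age: int = 0
#
#     Database.file(Path("users.json"))
#     Database.put(User, User(name="ada", age=36), User(name="bob", age=17))
#     Database.commit()
#     # Field access on the class yields a TableField, so comparisons build
#     # composable Comparison filters; string comparisons match as regex.
#     adults = Database.get(User, where=(User.age >= 18) & (User.name == "^a"))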
|
<filename>loom/crossvalidate.py
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from itertools import izip
import numpy
import numpy.random
from distributions.io.stream import (
protobuf_stream_load,
protobuf_stream_dump,
json_dump,
)
from loom.util import LOG
import loom.store
import loom.config
import loom.runner
import loom.generate
import loom.query
import parsable
parsable = parsable.Parsable()
def crossvalidate_one(
seed,
test_count,
train_count,
inputs,
results,
extra_passes,
debug):
LOG('running seed {}:'.format(seed))
results['train'] = os.path.join(
results['root'],
'train',
'diffs.pbs.gz')
results['test'] = os.path.join(results['root'], 'test', 'rows.pbs.gz')
results['scores'] = os.path.join(results['root'], 'scores.json.gz')
config = {
'seed': seed,
'schedule': {'extra_passes': extra_passes},
}
loom.config.config_dump(config, results['samples'][0]['config'])
numpy.random.seed(seed)
split = [True] * train_count + [False] * test_count
numpy.random.shuffle(split)
diffs_in = protobuf_stream_load(inputs['ingest']['diffs'])
protobuf_stream_dump(
(row for s, row in izip(split, diffs_in) if s),
results['train'])
rows_in = protobuf_stream_load(inputs['ingest']['rows'])
protobuf_stream_dump(
(row for s, row in izip(split, rows_in) if not s),
results['test'])
LOG(' shuffle')
loom.runner.shuffle(
rows_in=results['train'],
rows_out=results['samples'][0]['shuffled'],
seed=seed,
debug=debug)
LOG(' init')
loom.generate.generate_init(
encoding_in=inputs['ingest']['encoding'],
model_out=results['samples'][0]['init'],
seed=seed)
LOG(' infer')
loom.runner.infer(
config_in=results['samples'][0]['config'],
rows_in=results['samples'][0]['shuffled'],
tares_in=inputs['ingest']['tares'],
model_in=results['samples'][0]['init'],
model_out=results['samples'][0]['model'],
groups_out=results['samples'][0]['groups'],
debug=debug)
LOG(' query')
rows = loom.query.load_data_rows(results['test'])
loom.config.config_dump({}, results['query']['config'])
with loom.query.get_server(results['root'], debug=debug) as query:
scores = [query.score(row) for row in rows]
json_dump(scores, results['scores'])
LOG(' done\n')
return numpy.mean(scores)
@parsable.command
def crossvalidate(
name=None,
sample_count=10,
portion=0.9,
extra_passes=loom.config.DEFAULTS['schedule']['extra_passes'],
debug=False):
'''
Randomly split dataset; train models; score held-out data.
'''
assert 0 < portion and portion < 1, portion
assert sample_count > 0, sample_count
loom.store.require(name, [
'ingest.encoding',
'ingest.tares',
'ingest.diffs',
])
inputs = loom.store.get_paths(name)
row_count = sum(1 for _ in protobuf_stream_load(inputs['ingest']['diffs']))
assert row_count > 1, 'too few rows to crossvalidate: {}'.format(row_count)
train_count = max(1, min(row_count - 1, int(round(portion * row_count))))
test_count = row_count - train_count
assert 1 <= train_count and 1 <= test_count
mean_scores = []
for seed in xrange(sample_count):
results = loom.store.get_paths(
os.path.join(name, 'crossvalidate/{}'.format(seed)))
mean = crossvalidate_one(
seed,
test_count,
train_count,
inputs,
results,
extra_passes,
debug)
mean_scores.append(mean)
results = loom.store.get_paths(os.path.join(name, 'crossvalidate'))
results['scores'] = os.path.join(results['root'], 'scores.json.gz')
json_dump(mean_scores, results['scores'])
print 'score = {} +- {}'.format(
numpy.mean(mean_scores),
numpy.std(mean_scores))
if __name__ == '__main__':
parsable.dispatch()
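# Example invocation via parsable's CLI dispatch (a sketch; 'my_dataset' is a
# hypothetical name that must already be ingested into the loom store):
#   python crossvalidate.py crossvalidate my_dataset sample_count=10 portion=0.9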
|
################################################################################
# Imports
from django.test import TestCase
from ..helperFuncsForTesting import getInfoPost, setUpHelper
from .views import ERROR, FAIL, INCORRECT_CREDENTIALS, INCORRECT_FIELDS, STATUS, SUCCESS
################################################################################
# Vars
LOGIN = 'login'
################################################################################
# Tests
# Tenant Login Tests
class TenantLogin(TestCase):
def setUp(self):
setUpHelper()
def test_tenant_allCorrect(self):
'''Everything is correct'''
email = '<EMAIL>'
password = '<PASSWORD>'
data = {'email': email, 'password': password}
responseData = getInfoPost(LOGIN, data)
self.assertEqual(responseData.get(STATUS), SUCCESS)
tenant = responseData.get('tenant')
self.assertEqual(tenant.get('firstName'), 'Adam')
self.assertEqual(tenant.get('lastName'), 'Berard')
self.assertEqual(tenant.get('email'), '<EMAIL>')
self.assertEqual(tenant.get('password'), '<PASSWORD>')
tenProp = responseData.get('properties')[0]
self.assertEqual(tenProp.get('streetAddress'), '200 N. Santa Rosa')
self.assertEqual(tenProp.get('city'), 'San Luis Obispo')
self.assertEqual(tenProp.get('state'), 'CA')
self.assertEqual(tenProp.get('numBath'), 2)
self.assertEqual(tenProp.get('numBed'), 3)
self.assertEqual(tenProp.get('maxTenants'), 5)
self.assertEqual(tenProp.get('pm'), '<NAME>')
pm = tenant.get('pm')[0]
self.assertEqual(pm.get('firstName'), 'Eeron')
self.assertEqual(pm.get('lastName'), 'Grant')
self.assertEqual(pm.get('email'), '<EMAIL>')
def test_tenant_incorrectEmail(self):
'''Incorrect Email'''
data = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
responseData = getInfoPost(LOGIN, data)
self.assertEqual(responseData.get(STATUS), FAIL)
self.assertEqual(responseData.get(ERROR), INCORRECT_CREDENTIALS)
def test_tenant_incorrectPass(self):
'''Incorrect Pass'''
data = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
responseData = getInfoPost(LOGIN, data)
self.assertEqual(responseData.get(STATUS), FAIL)
self.assertEqual(responseData.get(ERROR), INCORRECT_CREDENTIALS)
def test_tenant_incorrectPassAndEmail(self):
'''Incorrect Pass & Email'''
data = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
responseData = getInfoPost(LOGIN, data)
self.assertEqual(responseData.get(STATUS), FAIL)
self.assertEqual(responseData.get(ERROR), INCORRECT_CREDENTIALS)
def test_tenant_incorrectEmailField(self):
'''No Email Field'''
data = {'gmail': '<EMAIL>', 'password': '<PASSWORD>'}
responseData = getInfoPost(LOGIN, data)
self.assertEqual(responseData.get(STATUS), FAIL)
self.assertEqual(responseData.get(ERROR), INCORRECT_FIELDS + ": email")
def test_tenant_incorrectPassField(self):
'''No Pass Field'''
data = {'email': '<EMAIL>', 'assword': '<PASSWORD>'}
responseData = getInfoPost(LOGIN, data)
self.assertEqual(responseData.get(STATUS), FAIL)
self.assertEqual(responseData.get(ERROR), INCORRECT_FIELDS + ": password")
def test_tenant_incorrectFields(self):
'''No Correct Fields'''
data = {'gmail': '<EMAIL>', 'assword': '<PASSWORD>'}
responseData = getInfoPost(LOGIN, data)
self.assertEqual(responseData.get(STATUS), FAIL)
self.assertEqual(responseData.get(ERROR), INCORRECT_FIELDS + ": email password")
|
<reponame>omad/datacube-experiments
import click
import os
import pathlib
import logging
from create_tiles import calc_output_filenames, create_tiles, list_tile_files
from ingester.utils import preserve_cwd
from netcdf_writer import append_to_netcdf, MultiVariableNetCDF, SingleVariableNetCDF
import eodatasets.drivers
import eodatasets.type
from eodatasets.serialise import read_yaml_metadata
_LOG = logging.getLogger(__name__)
CLICK_SETTINGS = dict(help_option_names=['-h', '--help'])
DEFAULT_TILE_OPTIONS = {
'output_format': 'GTiff',
'create_options': ['COMPRESS=DEFLATE', 'ZLEVEL=1']
}
def get_input_filenames(input_path, eodataset):
"""
Extracts absolute filenames from a DatasetMetadata object.
:type input_path: pathlib.Path
:type eodataset: eodatasets.type.DatasetMetadata
:return: list of filenames
"""
assert input_path.is_dir()
bands = sorted([band for band_num, band in eodataset.image.bands.items()], key=lambda band: band.number)
input_files = [input_path / band.path for band in bands]
return input_files
def is_yaml_file(path):
"""
Checks if this is a path to a yaml file
:type path: pathlib.Path
:rtype: boolean
"""
return path.is_file() and path.suffix == '.yaml'
def load_dataset(input_path):
"""
Loads a dataset metadata description
:param input_path:
:rtype: (pathlib.Path, eodataset.DatasetMetadata)
"""
input_path = pathlib.Path(input_path)
if is_yaml_file(input_path):
eodataset = read_yaml_metadata(input_path)
input_path = input_path.parent
elif input_path.is_dir():
eodriver = eodatasets.drivers.EODSDriver()
eodataset = eodatasets.type.DatasetMetadata()
eodriver.fill_metadata(eodataset, input_path)
else:
raise Exception("Unknown dataset type at: {}" % input_path)
return input_path, eodataset
def merge_tiles_to_netcdf(eodataset, filename_format, netcdf_class):
created_tiles = list_tile_files('test.csv')
tile_mappings = calc_output_filenames(created_tiles, filename_format, eodataset)
for geotiff, netcdf in tile_mappings:
append_to_netcdf(geotiff, netcdf, eodataset, netcdf_class=netcdf_class)
return [netcdf_path for _, netcdf_path in tile_mappings]
def setup_logging(verbosity):
"""
Sets up logging; defaults to WARN.
:param verbosity: 1 for INFO, 2 for DEBUG
:return:
"""
logging_level = logging.WARN - 10 * verbosity
logging.basicConfig(level=logging_level)
@preserve_cwd
def ingest(input_path, output_dir, filename_format, netcdf_class=MultiVariableNetCDF, tile=True, merge=True):
"""
Runs a series of steps on an input dataset: stack, split into tiles, and re-merge into NetCDF
:param input_path: str, pathname to a ga-metadata.yaml file or directory that eo-datasets can process
:param output_dir: str, pathname
:param filename_format: string format for output filenames, extracts fields from the input EO-Dataset
:param netcdf_class: either MultiVariableNetCDF or SingleVariableNetCDF
:param tile: boolean, whether to run the tiling step
:param merge: boolean, whether to run the merge-into-NetCDF step
:return: list of created/updated NetCDF storage units
"""
os.chdir(output_dir)
input_path, eodataset = load_dataset(input_path)
input_files = get_input_filenames(input_path, eodataset)
basename = eodataset.ga_label
if tile:
created_tiles = create_tiles(input_files, basename, DEFAULT_TILE_OPTIONS)
_LOG.info("Created tiles: {}".format(created_tiles))
# Import tiles into NetCDF files
if merge:
netcdf_paths = merge_tiles_to_netcdf(eodataset, filename_format, netcdf_class)
_LOG.info("Created/alterated storage units: {}".format(netcdf_paths))
return netcdf_paths
@click.command(help="Example output filename format: combined_{x}_{y}.nc", context_settings=CLICK_SETTINGS)
@click.option('--output-dir', '-o', default='.')
@click.option('--multi-variable', 'netcdf_class', flag_value=MultiVariableNetCDF, default=True)
@click.option('--single-variable', 'netcdf_class', flag_value=SingleVariableNetCDF)
@click.option('--tile/--no-tile', default=True, help="Allow partial processing")
@click.option('--merge/--no-merge', default=True, help="Allow partial processing")
@click.option('--verbose', '-v', count=True, help="Use multiple times for more verbosity")
@click.argument('input_path', type=click.Path(exists=True, readable=True))
@click.argument('filename-format')
def main(input_path, output_dir, filename_format, netcdf_class=MultiVariableNetCDF,
tile=True, merge=True, verbose=0):
"""
Runs ingest from the command line
"""
setup_logging(verbose)
ingest(input_path, output_dir, filename_format, netcdf_class, tile, merge)
if __name__ == '__main__':
try:
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
main()
except ImportError:
main()
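# Example invocation matching the click options above (a sketch; the input
# path is hypothetical, the filename format comes from the command help):
#   python ingest.py -o /tmp/tiles -v /data/scene/ga-metadata.yaml 'combined_{x}_{y}.nc'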
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import re
import six
from autobahn.wamp.types import SubscribeOptions
__all__ = ('Pattern',)
class Pattern(object):
"""
A WAMP URI Pattern.
.. todo::
* suffix matches
* args + kwargs
* uuid converter
* multiple URI patterns per decorated object
* classes: Pattern, EndpointPattern, ..
"""
URI_TARGET_ENDPOINT = 1
URI_TARGET_HANDLER = 2
URI_TARGET_EXCEPTION = 3
URI_TYPE_EXACT = 1
URI_TYPE_PREFIX = 2
URI_TYPE_WILDCARD = 3
_URI_COMPONENT = re.compile(r"^[a-z0-9][a-z0-9_\-]*$")
"""
Compiled regular expression for a WAMP URI component.
"""
_URI_NAMED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*)>$")
"""
Compiled regular expression for a named WAMP URI component.
.. note::
This pattern is stricter than a general WAMP URI component since a valid Python identifier is required.
"""
_URI_NAMED_CONVERTED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*):([a-z]*)>$")
"""
Compiled regular expression for a named and type-converted WAMP URI component.
.. note::
This pattern is stricter than a general WAMP URI component since a valid Python identifier is required.
"""
def __init__(self, uri, target):
"""
:param uri: The URI or URI pattern, e.g. ``"com.myapp.product.<product:int>.update"``.
:type uri: unicode
:param target: The target for this pattern: a procedure endpoint (a callable),
an event handler (a callable) or an exception (a class).
:type target: callable or obj
"""
assert(type(uri) == six.text_type)
assert(target in [Pattern.URI_TARGET_ENDPOINT,
Pattern.URI_TARGET_HANDLER,
Pattern.URI_TARGET_EXCEPTION])
components = uri.split('.')
pl = []
nc = {}
for i in range(len(components)):
component = components[i]
match = Pattern._URI_NAMED_CONVERTED_COMPONENT.match(component)
if match:
ctype = match.groups()[1]
if ctype not in ['string', 'int', 'suffix']:
raise Exception("invalid URI")
if ctype == 'suffix' and i != len(components) - 1:
raise Exception("invalid URI")
name = match.groups()[0]
if name in nc:
raise Exception("invalid URI")
if ctype in ['string', 'suffix']:
nc[name] = str
elif ctype == 'int':
nc[name] = int
else:
# should not arrive here
raise Exception("logic error")
pl.append("(?P<{0}>[a-z0-9_]+)".format(name))
continue
match = Pattern._URI_NAMED_COMPONENT.match(component)
if match:
name = match.groups()[0]
if name in nc:
raise Exception("invalid URI")
nc[name] = str
pl.append("(?P<{0}>[a-z][a-z0-9_]*)".format(name))
continue
match = Pattern._URI_COMPONENT.match(component)
if match:
pl.append(component)
continue
raise Exception("invalid URI")
if nc:
# URI pattern
self._type = Pattern.URI_TYPE_WILDCARD
p = "^" + "\.".join(pl) + "$"
self._pattern = re.compile(p)
self._names = nc
else:
# exact URI
self._type = Pattern.URI_TYPE_EXACT
self._pattern = None
self._names = None
self._uri = uri
self._target = target
def uri(self):
"""
Returns the original URI (pattern) for this pattern.
:returns: The URI (pattern), e.g. ``"com.myapp.product.<product:int>.update"``.
:rtype: unicode
"""
return self._uri
def subscribe_options(self):
if self._type == Pattern.URI_TYPE_WILDCARD:
return SubscribeOptions(match=u"wildcard")
else:
return SubscribeOptions(match=u"exact")
def match(self, uri):
"""
Match the given (fully qualified) URI according to this pattern
and return extracted args and kwargs.
:param uri: The URI to match, e.g. ``"com.myapp.product.123456.update"``.
:type uri: unicode
:returns: A tuple ``(args, kwargs)``
:rtype: tuple
"""
args = []
kwargs = {}
if self._type == Pattern.URI_TYPE_EXACT:
return args, kwargs
elif self._type == Pattern.URI_TYPE_WILDCARD:
match = self._pattern.match(uri)
if match:
for key in self._names:
val = match.group(key)
val = self._names[key](val)
kwargs[key] = val
return args, kwargs
else:
raise Exception("no match")
def is_endpoint(self):
"""
Check if this pattern is for a procedure endpoint.
:returns: ``True``, iff this pattern is for a procedure endpoint.
:rtype: bool
"""
return self._target == Pattern.URI_TARGET_ENDPOINT
def is_handler(self):
"""
Check if this pattern is for an event handler.
:returns: ``True``, iff this pattern is for an event handler.
:rtype: bool
"""
return self._target == Pattern.URI_TARGET_HANDLER
def is_exception(self):
"""
Check if this pattern is for an exception.
:returns: ``True``, iff this pattern is for an exception.
:rtype: bool
"""
return self._target == Pattern.URI_TARGET_EXCEPTION
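# A minimal sketch of matching a typed URI pattern, using the example URI
# from the docstrings above:
#
#     pattern = Pattern(u"com.myapp.product.<product:int>.update",
#                       Pattern.URI_TARGET_ENDPOINT)
#     args, kwargs = pattern.match(u"com.myapp.product.123456.update")
#     # kwargs == {'product': 123456}, converted to int by the declared type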
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# import sphinx_readable_theme
import datetime
import codecs
import os
import re
import shutil
project = 'libcopp'
copyright = '{0}, libcopp'.format(datetime.datetime.now().year)
author = 'owent'
# The full version, including alpha/beta/rc tags
release = '1.0.0'
CMAKELISTS_FILE_CONTENT = codecs.open(os.path.join(os.path.dirname(__file__), '..', '..', 'CMakeLists.txt'), "r", "utf-8").read()
LIBCOPP_VERSION = re.search('VERSION\\s*"?(?P<VERSION>[\\d\\.]+)', CMAKELISTS_FILE_CONTENT)
if LIBCOPP_VERSION is not None:
release = LIBCOPP_VERSION.group('VERSION')
# README_FILE_CONTENT = codecs.open(os.path.join(os.path.dirname(__file__), '..', '..', 'README.rst'), "r", "utf-8").read()
# README_FILE_STREAM = codecs.open(os.path.join(os.path.dirname(__file__), 'README.rst'), "w", "utf-8")
# README_FILE_STREAM.write(
# # README_FILE_CONTENT
# re.sub('docs[\\/\\\\]sphinx[\\/\\\\]', '', README_FILE_CONTENT)
# )
# README_FILE_STREAM.close()
for COPY_FILE_NAME in ['README.rst', 'CHANGELOG.md']:
SRC_FILE_PATH = os.path.join(os.path.dirname(__file__), '..', '..', COPY_FILE_NAME)
DST_FILE_PATH = os.path.join(os.path.dirname(__file__), COPY_FILE_NAME)
if os.path.exists(DST_FILE_PATH):
os.remove(DST_FILE_PATH)
shutil.copy2(SRC_FILE_PATH, DST_FILE_PATH, follow_symlinks=True)
# -- General configuration ---------------------------------------------------
# see http://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'recommonmark',
'breathe',
'sphinx_rtd_theme',
]
breathe_default_project = "libcopp"
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en_US'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
pygments_style = 'default'
highlight_options = {
'linenos': True,
'tabsize': 4
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'readable'
# html_theme_path = [sphinx_readable_theme.get_html_theme_path()] # [csp.get_theme_dir()]
html_theme = 'sphinx_rtd_theme'
html_logo = '_static/icon.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_css_files = ['css/highlight.css', 'css/custom.css']
html_js_files = ['js/setup_lineno.js']
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# custom styles
def setup(app):
app.add_config_value('recommonmark_config', {
'enable_auto_toc_tree': True,
'enable_math': True,
'enable_inline_math': True
}, True)
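# To build the HTML docs with this configuration (standard Sphinx usage, run
# from the directory containing this conf.py; assumes the breathe/doxygen
# inputs referenced above have already been generated):
#   sphinx-build -b html . _build/html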
|
<filename>mowgli/model/datasets.py
import pickle
import csv
import numpy as np
import tensorflow as tf
import tensorflow_text as text
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow_core.python.keras import backend as K
from tensorflow_core.python.keras import layers
from mowgli.utils import constants
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
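# A minimal sketch of attaching these metrics at compile time (illustrative;
# assumes one-hot/binary targets, and `model` is any tf.keras model such as
# the one returned by build_network below):
#
#     model.compile(optimizer='Adam',
#                   loss='categorical_crossentropy',
#                   metrics=['acc', f1_m, precision_m, recall_m])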
def load_dataset(dataset_path):
with open(dataset_path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=",")
labels = []
sentences = []
for line in reader:
labels.append(line[0])
sentences.append(",".join(line[1:]))
return np.array(labels).astype(int), sentences
def tokenize(dataset):
tokenizer = text.WhitespaceTokenizer()
return tokenizer.tokenize(dataset)
def persist_vectorizer(vectorizer):
pickle.dump(vectorizer, open(constants.VECTORIZER_PATH, 'wb'))
def encode_vectorize(dataset, vocabulary_count):
tokenizer = Tokenizer(num_words=vocabulary_count, oov_token="<OOH_TKN>")
# encoded_matrix = map(vectorizer.fit_transform, dataset)
# print('Encoded Matrix ', np.array(list(encoded_matrix)))
tokenizer.fit_on_texts(dataset)
return tokenizer.texts_to_sequences(dataset), tokenizer
def split(data):
length = int(len(data) * .8)
return data[0:length], data[length:]
def reformat_network_dataset(given_dataset, column_size):
label_arr = np.array(given_dataset[:, 0:1], np.int32)
result_arr = np.zeros([len(given_dataset), column_size])
for i, value in enumerate(label_arr):
result_arr[i][value[0]] = 1
return given_dataset[:, -1], result_arr
def build_network(train_x, train_y, test_x, test_y, epochs, total, max_length):
print("total words", total)
model = tf.keras.Sequential([
layers.Embedding(total + 1, 64, input_length=max_length),
layers.Dropout(.1),
layers.Flatten(),
layers.Dense(600, activation='relu'),
layers.Dense(300, activation='relu'),
layers.Dense(16, activation='softmax')
]
)
model.compile(optimizer='Adam', # Optimizer
# Loss function to minimize
loss="sparse_categorical_crossentropy"
,metrics=['acc']
)
model.summary()
print('# Fit model on training data')
print('validation sets', test_x.shape, test_y.shape)
# print('validation sets', test_x, test_y)
print('train sets', train_x.shape, train_y.shape)
history = model.fit(train_x, train_y,
batch_size=2,
epochs=epochs,
validation_data=(test_x, test_y)
)
print('\nhistory dict:', history.history)
return model
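# A minimal end-to-end sketch tying the helpers above together (the CSV path
# and vocabulary size are hypothetical; sequences must still be padded to a
# common length before training, which this module does not do):
#
#     labels, sentences = load_dataset('data/intents.csv')
#     sequences, tokenizer = encode_vectorize(sentences, vocabulary_count=5000)
#     persist_vectorizer(tokenizer)
#     train_x, test_x = split(sequences)
#     train_y, test_y = split(labels)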
|
<reponame>kstoreyf/TreeCorr<gh_stars>0
# Copyright (c) 2003-2015 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
"""
.. module:: nncorrelation
"""
import treecorr
import numpy
import gc
class NNCorrelation(treecorr.BinnedCorr2):
"""This class handles the calculation and storage of a 2-point count-count correlation
function. i.e. the regular density correlation function.
Objects of this class hold the following attributes:
:nbins: The number of bins in logr
:bin_size: The size of the bins in logr
:min_sep: The minimum separation being considered
:max_sep: The maximum separation being considered
In addition, the following attributes are numpy arrays of length (nbins):
:logr: The nominal center of the bin in log(r) (the natural logarithm of r).
:rnom: The nominal center of the bin converted to regular distance.
i.e. r = exp(logr).
:meanr: The (weighted) mean value of r for the pairs in each bin.
If there are no pairs in a bin, then exp(logr) will be used instead.
:meanlogr: The mean value of log(r) for the pairs in each bin.
If there are no pairs in a bin, then logr will be used instead.
:weight: The total weight in each bin.
:npairs: The number of pairs in each bin.
:tot: The total number of pairs processed, which is used to normalize
the randoms if they have a different number of pairs.
If `sep_units` are given (either in the config dict or as a named kwarg) then the distances
will all be in these units. Note however, that if you separate out the steps of the
:func:`process` command and use :func:`process_auto` and/or :func:`process_cross`, then the
units will not be applied to :meanr: or :meanlogr: until the :func:`finalize` function is
called.
The typical usage pattern is as follows:
>>> nn = treecorr.NNCorrelation(config)
>>> nn.process(cat) # For auto-correlation.
>>> nn.process(cat1,cat2) # For cross-correlation.
>>> rr.process... # Likewise for random-random correlations
>>> dr.process... # If desired, also do data-random correlations
>>> rd.process... # For cross-correlations, also do the reverse.
>>> nn.write(file_name,rr,dr,rd) # Write out to a file.
>>> xi,varxi = nn.calculateXi(rr,dr,rd) # Or get the correlation function directly.
:param config: A configuration dict that can be used to pass in kwargs if desired.
This dict is allowed to have additional entries beyond those listed
in :class:`~treecorr.BinnedCorr2`, which are ignored here. (default: None)
:param logger: If desired, a logger object for logging. (default: None, in which case
one will be built according to the config dict's verbose level.)
See the documentation for :class:`~treecorr.BinnedCorr2` for the list of other allowed kwargs,
which may be passed either directly or in the config dict.
"""
def __init__(self, config=None, logger=None, **kwargs):
treecorr.BinnedCorr2.__init__(self, config, logger, **kwargs)
self.meanr = numpy.zeros(self.nbins, dtype=float)
self.meanlogr = numpy.zeros(self.nbins, dtype=float)
self.weight = numpy.zeros(self.nbins, dtype=float)
self.npairs = numpy.zeros(self.nbins, dtype=float)
self.idxpairs1 = numpy.full(self.res_size, -1, dtype=long)
self.idxpairs2 = numpy.full(self.res_size, -1, dtype=long)
self.dists = numpy.full(self.res_size, -1, dtype=float)
self.tot = 0.
self._build_corr()
self.logger.debug('Finished building NNCorr')
def _build_corr(self):
from treecorr.util import double_ptr as dp
from treecorr.util import long_ptr as lp
self.corr = treecorr._lib.BuildNNCorr(
self._min_sep,self._max_sep,self.nbins,self.bin_size,self.b,
self.min_rpar, self.max_rpar, self.res_size,
dp(self.meanr),dp(self.meanlogr),dp(self.weight),dp(self.npairs),
lp(self.idxpairs1), lp(self.idxpairs2), dp(self.dists));
def __del__(self):
# Using memory allocated from the C layer means we have to explicitly deallocate it
# rather than being able to rely on the Python memory manager.
if hasattr(self,'corr'): # In case __init__ failed to get that far
treecorr._lib.DestroyNNCorr(self.corr)
def copy(self):
import copy
return copy.deepcopy(self)
def __getstate__(self):
d = self.__dict__.copy()
del d['corr']
del d['logger'] # Oh well. This is just lost in the copy. Can't be pickled.
return d
def __setstate__(self, d):
self.__dict__ = d
self._build_corr()
self.logger = treecorr.config.setup_logger(
treecorr.config.get(self.config,'verbose',int,0),
self.config.get('log_file',None))
def __repr__(self):
return 'NNCorrelation(config=%r)'%self.config
def process_auto(self, cat, metric=None, num_threads=None):
"""Process a single catalog, accumulating the auto-correlation.
This accumulates the auto-correlation for the given catalog. After
calling this function as often as desired, the finalize() command will
finish the calculation of meanr, meanlogr.
:param cat: The catalog to process
:param metric: Which metric to use. See :meth:`~treecorr.NNCorrelation.process` for
details. (default: 'Euclidean'; this value can also be given in the
constructor in the config dict.)
:param num_threads: How many OpenMP threads to use during the calculation.
(default: use the number of cpu cores; this value can also be given in
the constructor in the config dict.) Note that this won't work if the
system's C compiler is clang prior to version 3.7.
"""
if cat.name == '':
self.logger.info('Starting process NN auto-correlations')
else:
self.logger.info('Starting process NN auto-correlations for cat %s.', cat.name)
self._set_metric(metric, cat.coords)
self._set_num_threads(num_threads)
min_size, max_size = self._get_minmax_size()
field = cat.getNField(min_size,max_size,self.split_method,self.max_top)
self.logger.info('Starting %d jobs.',field.nTopLevelNodes)
treecorr._lib.ProcessAutoNN(self.corr, field.data, self.output_dots,
self._coords, self._metric)
npairs_tot = int(numpy.sum(self.npairs))
self.idxpairs1 = self.idxpairs1[:npairs_tot]
self.idxpairs2 = self.idxpairs2[:npairs_tot]
self.dists = self.dists[:npairs_tot]
self.tot += 0.5 * cat.sumw**2
def process_cross(self, cat1, cat2, metric=None, num_threads=None):
"""Process a single pair of catalogs, accumulating the cross-correlation.
This accumulates the cross-correlation for the given catalogs. After
calling this function as often as desired, the finalize() command will
finish the calculation of meanr, meanlogr.
:param cat1: The first catalog to process
:param cat2: The second catalog to process
:param metric: Which metric to use. See :meth:`~treecorr.NNCorrelation.process` for
details. (default: 'Euclidean'; this value can also be given in the
constructor in the config dict.)
:param num_threads: How many OpenMP threads to use during the calculation.
(default: use the number of cpu cores; this value can also be given in
the constructor in the config dict.) Note that this won't work if the
system's C compiler is clang prior to version 3.7.
"""
if cat1.name == '' and cat2.name == '':
self.logger.info('Starting process NN cross-correlations')
else:
self.logger.info('Starting process NN cross-correlations for cats %s, %s.',
cat1.name, cat2.name)
self._set_metric(metric, cat1.coords, cat2.coords)
self._set_num_threads(num_threads)
min_size, max_size = self._get_minmax_size()
f1 = cat1.getNField(min_size,max_size,self.split_method,self.max_top)
f2 = cat2.getNField(min_size,max_size,self.split_method,self.max_top)
self.logger.info('Starting %d jobs.',f1.nTopLevelNodes)
treecorr._lib.ProcessCrossNN(self.corr, f1.data, f2.data, self.output_dots,
self._coords, self._metric)
npairs_tot = int(numpy.sum(self.npairs))
self.idxpairs1 = self.idxpairs1[:npairs_tot]
self.idxpairs2 = self.idxpairs2[:npairs_tot]
self.dists = self.dists[:npairs_tot]
self.tot += cat1.sumw*cat2.sumw
def process_pairwise(self, cat1, cat2, metric=None, num_threads=None):
"""Process a single pair of catalogs, accumulating the cross-correlation, only using
the corresponding pairs of objects in each catalog.
This accumulates the sums into the bins, but does not finalize the calculation.
After calling this function as often as desired, the finalize() command will
finish the calculation.
:param cat1: The first catalog to process
:param cat2: The second catalog to process
:param metric: Which metric to use. See :meth:`~treecorr.NNCorrelation.process` for
details. (default: 'Euclidean'; this value can also be given in the
constructor in the config dict.)
:param num_threads: How many OpenMP threads to use during the calculation.
(default: use the number of cpu cores; this value can also be given in
the constructor in the config dict.) Note that this won't work if the
system's C compiler is clang prior to version 3.7.
"""
if cat1.name == '' and cat2.name == '':
self.logger.info('Starting process NN pairwise-correlations')
else:
self.logger.info('Starting process NN pairwise-correlations for cats %s, %s.',
cat1.name, cat2.name)
self._set_metric(metric, cat1.coords, cat2.coords)
self._set_num_threads(num_threads)
f1 = cat1.getNSimpleField()
f2 = cat2.getNSimpleField()
treecorr._lib.ProcessPairNN(self.corr, f1.data, f2.data, self.output_dots,
self._coords, self._metric)
npairs_tot = int(numpy.sum(self.npairs))
self.idxpairs1 = self.idxpairs1[:npairs_tot]
self.idxpairs2 = self.idxpairs2[:npairs_tot]
self.dists = self.dists[:npairs_tot]
self.tot += cat1.weight
def finalize(self):
"""Finalize the calculation of the correlation function.
The process_auto and process_cross commands accumulate values in each bin,
so they can be called multiple times if appropriate. Afterwards, this command
finishes the calculation of meanr, meanlogr by dividing by the total weight.
"""
mask1 = self.weight != 0
mask2 = self.weight == 0
self.meanr[mask1] /= self.weight[mask1]
self.meanlogr[mask1] /= self.weight[mask1]
# Update the units of meanr, meanlogr
self._apply_units(mask1)
# Use meanr, meanlogr when available, but set to nominal when no pairs in bin.
self.meanr[mask2] = self.rnom[mask2]
self.meanlogr[mask2] = self.logr[mask2]
def clear(self):
"""Clear the data vectors
"""
self.meanr[:] = 0.
self.meanlogr[:] = 0.
self.weight[:] = 0.
self.npairs[:] = 0.
self.idxpairs1[:] = -1
self.idxpairs2[:] = -1
self.dists[:] = -1.
self.tot = 0.
def __iadd__(self, other):
"""Add a second NNCorrelation's data to this one.
Note: For this to make sense, both Correlation objects should have been using
process_auto and/or process_cross, and they should not have had finalize called yet.
Then, after adding them together, you should call finalize on the sum.
"""
if not isinstance(other, NNCorrelation):
raise AttributeError("Can only add another NNCorrelation object")
if not (self.nbins == other.nbins and
self.min_sep == other.min_sep and
self.max_sep == other.max_sep):
raise ValueError("NNCorrelation to be added is not compatible with this one.")
self.meanr[:] += other.meanr[:]
self.meanlogr[:] += other.meanlogr[:]
self.weight[:] += other.weight[:]
self.npairs[:] += other.npairs[:]
self.idxpairs1[:] += other.idxpairs1[:]
self.idxpairs2[:] += other.idxpairs2[:]
self.dists[:] += other.dists[:]
self.tot += other.tot
return self
def process(self, cat1, cat2=None, metric=None, num_threads=None):
"""Compute the correlation function.
If only 1 argument is given, then compute an auto-correlation function.
If 2 arguments are given, then compute a cross-correlation function.
Both arguments may be lists, in which case all items in the list are used
for that element of the correlation.
:param cat1: A catalog or list of catalogs for the first N field.
:param cat2: A catalog or list of catalogs for the second N field, if any.
(default: None)
:param metric: Which metric to use for distance measurements. Options are:
- 'Euclidean' = straight line Euclidean distance between two points.
For spherical coordinates (ra,dec without r), this is the chord
distance between points on the unit sphere.
- 'Rperp' = the perpendicular component of the distance. For two points
with distance from Earth `r1, r2`, if `d` is the normal Euclidean
distance and :math:`Rparallel = |r1-r2|`, then we define
:math:`Rperp^2 = d^2 - Rparallel^2`.
- 'Rlens' = the projected distance perpendicular to the first point
in the pair (taken to be a lens) to the line of sight to the second
point (e.g. a lensed source galaxy).
- 'Arc' = the true great circle distance for spherical coordinates.
(default: 'Euclidean'; this value can also be given in the constructor
in the config dict.)
:param num_threads: How many OpenMP threads to use during the calculation.
(default: use the number of cpu cores; this value can also be given in
the constructor in the config dict.) Note that this won't work if the
system's C compiler is clang prior to version 3.7.
"""
self.clear()
if not isinstance(cat1,list): cat1 = [cat1]
if cat2 is not None and not isinstance(cat2,list): cat2 = [cat2]
if len(cat1) == 0:
raise ValueError("No catalogs provided for cat1")
if cat2 is None or len(cat2) == 0:
self._process_all_auto(cat1,metric,num_threads)
else:
self._process_all_cross(cat1,cat2,metric,num_threads)
        self.finalize()
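    # Example usage (a sketch; assumes `data` and `rand` are treecorr.Catalog
    # objects, which are assumptions here, not part of this module):
    #     dd = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=20)
    #     dd.process(data)          # auto-correlation of the data
    #     rr = treecorr.NNCorrelation(min_sep=1., max_sep=100., nbins=20)
    #     rr.process(rand)          # auto-correlation of the randoms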
def calculateXi(self, rr, dr=None, rd=None):
"""Calculate the correlation function given another correlation function of random
points using the same mask, and possibly cross correlations of the data and random.
The rr value is the NNCorrelation function for random points.
        For a signal that involves a cross-correlation, there should be two random
cross-correlations: data-random and random-data, given as dr and rd.
- If dr is None, the simple correlation function :math:`\\xi = (DD/RR - 1)` is used.
        - If dr is given and rd is None, then :math:`\\xi = (DD - 2DR + RR)/RR` is used.
- If dr and rd are both given, then :math:`\\xi = (DD - DR - RD + RR)/RR` is used.
where DD is the data NN correlation function, which is the current object.
:param rr: An NNCorrelation object for the random-random pairs.
:param dr: An NNCorrelation object for the data-random pairs, if desired, in which
case the Landy-Szalay estimator will be calculated. (default: None)
:param rd: An NNCorrelation object for the random-data pairs, if desired and
                        different from dr. (default: None, which means use rd=dr)
:returns: (xi, varxi) as a tuple
"""
# Each random weight value needs to be rescaled by the ratio of total possible pairs.
if rr.tot == 0:
raise RuntimeError("rr has tot=0.")
rrw = self.tot / rr.tot
if dr is None:
if rd is None:
xi = (self.weight - rr.weight * rrw)
else:
if rd.tot == 0:
raise RuntimeError("rd has tot=0.")
rdw = self.tot / rd.tot
xi = (self.weight - 2.*rd.weight * rdw + rr.weight * rrw)
else:
if dr.tot == 0:
raise RuntimeError("dr has tot=0.")
drw = self.tot / dr.tot
if rd is None:
xi = (self.weight - 2.*dr.weight * drw + rr.weight * rrw)
else:
if rd.tot == 0:
raise RuntimeError("rd has tot=0.")
rdw = self.tot / rd.tot
xi = (self.weight - rd.weight * rdw - dr.weight * drw + rr.weight * rrw)
if numpy.any(rr.weight == 0):
self.logger.warn("Warning: Some bins for the randoms had no pairs.")
self.logger.warn(" Probably max_sep is larger than your field.")
mask1 = rr.weight != 0
mask2 = rr.weight == 0
xi[mask1] /= (rr.weight[mask1] * rrw)
xi[mask2] = 0
varxi = numpy.zeros_like(rr.weight)
varxi[mask1] = 1./ (rr.weight[mask1] * rrw)
return xi, varxi
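    # Example (a sketch, continuing from the process() example above; dr is an
    # optional data-random NNCorrelation):
    #     xi, varxi = dd.calculateXi(rr)           # simple (DD/RR - 1) estimator
    #     xi, varxi = dd.calculateXi(rr, dr=dr)    # Landy-Szalay estimator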
def write(self, file_name, rr=None, dr=None, rd=None, file_type=None, prec=None):
"""Write the correlation function to the file, file_name.
rr is the NNCorrelation function for random points.
If dr is None, the simple correlation function :math:`\\xi = (DD - RR)/RR` is used.
        If dr is given and rd is None, then :math:`\\xi = (DD - 2DR + RR)/RR` is used.
If dr and rd are both given, then :math:`\\xi = (DD - DR - RD + RR)/RR` is used.
Normally, at least rr should be provided, but if this is also None, then only the
basic accumulated number of pairs are output (along with the separation columns).
The output file will include the following columns:
:R_nom: The nominal center of the bin in R.
:meanR: The mean value :math:`\\langle R\\rangle` of pairs that fell into each bin.
:meanlogR: The mean value :math:`\\langle logR\\rangle` of pairs that fell into each
bin.
Then if rr is None:
:DD: The total weight of pairs in each bin.
:npairs: The total number of pairs in each bin.
If rr is given, but not the cross-correlations:
:xi: The estimator :math:`\\xi = (DD-RR)/RR`.
:sigma_xi: The sqrt of the variance estimate of :math:`\\xi`.
:DD: The total weight of data pairs (aka DD) in each bin.
:RR: The total weight of random pairs (aka RR) in each bin.
        :npairs:    The number of pairs contributing to each bin.
If one of dr or rd is given:
:xi: The estimator :math:`\\xi = (DD-2DR+RR)/RR`.
:sigma_xi: The sqrt of the variance estimate of :math:`\\xi`.
:DD: The total weight of DD pairs in each bin.
:RR: The total weight of RR pairs in each bin.
:DR: The total weight of DR pairs in each bin.
        :npairs:    The number of pairs contributing to each bin.
If both dr and rd are given:
:xi: The estimator :math:`\\xi = (DD-DR-RD+RR)/RR`.
:sigma_xi: The sqrt of the variance estimate of :math:`\\xi`.
:DD: The total weight of DD pairs in each bin.
:RR: The total weight of RR pairs in each bin.
:DR: The total weight of DR pairs in each bin.
:RD: The total weight of RD pairs in each bin.
        :npairs:    The number of pairs contributing to each bin.
If `sep_units` was given at construction, then the distances will all be in these units.
Otherwise, they will be in either the same units as x,y,z (for flat or 3d coordinates) or
radians (for spherical coordinates).
:param file_name: The name of the file to write to.
:param rr: An NNCorrelation object for the random-random pairs. (default: None,
in which case, no xi or varxi columns will be output)
:param dr: An NNCorrelation object for the data-random pairs, if desired, in which
case the Landy-Szalay estimator will be calculated. (default: None)
:param rd: An NNCorrelation object for the random-data pairs, if desired and
                        different from dr. (default: None, which means use rd=dr)
:param file_type: The type of file to write ('ASCII' or 'FITS'). (default: determine
the type automatically from the extension of file_name.)
:param prec: For ASCII output catalogs, the desired precision. (default: 4;
this value can also be given in the constructor in the config dict.)
"""
self.logger.info('Writing NN correlations to %s',file_name)
col_names = [ 'R_nom','meanR','meanlogR' ]
columns = [ self.rnom, self.meanr, self.meanlogr ]
if rr is None:
col_names += [ 'DD', 'npairs' ]
columns += [ self.weight, self.npairs ]
if dr is not None:
raise AttributeError("rr must be provided if dr is not None")
if rd is not None:
raise AttributeError("rr must be provided if rd is not None")
else:
xi, varxi = self.calculateXi(rr,dr,rd)
col_names += [ 'xi','sigma_xi','DD','RR' ]
columns += [ xi, numpy.sqrt(varxi),
self.weight, rr.weight * (self.tot/rr.tot) ]
if dr is not None and rd is not None:
col_names += ['DR','RD']
columns += [ dr.weight * (self.tot/dr.tot), rd.weight * (self.tot/rd.tot) ]
elif dr is not None or rd is not None:
if dr is None: dr = rd
col_names += ['DR']
columns += [ dr.weight * (self.tot/dr.tot) ]
col_names += [ 'npairs' ]
columns += [ self.npairs ]
if prec is None:
prec = self.config.get('precision', 4)
treecorr.util.gen_write(
file_name, col_names, columns, prec=prec, file_type=file_type, logger=self.logger)
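    # Example (a sketch; writes the Landy-Szalay columns when dr is given):
    #     dd.write('nn.out', rr=rr, dr=dr)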
def read(self, file_name, file_type=None):
"""Read in values from a file.
This should be a file that was written by TreeCorr, preferably a FITS file, so there
is no loss of information.
Warning: The NNCorrelation object should be constructed with the same configuration
parameters as the one being read. e.g. the same min_sep, max_sep, etc. This is not
checked by the read function.
:param file_name: The name of the file to read in.
:param file_type: The type of file ('ASCII' or 'FITS'). (default: determine the type
automatically from the extension of file_name.)
"""
self.logger.info('Reading NN correlations from %s',file_name)
data = treecorr.util.gen_read(file_name, file_type=file_type)
self.rnom = data['R_nom']
self.logr = numpy.log(self.rnom)
self.meanr = data['meanR']
self.meanlogr = data['meanlogR']
self.weight = data['DD']
self.npairs = data['npairs']
self.idxpairs1 = data['idxpairs1']
self.idxpairs2 = data['idxpairs2']
self.dists = data['dists']
self._build_corr()
def calculateNapSq(self, rr, dr=None, rd=None, m2_uform=None):
"""Calculate the correlary to the aperture mass statistics for counts.
This is used by NGCorrelation.writeNorm. See that function and also
GGCorrelation.calculateMapSq() for more details.
:param rr: An NNCorrelation object for the random-random pairs.
:param dr: An NNCorrelation object for the data-random pairs, if desired, in which
case the Landy-Szalay estimator will be calculated. (default: None)
:param rd: An NNCorrelation object for the random-data pairs, if desired and
                          different from dr. (default: None, which means use rd=dr)
:param m2_uform: Which form to use for the aperture mass. (default: 'Crittenden';
this value can also be given in the constructor in the config dict.)
:returns: (nsq, varnsq)
"""
if m2_uform is None:
m2_uform = treecorr.config.get(self.config,'m2_uform',str,'Crittenden')
if m2_uform not in ['Crittenden', 'Schneider']:
raise ValueError("Invalid m2_uform")
# Make s a matrix, so we can eventually do the integral by doing a matrix product.
r = self.rnom
s = numpy.outer(1./r, self.meanr)
ssq = s*s
if m2_uform == 'Crittenden':
exp_factor = numpy.exp(-ssq/4.)
Tp = (32. + ssq*(-16. + ssq)) / 128. * exp_factor
else:
Tp = numpy.zeros_like(s)
sa = s[s<2.]
ssqa = ssq[s<2.]
Tp[s<2.] = 12./(5.*numpy.pi) * (2.-15.*ssqa) * numpy.arccos(sa/2.)
Tp[s<2.] += 1./(100.*numpy.pi) * sa * numpy.sqrt(4.-ssqa) * (
120. + ssqa*(2320. + ssqa*(-754. + ssqa*(132. - 9.*ssqa))))
Tp *= ssq
xi, varxi = self.calculateXi(rr,dr,rd)
# Now do the integral by taking the matrix products.
# Note that dlogr = bin_size
Tpxi = Tp.dot(xi)
nsq = Tpxi * self.bin_size
varnsq = (Tp**2).dot(varxi) * self.bin_size**2
return nsq, varnsq
|
<reponame>zynga/jasy<gh_stars>10-100
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
import os
import jasy.core.Console as Console
from jasy.core.Permutation import getPermutation
from jasy.item.Class import ClassError
from jasy.js.Resolver import Resolver
from jasy.js.Sorter import Sorter
from jasy.js.parse.Parser import parse
from jasy.js.output.Compressor import Compressor
from jasy import UserError
from jasy.js.output.Optimization import Optimization
from jasy.js.output.Formatting import Formatting
from jasy.core.FileManager import FileManager
compressor = Compressor()
packCache = {}
def packCode(code):
"""Packs the given code by passing it to the compression engine"""
if code in packCache:
return packCache[code]
packed = compressor.compress(parse(code))
packCache[code] = packed
return packed
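# Example (a sketch): repeated calls with identical source hit the cache, so
# the second call returns the already-compressed string without reparsing.
#     packed = packCode("var answer = 42;")
#     assert packCode("var answer = 42;") is packed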
class OutputManager:
def __init__(self, session, assetManager=None, compressionLevel=1, formattingLevel=0):
Console.info("Initializing OutputManager...")
Console.indent()
Console.info("Formatting Level: %s", formattingLevel)
Console.info("Compression Level: %s", compressionLevel)
self.__session = session
self.__assetManager = assetManager
self.__fileManager = FileManager(session)
self.__scriptOptimization = Optimization()
self.__compressGeneratedCode = False
self.__kernelClasses = []
if compressionLevel > 0:
self.__scriptOptimization.enable("variables")
self.__scriptOptimization.enable("declarations")
self.__compressGeneratedCode = True
if compressionLevel > 1:
self.__scriptOptimization.enable("blocks")
self.__scriptOptimization.enable("privates")
self.__scriptFormatting = Formatting()
if formattingLevel > 0:
self.__scriptFormatting.enable("semicolon")
self.__scriptFormatting.enable("comma")
Console.outdent()
def deployAssets(self, classes, assetFolder=None):
"""
Deploys assets for the given classes and all their dependencies
:param classes: List of classes to deploy assets for
:type classes: list
:param assetFolder: Destination folder of assets (defaults to $prefix/asset)
:type assetFolder: string
"""
Console.info("Deploying assets...")
Console.indent()
resolver = Resolver(self.__session)
for className in classes:
resolver.addClassName(className)
self.__assetManager.deploy(resolver.getIncludedClasses(), assetFolder=assetFolder)
Console.outdent()
def storeKernel(self, fileName, classes=None, debug=False):
"""
Writes a so-called kernel script to the given location. This script contains
data about possible permutations based on current session values. It optionally
might include asset data (useful when boot phase requires some assets) and
localization data (if only one locale is built).
Optimization of the script is auto-enabled when no other information is given.
        The classes included by the kernel are remembered internally so that they
        can be excluded from the other generated output files (see storeLoader
        and storeCompressed).
"""
Console.info("Storing kernel...")
Console.indent()
# Use a new permutation based on debug settings and statically configured fields
self.__session.setStaticPermutation(debug=debug)
# Build resolver
# We need the permutation here because the field configuration might rely on detection classes
resolver = Resolver(self.__session)
detectionClasses = self.__session.getFieldDetectionClasses()
for className in detectionClasses:
resolver.addClassName(className)
# Jasy client side classes to hold data
resolver.addClassName("jasy.Env")
resolver.addClassName("jasy.Asset")
resolver.addClassName("jasy.Translate")
# Allow kernel level mass loading of scripts (required for source, useful for build)
resolver.addClassName("core.io.Script")
resolver.addClassName("core.io.Queue")
if classes:
for className in classes:
resolver.addClassName(className)
# Generate boot code
bootCode = "jasy.Env.setFields(%s);" % self.__session.exportFields()
if self.__compressGeneratedCode:
bootCode = packCode(bootCode)
# Sort resulting class list
sortedClasses = resolver.getSortedClasses()
self.storeCompressed(sortedClasses, fileName, bootCode)
# Remember classes for filtering in storeLoader/storeCompressed
self.__kernelClasses = set(sortedClasses)
# Reset static permutation
self.__session.resetCurrentPermutation()
Console.outdent()
def storeCompressed(self, classes, fileName, bootCode=None):
"""
Combines the compressed result of the stored class list
:param classes: List of sorted classes to compress
:type classes: list
:param fileName: Filename to write result to
:type fileName: string
:param bootCode: Code to execute once all the classes are loaded
:type bootCode: string
"""
if self.__kernelClasses:
filtered = [ classObj for classObj in classes if not classObj in self.__kernelClasses ]
else:
filtered = classes
Console.info("Compressing %s classes...", len(filtered))
Console.indent()
result = []
if self.__assetManager:
assetData = self.__assetManager.export(filtered)
if assetData:
assetCode = "jasy.Asset.addData(%s);" % assetData
if self.__compressGeneratedCode:
result.append(packCode(assetCode))
else:
result.append(assetCode)
permutation = self.__session.getCurrentPermutation()
try:
for classObj in filtered:
result.append(classObj.getCompressed(permutation,
self.__session.getCurrentTranslationBundle(), self.__scriptOptimization, self.__scriptFormatting))
except ClassError as error:
raise UserError("Error during class compression! %s" % error)
Console.outdent()
if bootCode:
bootCode = "(function(){%s})();" % bootCode
if self.__compressGeneratedCode:
result.append(packCode(bootCode))
else:
result.append(bootCode)
if self.__compressGeneratedCode:
compressedCode = "".join(result)
else:
compressedCode = "\n\n".join(result)
self.__fileManager.writeFile(fileName, compressedCode)
def storeLoader(self, classes, fileName, bootCode="", urlPrefix=""):
"""
Generates a source loader which is basically a file which loads the original JavaScript files.
This is super useful during development of a project as it supports pretty fast workflows
where most often a simple reload in the browser is enough to get the newest sources.
:param classes: List of sorted classes to compress
:type classes: list
:param fileName: Filename to write result to
:type fileName: string
:param bootCode: Code to execute once all classes have been loaded
:type bootCode: string
:param urlPrefix: Prepends the given URL prefix to all class URLs to load
:type urlPrefix: string
"""
if self.__kernelClasses:
filtered = [ classObj for classObj in classes if not classObj in self.__kernelClasses ]
else:
filtered = classes
Console.info("Generating loader for %s classes...", len(classes))
Console.indent()
main = self.__session.getMain()
files = []
for classObj in filtered:
path = classObj.getPath()
# Support for multi path classes
# (typically in projects with custom layout/structure e.g. 3rd party)
if type(path) is list:
for singleFileName in path:
files.append(main.toRelativeUrl(singleFileName, urlPrefix))
else:
files.append(main.toRelativeUrl(path, urlPrefix))
result = []
Console.outdent()
if self.__assetManager:
assetData = self.__assetManager.export(filtered)
if assetData:
assetCode = "jasy.Asset.addData(%s);" % assetData
if self.__compressGeneratedCode:
result.append(packCode(assetCode))
else:
result.append(assetCode)
translationBundle = self.__session.getCurrentTranslationBundle()
if translationBundle:
translationData = translationBundle.export(filtered)
if translationData:
translationCode = 'jasy.Translate.addData(%s);' % translationData
if self.__compressGeneratedCode:
result.append(packCode(translationCode))
else:
result.append(translationCode)
if self.__compressGeneratedCode:
loaderList = '"%s"' % '","'.join(files)
else:
loaderList = '"%s"' % '",\n"'.join(files)
wrappedBootCode = "function(){ %s }" % bootCode if bootCode else "null"
loaderCode = 'core.io.Queue.load([%s], %s, null, true);' % (loaderList, wrappedBootCode)
if self.__compressGeneratedCode:
result.append(packCode(loaderCode))
else:
result.append(loaderCode)
if self.__compressGeneratedCode:
loaderCode = "".join(result)
else:
loaderCode = "\n\n".join(result)
self.__fileManager.writeFile(fileName, loaderCode)
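# Typical usage (a sketch; `session`, `assetManager` and `sortedClasses` come
# from the wider Jasy build script and are assumptions here):
#     output = OutputManager(session, assetManager, compressionLevel=2)
#     output.storeKernel("build/kernel.js")
#     output.storeCompressed(sortedClasses, "build/app.js", bootCode="app.main();")
#     output.storeLoader(sortedClasses, "source/loader.js")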
|
<filename>Data.py
import csv
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
with open(r'C:\Users\j_ney\Python\Python Project Medical Insurance Analysis\insurance.csv', 'r') as insurance_data_csv:
insurance_data = csv.reader(insurance_data_csv, delimiter = ' ')
insurance_data_list = []
for row in insurance_data:
insurance_data_list.append(', '.join(row))
insurance_data_list.pop(0) #removing the headers element
insurance_data_sublist = []
for data_list in insurance_data_list:
insurance_data_sublist.append(data_list.split(',')) #rows of data in sublist
#print(insurance_data_sublist)
len_insured_people = len(insurance_data_sublist) #how many insured people in the data
#print(len_insured_people)
#Make lists of age, sex, bmi, no_of_children, smoker, region, charges and then also put in dictionary form:
#List of Age
insurance_age = [int(age_data[0]) for age_data in insurance_data_sublist]
#print(insurance_age)
#List of Sex
insurance_sex = [sex_data[1] for sex_data in insurance_data_sublist]
#print(insurance_sex)
#List of Bmi
insurance_bmi = [float(bmi_data[2]) for bmi_data in insurance_data_sublist]
#print(insurance_bmi)
#List of No_of_children
insurance_children = [int(children_data[3]) for children_data in insurance_data_sublist]
#print(insurance_children)
#List of Smoker
insurance_smoker = [smoker_data[4] for smoker_data in insurance_data_sublist]
#print(insurance_smoker)
#List of Region
insurance_region = [region_data[5] for region_data in insurance_data_sublist]
#print(insurance_region)
#List of Charges
insurance_charges = [charges_data[-1] for charges_data in insurance_data_sublist]
#print(insurance_charges)
#Age Questions
class Age:
def __init__(self, age_list):
self.age_list = age_list
def find_average(self): #Method to find the average age with insurance
average_age = int(sum(self.age_list)/ len(self.age_list))
return average_age
    def count_per_age(self): #Method returning a dictionary of age -> number of insured people at that age
count_age = dict((age, self.age_list.count(age)) for age in self.age_list)
return count_age
def age_most_insurance(self): #Method to find the age with the most insurance
max_count = max(self.count_per_age().values())
for max_key, max_value in self.count_per_age().items():
if max_count == max_value:
return max_key, max_value
def age_least_insurance(self): #Method to find the age with the least insurance
min_count = min(self.count_per_age().values())
for min_key, min_value in self.count_per_age().items():
if min_count == min_value:
return min_key, min_value
def young_age(self): #Method to find youngest age of insured
min_age = min(self.age_list)
return min_age
def old_age(self): #Method to find oldest age of insured
max_age = max(self.age_list)
return max_age
def No_Insured_Young_People(self): #Method to find how many insured young people
key_value = dict(sorted(self.count_per_age().items()))
range_young_age = range(18, 25)
young_age_dict = {key: key_value[key] for key in key_value.keys() & range_young_age}
sum_insured_young_age = sum(young_age_dict.values())
return sum_insured_young_age
def No_Insured_Adult_People(self): #Method to find how many insured adult people
key_value = dict(sorted(self.count_per_age().items()))
range_adult_age = range(25, 65)
adult_age_dict = {key: key_value[key] for key in key_value.keys() & range_adult_age}
sum_insured_adult_age = sum(adult_age_dict.values())
return sum_insured_adult_age
age = Age(insurance_age)
print(f'The average age of people with insurance is {age.find_average()}')
#print(f'The total of people by ages with insurance {age.count_per_age()}') #csv data
max_key, max_value = age.age_most_insurance()
min_key, min_value = age.age_least_insurance()
print(f'The most common age with insurance is {max_key} with a number of {max_value} insured people')
print(f'The least common age with insurance is {min_key} with a number of {min_value} insured people')
print(f'The youngest insured age is {age.young_age()}')
print(f'The oldest insured age is {age.old_age()}')
print(f'The number of young insured people is {age.No_Insured_Young_People()}')
print(f'The number of adult insured people is {age.No_Insured_Adult_People()}')
#Sex Questions:
class Sex:
def __init__(self, sex):
self.sex = sex
def count_female(self): #Method to find the number of insured female
no_female = self.sex.count('female')
return no_female
def count_male(self): #Method to find the number of insured male
no_male = self.sex.count('male')
return no_male
#total charges of insured female and male
sex = Sex(insurance_sex)
#This show the counts(female and male) and graph(difference between female and male numbers of insured)
# female_count = sex.count_female()
# male_count = sex.count_male()
# data = [female_count, male_count]
# plt.bar(['female', 'male'], data)
# plt.show()
#bmi
class BMI:
def __init__(self, bmi):
self.bmi = bmi
def count_bmi(self): #dictionary for count of repetitive bmi's
bmi_match = dict((bmi, self.bmi.count(bmi)) for bmi in self.bmi)
return bmi_match
def bmi_range(self): #dictionary for underweight, normal, overweight and obese bmi
underweight = {}
normal = {}
overweight = {}
obese = {}
        for key, value in sorted(self.count_bmi().items()):
            if key < 18.5:
                underweight.update({key:value})
            elif key <= 24.9:
                normal.update({key:value})
            elif key <= 29.9:
                overweight.update({key:value})
            else: #covers the gaps the old chained comparisons left (e.g. bmi 29.95)
                obese.update({key:value})
return underweight, normal, overweight, obese
    def count_bmi_range(self): #totals per bmi category, built from bmi_range()
        underweight, normal, overweight, obese = self.bmi_range()
        underweight_sum = sum(underweight.values())
        normal_sum = sum(normal.values())
        overweight_sum = sum(overweight.values())
        obese_sum = sum(obese.values())
        return underweight_sum, normal_sum, overweight_sum, obese_sum
bmi = BMI(insurance_bmi)
# print(bmi.count_bmi())
underweight, normal, overweight, obese = bmi.bmi_range()
underweight_sum, normal_sum, overweight_sum, obese_sum = bmi.count_bmi_range()
# print(f'Total underweight insured people: {underweight_sum}')
# print(f'Total normal insured people: {normal_sum}')
# print(f'Total overweight insured people: {overweight_sum}')
# print(f'Total obese insured people: {obese_sum}')
#chidren
class Children:
def __init__(self, children):
self.children = children
def highest_no_children(self):
high_no_children = max(self.children)
return high_no_children
    def count_children(self): #Method to count how many people with 0, 1, 2, 3, 4, 5 children
        count_children_dictionary = dict((child, self.children.count(child)) for child in self.children) #dictionary for count of repetitive number of children
        return count_children_dictionary
child = Children(insurance_children)
print(child.highest_no_children())
print(child.count_children())
#smoker
class Smoker:
def __init__(self, smoker):
self.smoker = smoker
def smoker_count(self): #Method for getting number of people who smoke and not smoke
smoker_dictionary = dict((count, self.smoker.count(count)) for count in self.smoker)
value_yes = 0
value_no = 0
for key, value in smoker_dictionary.items():
if key == 'yes':
value_yes = value
elif key == 'no':
value_no = value
return value_yes, value_no
smoker = Smoker(insurance_smoker)
value_yes, value_no = smoker.smoker_count()
print(f'The number of insured people who smoke is {value_yes} and those who do not smoke is {value_no}')
#region
class Region:
def __init__(self, region, insurance_list):
self.region = region
self.insurance_list = insurance_list
def insured_per_region(self): #how many insured people per region
total_per_region = dict((region, self.region.count(region)) for region in self.region)
for key, value in total_per_region.items():
if key == 'northwest':
value_NW = value
elif key == 'northeast':
value_NE = value
elif key == 'southwest':
value_SW = value
elif key == 'southeast':
value_SE = value
return value_NW, value_NE, value_SW, value_SE, total_per_region
def per_region_list(self): #list of data per region
northwest = []
northeast = []
southwest = []
southeast = []
        for list in self.insurance_list:
            if list[5] == 'northwest':
                northwest.append(list)
            elif list[5] == 'northeast':
                northeast.append(list)
            elif list[5] == 'southwest':
                southwest.append(list)
            elif list[5] == 'southeast':
                southeast.append(list)
return northwest, northeast, southwest, southeast
def total_female_per_region(self, northwest, northeast, southwest, southeast): #how many female insured people per region
female_northwest = []
female_northeast = []
female_southwest = []
female_southeast = []
for list in northwest:
if list[1] == 'female':
female_northwest.append(list)
northwest_fem_total = len(female_northwest)
for list in northeast:
if list[1] == 'female':
female_northeast.append(list)
northeast_fem_total = len(female_northeast)
for list in southwest:
if list[1] == 'female':
female_southwest.append(list)
southwest_fem_total = len(female_southwest)
for list in southeast:
if list[1] == 'female':
female_southeast.append(list)
southeast_fem_total = len(female_southeast)
return northwest_fem_total, northeast_fem_total, southwest_fem_total, southeast_fem_total
def total_male_per_region(self, northwest, northeast, southwest, southeast): #how many male insured people per region
male_northwest = []
male_northeast = []
male_southwest = []
male_southeast = []
for list in northwest:
if list[1] == 'male':
male_northwest.append(list)
northwest_mal_total = len(male_northwest)
for list in northeast:
if list[1] == 'male':
male_northeast.append(list)
northeast_mal_total = len(male_northeast)
for list in southwest:
if list[1] == 'male':
male_southwest.append(list)
southwest_mal_total = len(male_southwest)
for list in southeast:
if list[1] == 'male':
male_southeast.append(list)
southeast_mal_total = len(male_southeast)
return northwest_mal_total, northeast_mal_total, southwest_mal_total, southeast_mal_total
    def children_northwest(self): #total number of children in northwest range(0, 5)
children_nul_northwest = []
children_one_northwest = []
children_two_northwest = []
children_three_northwest = []
children_four_northwest = []
children_five_northwest = []
for list in northwest:
if int(list[3]) == 0:
children_nul_northwest.append(list)
northwest_nul_total = len(children_nul_northwest)
elif int(list[3]) == 1:
children_one_northwest.append(list)
northwest_one_total = len(children_one_northwest)
elif int(list[3]) == 2:
children_two_northwest.append(list)
northwest_two_total = len(children_two_northwest)
elif int(list[3]) == 3:
children_three_northwest.append(list)
northwest_three_total = len(children_three_northwest)
elif int(list[3]) == 4:
children_four_northwest.append(list)
northwest_four_total = len(children_four_northwest)
elif int(list[3]) == 5:
children_five_northwest.append(list)
northwest_five_total = len(children_five_northwest)
return northwest_nul_total, northwest_one_total, northwest_two_total,northwest_three_total, northwest_four_total, northwest_five_total
def children_northeast(self): #total number of children in northeast range(0, 5)
children_nul_northeast = []
children_one_northeast = []
children_two_northeast= []
children_three_northeast = []
children_four_northeast= []
children_five_northeast= []
for list in northeast:
if int(list[3]) == 0:
children_nul_northeast.append(list)
northeast_nul_total = len(children_nul_northeast)
elif int(list[3]) == 1:
children_one_northeast.append(list)
northeast_one_total = len(children_one_northeast)
elif int(list[3]) == 2:
children_two_northeast.append(list)
northeast_two_total = len(children_two_northeast)
elif int(list[3]) == 3:
children_three_northeast.append(list)
northeast_three_total = len(children_three_northeast)
elif int(list[3]) == 4:
children_four_northeast.append(list)
                northeast_four_total = len(children_four_northeast)
elif int(list[3]) == 5:
children_five_northeast.append(list)
northeast_five_total = len(children_five_northeast)
        return northeast_nul_total, northeast_one_total, northeast_two_total, northeast_three_total, northeast_four_total, northeast_five_total
def children_southwest(self):#total number of children in southwest range(0, 5)
children_nul_southwest = []
children_one_southwest = []
children_two_southwest = []
children_three_southwest = []
children_four_southwest = []
children_five_southwest= []
for list in southwest:
if int(list[3]) == 0:
children_nul_southwest.append(list)
southwest_nul_total = len(children_nul_southwest)
elif int(list[3]) == 1:
children_one_southwest.append(list)
southwest_one_total = len(children_one_southwest)
elif int(list[3]) == 2:
children_two_southwest.append(list)
southwest_two_total = len(children_two_southwest)
elif int(list[3]) == 3:
children_three_southwest.append(list)
southwest_three_total = len(children_three_southwest)
elif int(list[3]) == 4:
children_four_southwest.append(list)
southwest_four_total = len(children_four_southwest)
elif int(list[3]) == 5:
children_five_southwest.append(list)
southwest_five_total = len(children_five_southwest)
return southwest_nul_total, southwest_one_total, southwest_two_total, southwest_three_total, southwest_four_total, southwest_five_total
def children_southeast(self): #total number of children in southeast range(0, 5)
children_nul_southeast = []
children_one_southeast = []
children_two_southeast= []
children_three_southeast = []
children_four_southeast= []
children_five_southeast= []
for list in southeast:
if int(list[3]) == 0:
children_nul_southeast.append(list)
southeast_nul_total = len(children_nul_southeast)
elif int(list[3]) == 1:
children_one_southeast.append(list)
southeast_one_total = len(children_one_southeast)
elif int(list[3]) == 2:
children_two_southeast.append(list)
southeast_two_total = len(children_two_southeast)
elif int(list[3]) == 3:
children_three_southeast.append(list)
southeast_three_total = len(children_three_southeast)
elif int(list[3]) == 4:
children_four_southeast.append(list)
southeast_four_total = len(children_four_southeast)
elif int(list[3]) == 5:
children_five_southeast.append(list)
southeast_five_total = len(children_five_southeast)
return southeast_nul_total, southeast_one_total, southeast_two_total, southeast_three_total, southeast_four_total, southeast_five_total
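    # A more compact alternative to the four children_* methods above (a
    # sketch, not used below): count people with 0-5 children for any region list.
    #     def children_per_region(self, region_list):
    #         counts = [0] * 6
    #         for row in region_list:
    #             counts[int(row[3])] += 1
    #         return tuple(counts)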
def smoker_per_region(self): #total number of smoker per region
northwest_smoker = []
northeast_smoker = []
southwest_smoker = []
southeast_smoker = []
for list in northwest:
if list[4] == 'yes':
northwest_smoker.append(list)
northwest_smo_total = len(northwest_smoker)
for list in northeast:
if list[4] == 'yes':
northeast_smoker.append(list)
northeast_smo_total = len(northeast_smoker)
for list in southwest:
if list[4] == 'yes':
southwest_smoker.append(list)
southwest_smo_total = len(southwest_smoker)
for list in southeast:
if list[4] == 'yes':
southeast_smoker.append(list)
southeast_smo_total = len(southeast_smoker)
return northwest_smo_total, northeast_smo_total, southwest_smo_total, southeast_smo_total
def notsmoker_per_region(self): #total number of smoker per region
northwest_notsmoker = []
northeast_notsmoker = []
southwest_notsmoker = []
southeast_notsmoker = []
for list in northwest:
if list[4] == 'no':
northwest_notsmoker.append(list)
northwest_notsmo_total = len(northwest_notsmoker)
for list in northeast:
if list[4] == 'no':
northeast_notsmoker.append(list)
northeast_notsmo_total = len(northeast_notsmoker)
for list in southwest:
if list[4] == 'no':
southwest_notsmoker.append(list)
southwest_notsmo_total = len(southwest_notsmoker)
for list in southeast:
if list[4] == 'no':
southeast_notsmoker.append(list)
southeast_notsmo_total = len(southeast_notsmoker)
return northwest_notsmo_total, northeast_notsmo_total, southwest_notsmo_total, southeast_notsmo_total
    def northwest_bmi(self): #total number of bmi(range) northwest
        northwest_under = []
        northwest_norm = []
        northwest_over = []
        northwest_obese = []
        for list in northwest:
            if float(list[2]) < 18.5:
                northwest_under.append(list)
            elif float(list[2]) <= 24.9:
                northwest_norm.append(list)
            elif float(list[2]) <= 29.9:
                northwest_over.append(list)
            else:
                northwest_obese.append(list)
        return len(northwest_under), len(northwest_norm), len(northwest_over), len(northwest_obese)
    def charges_per_region(self): #total charges per region (charges are the last column, as in insurance_charges above)
        northwest_charges = sum(float(list[-1]) for list in northwest)
        northeast_charges = sum(float(list[-1]) for list in northeast)
        southwest_charges = sum(float(list[-1]) for list in southwest)
        southeast_charges = sum(float(list[-1]) for list in southeast)
        return northwest_charges, northeast_charges, southwest_charges, southeast_charges
region = Region(insurance_region, insurance_data_sublist)
value_NW, value_NE, value_SW, value_SE, total_per_region = region.insured_per_region() #total insured people per region
print(f'Dictionary for total per region: {total_per_region}')
print(f'The total number of insured people in northwest is {value_NW}')
print(f'The total number of insured people in northeast is {value_NE}')
print(f'The total number of insured people in southwest is {value_SW}')
print(f'The total number of insured people in southeast is {value_SE}')
northwest, northeast, southwest, southeast = region.per_region_list() #list of data per region
northwest_fem_total, northeast_fem_total, southwest_fem_total, southeast_fem_total = region.total_female_per_region(northwest, northeast, southwest, southeast) #total female per region
northwest_mal_total, northeast_mal_total, southwest_mal_total, southeast_mal_total = region.total_male_per_region(northwest, northeast, southwest, southeast) #total male per region
print(f'The total number of female in northwest is {northwest_fem_total} and male {northwest_mal_total}')
print(f'The total number of female in northeast is {northeast_fem_total} and male {northeast_mal_total}')
print(f'The total number of female in southwest is {southwest_fem_total} and male {southwest_mal_total}')
print(f'The total number of female in southeast is {southeast_fem_total} and male {southeast_mal_total}')
northwest_nul_total, northwest_one_total, northwest_two_total,northwest_three_total, northwest_four_total, northwest_five_total = region.children_northwest() #northwest total number children range(0, 5)
northeast_nul_total, northeast_one_total, northeast_two_total, northeast_three_total, northeast_four_total, northeast_five_total = region.children_northeast()#northeast total number children range(0, 5)
southwest_nul_total, southwest_one_total, southwest_two_total, southwest_three_total, southwest_four_total, southwest_five_total = region.children_southwest() # southwest total number children range(0, 5)
southeast_nul_total, southeast_one_total, southeast_two_total, southeast_three_total, southeast_four_total, southeast_five_total = region.children_southeast() #southeast total number children range(0, 5)
print(f'The total number of 0 children in northwest: {northwest_nul_total}, northeast: {northeast_nul_total}, southwest: {southwest_nul_total}, southeast: {southeast_nul_total}')
print(f'The total number of 1 children in northwest: {northwest_one_total}, northeast: {northeast_one_total}, southwest: {southwest_one_total}, southeast: {southeast_one_total}')
print(f'The total number of 2 children in northwest: {northwest_two_total}, northeast: {northeast_two_total}, southwest: {southwest_two_total}, southeast: {southeast_two_total}')
print(f'The total number of 3 children in northwest: {northwest_three_total}, northeast: {northeast_three_total}, southwest: {southwest_three_total}, southeast: {southeast_three_total}')
print(f'The total number of 4 children in northwest: {northwest_four_total}, northeast: {northeast_four_total}, southwest: {southwest_four_total}, southeast: {southeast_four_total}')
print(f'The total number of 5 children in northwest: {northwest_five_total}, northeast: {northeast_five_total}, southwest: {southwest_five_total}, southeast: {southeast_five_total}')
northwest_smo_total, northeast_smo_total, southwest_smo_total, southeast_smo_total = region.smoker_per_region() #total smoker per region
northwest_notsmo_total, northeast_notsmo_total, southwest_notsmo_total, southeast_notsmo_total = region.notsmoker_per_region() #total not smoker per region
print(f'The total number of smoker in northwest is {northwest_smo_total} and not smoker {northwest_notsmo_total}')
print(f'The total number of smoker in northeast is {northeast_smo_total} and not smoker {northeast_notsmo_total}')
print(f'The total number of smoker in southwest is {southwest_smo_total} and not smoker {southwest_notsmo_total}')
print(f'The total number of smoker in southeast is {southeast_smo_total} and not smoker {southeast_notsmo_total}')
#class Charges
#Average yearly medical charges of the patients
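#A minimal sketch for the average-charges question above (uses the
#insurance_charges list built earlier; entries are strings, so cast to float):
average_charges = sum(float(charge) for charge in insurance_charges) / len_insured_people
print(f'The average yearly medical charges of the patients is {average_charges:.2f}')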
|
<filename>tests/test_deserialize.py<gh_stars>0
"""Test deserializing."""
import os
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# pylint: disable=wrong-import-position
import deserialize
# pylint: enable=wrong-import-position
class UnannotatedClass:
"""Test class with no type annotations."""
def __init__(self, value):
self.value = value
class SinglePropertySimpleType:
"""Test class with a single property of a simple type."""
my_property: int
def __str__(self):
return str({"my_property": self.my_property})
class MultiPropertySimpleType:
"""Test class with multiple properties of a simple type."""
my_int_property: int
my_str_property: str
def __str__(self):
return str(
{
"my_int_property": self.my_int_property,
"my_str_property": self.my_str_property,
}
)
class SinglePropertyComplexType:
"""Test class with a single property of a complex type."""
my_list: List[int]
def __str__(self):
return str([str(item) for item in self.my_list])
class ComplexNestedType:
"""Test class with complex nested information."""
one: int
two: Optional[str]
three: SinglePropertySimpleType
four: MultiPropertySimpleType
five: Optional[SinglePropertySimpleType]
six: List[SinglePropertyComplexType]
def __str__(self):
return str(
{
"one": self.one,
"two": self.two,
"three": str(self.three),
"four": str(self.four),
"five": str(self.five),
"six": str([str(item) for item in self.six]),
}
)
class TypeWithSimpleDict:
"""Test a class that has a simple dict embedded."""
value: int
dict_value: dict
class TypeWithDict:
"""Test a class that has a dict embedded."""
value: int
dict_value: Dict[str, int]
class TypeWithComplexDict:
"""Test a class that has a complex dict embedded."""
value: int
dict_value: Dict[str, TypeWithDict]
any_dict_value: Dict[str, Any]
class TypeWithUnion:
"""Test a class that has a Union embedded."""
union_value: Union[str, int]
class TypeWithEllipseTuple:
"""Test a class that has a Union embedded."""
tuple_value: Tuple[str, ...]
class NonJsonTypes:
"""Test a class that uses base types that aren't JSON compatible."""
one: tuple
two: range
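# Quick illustration of the deserialize API under test (a sketch):
#     instance = deserialize.deserialize(SinglePropertySimpleType, {"my_property": 1})
#     assert instance.my_property == 1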
class DeserializationTestSuite(unittest.TestCase):
"""Deserialization test cases."""
def test_single_simple(self):
"""Test that items with a single property and simple types deserialize."""
valid_test_cases = [
{"my_property": 1},
{"my_property": 234},
{"my_property": -53},
]
invalid_test_cases = [
{"my_property": None},
{"my_property": 3.14156},
{"my_property": "Hello"},
]
for test_case in valid_test_cases:
instance = deserialize.deserialize(SinglePropertySimpleType, test_case)
self.assertEqual(test_case["my_property"], instance.my_property)
for test_case in invalid_test_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(SinglePropertySimpleType, test_case)
def test_multi_simple(self):
"""Test that items with multiple properties and simple types deserialize."""
valid_test_cases = [
{"my_int_property": 1, "my_str_property": "Hello"},
{"my_int_property": 234, "my_str_property": "this"},
{"my_int_property": -53, "my_str_property": "is"},
{"my_int_property": 0, "my_str_property": "a"},
{"my_int_property": 99999999999, "my_str_property": "test"},
]
invalid_test_cases = [
{"my_int_property": None, "my_str_property": "Test"},
{"my_int_property": 3.14156, "my_str_property": "Test"},
{"my_int_property": "Hello", "my_str_property": "Test"},
{"my_int_property": 12, "my_str_property": None},
{"my_int_property": 34, "my_str_property": 42},
{"my_int_property": 56, "my_str_property": ["Test"]},
]
for test_case in valid_test_cases:
instance = deserialize.deserialize(MultiPropertySimpleType, test_case)
self.assertEqual(test_case["my_int_property"], instance.my_int_property)
self.assertEqual(test_case["my_str_property"], instance.my_str_property)
for test_case in invalid_test_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(MultiPropertySimpleType, test_case)
def test_single_complex(self):
"""Test that items with a single property and complex types deserialize."""
valid_test_cases = [
{"my_list": []},
{"my_list": [1, 2, 3]},
{"my_list": [2, -4, 23]},
]
invalid_test_cases = [
{"my_list": [None]},
{"my_list": [1, None, 3]},
{"my_list": [2, 3.14, 23]},
{"my_list": [2, 3, "Hello"]},
{"my_list": 2},
{"my_list": (2, 3, 4)},
]
for test_case in valid_test_cases:
instance = deserialize.deserialize(SinglePropertyComplexType, test_case)
self.assertEqual(test_case["my_list"], instance.my_list)
for test_case in invalid_test_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(SinglePropertyComplexType, test_case)
def test_complex_nested(self):
"""Test that items in a complex nested object deserialize."""
valid_test_cases = [
{
"one": 1,
"two": "2",
"three": {"my_property": 3},
"four": {"my_int_property": 34, "my_str_property": "Hello"},
"five": {"my_property": 3},
"six": [
{"my_list": []},
{"my_list": [1, 2, 3]},
{"my_list": [2, -4, 23]},
],
},
{
"one": 12312312,
"two": None,
"three": {"my_property": 3},
"four": {"my_int_property": 34, "my_str_property": "Hello"},
"five": None,
"six": [
{"my_list": []},
{"my_list": [1, 2, 3]},
{"my_list": [2, -4, 23]},
],
},
]
invalid_test_cases = [
{
"one": None,
"two": "2",
"three": {"my_property": 3},
"four": {"my_int_property": 34, "my_str_property": "Hello"},
"five": {"my_property": 3},
"six": [
{"my_list": []},
{"my_list": [1, 2, 3]},
{"my_list": [2, -4, 23]},
],
},
{
"one": 12312312,
"two": None,
"three": {"my_property": 3},
"four": {"my_int_property": 34, "my_str_property": "Hello"},
"five": None,
"six": [
{"my_list": []},
{"my_list": [1, "Test", 3]},
{"my_list": [2, -4, 23]},
],
},
]
for test_case in valid_test_cases:
instance = deserialize.deserialize(ComplexNestedType, test_case)
self.assertEqual(test_case["one"], instance.one)
self.assertEqual(test_case["two"], instance.two)
self.assertEqual(
test_case["three"]["my_property"], instance.three.my_property
)
self.assertEqual(
test_case["four"]["my_int_property"], instance.four.my_int_property
)
self.assertEqual(
test_case["four"]["my_str_property"], instance.four.my_str_property
)
if test_case["five"] is None:
self.assertIsNone(instance.five)
else:
self.assertEqual(
test_case["five"]["my_property"], instance.five.my_property
)
for i in range(0, len(test_case["six"])):
self.assertEqual(
test_case["six"][i]["my_list"], instance.six[i].my_list
)
for test_case in invalid_test_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(ComplexNestedType, test_case)
def test_unannotated(self):
"""Test parsing unannotated classes."""
data = {"value": 1}
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(UnannotatedClass, data)
def test_type_with_dict(self):
"""Test parsing types with dicts."""
test_cases = [
{"value": 1, "dict_value": {"Hello": 1, "World": 2}},
{"value": 1, "dict_value": {}},
]
for test_case in test_cases:
instance = deserialize.deserialize(TypeWithDict, test_case)
self.assertEqual(instance.value, test_case["value"])
for key, value in test_case["dict_value"].items():
self.assertEqual(instance.dict_value.get(key), value)
failure_cases = [
{"value": 1, "dict_value": {"Hello": "one", "World": "two"}},
{"value": 1, "dict_value": {1: "one", 2: "two"}},
{"value": 1, "dict_value": []},
]
for test_case in failure_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(TypeWithDict, test_case)
def test_type_with_simple_dict(self):
"""Test parsing types with dicts."""
test_cases = [
{"value": 1, "dict_value": {"Hello": 1, "World": 2}},
{"value": 1, "dict_value": {}},
]
for test_case in test_cases:
instance = deserialize.deserialize(TypeWithSimpleDict, test_case)
self.assertEqual(instance.value, test_case["value"])
for key, value in test_case["dict_value"].items():
self.assertEqual(instance.dict_value.get(key), value)
failure_cases = [{"value": 1, "dict_value": []}]
for test_case in failure_cases:
with self.assertRaises(deserialize.DeserializeException):
                _ = deserialize.deserialize(TypeWithSimpleDict, test_case)
def test_type_with_complex_dict(self):
"""Test parsing types with complex dicts."""
test_cases = [
{
"value": 1,
"dict_value": {
"Hello": {"value": 1, "dict_value": {"Hello": 1, "World": 2}}
},
"any_dict_value": {"Hello": 4, "World": ":D"},
}
]
for test_case in test_cases:
instance = deserialize.deserialize(TypeWithComplexDict, test_case)
self.assertEqual(instance.value, test_case["value"])
sub_instance = instance.dict_value["Hello"]
sub_test_case = test_case["dict_value"]["Hello"]
self.assertEqual(sub_instance.value, sub_test_case["value"])
for key, value in sub_test_case["dict_value"].items():
self.assertEqual(sub_instance.dict_value.get(key), value)
failure_cases = [
{"value": 1, "dict_value": {"Hello": {}}},
{
"value": 1,
"dict_value": {
"Hello": {"value": 1, "dict_value": {"Hello": "one", "World": 2}}
},
},
{"value": 1, "dict_value": {"Hello": {"value": 1}}},
]
for test_case in failure_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(TypeWithComplexDict, test_case)
def test_type_with_union(self):
"""Test parsing types with complex dicts."""
test_cases = [{"union_value": "one"}, {"union_value": 1}]
for test_case in test_cases:
instance = deserialize.deserialize(TypeWithUnion, test_case)
self.assertEqual(instance.union_value, test_case["union_value"])
failure_cases = [{"union_value": None}]
for test_case in failure_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(TypeWithUnion, test_case)
def test_type_with_ellipse_tuple(self):
"""Test parsing types with complex dicts."""
test_cases = [
{"tuple_value": tuple()},
{"tuple_value": ("one",)},
{"tuple_value": ("one", "two", "three", "four")},
]
for test_case in test_cases:
instance = deserialize.deserialize(TypeWithEllipseTuple, test_case)
self.assertEqual(instance.tuple_value, test_case["tuple_value"])
failure_cases = [{"tuple_value": (1,)}, {"tuple_value": ("one", 2)}]
for test_case in failure_cases:
with self.assertRaises(deserialize.DeserializeException):
_ = deserialize.deserialize(TypeWithEllipseTuple, test_case)
def test_non_json_types(self):
"""Test parsing types that are not JSON compatible."""
data = {"one": (1, 2), "two": range(3)}
result = deserialize.deserialize(NonJsonTypes, data)
self.assertEqual(data["one"], result.one)
self.assertEqual(data["two"], result.two)
|
import contextlib
import os
import subprocess
from python2.client.client import Py2Client
class Python2:
"""
Object representing a Python 2 session.
Initializing a `Python2` object spawns a Python 2 subprocess. To terminate
the subprocess, use the `Python2.shutdown()` method. A `Python2` object
may be used as a context manager to automatically shut down the session
when the context is exited.
"""
def __init__(self, executable='python',
logging_basic=None, logging_dict=None):
"""
Initialize a Python2 instance.
:param executable: Python 2 executable to use (default `'python'`).
:param logging_basic: Keyword args to pass to `logging.basicConfig()`
in the Python 2 process.
:param logging_dict: Dict to pass to `logging.dictConfig()` in the
Python 2 process.
"""
if logging_dict is not None:
logging_args = ['--logging-dict', repr(logging_dict)]
elif logging_basic is not None:
logging_args = ['--logging-basic', repr(logging_basic)]
else:
logging_args = []
with contextlib.ExitStack() as stack:
# Create two pipes for communication with the Python 2 server.
# We need to close the server end of each pipe after spawning the
# subprocess. We only need to close the client end if an
# exception is raised during initialization.
cread, swrite = os.pipe()
stack.callback(os.close, swrite)
fcread = _try_fdopen(cread, 'rb')
stack.push(_on_error(fcread.close))
sread, cwrite = os.pipe()
stack.callback(os.close, sread)
fcwrite = _try_fdopen(cwrite, 'wb')
stack.push(_on_error(fcwrite.close))
self._proc = subprocess.Popen(
[executable, '-m', 'python2.server',
'--in', str(sread), '--out', str(swrite)] + logging_args,
pass_fds=(sread, swrite),
start_new_session=True, # Avoid signal issues
universal_newlines=False)
stack.push(_on_error(_kill, self._proc))
self._client = Py2Client(fcread, fcwrite)
def ping(self):
""" Send a test message to the Python 2 process. """
return self._client.do_command('ping')
def project(self, obj):
""" Project an object into Python 2. """
return self._client.do_command('project', obj)
def lift(self, obj):
""" Lift an object from Python 2 to a native Python 3 object. """
return self._client.do_command('lift', obj)
def deeplift(self, obj):
""" Recursively lift an object from Python 2 to 3. """
return self._client.do_command('deeplift', obj)
def exec(self, code, scope={}):
""" Execute code in Python 2 in the given scope. """
return self._client.do_command('exec', code, scope)
def __getattr__(self, name):
""" Access Python 2 builtins. """
# True/False/None are keywords in Python 3
name_ = name[:-1] if name in ('None_', 'True_', 'False_') else name
result = self._client.do_command('builtin', name_)
setattr(self, name, result) # Remember builtins after first lookup
return result
def shutdown(self):
""" Shut down the Python 2 process and end the session. """
try:
self._client.close()
except Exception:
pass
try:
self._proc.wait(timeout=1)
except Exception:
_kill(self._proc)
def __enter__(self):
""" Enter a Python 2 session context. """
return self
def __exit__(self, *exc_info):
""" Shut down the Python 2 session. """
self.shutdown()
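# Example session (a sketch; assumes a Python 2 interpreter is available as
# 'python2' on PATH):
#     with Python2(executable='python2') as py2:
#         py2.ping()               # round-trip test of the pipes
#         obj = py2.project(2)     # push a value into Python 2
#         print(py2.lift(obj))     # pull it back as a native Python 3 object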
def _on_error(fn, *args, **kwargs):
""" Return a context exit function that invokes a callback on error. """
def __exit__(exc_type, exc_value, traceback):
if exc_type is not None:
fn(*args, **kwargs)
return __exit__
def _try_fdopen(fd, *args, **kwargs):
"""
Safely attempt to convert a file descriptor to a file object.
On success, returns a file object wrapping the file descriptor. On
failure, closes the file descriptor and raises an exception.
"""
try:
return os.fdopen(fd, *args, **kwargs)
except Exception:
os.close(fd)
raise
def _kill(proc):
""" Force-kill a process and wait for it to exit. """
try:
proc.kill()
finally:
proc.wait()
|
<filename>opt/snobfit/python/SQSnobFit/_snobupdt.py
from __future__ import print_function
# Python version of SNOBFIT v2.1 "snobfit.m" MATLAB version by <NAME>.
#
# Modified and redistributed with permission.
# Original copyright and license notice: #
# Copyright (c) 2003-2008, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of Vienna nor the
#       names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ARNOLD NEUMAIER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ARNOLD NEUMAIER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
function xl, xu, x, f, nsplit, small, near, d, np, t, inew, fnan, u, v = \
snobupdt(xl, xu, x, f, nsplit, small, near, d, np, t, xnew, fnew, fnan, u, v, u1, v1, dx)
Updates the box parameters when a set of new points and their function
values are added; i.e. the boxes containing more than one point are
split and the nearest neighbors are computed or updated.
Input:
xl, xu rows contain lower and upper bounds of the old boxes
x rows contain the old points
f f[j] contains the function value at x[j], its
variation and other parameters
nsplit nsplit[j,i] number of times box j has been split along
the ith coordinate
small small[j] is an integer-valued logarithmic volume measure
of box j
near near[j] is a vector pointing to the nearest
neighbors of x[j]
d d[j] is the maximal distance between x[j] and one of
its neighbors
np np[j] is the number of times the function value of
x[j] has been measured
t t[j] is np[j] times the variance of the function values
measured for the point x[j]
xnew rows contain new points
fnew new function values and their variations
fnew[j,0] = f(xnew[j]), fnew[j,1] = df(xnew[j])
fnan pointer to all old points where the function value could
not be obtained
u, v box bounds
u1, v1 box in which the new points are to be generated
dx resolution vector
Output:
  xl, xu     updated version of xl, xu (including new boxes)
x updated version of x
f updated version of f
nsplit updated version of nsplit
small updated version of small
near updated version of near
d updated version of d
np updated version of np
t updated version of t
inew pointer pointing to the new boxes and boxes whose
nearest neighbors have changed
fnan possibly updated version of fnan (if a function value
was found for a point in the new iteration)
u, v possibly updated box bounds such that all new points
are in the box
"""
from ._gen_utils import find, min_, rsort, sort
from ._snobinput import snobinput
from ._snobnn import snobnn
from ._snobsplit import snobsplit
import math
import numpy
def snobupdt(xl, xu, x, f, nsplit, small, near, d, np, t, xnew, fnew, fnan, u, v, u1, v1, dx):
n = u.shape[1] # dimension of the problem
nneigh = n+5
nxold = len(x) # number of points from the previous iteration
nxnew = len(xnew)
inew = numpy.array([], dtype=int)
if x.size > 0:
# if any of the new points are already among the old points, they are
# thrown away and the function value and its uncertainty are updated
dismiss = []
for j in range(nxnew):
i = find(numpy.sum(abs(numpy.ones((nxold,1))*xnew[j]-x), 1) == 0)
if i.size > 0:
if not find(fnan==i).any() and numpy.isfinite(f[i,1]): # point i had finite
# function value
if not numpy.isnan(fnew[j,0]):
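# Welford-style online update: bump the sample count, move the running
# mean f[i,0] by delta/np[i], and accumulate the scatter t[i]; f[i,1]
# then folds the new measurement's uncertainty into the stored deviation.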
np[i] = np[i] + 1
delta = fnew[j,0] - f[i,0]
f[i,0] = f[i,0] + delta/np[i]
t[i] = t[i] + delta*(fnew[j,0]-f[i,0])
f[i,1] = math.sqrt(f[i,1]**2 + (delta*(fnew[j,0] - f[i,0]) \
+ fnew[j,1]**2 - f[i,1]**2) / np[i])
inew = numpy.concatenate((inew, i))
dismiss = numpy.concatenate((dismiss, [j]))
else: # point i had NaN function value
if not numpy.isnan(fnew[j,0]).any():
f[i,0] = fnew[j,0]
inew = numpy.concatenate((inew, i))
ii = find(fnan==i)
if ii.size > 0:
fnan = numpy.delete(fnan, ii)
dismiss = numpy.concatenate((dismiss, [j]))
xnew = numpy.delete(xnew, dismiss, 0)
fnew = numpy.delete(fnew, dismiss, 0)
nxnew = len(xnew)
if not nxnew:
inew = numpy.sort(inew)
return xl, xu, x, f, nsplit, small, near, d, np, t, inew.astype(int), fnan, u, v
xnew, fnew, npnew, tnew = snobinput(xnew, fnew)
nxnew = xnew.shape[0]
nx = nxold + nxnew # current number of points
if numpy.sum(numpy.vstack((xnew, u)).min(0) < u) or \
numpy.sum(numpy.vstack((xnew, v)).max(0) > v) or \
(numpy.minimum(u, u1) < u).any() or (numpy.maximum(v, v1) > v).any():
xl, xu, small, u, v = snobnewb(xnew, xl, xu, small, u, v, u1, v1)
if x.size > 0:
x = numpy.concatenate((x, xnew))
else:
x = xnew.copy()
inew = numpy.concatenate((inew, numpy.arange(nxold, nx)), 0)
if f.size <= 0:
f = fnew.copy()
else:
if fnew.shape[1] < f.shape[1]:
fnew = numpy.append(fnew, numpy.zeros((len(fnew), f.shape[1]-fnew.shape[1])), axis=1)
f = numpy.vstack((f, fnew))
if np.size > 0:
np = numpy.append(np, npnew)
else:
np = npnew.copy()
if t.size > 0:
t = numpy.append(t, tnew)
else:
t = tnew.copy()
if not nxold:
xl, xu, x, f, nsplit, small = snobsplit(x, f, u, v, None, u, v)
else:
par = numpy.zeros((nxnew,))
for j in range(nxnew):
xx = numpy.ones((nxold,1))*xnew[j]
ind = find(numpy.sum( \
numpy.logical_and(numpy.less_equal(xl, xx), numpy.less_equal(xx, xu)), 1) == n)
if ind.size > 0:
minsmall, ismall = min_(small[ind])
par[j] = ind[ismall]
par1, ww, cdfx, dof = rsort(par)
inew = numpy.concatenate((inew, par1), 0)
for j in par1.astype(int):
ind = find(par==j)
ind = ind + nxold
spl = numpy.append([j], ind.flatten())
xl0, xu0, x0, f0, nsplit0, small0 = \
snobsplit(x[spl], f[spl], xl[j], xu[j], nsplit[j], u, v)
nxj = len(ind) + 1 # number of points in box [xl[j],xu[j]]
k = find(numpy.sum(x0 == numpy.ones((nxj,1))*x[j,:], axis=1) == n)
if len(k) != 1:
if 1 < len(k): k = k[min_(small[k])[1]]
else: k = 0
xl[j] = xl0[k]
xu[j] = xu0[k]
nsplit[j] = nsplit0[k]
small[j] = small0[k]
for k in range(nxj-1):
k1 = ind[k]
k2 = find(numpy.sum(x0 == numpy.ones((nxj,1))*x[k1,:],axis=1) == n)
if len(k2) != 1:
if 1 < len(k2): k2 = k2[min_(small[k2])[1]]
else: k2 = 0
ik1 = int(k1)
if len(xl) <= ik1:
xl = numpy.append(xl, numpy.zeros((ik1-len(xl)+1, xl.shape[1])), axis=0)
if len(xu) <= ik1:
xu = numpy.append(xu, numpy.zeros((ik1-len(xu)+1, xu.shape[1])), axis=0)
if len(nsplit) <= ik1:
nsplit = numpy.append(nsplit, numpy.zeros((ik1-len(nsplit)+1, nsplit.shape[1])), axis=0)
if len(small) <= ik1:
small = numpy.append(small, numpy.zeros((ik1-len(small)+1,)), axis=0)
xl[k1] = xl0[k2]
xu[k1] = xu0[k2]
nsplit[k1] = nsplit0[k2]
small[k1] = small0[k2]
notnan = numpy.arange(0, nx)
notnan = numpy.delete(notnan, find(numpy.isnan(f[:,0])))
if notnan.size > 0:
fmn = numpy.min(f[notnan,0])
fmx = numpy.max(f[notnan,0])
else:
fmn = 1
fmx = 0
if nx >= nneigh+1 and fmn < fmx:
if near.size <= 0:
near = numpy.zeros((nx, nneigh), dtype=int)
if d.size <= 0:
d = numpy.zeros((nx,))
for j in range(nxold, nx):
jnear, jd = snobnn(x[j], x, nneigh, dx)
if len(near) <= j:
near = numpy.append(near, numpy.zeros((j-len(near)+1, nneigh), dtype=int), axis=0)
near[j] = jnear
if len(d) <= j:
d = numpy.append(d, numpy.zeros((j-len(d)+1,)))
d[j] = jd
for j in range(nxold):
if numpy.min(numpy.sqrt(numpy.sum((numpy.ones((nxnew,1))*x[j]-xnew)**2,1))) < d[j]:
jnear, jd = snobnn(x[j], x, nneigh, dx)
if len(near) <= j:
near = numpy.append(near, numpy.zeros((j-len(near)+1, nneigh)), axis=0)
near[j] = jnear
if len(d) <= j:
d = numpy.append(d, numpy.zeros((j-len(d)+1,)))
d[j] = jd
inew = numpy.concatenate((inew, [j]))
inew = sort(inew)[0]
d = d.reshape((1, len(d)))
else:
near = numpy.array([])
d = numpy.inf*numpy.ones((1, nx))
return xl, xu, x, f, nsplit, small, near, d, np, t, inew.astype(int), fnan, u, v
def snobnewb(xnew, xl, xu, small, u, v, u1, v1):
nx = len(xl)
n = xl.shape[1] if nx else 0
uold = u
vold = v
u = numpy.concatenate((xnew, u)).min(0)
v = numpy.concatenate((xnew, v)).max(0)
u = numpy.minimum(u, u1)
v = numpy.maximum(v, v1)
i1 = find(u < uold)
i2 = find(v > vold)
ind = numpy.array([])
for j in range(len(i1)):
j1 = find(xl[:,i1[j]] == uold[i1[j]])
ind = numpy.concatenate((ind, j1))
xl[j1,i1[j]] = u[i1[j]]
for j in range(len(i2)):
j2 = find(xu[:, i2[j]] == vold[i2[j]])
ind = numpy.concatenate((ind, j2))
xu[j2,i2[j]] = v[i2[j]]
if len(i1) + len(i2): # at least one of the bounds was changed
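# integer log-volume measure: small[j] = -sum_i round(log2(side_ji / full_side_i)),
# so boxes that are small relative to [u, v] get large `small` values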
small = -numpy.sum(numpy.round(numpy.log2((xu-xl)/(numpy.ones((nx,1))*(v-u)))), axis=1)
return xl, xu, small, u, v
|
<reponame>chmp/mdnav
from __future__ import print_function
import collections
import json
import os.path
import re
import sys
import subprocess
import webbrowser
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class FakeLogger(object):
def __init__(self, active=False):
self.active = active
def info(self, fmt, *args):
if not self.active:
return
print(fmt % args)
_logger = FakeLogger()
def plugin_entry_point():
import vim
if int(vim.eval("exists('g:mdnav#Extensions')")):
extensions = vim.eval('g:mdnav#Extensions')
extensions = [ext.strip() for ext in extensions.split(',')]
else:
extensions = []
if int(vim.eval("exists('g:mdnav#DebugMode')")):
_logger.active = vim.eval('g:mdnav#DebugMode') == 'true'
row, col = vim.current.window.cursor
cursor = (row - 1, col)
lines = vim.current.buffer
target = parse_link(cursor, lines)
_logger.info('open %s', target)
action = open_link(
target,
current_file=vim.eval("expand('%:p')"),
open_in_vim_extensions=extensions,
)
action()
def open_link(target, current_file, open_in_vim_extensions=set()):
"""
:returns: a callable that encapsulates the action to perform
"""
if target is not None:
target = target.strip()
if not target:
_logger.info('no target')
return NoOp(target)
if target.startswith('#'):
return JumpToAnchor(target)
if has_scheme(target):
_logger.info('has scheme -> open in browser')
return BrowserOpen(target)
if not has_extension(target, open_in_vim_extensions):
_logger.info('has no extension for opening in vim')
return OSOpen(anchor_path(target, current_file))
if target.startswith('|filename|'):
target = target[len('|filename|'):]
if target.startswith('{filename}'):
target = target[len('{filename}'):]
return VimOpen(anchor_path(target, current_file))
def anchor_path(target, current_file):
if os.path.isabs(target):
return target
_logger.info('anchor path relative to %s', current_file)
return os.path.join(os.path.dirname(current_file), target)
def has_extension(path, extensions):
if not extensions:
return True
path = parse_path(path)
_, ext = os.path.splitext(path.path)
return ext in extensions
def has_scheme(target):
return bool(urlparse(target).scheme)
class Action(object):
def __init__(self, target):
self.target = target
def __eq__(self, other):
return type(self) == type(other) and self.target == other.target
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, self.target)
class NoOp(Action):
def __call__(self):
print('<mdnav: no link>')
class BrowserOpen(Action):
def __call__(self):
print('<mdnav: open browser tab>')
webbrowser.open_new_tab(self.target)
class OSOpen(Action):
def __call__(self):
if sys.platform.startswith('linux'):
call(['xdg-open', self.target])
elif sys.platform.startswith('darwin'):
call(['open', self.target])
else:
os.startfile(self.target)
class VimOpen(Action):
def __call__(self):
import vim
path = parse_path(self.target)
# TODO: make space handling more robust?
vim.command('e {}'.format(path.path.replace(' ', '\\ ')))
if path.line is not None:
try:
line = int(path.line)
except ValueError:
print('invalid line number')
return
else:
vim.current.window.cursor = (line, 0)
if path.anchor is not None:
JumpToAnchor(path.anchor)()
class JumpToAnchor(Action):
heading_pattern = re.compile(r'^#+(?P<title>.*)$')
attr_list_pattern = re.compile(r'{:\s+#(?P<id>\S+)\s')
def __call__(self):
import vim
line = self.find_anchor(self.target, vim.current.buffer)
if line is None:
return
vim.current.window.cursor = (line + 1, 0)
@classmethod
def find_anchor(cls, target, buffer):
needle = cls.norm_target(target)
for (idx, line) in enumerate(buffer):
m = cls.heading_pattern.match(line)
if (
m is not None and
cls.title_to_anchor(m.group('title')) == needle
):
return idx
m = cls.attr_list_pattern.search(line)
if m is not None and needle == m.group('id'):
return idx
@staticmethod
def title_to_anchor(title):
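# e.g. 'My Section Title' -> 'my-section-title' (GitHub-style slug)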
return '-'.join(fragment.lower() for fragment in title.split())
@staticmethod
def norm_target(target):
if target.startswith('#'):
target = target[1:]
return target.lower()
def call(args):
"""If available use vims shell mechanism to work around display issues
"""
try:
import vim
except ImportError:
subprocess.call(args)
else:
args = ['shellescape(' + json.dumps(arg) + ')' for arg in args]
vim.command('execute "! " . ' + ' . " " . '.join(args))
def parse_path(path):
"""Parse a path with optional line number of anchor into its parts.
For example::
parse_path('foo.md:30') == ParsedPath('foo.md', line=30)
parse_path('foo.md#anchor') == ParsedPath('foo.md', anchor='anchor')
"""
path, ext = os.path.splitext(path)
if '#' in ext:
ext, anchor = ext.rsplit('#', 1)
return ParsedPath(path + ext, anchor=anchor)
if ':' in ext:
ext, line = ext.rsplit(':', 1)
return ParsedPath(path + ext, line=line)
return ParsedPath(path + ext)
class ParsedPath(object):
def __init__(self, path, line=None, anchor=None):
self.path = path
self.line = line
self.anchor = anchor
def __repr__(self):
return 'ParsedPath({!r}, line={}, anchor={!r})'.format(self.path, self.line, self.anchor)
def parse_link(cursor, lines):
row, column = cursor
line = lines[row]
_logger.info('handle line %s (%s, %s)', line, row, column)
m = reference_definition_pattern.match(line)
if m is not None:
return m.group('link').strip()
link_text, rel_column = select_from_start_of_link(line, column)
if not link_text:
_logger.info('could not find link text')
return None
m = link_pattern.match(link_text)
if not m:
_logger.info('does not match link pattern')
return None
if m.end('link') <= rel_column:
_logger.info('cursor outside link')
return None
_logger.info('found match: %s', m.groups())
assert (m.group('direct') is None) != (m.group('indirect') is None)
if m.group('direct') is not None:
_logger.info('found direct link: %s', m.group('direct'))
return m.group('direct')
_logger.info('follow indirect link %s', m.group('indirect'))
indirect_ref = m.group('indirect')
if not indirect_ref:
indirect_ref = m.group('text')
indirect_link_pattern = re.compile(
r'^\[' + re.escape(indirect_ref) + r'\]:(.*)$'
)
for line in lines:
m = indirect_link_pattern.match(line)
if m:
return m.group(1).strip()
_logger.info('could not match for indirect link')
return None
reference_definition_pattern = re.compile(r'''
^
\[[^\]]*\]: # reference def at start of line
(?P<link>.*) # interpret everything else as link text
$
''', re.VERBOSE)
link_pattern = re.compile(r'''
^
(?P<link>
\[ # start of link text
(?P<text>[^\]]*) # link text
\] # end of link text
(?:
\( # start of target
(?P<direct>
[^\)]*
)
\) # end of target
|
\[
(?P<indirect>
[^\]]*
)
\]
)
)
.* # any remaining characters
$
''', re.VERBOSE)
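# Illustrative link_pattern matches (hedged examples, not from the source):
#   '[text](doc.md)'  -> group('direct') == 'doc.md'
#   '[text][ref]'     -> group('indirect') == 'ref'
#   '[ref][]'         -> group('indirect') == '' (parse_link falls back to the text)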
def select_from_start_of_link(line, pos):
"""Return the start of the link string and the new cursor
"""
if pos < len(line) and line[pos] == '[':
start = pos
else:
start = line[:pos].rfind('[')
# TODO: handle escapes
if start < 0:
return None, pos
# check for indirect links
if start != 0 and line[start - 1] == ']':
alt_start = line[:start].rfind('[')
if alt_start >= 0:
start = alt_start
return line[start:], pos - start
if __name__ == "__main__":
plugin_entry_point()
|
<reponame>t2y/python-study
import sys
import pytest
from boyer_moore_horspool import boyer_moore_horspool_search
from brute_force_search import brute_force_search
from simplified_boyer_moore import simplified_boyer_moore_search
from boyer_moore_sunday import boyer_moore_sunday_search, make_qs_table
from utils import read_hyogo, make_table
expected = list(map(lambda s: s.encode('utf-8'), [
'28102,"657 ","6570051","ヒョウゴケン","コウベシナダク","ヤハタチョウ","兵庫県","神戸市灘区","八幡町",0,0,1,0,0,0\n',
'28203,"673 ","6730871","ヒョウゴケン","アカシシ","オオクラハチマンチョウ","兵庫県","明石市","大蔵八幡町",0,0,0,0,0,0\n',
'28210,"67512","6751204","ヒョウゴケン","カコガワシ","ヤハタチョウカミサイジョウ","兵庫県","加古川市","八幡町上西条",0,0,0,0,0,0\n',
'28210,"67512","6751203","ヒョウゴケン","カコガワシ","ヤハタチョウシモムラ","兵庫県","加古川市","八幡町下村",0,0,0,0,0,0\n',
'28210,"67512","6751201","ヒョウゴケン","カコガワシ","ヤハタチョウソウサ","兵庫県","加古川市","八幡町宗佐",0,0,0,0,0,0\n',
'28210,"67512","6751205","ヒョウゴケン","カコガワシ","ヤハタチョウナカサイジョウ","兵庫県","加古川市","八幡町中西条",0,0,0,0,0,0\n',
'28210,"67512","6751202","ヒョウゴケン","カコガワシ","ヤハタチョウノムラ","兵庫県","加古川市","八幡町野村",0,0,0,0,0,0\n',
'28210,"67512","6751206","ヒョウゴケン","カコガワシ","ヤハタチョウフナマチ","兵庫県","加古川市","八幡町船町",0,0,0,0,0,0\n',
]))
def test_brute_force_search():
byte_word = '八幡町'.encode('utf-8')
with read_hyogo() as blob:
actual = brute_force_search(blob, byte_word)
assert expected == actual
def _boyer_moore_search(search_func, table_func):
byte_word = '八幡町'.encode('utf-8')
with read_hyogo() as blob:
actual = search_func(blob, byte_word, table_func(byte_word))
assert expected == actual
def test_simplified_boyer_moore_search():
_boyer_moore_search(simplified_boyer_moore_search, make_table)
def test_boyer_moore_horspool_search():
_boyer_moore_search(boyer_moore_horspool_search, make_table)
def test_boyer_moore_sunday_search():
_boyer_moore_search(boyer_moore_sunday_search, make_qs_table)
@pytest.mark.parametrize('word, num', [
('八', 104),
('八幡', 11),
('チョウ', 3229),
('65606', 4),
('28224', 110),
('0', 5223),
(',', 5223),
])
def test_search_results(word, num):
byte_word = word.encode('utf-8')
table = make_table(byte_word)
qs_table = make_qs_table(byte_word)
with read_hyogo() as blob:
bfs = brute_force_search(blob, byte_word)
sbm = simplified_boyer_moore_search(blob, byte_word, table)
bmh = boyer_moore_horspool_search(blob, byte_word, table)
bms = boyer_moore_sunday_search(blob, byte_word, qs_table)
assert num == len(bfs) == len(sbm) == len(bmh) == len(bms)
assert bfs == sbm == bmh == bms
|
<reponame>fakegit/asciimatics
# -*- coding: utf-8 -*-
"""
This module implements a fire effect renderer.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
import copy
from random import randint, random
from asciimatics.renderers.base import DynamicRenderer
from asciimatics.screen import Screen
class Fire(DynamicRenderer):
"""
Renderer to create a fire effect based on a specified `emitter` that
defines the heat source.
The implementation here uses the same techniques described in
http://freespace.virgin.net/hugo.elias/models/m_fire.htm, although with a
slightly different implementation.
"""
_COLOURS_16 = [
(Screen.COLOUR_RED, 0),
(Screen.COLOUR_RED, 0),
(Screen.COLOUR_RED, 0),
(Screen.COLOUR_RED, 0),
(Screen.COLOUR_RED, 0),
(Screen.COLOUR_RED, 0),
(Screen.COLOUR_RED, 0),
(Screen.COLOUR_RED, Screen.A_BOLD),
(Screen.COLOUR_RED, Screen.A_BOLD),
(Screen.COLOUR_RED, Screen.A_BOLD),
(Screen.COLOUR_RED, Screen.A_BOLD),
(Screen.COLOUR_YELLOW, Screen.A_BOLD),
(Screen.COLOUR_YELLOW, Screen.A_BOLD),
(Screen.COLOUR_YELLOW, Screen.A_BOLD),
(Screen.COLOUR_YELLOW, Screen.A_BOLD),
(Screen.COLOUR_WHITE, Screen.A_BOLD),
]
_COLOURS_256 = [
(0, 0),
(52, 0),
(88, 0),
(124, 0),
(160, 0),
(196, 0),
(202, 0),
(208, 0),
(214, 0),
(220, 0),
(226, 0),
(227, 0),
(228, 0),
(229, 0),
(230, 0),
(231, 0),
]
_CHARS = " ...::$$$&&&@@"
def __init__(self, height, width, emitter, intensity, spot, colours,
bg=False):
"""
:param height: Height of the box to contain the flames.
:param width: Width of the box to contain the flames.
:param emitter: Heat source for the flames. Any non-whitespace
character is treated as part of the heat source.
:param intensity: The strength of the flames. The bigger the number,
the hotter the fire. 0 <= intensity <= 1.0.
:param spot: Heat of each spot source. Must be an integer > 0.
:param colours: Number of colours the screen supports.
:param bg: (Optional) Whether to render background colours only.
"""
super(Fire, self).__init__(height, width)
self._emitter = emitter
self._intensity = intensity
self._spot_heat = spot
self._count = len([c for c in emitter if c not in " \n"])
line = [0 for _ in range(self._canvas.width)]
self._buffer = [copy.deepcopy(line) for _ in range(self._canvas.height * 2)]
self._colours = self._COLOURS_256 if colours >= 256 else \
self._COLOURS_16
self._bg_too = bg
# Figure out offset of emitter to centre at the bottom of the buffer
e_width = 0
e_height = 0
for line in self._emitter.split("\n"):
e_width = max(e_width, len(line))
e_height += 1
self._x = (width - e_width) // 2
self._y = height - e_height
def _render_now(self):
# First make the fire rise with convection
for y in range(len(self._buffer) - 1):
self._buffer[y] = self._buffer[y + 1]
self._buffer[len(self._buffer) - 1] = [0 for _ in range(self._canvas.width)]
# Seed new hot spots
x = self._x
y = self._y
for c in self._emitter:
if c not in " \n" and random() < self._intensity:
self._buffer[y][x] += randint(1, self._spot_heat)
if c == "\n":
x = self._x
y += 1
else:
x += 1
# Seed a few cooler spots
for _ in range(self._canvas.width // 2):
self._buffer[randint(0, self._canvas.height - 1)][
randint(0, self._canvas.width - 1)] -= 10
# Simulate cooling effect of the resulting environment.
for y in range(len(self._buffer)):
for x in range(self._canvas.width):
new_val = self._buffer[y][x]
if y < len(self._buffer) - 1:
new_val += self._buffer[y + 1][x]
if x > 0:
new_val += self._buffer[y][x - 1]
if x < self._canvas.width - 1:
new_val += self._buffer[y][x + 1]
self._buffer[y][x] = new_val // 4
# Now build the rendered text from the simulated flames.
self._clear()
for x in range(self._canvas.width):
for y in range(len(self._buffer)):
if self._buffer[y][x] > 0:
colour = self._colours[min(len(self._colours) - 1,
self._buffer[y][x])]
if self._bg_too:
char = " "
bg = colour[0]
else:
char = self._CHARS[min(len(self._CHARS) - 1,
self._buffer[y][x])]
bg = 0
self._write(char, x, y, colour[0], colour[1], bg)
return self._plain_image, self._colour_map
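# A minimal usage sketch (hedged: the Print/Scene wiring below is an assumed
# demo, not part of this module; see asciimatics' own samples for the real one):
#
#   from asciimatics.effects import Print
#   from asciimatics.scene import Scene
#   from asciimatics.screen import Screen
#
#   def _demo(screen):
#       fire = Fire(screen.height, 80, "*" * 40, 0.8, 10, screen.colours)
#       screen.play([Scene([Print(screen, fire, 0)], -1)])
#
#   # Screen.wrapper(_demo)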
|
<reponame>itsraina/keras
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras abstract base for depthwise convolutions."""
import tensorflow.compat.v2 as tf
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.input_spec import InputSpec
from keras.layers.convolutional.base_conv import Conv
class DepthwiseConv(Conv):
"""Depthwise convolution.
Depthwise convolution is a type of convolution in which each input channel
is convolved with a different kernel (called a depthwise kernel). You can
understand depthwise convolution as the first step in a depthwise separable
convolution.
It is implemented via the following steps:
- Split the input into individual channels.
- Convolve each channel with an individual depthwise kernel with
`depth_multiplier` output channels.
- Concatenate the convolved outputs along the channels axis.
Unlike a regular convolution, depthwise convolution does not mix
information across different input channels.
The `depth_multiplier` argument determines how many filters are applied to
one input channel. As such, it controls the amount of output channels that
are generated per input channel in the depthwise step.
Args:
kernel_size: A tuple or list of integers specifying the spatial dimensions
of the filters. Can be a single integer to specify the same value for
all spatial dimensions.
strides: A tuple or list of integers specifying the strides of the
convolution. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
no padding. `"same"` results in padding with zeros evenly to the
left/right or up/down of the input such that output has the same
height/width dimension as the input.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch_size, height,
width, channels)` while `channels_first` corresponds to inputs with
shape `(batch_size, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
'channels_last'.
dilation_rate: An integer or tuple/list of 2 integers, specifying the
dilation rate to use for dilated convolution. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any `strides`
value != 1.
activation: Activation function to use. If you don't specify anything, no
activation is applied (see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix (see
`keras.initializers`). If None, the default initializer
('glorot_uniform') will be used.
bias_initializer: Initializer for the bias vector (see
`keras.initializers`). If None, the default initializer ('zeros') will
be used.
depthwise_regularizer: Regularizer function applied to the depthwise
kernel matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its 'activation') (see `keras.regularizers`).
depthwise_constraint: Constraint function applied to the depthwise kernel
matrix (see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (see
`keras.constraints`).
Input shape:
4D tensor with shape: `[batch_size, channels, rows, cols]` if
data_format='channels_first'
or 4D tensor with shape: `[batch_size, rows, cols, channels]` if
data_format='channels_last'.
Output shape:
4D tensor with shape: `[batch_size, channels * depth_multiplier, new_rows,
new_cols]` if `data_format='channels_first'`
or 4D tensor with shape: `[batch_size,
new_rows, new_cols, channels * depth_multiplier]` if
`data_format='channels_last'`. `rows` and `cols` values might have
changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(depthwiseconv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(
self,
rank,
kernel_size,
strides=1,
padding="valid",
depth_multiplier=1,
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
depthwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
rank,
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs,
)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) != self.rank + 2:
raise ValueError(
"Inputs to `DepthwiseConv` should have "
f"rank {self.rank + 2}. "
f"Received input_shape={input_shape}."
)
input_shape = tf.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError(
"The channel dimension of the inputs to `DepthwiseConv` "
"should be defined. "
f"The input_shape received is {input_shape}, "
f"where axis {channel_axis} (0-based) "
"is the channel dimension, which found to be `None`."
)
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = self.kernel_size + (
input_dim,
self.depth_multiplier,
)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name="depthwise_kernel",
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(
min_ndim=self.rank + 2, axes={channel_axis: input_dim}
)
self.built = True
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = super().get_config()
config.pop("filters")
config.pop("kernel_initializer")
config.pop("kernel_regularizer")
config.pop("kernel_constraint")
config["depth_multiplier"] = self.depth_multiplier
config["depthwise_initializer"] = initializers.serialize(
self.depthwise_initializer
)
config["depthwise_regularizer"] = regularizers.serialize(
self.depthwise_regularizer
)
config["depthwise_constraint"] = constraints.serialize(
self.depthwise_constraint
)
return config
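# A hedged usage sketch: this class is abstract (call() is not implemented);
# keras.layers.DepthwiseConv2D is the concrete subclass. Shapes below are
# illustrative assumptions:
#
#   import tensorflow as tf
#   layer = tf.keras.layers.DepthwiseConv2D(kernel_size=3, depth_multiplier=2)
#   y = layer(tf.zeros((1, 32, 32, 8)))   # -> (1, 30, 30, 16): 8 channels * 2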
|
<reponame>pruthvireddypuresoftware/journald-2-cloudwatch
from unittest import TestCase
from unittest.mock import patch, mock_open, Mock
import os
import json
import urllib.request
from main import Format, IDENTITY_DOC_URL, get_instance_identity_document
IDENTITY_DOC_STR = b'''{
"devpayProductCodes" : null,
"availabilityZone" : "us-east-1d",
"privateIp" : "10.158.112.84",
"version" : "2010-08-31",
"region" : "us-east-1",
"instanceId" : "i-1234567890abcdef0",
"billingProducts" : null,
"instanceType" : "t1.micro",
"accountId" : "123456789012",
"pendingTime" : "2015-11-19T16:32:11Z",
"imageId" : "ami-5fb8c835",
"kernelId" : "aki-919dcaf8",
"ramdiskId" : null,
"architecture" : "x86_64"
}
'''
IDENTITY_DOC = json.loads(IDENTITY_DOC_STR.decode('utf-8'))
@patch('main.get_instance_identity_document', return_value=IDENTITY_DOC, autospec=True)
class FormatterTest(TestCase):
def test_default_formatting(self, _):
''' test formatting is same as default '''
for fmt, kwargs in [
['string', {}],
['abc {d}', {'d': 123}],
['{b} {a}', {'a': 123, 'b': 456}],
['formatting {x:03}', {'x': 2}],
]:
self.assertEqual(Format(fmt, **kwargs), fmt.format(**kwargs))
self.assertEqual(Format('{}', 123), '{}'.format(123))
self.assertRaises(KeyError, Format, '{a}')
def test_formatting_defaults(self, _):
''' test the a|b|c fallthrough defaulting '''
self.assertEqual(Format('xyz {a|b|c} 123', a=1, b=2, c=3), 'xyz 1 123')
self.assertEqual(Format('xyz {a|b|c} 123', b=2, c=3), 'xyz 2 123')
self.assertEqual(Format('xyz {a|b|c} 123', c=3), 'xyz 3 123')
self.assertRaises(KeyError, Format, 'xyz {a|b|c} 123')
def test_string_formatting(self, _):
''' test when key is a string '''
self.assertEqual(Format('xyz {a|b|"hello"} 123', b=5), 'xyz 5 123')
self.assertEqual(Format('xyz {a|b|"hello"} 123'), 'xyz hello 123')
self.assertEqual(Format("xyz {a|b|'hello'} 123"), 'xyz hello 123')
def test_identity_doc_formatting(self, _):
''' test variables in the identity doc '''
self.assertEqual(Format('xyz {$instanceId}'), 'xyz ' + IDENTITY_DOC['instanceId'])
self.assertEqual(Format('xyz {$region}'), 'xyz ' + IDENTITY_DOC['region'])
self.assertEqual(Format('xyz {invalid|$region}'), 'xyz ' + IDENTITY_DOC['region'])
def test_journald_vars(self, _):
''' test some convenience vars made from journald fields '''
# test $unit
self.assertEqual(Format('xyz {$unit}', _SYSTEMD_UNIT='systemd_unit', **{'$unit': 'not used'}), 'xyz systemd_unit')
self.assertEqual(Format('xyz {$unit}', USER_UNIT='user_unit', _SYSTEMD_UNIT='not used', **{'$unit': 'not used'}), 'xyz user_unit')
# test templated unit
self.assertEqual(Format('xyz {$unit}', _SYSTEMD_UNIT='<EMAIL>', **{'$unit': 'not used'}), 'xyz systemd_unit.service')
# test no unit name found
self.assertEqual(Format('xyz {$unit}', **{'$unit': 'hello'}), 'xyz hello')
# docker container
self.assertEqual(Format('xyz {$docker_container}', _SYSTEMD_UNIT='docker.service', CONTAINER_NAME='container', **{'$docker_container': 'not used'}), 'xyz container.container')
self.assertEqual(Format('xyz {$docker_container}', CONTAINER_NAME='container', **{'$docker_container': 'hello'}), 'xyz hello')
self.assertEqual(Format('xyz {$docker_container}', _SYSTEMD_UNIT='docker.service', **{'$docker_container': 'hello'}), 'xyz hello')
def test_env_vars(self, _):
''' test environment variables '''
with patch.dict(os.environ, ENV_VAR='hello'):
self.assertEqual(Format('xyz {$ENV_VAR}'), 'xyz hello')
def test_default_special_vars(self, _):
''' test when $var not found '''
self.assertEqual(Format('xyz {$other}', **{'$other': 'hello'}), 'xyz hello')
self.assertRaises(KeyError, Format, 'xyz {$not_found}')
class InstanceIdentityDocTest(TestCase):
DATA = dict(a=123, b='xyz')
NULL_DATA = dict(a=123, b='xyz', c=None)
def setUp(self):
# clear the lru_cache every time
get_instance_identity_document.cache_clear()
@patch('urllib.request.urlopen', mock_open())
def test_get_instance_identity_document(self):
urllib.request.urlopen.return_value.read.return_value = json.dumps(self.DATA).encode('utf-8')
self.assertEqual(get_instance_identity_document(), self.DATA)
urllib.request.urlopen.assert_called_with(IDENTITY_DOC_URL)
@patch('urllib.request.urlopen', mock_open())
def test_none_values_removed(self):
''' it drops where values are null '''
urllib.request.urlopen.return_value.read.return_value = json.dumps(self.NULL_DATA).encode('utf-8')
self.assertEqual(get_instance_identity_document(), self.DATA)
urllib.request.urlopen.assert_called_with(IDENTITY_DOC_URL)
|
import json
import logging
import netaddr
import random
from powergslb.server.http.handler.abstract import AbstractContentHandler
import powergslb.monitor
import powergslb.database
import powergslb.system
__all__ = ['PowerDNSContentHandler']
class PowerDNSContentHandler(AbstractContentHandler):
"""
PowerDNS content handler
"""
_lb_topology_map = {}
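# lbmethod codes handled below: 'p' priority (default), 'wrr' weighted round
# robin, 't' topology, 'tp' topology+priority, 'twrr' topology+wrr,
# 'ltd' lowest time delay, 'persistence' client-IP persistence.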
def _filter_records(self, qtype_records):
records = []
for qtype in qtype_records:
lb_method = 'p'
if len(qtype_records[qtype]) > 0 and 'lbmethod' in qtype_records[qtype][0] and qtype_records[qtype][0]['lbmethod'] is not None:
lb_method = qtype_records[qtype][0]['lbmethod']
logging.debug('qtype_records: %s', str(qtype_records))
logging.debug('LB Method: %s', lb_method)
if lb_method == 'p':
filtered_records = self._lb_priority( qtype_records[qtype] )
filtered_records = self._lb_randomize( filtered_records )
elif lb_method == 'wrr':
filtered_records = self._lb_wrr( qtype_records[qtype] )
elif lb_method == 't':
filtered_records = self._lb_topology( qtype_records[qtype] )
filtered_records = self._lb_randomize( filtered_records )
elif lb_method == 'tp':
filtered_records = self._lb_topology( qtype_records[qtype] )
filtered_records = self._lb_priority( filtered_records )
elif lb_method == 'twrr':
filtered_records = self._lb_topology( qtype_records[qtype] )
filtered_records = self._lb_wrr( filtered_records )
elif lb_method == 'ltd':
filtered_records = self._lb_ltd( qtype_records[qtype] )
elif lb_method == 'persistence':
filtered_records = self._lb_persistence( qtype_records[qtype] )
else:
filtered_records = self._lb_priority( qtype_records[qtype] )
if not filtered_records:
continue
if filtered_records[0]['persistence']:
records.append(self._remote_ip_persistence(filtered_records))
else:
records.extend(filtered_records)
return records
def _get_lookup(self):
records = self.database.gslb_records(*self.dirs[2:])
qtype_records = self._split_records(records)
filtered_records = self._filter_records(qtype_records)
return self._strip_records(filtered_records)
def _is_in_view(self, record):
result = False
try:
result = bool(netaddr.smallest_matching_cidr(self.remote_ip, record.get('rule').split()))
except (AttributeError, netaddr.AddrFormatError, ValueError) as e:
logging.error('{}: record id {} view rule invalid: {}: {}'.format(
type(self).__name__, record['id'], type(e).__name__, e))
return result
def _remote_ip_persistence(self, records):
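# shifting the client IP right by `persistence` bits buckets clients by
# prefix, e.g. persistence=8 maps all of 10.1.2.0/24 to the same record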
persistence_value = netaddr.IPAddress(self.remote_ip).value >> records[0]['persistence']
return records[hash(persistence_value) % len(records)]
def _split_records(self, records):
qtype_records = {}
for record in records:
if record['qtype'] in ['MX', 'SRV']:
content_split = record['content'].split()
try:
record['priority'] = int(content_split[0])
record['content'] = ' '.join(content_split[1:])
except (KeyError, ValueError) as e:
logging.error('{}: record id {} priority missing or invalid: {}: {}'.format(
type(self).__name__, record['id'], type(e).__name__, e))
continue
if record['qtype'] not in qtype_records:
qtype_records[record['qtype']] = []
qtype_records[record['qtype']].append(record)
return qtype_records
def _lb_priority(self, records):
fallback_records = {}
live_records = {}
for record in records:
if not self._is_in_view(record):
continue
if record['fallback']:
if record['weight'] not in fallback_records:
fallback_records[record['weight']] = []
fallback_records[record['weight']].append(record)
if record['id'] not in powergslb.monitor.get_status():
if record['weight'] not in live_records:
live_records[record['weight']] = []
live_records[record['weight']].append(record)
if live_records:
filtered_records = live_records[max(live_records)]
elif fallback_records:
filtered_records = fallback_records[max(fallback_records)]
else:
filtered_records = []
return filtered_records
def _lb_wrr(self, records):
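# weighted roulette selection: draw rand in [0,1) and walk the records,
# accumulating weight/sum_weight until the cumulative probability reaches
# rand; if nothing is selected, fall back to all live in-view records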
fallback_records = {}
live_records = {}
nrecords = len(records)
sum_weight = 0
sum_weight_fallback = 0
for record in records:
if not record['fallback']:
sum_weight += record['weight']
else:
sum_weight_fallback += record['weight']
# Live records
if sum_weight > 0 :
rand = random.random()
proba_max = 0.0
for record in records:
if not self._is_in_view(record) or record['fallback']:
continue
if record['id'] not in powergslb.monitor.get_status():
proba_max = proba_max + float(record['weight'])/float(sum_weight)
logging.debug("live_records - sum_weight: %d - rand: %f - record['weight']: %d - proba_max = w+rw/s: %f", sum_weight, rand, record['weight'], proba_max)
if proba_max >= rand:
if '0' not in live_records:
live_records['0'] = []
live_records['0'].append(record)
break
else:
live_records['0'] = []
for record in records:
if not self._is_in_view(record):
continue
if not record['fallback'] and record['id'] not in powergslb.monitor.get_status():
live_records['0'].append(record)
# Fallback records
if sum_weight_fallback > 0:
rand = random.random()
proba_max = 0.0
for record in records:
if not self._is_in_view(record) or not record['fallback']:
continue
proba_max = proba_max + float(record['weight'])/float(sum_weight_fallback)
logging.debug("fallback_records - sum_weight_fallback: %d - rand: %f - record['weight']: %d - proba_max = w+rw/s: %f", sum_weight_fallback, rand, record['weight'], proba_max)
if proba_max >= rand:
if '0' not in fallback_records:
fallback_records['0'] = []
fallback_records['0'].append(record)
break
else:
fallback_records['0'] = []
for record in records:
if not self._is_in_view(record):
continue
if record['fallback']:
fallback_records['0'].append(record)
# Final record list
if live_records:
filtered_records = live_records[max(live_records)]
elif fallback_records:
filtered_records = fallback_records[max(fallback_records)]
else:
filtered_records = []
return filtered_records
def _lb_topology( self, records):
if len(records) > 0 and 'lboption_json' in records[0] and records[0]['lboption_json'] is not None:
try:
self._lb_topology_map = json.loads( records[0]['lboption_json'] )
except ValueError:
logging.error( "Unable to load topology map: %s !!!", str(records[0]['lboption_json']) )
logging.debug( "Topology map: %s", str(self._lb_topology_map) )
client_region = self._lb_get_topology_region( self.remote_ip )
logging.debug("= TOPOLOGY = ip: %s - region: %s", self.remote_ip, client_region)
if client_region == '':
return records
nfallback_records = 0
nlive_records = 0
topology_fallback_records = []
topology_live_records = []
for record in records:
logging.debug("= TOPOLOGY - _lb_topology = record: %s", str(record))
if not self._is_in_view(record) or (record['qtype'] != 'A' and record['qtype'] != 'AAAA'):
continue
if record['fallback']:
record_region = self._lb_get_topology_region( record['content'] )
if record_region == client_region:
topology_fallback_records.append( record )
if record['id'] not in powergslb.monitor.get_status():
record_region = self._lb_get_topology_region( record['content'] )
logging.debug("= TOPOLOGY = record content: %s - region: %s", record['content'], record_region)
if record_region == client_region:
topology_live_records.append( record )
# Final record list
if topology_live_records:
return topology_live_records
return records
def _lb_get_topology_region( self, ip):
logging.debug("= TOPOLOGY - _lb_get_topology_region = ip: %s", ip)
ip = netaddr.IPAddress( ip ).value
region = ''
logging.debug("= TOPOLOGY - _lb_get_topology_region = ip: %s", ip)
for region_name, net_list in self._lb_topology_map.items():
if region != '':
break
for net in net_list:
network = netaddr.IPNetwork( net )
if ip >= network.first and ip <= network.last:
region = region_name
break
return region
def _lb_randomize(self, records):
random.shuffle( records )
return records
def _lb_ltd( self, records):
logging.debug( "alain: %s",str(records) )
ts = powergslb.database.TimeSeries( **dict(powergslb.system.get_config().items('redis')) )
filtered_record = []
avg_td = 0.0
for record in records:
avg_td_tmp = ts.get_response_time_avg( record['id'] )
logging.debug(' ltd - content: %s - avg_td: %f - avg_ltd: %f', record['content'], avg_td_tmp, avg_td)
if len( filtered_record ) == 0:
filtered_record.append( record )
avg_td = avg_td_tmp
elif avg_td_tmp < avg_td and avg_td_tmp > 0.0:
filtered_record[0] = record
avg_td = avg_td_tmp
return filtered_record
@staticmethod
def _strip_records(records):
result = []
for record in records:
if record['qtype'] in ['MX', 'SRV']:
names = ['qname', 'qtype', 'content', 'ttl', 'priority']
values = [record['qname'], record['qtype'], record['content'], record['ttl'], record['priority']]
else:
names = ['qname', 'qtype', 'content', 'ttl']
values = [record['qname'], record['qtype'], record['content'], record['ttl']]
result.append(dict(zip(names, values)))
return result
def content(self):
if len(self.dirs) == 4 and self.dirs[1] == 'lookup':
content = {'result': self._get_lookup()}
else:
content = {'result': False}
return json.dumps(content, separators=(',', ':'))
|
<reponame>rimmartin/cctbx_project<gh_stars>0
from __future__ import division
from cctbx import crystal
from libtbx.utils import Sorry, date_and_time, multi_out
import iotbx.phil
from iotbx import reflection_file_reader
from iotbx import reflection_file_utils
from iotbx import crystal_symmetry_from_any
import mmtbx.scaling
from mmtbx.scaling import pair_analyses
from libtbx.str_utils import StringIO
from mmtbx.scaling import pre_scale, make_param
import sys, os
from mmtbx.scaling import fa_estimation
params_generator = make_param.phil_lego()
master_params = iotbx.phil.parse( params_generator.default_2wmad() )
def run(args):
if len(args)==0:
master_params.show(expert_level=100)
elif ( "--help" in args ):
print "no help available"
elif ( "--h" in args ):
print "no help available"
elif ( "--show_defaults" in args ):
master_params.show(expert_level=0)
elif ( "--show_defaults_all" in args ):
master_params.show(expert_level=10)
else:
log = multi_out()
if (not "--quiet" in args):
log.register(label="stdout", file_object=sys.stdout)
string_buffer = StringIO()
string_buffer_plots = StringIO()
log.register(label="log_buffer", file_object=string_buffer)
log_plots = StringIO()
print >> log,"#phil __OFF__"
print >> log
print >> log, date_and_time()
print >> log
print >> log
phil_objects = []
argument_interpreter = master_params.command_line_argument_interpreter(
home_scope="scaling")
reflection_file = None
for arg in args:
command_line_params = None
arg_is_processed = False
if arg == '--quiet':
arg_is_processed = True
## The associated action with this keyword is implemented above
if (os.path.isfile(arg)): ## is this a file name?
## Check if this is a phil file
try:
command_line_params = iotbx.phil.parse(file_name=arg)
except KeyboardInterrupt: raise
except Exception : pass
if command_line_params is not None:
phil_objects.append(command_line_params)
arg_is_processed = True
## Check if this file is a reflection file
if command_line_params is None:
reflection_file = reflection_file_reader.any_reflection_file(
file_name=arg, ensure_read_access=False)
if (reflection_file is not None):
reflection_file = arg
arg_is_processed = True
## If it is not a file, it must be a phil command
else:
try:
command_line_params = argument_interpreter.process(arg=arg)
if command_line_params is not None:
phil_objects.append(command_line_params)
arg_is_processed = True
except KeyboardInterrupt: raise
except Exception : pass
if not arg_is_processed:
print >> log, "##----------------------------------------------##"
print >> log, "## Unknown phil-file or phil-command:", arg
print >> log, "##----------------------------------------------##"
print >> log
raise Sorry("Unknown file format or phil command: %s" % arg)
effective_params = master_params.fetch(sources=phil_objects)
params = effective_params.extract()
## Now please read in the reflections files
## get symmetry and cell data first please
## By default, the native cell and symmetry are used
## as reference
crystal_symmetry_nat = None
print params.scaling.input.xray_data.wavelength1.file_name
crystal_symmetry_nat = crystal_symmetry_from_any.extract_from(
file_name=params.scaling.input.xray_data.wavelength1.file_name)
if params.scaling.input.xray_data.space_group is None:
params.scaling.input.xray_data.space_group =\
crystal_symmetry_nat.space_group_info()
print >> log, "Using symmetry of native data"
if params.scaling.input.xray_data.unit_cell is None:
params.scaling.input.xray_data.unit_cell =\
crystal_symmetry_nat.unit_cell()
print >> log, "Using cell of native data"
## Check if a unit cell is defined
if params.scaling.input.xray_data.space_group is None:
raise Sorry("No space group defined")
if params.scaling.input.xray_data.unit_cell is None:
raise Sorry("No unit cell defined")
crystal_symmetry = crystal.symmetry(
unit_cell = params.scaling.input.xray_data.unit_cell,
space_group_symbol = str(
params.scaling.input.xray_data.space_group) )
effective_params = master_params.fetch(sources=phil_objects)
new_params = master_params.format(python_object=params)
print >> log, "Effective parameters"
print >> log, "#phil __ON__"
new_params.show(out=log,expert_level=params.scaling.input.expert_level)
print >> log, "#phil __END__"
print >> log
## define a xray data server
xray_data_server = reflection_file_utils.reflection_file_server(
crystal_symmetry = crystal_symmetry,
force_symmetry = True,
reflection_files=[])
## Read in native data and make appropriate selections
miller_array_w1 = None
miller_array_w1 = xray_data_server.get_xray_data(
file_name = params.scaling.input.xray_data.wavelength1.file_name,
labels = params.scaling.input.xray_data.wavelength1.labels,
ignore_all_zeros = True,
parameter_scope = 'scaling.input.SIR_scale.xray_data.native'
)
info_native = miller_array_w1.info()
miller_array_w1=miller_array_w1.map_to_asu().select(
miller_array_w1.indices()!=(0,0,0) )
miller_array_w1 = miller_array_w1.select(
miller_array_w1.data() > 0 )
## Convert to amplitudes
if (miller_array_w1.is_xray_intensity_array()):
miller_array_w1 = miller_array_w1.f_sq_as_f()
elif (miller_array_w1.is_complex_array()):
miller_array_w1 = abs(miller_array_w1)
if not miller_array_w1.is_real_array():
raise Sorry("miller_array_native is not a real array")
miller_array_w1.set_info(info = info_native)
## Read in derivative data and make appropriate selections
miller_array_w2 = None
miller_array_w2 = xray_data_server.get_xray_data(
file_name = params.scaling.input.xray_data.wavelength2.file_name,
labels = params.scaling.input.xray_data.wavelength2.labels,
ignore_all_zeros = True,
parameter_scope = 'scaling.input.SIR_scale.xray_data.derivative'
)
info_w2 = miller_array_w2.info()
miller_array_w2=miller_array_w2.map_to_asu().select(
miller_array_w2.indices()!=(0,0,0) )
miller_array_w2 = miller_array_w2.select(
miller_array_w2.data() > 0 )
## Convert to amplitudes
if (miller_array_w2.is_xray_intensity_array()):
miller_array_w2 = miller_array_w2.f_sq_as_f()
elif (miller_array_w2.is_complex_array()):
miller_array_w2 = abs(miller_array_w2)
if not miller_array_w2.is_real_array():
raise Sorry("miller_array_derivative is not a real array")
miller_array_w2.set_info(info = info_w2)
## Make sure we have anomalous diffs in both files
assert miller_array_w1.anomalous_flag()
assert miller_array_w2.anomalous_flag()
## Print info
print >> log
print >> log, "Wavelength 1"
print >> log, "============"
miller_array_w1.show_comprehensive_summary(f=log)
print >> log
w1_pre_scale = pre_scale.pre_scaler(
miller_array_w1,
params.scaling.input.scaling_strategy.pre_scaler_protocol,
params.scaling.input.basic)
miller_array_w1 = w1_pre_scale.x1.deep_copy()
del w1_pre_scale
print >> log
print >> log, "Wavelength 2"
print >> log, "============"
miller_array_w2.show_comprehensive_summary(f=log)
print >> log
w2_pre_scale = pre_scale.pre_scaler(
miller_array_w2,
params.scaling.input.scaling_strategy.pre_scaler_protocol,
params.scaling.input.basic)
miller_array_w2 = w2_pre_scale.x1.deep_copy()
del w2_pre_scale
print >> log
print >> log, "Checking for possible reindexing schemes"
print >> log, "----------------------------------------"
print >> log
print >> log, "Reindexing operator derived as described in:"
print >> log, "Grosse-Kunstleve, Afonine, Sauter & Adams. (2005)."
print >> log, " IUCr Computing Commission Newsletter 5."
print >> log
reindex_object = pair_analyses.reindexing(
set_a=miller_array_w1,
set_b=miller_array_w2,
out=log)
miller_array_w2 = reindex_object.select_and_transform()
miller_array_w2.map_to_asu()
print >> log
print >> log, "Relative scaling of 2-wavelength mad data"
print >> log, "-----------------------------------------"
print >> log
scaler = fa_estimation.combined_scaling(
miller_array_w1,
miller_array_w2,
params.scaling.input.scaling_strategy.iso_protocol)
miller_array_w1 = scaler.x1.deep_copy()
miller_array_w2 = scaler.x2.deep_copy()
del scaler
print >> log
print >> log, "Estimating f\" and f' ratios"
print >> log, "----------------------------"
print >> log
# now things are scaled see if we can guestimate the ratio
fdpratio = pair_analyses.f_double_prime_ratio(
miller_array_w1,
miller_array_w2)
fpfdpratio = pair_analyses.delta_f_prime_f_double_prime_ratio(
miller_array_w1,
miller_array_w2)
k1 = fdpratio.ratio
k2 = fpfdpratio.ratio
if k1 is not None:
print >> log
print >> log, " The estimate of f\"(w1)/f\"(w2) is %3.2f"\
%(fdpratio.ratio)
if k2 is not None:
print >> log, " The estimate of (f'(w1)-f'(w2))/f\"(w2) is %3.2f"\
%(fpfdpratio.ratio)
print >> log
print >> log, " The quality of these estimates depends to a large extend"
print >> log, " on the quality of the data. If user supplied values"
print >> log, " of f\" and f' are given, they will be used instead "
print >> log, " of the estimates."
print >> log
if params.scaling.input.xray_data.wavelength1.f_double_prime is not None:
if params.scaling.input.xray_data.wavelength2.f_double_prime is not None:
k1 = (params.scaling.input.xray_data.wavelength1.f_double_prime/
params.scaling.input.xray_data.wavelength2.f_double_prime)
print >> log, " Using user specified f\" values"
print >> log, " user specified f\"(w1)/f\"(w2) is %3.2f"\
%(k1)
print >> log
if params.scaling.input.xray_data.wavelength1.f_prime is not None:
if params.scaling.input.xray_data.wavelength2.f_prime is not None:
if params.scaling.input.xray_data.wavelength2.f_double_prime is not None:
k2 = (params.scaling.input.xray_data.wavelength1.f_prime-
params.scaling.input.xray_data.wavelength2.f_prime)\
/params.scaling.input.xray_data.wavelength2.f_double_prime
print >> log, " Using user specified f\" and f' values"
print >> log, " user specified f\"(w1)/f\"(w2) is %3.2f"\
%(k2)
print >> log
fa_gen = fa_estimation.twmad_fa_driver(miller_array_w1,
miller_array_w2,
k1,
k2,
params.scaling.input.fa_estimation)
print >> log
print >> log, "writing mtz file"
print >> log, "----------------"
print >> log
## Please write out the abs_delta_f array
fa = fa_gen.fa_values
mtz_dataset = fa.as_mtz_dataset(
column_root_label='F'+params.scaling.input.output.outlabel)
mtz_dataset.mtz_object().write(
file_name=params.scaling.input.output.hklout)
if (__name__ == "__main__"):
run(sys.argv[1:])
|
<gh_stars>10-100
from unittest import mock
from unittest.mock import MagicMock
import pytest
from airflow.exceptions import AirflowException, TaskDeferred
from airflow.models import DAG
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from google.cloud.exceptions import Conflict
from astronomer.providers.google.cloud.operators.bigquery import (
BigQueryCheckOperatorAsync,
BigQueryGetDataOperatorAsync,
BigQueryInsertJobOperatorAsync,
BigQueryIntervalCheckOperatorAsync,
BigQueryValueCheckOperatorAsync,
)
from astronomer.providers.google.cloud.triggers.bigquery import (
BigQueryCheckTrigger,
BigQueryGetDataTrigger,
BigQueryInsertJobTrigger,
BigQueryIntervalCheckTrigger,
BigQueryValueCheckTrigger,
)
TEST_DATASET_LOCATION = "EU"
TEST_GCP_PROJECT_ID = "test-project"
TEST_DATASET = "test-dataset"
TEST_TABLE = "test-table"
@pytest.fixture
def context():
"""
Creates an empty context.
"""
context = {}
yield context
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_bigquery_insert_job_operator_async(mock_hook):
"""
Asserts that a task is deferred and a BigQueryInsertJobTrigger will be fired
when the BigQueryInsertJobOperatorAsync is executed.
"""
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
op = BigQueryInsertJobOperatorAsync(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(create_context(op))
assert isinstance(
exc.value.trigger, BigQueryInsertJobTrigger
), "Trigger is not a BigQueryInsertJobTrigger"
def test_bigquery_insert_job_operator_execute_failure(context):
"""Tests that an AirflowException is raised in case of error event"""
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
job_id = "123456"
operator = BigQueryInsertJobOperatorAsync(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
)
with pytest.raises(AirflowException):
operator.execute_complete(context=None, event={"status": "error", "message": "test failure message"})
def create_context(task):
dag = DAG(dag_id="dag")
execution_date = datetime(2022, 1, 1, 0, 0, 0)
dag_run = DagRun(
dag_id=dag.dag_id,
execution_date=execution_date,
run_id=DagRun.generate_run_id(DagRunType.MANUAL, execution_date),
)
task_instance = TaskInstance(task=task)
task_instance.dag_run = dag_run
task_instance.dag_id = dag.dag_id
task_instance.xcom_push = mock.Mock()
return {
"dag": dag,
"run_id": dag_run.run_id,
"task": task,
"ti": task_instance,
"task_instance": task_instance,
}
def test_bigquery_insert_job_operator_execute_complete():
"""Asserts that logging occurs as expected"""
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
job_id = "123456"
operator = BigQueryInsertJobOperatorAsync(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
)
with mock.patch.object(operator.log, "info") as mock_log_info:
operator.execute_complete(
context=create_context(operator),
event={"status": "success", "message": "Job completed", "job_id": job_id},
)
mock_log_info.assert_called_with("%s completed with response %s ", "insert_query_job", "Job completed")
@mock.patch("airflow.providers.google.cloud.operators.bigquery.hashlib.md5")
@pytest.mark.parametrize(
"test_dag_id, expected_job_id",
[("test-dag-id-1.1", "airflow_test_dag_id_1_1_test_job_id_2020_01_23T00_00_00_00_00_hash")],
ids=["test-dag-id-1.1"],
)
def test_job_id_validity(mock_md5, test_dag_id, expected_job_id):
"""Asserts that job id is correctly generated"""
hash_ = "hash"
mock_md5.return_value.hexdigest.return_value = hash_
context = {"logical_date": datetime(2020, 1, 23)}
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
with DAG(dag_id=test_dag_id, start_date=datetime(2020, 1, 23)):
op = BigQueryInsertJobOperatorAsync(
task_id="test_job_id", configuration=configuration, project_id=TEST_GCP_PROJECT_ID
)
assert op._job_id(context) == expected_job_id
@mock.patch("airflow.providers.google.cloud.operators.bigquery.hashlib.md5")
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_execute_reattach(mock_hook, mock_md5):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.side_effect = Conflict("any")
job = MagicMock(
job_id=real_job_id,
error_result=False,
state="PENDING",
done=lambda: False,
)
mock_hook.return_value.get_job.return_value = job
op = BigQueryInsertJobOperatorAsync(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
reattach_states={"PENDING"},
)
with pytest.raises(TaskDeferred):
op.execute(create_context(op))
mock_hook.return_value.get_job.assert_called_once_with(
location=TEST_DATASET_LOCATION,
job_id=real_job_id,
project_id=TEST_GCP_PROJECT_ID,
)
job._begin.assert_called_once_with()
@mock.patch("airflow.providers.google.cloud.operators.bigquery.hashlib.md5")
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_execute_force_rerun(mock_hook, mock_md5):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.side_effect = Conflict("any")
job = MagicMock(
job_id=real_job_id,
error_result=False,
state="DONE",
done=lambda: False,
)
mock_hook.return_value.get_job.return_value = job
op = BigQueryInsertJobOperatorAsync(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
reattach_states={"PENDING"},
)
with pytest.raises(AirflowException) as exc:
        op.execute(create_context(op))
expected_exception_msg = (
f"Job with id: {real_job_id} already exists and is in {job.state} state. "
f"If you want to force rerun it consider setting `force_rerun=True`."
f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
)
assert str(exc.value) == expected_exception_msg
mock_hook.return_value.get_job.assert_called_once_with(
location=TEST_DATASET_LOCATION,
job_id=real_job_id,
project_id=TEST_GCP_PROJECT_ID,
)
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_bigquery_check_operator_async(mock_hook):
"""
Asserts that a task is deferred and a BigQueryCheckTrigger will be fired
when the BigQueryCheckOperatorAsync is executed.
"""
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
op = BigQueryCheckOperatorAsync(
task_id="bq_check_operator_job",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(create_context(op))
assert isinstance(exc.value.trigger, BigQueryCheckTrigger), "Trigger is not a BigQueryCheckTrigger"
def test_bigquery_check_operator_execute_failure(context):
"""Tests that an AirflowException is raised in case of error event"""
operator = BigQueryCheckOperatorAsync(
task_id="bq_check_operator_execute_failure", sql="SELECT * FROM any", location=TEST_DATASET_LOCATION
)
with pytest.raises(AirflowException):
operator.execute_complete(context=None, event={"status": "error", "message": "test failure message"})
def test_bigquery_check_op_execute_complete_with_no_records():
"""Asserts that exception is raised with correct expected exception message"""
operator = BigQueryCheckOperatorAsync(
task_id="bq_check_operator_execute_complete", sql="SELECT * FROM any", location=TEST_DATASET_LOCATION
)
with pytest.raises(AirflowException) as exc:
operator.execute_complete(context=None, event={"status": "success", "records": None})
expected_exception_msg = "The query returned None"
assert str(exc.value) == expected_exception_msg
def test_bigquery_check_op_execute_complete_with_non_boolean_records():
"""Executing a sql which returns a non-boolean value should raise exception"""
test_sql = "SELECT * FROM any"
operator = BigQueryCheckOperatorAsync(
task_id="bq_check_operator_execute_complete", sql=test_sql, location=TEST_DATASET_LOCATION
)
expected_exception_msg = f"Test failed.\nQuery:\n{test_sql}\nResults:\n{[20, False]!s}"
with pytest.raises(AirflowException) as exc:
operator.execute_complete(context=None, event={"status": "success", "records": [20, False]})
assert str(exc.value) == expected_exception_msg
def test_bigquery_check_operator_execute_complete():
"""Asserts that logging occurs as expected"""
operator = BigQueryCheckOperatorAsync(
task_id="bq_check_operator_execute_complete", sql="SELECT * FROM any", location=TEST_DATASET_LOCATION
)
with mock.patch.object(operator.log, "info") as mock_log_info:
operator.execute_complete(context=None, event={"status": "success", "records": [20]})
mock_log_info.assert_called_with("Success.")
def test_bigquery_interval_check_operator_execute_complete():
"""Asserts that logging occurs as expected"""
operator = BigQueryIntervalCheckOperatorAsync(
task_id="bq_interval_check_operator_execute_complete",
table="test_table",
metrics_thresholds={"COUNT(*)": 1.5},
location=TEST_DATASET_LOCATION,
)
with mock.patch.object(operator.log, "info") as mock_log_info:
operator.execute_complete(context=None, event={"status": "success", "message": "Job completed"})
mock_log_info.assert_called_with(
"%s completed with response %s ", "bq_interval_check_operator_execute_complete", "success"
)
def test_bigquery_interval_check_operator_execute_failure(context):
"""Tests that an AirflowException is raised in case of error event"""
operator = BigQueryIntervalCheckOperatorAsync(
task_id="bq_interval_check_operator_execute_complete",
table="test_table",
metrics_thresholds={"COUNT(*)": 1.5},
location=TEST_DATASET_LOCATION,
)
with pytest.raises(AirflowException):
operator.execute_complete(context=None, event={"status": "error", "message": "test failure message"})
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_bigquery_interval_check_operator_async(mock_hook):
"""
Asserts that a task is deferred and a BigQueryIntervalCheckTrigger will be fired
when the BigQueryIntervalCheckOperatorAsync is executed.
"""
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
op = BigQueryIntervalCheckOperatorAsync(
task_id="bq_interval_check_operator_execute_complete",
table="test_table",
metrics_thresholds={"COUNT(*)": 1.5},
location=TEST_DATASET_LOCATION,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(create_context(op))
assert isinstance(
exc.value.trigger, BigQueryIntervalCheckTrigger
), "Trigger is not a BigQueryIntervalCheckTrigger"
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_bigquery_get_data_operator_async_with_selected_fields(mock_hook):
"""
    Asserts that a task is deferred and a BigQueryGetDataTrigger will be fired
    when the BigQueryGetDataOperatorAsync is executed.
"""
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
op = BigQueryGetDataOperatorAsync(
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id=TEST_TABLE,
max_results=100,
selected_fields="value,name",
)
with pytest.raises(TaskDeferred) as exc:
op.execute(create_context(op))
assert isinstance(exc.value.trigger, BigQueryGetDataTrigger), "Trigger is not a BigQueryGetDataTrigger"
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_bigquery_get_data_operator_async_without_selected_fields(mock_hook):
"""
    Asserts that a task is deferred and a BigQueryGetDataTrigger will be fired
    when the BigQueryGetDataOperatorAsync is executed.
"""
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
op = BigQueryGetDataOperatorAsync(
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id=TEST_TABLE,
max_results=100,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(create_context(op))
assert isinstance(exc.value.trigger, BigQueryGetDataTrigger), "Trigger is not a BigQueryGetDataTrigger"
def test_bigquery_get_data_operator_execute_failure(context):
"""Tests that an AirflowException is raised in case of error event"""
operator = BigQueryGetDataOperatorAsync(
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id="any",
max_results=100,
)
with pytest.raises(AirflowException):
operator.execute_complete(context=None, event={"status": "error", "message": "test failure message"})
def test_bigquery_get_data_op_execute_complete_with_records():
"""Asserts that exception is raised with correct expected exception message"""
operator = BigQueryGetDataOperatorAsync(
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id="any",
max_results=100,
)
with mock.patch.object(operator.log, "info") as mock_log_info:
operator.execute_complete(context=None, event={"status": "success", "records": [20]})
mock_log_info.assert_called_with("Total extracted rows: %s", 1)
def _get_value_check_async_operator(use_legacy_sql: bool = False):
"""Helper function to initialise BigQueryValueCheckOperatorAsync operator"""
query = "SELECT COUNT(*) FROM Any"
pass_val = 2
return BigQueryValueCheckOperatorAsync(
task_id="check_value",
sql=query,
pass_value=pass_val,
use_legacy_sql=use_legacy_sql,
)
@mock.patch("astronomer.providers.google.cloud.operators.bigquery._BigQueryHook")
def test_bigquery_value_check_async(mock_hook):
"""
Asserts that a task is deferred and a BigQueryValueCheckTrigger will be fired
when the BigQueryValueCheckOperatorAsync is executed.
"""
operator = _get_value_check_async_operator(True)
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
with pytest.raises(TaskDeferred) as exc:
operator.execute(create_context(operator))
assert isinstance(
exc.value.trigger, BigQueryValueCheckTrigger
), "Trigger is not a BigQueryValueCheckTrigger"
def test_bigquery_value_check_operator_execute_complete_success():
"""Tests response message in case of success event"""
operator = _get_value_check_async_operator()
assert (
operator.execute_complete(context=None, event={"status": "success", "message": "Job completed!"})
is None
)
def test_bigquery_value_check_operator_execute_complete_failure():
"""Tests that an AirflowException is raised in case of error event"""
operator = _get_value_check_async_operator()
with pytest.raises(AirflowException):
operator.execute_complete(context=None, event={"status": "error", "message": "test failure message"})
@pytest.mark.parametrize(
"kwargs, expected",
[
({"sql": "SELECT COUNT(*) from Any"}, "missing keyword argument 'pass_value'"),
({"pass_value": "Any"}, "missing keyword argument 'sql'"),
],
)
def test_bigquery_value_check_missing_param(kwargs, expected):
"""Assert the exception if require param not pass to BigQueryValueCheckOperatorAsync operator"""
with pytest.raises(AirflowException) as missing_param:
BigQueryValueCheckOperatorAsync(**kwargs)
assert missing_param.value.args[0] == expected
def test_bigquery_value_check_empty():
"""Assert the exception if require param not pass to BigQueryValueCheckOperatorAsync operator"""
expected, expected1 = (
"missing keyword arguments 'sql', 'pass_value'",
"missing keyword arguments 'pass_value', 'sql'",
)
with pytest.raises(AirflowException) as missing_param:
BigQueryValueCheckOperatorAsync(kwargs={})
assert (missing_param.value.args[0] == expected) or (missing_param.value.args[0] == expected1)
|
""" This module contains a class to represent a Tichu Deck. """
import random
from env.card import Card
from env.cards import Cards
class Deck():
"""
A class to represent a Tichu Deck.
Contains instances of all Cards in a Tichu deck.
Attributes
----------
all_cards: list of Card
A list containing all Card objects in a Tichu deck.
size: int
The size of a Tichu deck (56).
Methods
-------
shuffle_and_deal():
Shuffles the deck and returns a list of 4 Cards instances to
start a game.
"""
    def __init__(self):
        """ Instantiates all Tichu Cards. """
        names = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
        suits = ['Spade', 'Heart', 'Dia', 'Club']
        # The 52 regular cards, rank by rank in suit order, followed by the
        # four special Tichu cards.
        self.all_cards = [Card(name=name, suit=suit)
                          for name in names for suit in suits]
        self.all_cards += [Card(name=name, suit='Special')
                           for name in ('Phoenix', 'Dragon', 'Majong', 'Dog')]
        self.size = len(self.all_cards)
    def shuffle_and_deal(self):
        """ Shuffles the deck in place and returns a list of 4 Cards instances. """
        all_cards = self.all_cards
        random.shuffle(all_cards)
        chunk_size = self.size // 4
set_0 = Cards(card_list=all_cards[0:chunk_size])
set_1 = Cards(card_list=all_cards[chunk_size:2*chunk_size])
set_2 = Cards(card_list=all_cards[2*chunk_size:3*chunk_size])
set_3 = Cards(card_list=all_cards[3*chunk_size:])
return [set_0, set_1, set_2, set_3]
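# A minimal usage sketch (hypothetical, assuming env.card and env.cards are importable):
#
#     deck = Deck()
#     hands = deck.shuffle_and_deal()
#     # hands is a list of 4 Cards instances, 14 cards each (56 / 4)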
|
<filename>mmc_export/Helpers/resourceAPI.py
import asyncio
from collections import namedtuple
from datetime import datetime
from json import loads as parse_json
from pathlib import Path
from re import compile as re_compile
from urllib.parse import urlparse
from zipfile import ZipFile
import tenacity as tn
from aiohttp_client_cache.session import CachedSession
from .structures import Intermediate, Resource
from .utils import delete_github_token, get_github_token, get_hash
from .. import config
class ResourceAPI(object):
modrinth_search_type: str
excluded_providers: list[str]
def __init__(self, session: CachedSession, intermediate: Intermediate) -> None:
self.session = session
self.intermediate = intermediate
self.session.headers["X-Api-Key"] = config.CURSEFORGE_API_TOKEN
self.session.headers["Content-Type"] = "application/json"
self.session.headers["Accept"] = "application/json"
self.github = "https://api.github.com"
self.modrinth = "https://api.modrinth.com/v2"
self.curseforge = "https://api.curseforge.com/v1"
self.cache_directory = config.DEFAULT_CACHE_DIR / "v5"
self.cache_directory.mkdir(parents=True, exist_ok=True)
super().__init__()
def _get_raw_info(self, path: Path) -> tuple[dict, Resource]:
from pickle import HIGHEST_PROTOCOL
from pickle import dumps as serialize
from pickle import loads as deserialize
cache_file = self.cache_directory / get_hash(path, "xxhash")
if cache_file.exists():
data = cache_file.read_bytes()
meta, resource = deserialize(data)
else:
meta = {"name": path.stem,
"id": None,
"version": "0.0.0"}
if path.suffix in (".jar", ".disabled"):
with ZipFile(path) as modArchive:
filenames = [Path(file).name for file in modArchive.namelist()]
if "fabric.mod.json" in filenames:
data = modArchive.read("fabric.mod.json")
meta = parse_json(data, strict=False)
elif "pack.mcmeta" in filenames:
data = modArchive.read("pack.mcmeta")
json = parse_json(data, strict=False)
meta['name'] = json['pack']['description']
resource = Resource(meta['name'])
file_data = path.read_bytes()
resource.file.hash.sha1 = get_hash(file_data, "sha1")
resource.file.hash.sha256 = get_hash(file_data, "sha256")
resource.file.hash.sha512 = get_hash(file_data, "sha512")
resource.file.hash.murmur2 = get_hash(file_data, "murmur2")
data = serialize((meta, resource), HIGHEST_PROTOCOL)
cache_file.write_bytes(data)
resource.file.path = path
resource.file.name = path.name
resource.file.relativePath = path.parent.name
return meta, resource
@tn.retry(stop=tn.stop.stop_after_attempt(5), wait=tn.wait.wait_fixed(1))
async def _get_github(self, meta: dict, resource: Resource) -> None:
if "contact" not in meta or "GitHub" in self.excluded_providers: return
for link in meta['contact'].values():
parsed_link = urlparse(link)
if parsed_link.netloc == "github.com":
owner, repo = parsed_link.path[1:].split('/')[:2]
repo = repo.removesuffix(".git")
resource.links.append(f"https://github.com/{owner}/{repo}")
break
else: return
async with self.session.get(f"https://api.github.com/repos/{owner}/{repo}/releases") as response:
if response.status != 200 and response.status != 504: return
for release in await response.json():
for asset in release['assets']:
if asset['name'] == resource.file.name:
url = asset['browser_download_url']
author = release['author']['login']
break
else: continue
break
else: return
resource.providers['Other'] = Resource.Provider(
ID = None,
fileID = None,
url = url,
slug = meta['id'],
author = author)
class ResourceAPI_Batched(ResourceAPI):
def __init__(self, session: CachedSession, intermediate: Intermediate) -> None:
self.queue: list[tuple[dict, Resource]] = list()
super().__init__(session, intermediate)
def queue_resource(self, path: Path) -> None:
meta, resource = self._get_raw_info(path)
if path.suffix == ".disabled":
resource.optional = True
resource.file.path = path.replace(path.with_suffix(''))
resource.file.name = resource.file.path.name
self.queue.append((meta, resource))
async def gather(self) -> list[Resource]:
futures = (
self._get_batched_curseforge(),
self._get_batched_modrinth(),
self._get_batched_github()
)
await asyncio.gather(*futures)
resources = [resource for _, resource in self.queue]
return resources
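    # A minimal usage sketch (hypothetical names, assuming a configured
    # aiohttp_client_cache CachedSession and an Intermediate instance exist):
    #
    #     api = ResourceAPI_Batched(session, intermediate)
    #     for path in mods_dir.glob("*.jar"):
    #         api.queue_resource(path)
    #     resources = await api.gather()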
@tn.retry(stop=tn.stop.stop_after_attempt(5), wait=tn.wait.wait_fixed(1))
async def _get_batched_curseforge(self) -> None:
if "CurseForge" in self.excluded_providers: return
payload = {"fingerprints":[resource.file.hash.murmur2 for _, resource in self.queue]}
async with self.session.post(f"{self.curseforge}/fingerprints", json=payload) as response:
if response.status != 200 and response.status != 504: return
if matches := (await response.json())['data']['exactMatches']:
versions = {str(version['file']['fileFingerprint']): version for version in matches}
else: return
payload = {"modIds": [version['id'] for version in versions.values()]}
async with self.session.post(f"{self.curseforge}/mods", json=payload) as response:
if response.status != 200 and response.status != 504: return
if addons_array := (await response.json())['data']:
addons = {addon['id']: addon for addon in addons_array}
else: return
for _, resource in self.queue:
if version := versions.get(resource.file.hash.murmur2):
if addon := addons.get(version['id']):
resource.name = addon['name']
resource.links.append(addon['links']['websiteUrl'])
if srcUrl := addon['links']['sourceUrl']:
resource.links.append(srcUrl)
resource.providers['CurseForge'] = Resource.Provider(
ID = addon['id'],
fileID = version['file']['id'],
url = version['file']['downloadUrl'],
slug = addon['slug'],
author = addon['authors'][0]['name'])
@tn.retry(stop=tn.stop.stop_after_attempt(5), wait=tn.wait.wait_fixed(1))
async def _get_batched_modrinth(self) -> None:
if "Modrinth" in self.excluded_providers: return
search_queue: list[tuple[dict, Resource]] = list()
payload = {"algorithm": "sha1", "hashes": [resource.file.hash.sha1 for _, resource in self.queue]}
async with self.session.post(f"{self.modrinth}/version_files", json=payload) as response:
if response.status != 200 and response.status != 504 and response.status != 423: return
versions = await response.json()
for meta, resource in self.queue:
if version := versions.get(resource.file.hash.sha1):
file = next(file for file in version['files']
if resource.file.hash.sha1 == file['hashes']['sha1']
and resource.file.hash.sha512 == file['hashes']['sha512'])
resource.providers['Modrinth'] = Resource.Provider(
ID = version['project_id'],
fileID = version['id'],
url = file['url'],
slug = meta['id'])
else: search_queue.append((meta, resource))
if self.modrinth_search_type != "exact": await self._get_batched_modrinth_loose(search_queue)
@tn.retry(stop=tn.stop.stop_after_attempt(5), wait=tn.wait.wait_fixed(1))
async def _get_batched_modrinth_loose(self, search_queue: list[tuple[dict, Resource]]) -> None:
version_ids: list[str] = list()
@tn.retry(stop=tn.stop.stop_after_attempt(5), wait=tn.wait.wait_incrementing(1, 15, 60))
async def get_project_id(meta: dict, resource: Resource):
if self.modrinth_search_type == "loose":
async with self.session.get(f"{self.modrinth}/search?query={resource.name}") as response:
if response.status != 200 and response.status != 504 and response.status != 423: return resource, None
if hits := (await response.json())['hits']: return resource, hits[0]['project_id']
return resource, meta['id']
futures = (get_project_id(meta, resource) for meta, resource in search_queue)
project_ids = {resource.name: id for resource, id in await asyncio.gather(*futures) if id}
if not project_ids: return
        l2s = lambda l: "[{}]".format(",".join(map('"{}"'.format, l)))  # list to string conversion
async with self.session.get(f"{self.modrinth}/projects?ids={l2s(project_ids.values())}") as response:
if response.status != 200 and response.status != 504 and response.status != 423: return
for project in (projects := await response.json()): version_ids.extend(project['versions'])
if not version_ids: return
async with self.session.get(f"{self.modrinth}/versions?ids={l2s(version_ids)}") as response:
if response.status != 200 and response.status != 504 and response.status != 423: return
for project in projects: project['versions'] = [version for version in await response.json()
if version['project_id'] == project['id']]
for meta, resource in search_queue:
if project := next((project for project in projects
if project['id'] == project_ids.get(resource.name, "Get out of here")), None):
for version in project['versions']:
if meta['version'] in version['version_number'] \
and self.intermediate.minecraft_version in version['game_versions'] \
and self.intermediate.modloader.type in version['loaders']:
file = next(file for file in version['files']
if file['filename'] == resource.file.name or file['primary'])
resource.providers['Modrinth'] = Resource.Provider(
ID = version['project_id'],
fileID = version['id'],
url = file['url'],
slug = meta['id'])
resource.file.hash.sha1 = file['hashes']['sha1']
resource.file.hash.sha512 = file['hashes']['sha512']
break
@tn.retry(stop=tn.stop.stop_after_attempt(5), wait=tn.wait.wait_fixed(1))
async def _get_batched_github(self) -> None:
if "GitHub" in self.excluded_providers: return
if not self.session.headers.get('Authorization'):
if token := get_github_token():
self.session.headers['Authorization'] = f"Bearer {token}"
else:
futures = [self._get_github(meta, resource) for meta, resource in self.queue]
await asyncio.gather(*futures)
async with self.session.disabled():
async with self.session.get("https://api.github.com/rate_limit") as response:
ratelimit = (await response.json())['resources']['core']
time_remaining = datetime.fromtimestamp(float(ratelimit['reset'])).strftime("%H:%M")
if ratelimit['remaining'] == 0:
print("You have exceeded the GitHub API rate-limit, only cached results will be used.")
print(f"Please sign in with `mmc-export gh-login` or try again at {time_remaining}")
return
Repository = namedtuple('Repository', ['name', 'owner', 'alias'])
repositories: list[Repository] = list()
pattern = re_compile(r"[\W_]+")
for meta, resource in self.queue:
if "contact" not in meta: continue
for link in meta['contact'].values():
parsed_link = urlparse(link)
if parsed_link.netloc == "github.com":
alias = pattern.sub('', meta['id'])
owner, name = parsed_link.path[1:].split('/')[:2]
repo = Repository(name.removesuffix(".git"), owner, alias)
resource.links.append(f"https://github.com/{repo.owner}/{repo.name}")
repositories.append(repo)
break
else: continue
from gql_query_builder import GqlQuery
queries: list[str] = list()
for repo in repositories:
query = GqlQuery() \
.fields(['...repoReleaseAssets']) \
.query('repository', alias=repo.alias, input={"name": f'"{repo.name}"', "owner": f'"{repo.owner}"'}) \
.generate()
queries.append(query)
payload = """
fragment repoReleaseAssets on Repository {
releases(last: 100) { edges { node {
releaseAssets(last: 10) { nodes {
name
downloadUrl
} } } } } } """ + GqlQuery().operation(queries=queries).generate()
async with self.session.post(f"{self.github}/graphql", json={"query": payload}) as response:
if response.status == 401: delete_github_token(); raise tn.TryAgain
if response.status != 200 and response.status != 504: return
data = (await response.json())['data']
for meta, resource in self.queue:
if not data.get(alias := pattern.sub('', meta['id']) if meta['id'] else "unknown"): continue
for release in data.get(alias, {}).get('releases', {}).get('edges', []):
for asset in release.get('node', {}).get('releaseAssets', {}).get('nodes', []):
if asset['name'] == resource.file.name: url = asset['downloadUrl']; break
else: continue
break
else: continue
resource.providers['Other'] = Resource.Provider(
ID = None,
fileID = None,
url = url,
slug = meta['id'])
|
# ============================================================================
#
# Copyright (C) 2007-2016 Conceptive Engineering bvba.
# www.conceptive.be / <EMAIL>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Conceptive Engineering nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ============================================================================
import six
from ....core.qt import variant_to_py, Qt, QtGui
from .customdelegate import DocumentationMetaclass, CustomDelegate
from camelot.view.proxy import ValueLoading
@six.add_metaclass(DocumentationMetaclass)
class IntervalsDelegate(CustomDelegate):
"""Custom delegate for visualizing camelot.container.IntervalsContainer
data:
"""
def paint(self, painter, option, index):
painter.save()
self.drawBackground(painter, option, index)
intervals_container = variant_to_py(index.model().data(index, Qt.EditRole))
field_attributes = variant_to_py(index.data(Qt.UserRole))
# background_color = QtGui.QColor(index.model().data(index, Qt.BackgroundRole))
# editable is defaulted to False, because there is no editor, no need for one currently
editable, color, background_color = False, None, None
if field_attributes != ValueLoading:
editable = field_attributes.get( 'editable', False )
background_color = field_attributes.get( 'background_color', QtGui.QColor(index.model().data(index, Qt.BackgroundRole)) )
color = field_attributes.get('color', None)
if( option.state & QtGui.QStyle.State_Selected ):
painter.fillRect(option.rect, option.palette.highlight())
else:
if not editable:
painter.fillRect(option.rect, option.palette.window())
else:
painter.fillRect(option.rect, background_color)
if intervals_container and intervals_container!=ValueLoading:
rect = option.rect
xscale = float(rect.width()-4)/(intervals_container.max - intervals_container.min)
xoffset = intervals_container.min * xscale + rect.x()
yoffset = rect.y() + rect.height()/2
for interval in intervals_container.intervals:
qcolor = QtGui.QColor( interval.color or color )
pen = QtGui.QPen( qcolor )
pen.setWidth(3)
painter.setPen(pen)
xscale_interval = xscale
x1, x2 = xoffset + interval.begin *xscale_interval, xoffset + interval.end*xscale_interval
painter.drawLine(x1, yoffset, x2, yoffset)
painter.drawEllipse(x1-1, yoffset-1, 2, 2)
painter.drawEllipse(x2-1, yoffset-1, 2, 2)
pen = QtGui.QPen(Qt.white)
painter.restore()
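    # Worked example with hypothetical numbers: for a container with min = 0 and
    # max = 10 drawn in a rect 104 px wide starting at x = 0, xscale is
    # (104 - 4) / 10 = 10 px per unit, so an interval [2, 5] becomes a line from
    # x = 20 to x = 50 at the vertical midpoint of the cell.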
def createEditor(self, parent, option, index):
pass
def setEditorData(self, editor, index):
pass
def setModelData(self, editor, model, index):
pass
|
<reponame>AlexRogalskiy/splitgraph
"""Module imported by Multicorn on the Splitgraph engine server: a foreign data wrapper
that communicates to Socrata datasets using sodapy."""
import json
import logging
from typing import Any, Dict, Optional
import splitgraph.config
from splitgraph.config import get_singleton
from splitgraph.ingestion.socrata.querying import (
cols_to_socrata,
estimate_socrata_rows_width,
quals_to_socrata,
sortkeys_to_socrata,
)
try:
from multicorn import ANY, ForeignDataWrapper
except ImportError:
# Multicorn not installed (OK if we're not on the engine -- tests).
ForeignDataWrapper = object
ANY = object()
_PG_LOGLEVEL = logging.INFO
def to_json(row, columns, column_map):
result = {}
for col in columns:
val = row.get(column_map.get(col, col))
if isinstance(val, (dict, list)):
val = json.dumps(val)
result[col] = val
return result
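# For example (hypothetical row), nested Socrata values are serialized to JSON strings:
#
#     to_json({"the_geom": {"type": "Point"}}, ["geom"], {"geom": "the_geom"})
#     # -> {"geom": '{"type": "Point"}'}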
class SocrataForeignDataWrapper(ForeignDataWrapper):
def __init__(self, fdw_options, fdw_columns):
"""The foreign data wrapper is initialized on the first query.
Args:
fdw_options (dict): The foreign data wrapper options. It is a dictionary
mapping keys from the sql "CREATE FOREIGN TABLE"
statement options. It is left to the implementor
to decide what should be put in those options, and what
to do with them.
"""
# Initialize the logger that will log to the engine's stderr: log timestamp and PID.
from sodapy import Socrata
logging.basicConfig(
format="%(asctime)s [%(process)d] %(levelname)s %(message)s",
level=get_singleton(splitgraph.config.CONFIG, "SG_LOGLEVEL"),
)
# Dict of connection parameters as well as the table, repository and image hash to query.
self.fdw_options = fdw_options
# The foreign datawrapper columns (name -> ColumnDefinition).
self.fdw_columns = fdw_columns
self.table = self.fdw_options["table"]
# Mappings from SG to Socrata columns (for query building)
self.column_map = json.loads(self.fdw_options.get("column_map") or "{}")
self.app_token = self.fdw_options.get("app_token")
self.domain = self.fdw_options["domain"]
self.batch_size = int(self.fdw_options.get("batch_size", 1000))
self.client = Socrata(domain=self.domain, app_token=self.app_token)
# Cached table metadata
self._metadata: Optional[Dict[str, Any]] = None
def can_sort(self, sortkeys):
"""
:param sortkeys: List of SortKey
:return: List of SortKey the FDW can sort on
"""
# Mostly, we can push all sort clauses down to Socrata.
logging.debug("can_sort %r", sortkeys)
supported = []
for key in sortkeys:
# Socrata sorts nulls first by default (TODO both asc and desc?)
if key.nulls_first != key.is_reversed:
continue
supported.append(key)
return supported
def get_rel_size(self, quals, columns):
"""
Method called from the planner to estimate the resulting relation
size for a scan.
It will help the planner in deciding between different types of plans,
according to their costs.
Args:
quals (list): A list of Qual instances describing the filters
applied to this scan.
columns (list): The list of columns that must be returned.
Returns:
A tuple of the form (expected_number_of_rows, avg_row_width (in bytes))
"""
try:
return estimate_socrata_rows_width(columns, self.table_meta, self.column_map)
except Exception:
logging.exception("Failed planning Socrata query, returning dummy values")
return 1000000, len(columns) * 10
def explain(self, quals, columns, sortkeys=None, verbose=False):
query = quals_to_socrata(quals, self.column_map)
select = cols_to_socrata(columns, self.column_map)
order = sortkeys_to_socrata(sortkeys, self.column_map)
return [
"Socrata query to %s" % self.domain,
"Socrata dataset ID: %s" % self.table,
"Query: %s" % query,
"Columns: %s" % select,
"Order: %s" % order,
]
def execute(self, quals, columns, sortkeys=None):
"""Main Multicorn entry point."""
query = quals_to_socrata(quals, self.column_map)
select = cols_to_socrata(columns, self.column_map)
order = sortkeys_to_socrata(sortkeys, self.column_map)
logging.debug("Socrata query: %r, select: %r, order: %r", query, select, order)
# TODO offsets stop working after some point?
result = self.client.get_all(
dataset_identifier=self.table,
where=query,
select=select,
limit=self.batch_size,
order=order,
exclude_system_fields="false",
)
for r in result:
r = to_json(r, columns, self.column_map)
yield r
@property
def table_meta(self):
if not self._metadata:
self._metadata = self.client.get_metadata(dataset_identifier=self.table)
return self._metadata
|
<gh_stars>0
"""Client side message class
Authors:
<NAME> <EMAIL>
Date:
02.05.2020
"""
import sys
import selectors
import json
import io
import struct
class Message:
def __init__(self, selector, sock, addr, request):
self.selector = selector
self.sock = sock
self.addr = addr
self.request = request
self._recv_buffer = b""
self._send_buffer = b""
self._request_queued = False
self._json_header_len = None
self.json_header = None
self.response = None
def _set_selector_events_mask(self, mode):
""" Set selector to listen for events: mode is 'r', 'w' or 'rw'. """
if mode == 'r':
events = selectors.EVENT_READ
elif mode == 'w':
events = selectors.EVENT_WRITE
elif mode == 'rw':
events = selectors.EVENT_WRITE | selectors.EVENT_READ
else:
            raise ValueError(f'Invalid events mask mode {repr(mode)}.')
self.selector.modify(self.sock, events, data=self)
def _read(self):
try:
# Data ready to read
data = self.sock.recv(4096)
except BlockingIOError:
# Resource temporarily unavailable
pass
else:
if data:
self._recv_buffer += data
else:
raise RuntimeError('Peer closed.')
def _write(self):
if self._send_buffer:
## TODO print send data??
try:
sent = self.sock.send(self._send_buffer)
except BlockingIOError:
pass
else:
self._send_buffer = self._send_buffer[sent:]
@staticmethod
def _json_encode(obj, encoding):
        return json.dumps(obj, ensure_ascii=False).encode(encoding)
@staticmethod
def _json_decode(json_bytes, encoding):
tiow = io.TextIOWrapper(
io.BytesIO(json_bytes), encoding=encoding, newline=''
)
obj = json.load(tiow)
tiow.close()
return obj
def _create_message(self, *, content_bytes, content_type, content_encoding):
json_header = {
'byteorder': sys.byteorder,
'content-type': content_type,
'content-encoding': content_encoding,
'content-length': len(content_bytes),
}
json_header_bytes = self._json_encode(json_header, 'utf-8')
message_header = struct.pack('>H', len(json_header_bytes))
message = message_header + json_header_bytes + content_bytes
return message
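    # A worked framing example (hypothetical content): for content_bytes
    # b'{"action": "search"}' the bytes put on the wire are
    #
    #     struct.pack('>H', len(json_header_bytes))  # 2-byte big-endian JSON header length
    #     + json_header_bytes                        # byteorder/content-type/encoding/length
    #     + b'{"action": "search"}'                  # the payload itself
    #
    # which is exactly what process_proto_header and process_json_header peel
    # off again on the receiving side.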
def _process_response_json_content(self):
content = self.response
result = content.get('result')
print(f'Got result: {result}')
def _process_response_binary_content(self):
content = self.response
print(f'Got response: {repr(content)}')
def process_events(self, mask):
if mask & selectors.EVENT_READ:
self.read()
if mask & selectors.EVENT_WRITE:
self.write()
def read(self):
self._read()
if self._json_header_len is None:
self.process_proto_header()
if self._json_header_len is not None:
if self.json_header is None:
self.process_json_header()
if self.json_header:
if self.response is None:
self.process_response()
def write(self):
if not self._request_queued:
self.queue_request()
self._write()
if self._request_queued:
if not self._send_buffer:
self._set_selector_events_mask('r')
def close(self):
        print('Closing connection to', self.addr)
try:
self.selector.unregister(self.sock)
except Exception as e:
            print(f'Error: selector.unregister() exception for {self.addr}: {repr(e)}')
try:
self.sock.close()
except OSError as e:
            print(f'Error: socket.close() exception for {self.addr}: {repr(e)}')
finally:
self.sock = None
def queue_request(self):
content = self.request['content']
content_type = self.request['type']
content_encoding = self.request['encoding']
if content_type == 'text/json':
req = {
'content_bytes': self._json_encode(content, content_encoding),
'content_type': content_type,
'content_encoding': content_encoding,
}
else:
req = {
'content_bytes': content,
'content_type': content_type,
'content_encoding': content_encoding,
}
message = self._create_message(**req)
self._send_buffer += message
self._request_queued = True
def process_proto_header(self):
hdrlen = 2
if len(self._recv_buffer) >= hdrlen:
self._json_header_len = struct.unpack(
'>H', self._recv_buffer[:hdrlen]
)[0]
self._recv_buffer = self._recv_buffer[hdrlen:]
def process_json_header(self):
hdrlen = self._json_header_len
if len(self._recv_buffer) >= hdrlen:
self.json_header = self._json_decode(
self._recv_buffer[:hdrlen], 'utf-8'
)
self._recv_buffer = self._recv_buffer[hdrlen:]
for reqhdr in ('byteorder', 'content-length', 'content-type', 'content-encoding'):
if reqhdr not in self.json_header:
raise ValueError(f'Missing required header "{reqhdr}".')
def process_response(self):
content_len = self.json_header['content-length']
if not len(self._recv_buffer) >= content_len:
return
data = self._recv_buffer[:content_len]
self._recv_buffer = self._recv_buffer[content_len:]
if self.json_header['content-type'] == 'text/json':
encoding = self.json_header['content-encoding']
self.response = self._json_decode(data, encoding)
# TODO print received data
self._process_response_json_content()
else:
self.response = data
print(f'Received {self.json_header["content-type"]} response from', self.addr)
self._process_response_binary_content()
self.close()
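# A minimal usage sketch (hypothetical socket/selector setup): the class is
# driven by a selectors event loop rather than called directly.
#
#     sel = selectors.DefaultSelector()
#     request = {'content': {'action': 'search'}, 'type': 'text/json', 'encoding': 'utf-8'}
#     message = Message(sel, sock, addr, request)  # sock: a connected non-blocking socket
#     sel.register(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, data=message)
#     while True:
#         for key, mask in sel.select(timeout=1):
#             key.data.process_events(mask)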
|
<reponame>benebjoern/XAI_MovieBot<filename>moviebot/controller/messenger.py
"""This file contains a Messenger class which sends post requests to the facebook API."""
import requests
class Messenger:
def __init__(self, user_id, token):
"""Initializes structs and uri's for Messenger."""
self.user_id = user_id
self.buttons = {}
self.token = token
self.quick_reply_uri = "https://graph.facebook.com/v10.0/me/messages?access_token="+self.token
self.url_button_uri = "https://graph.facebook.com/v2.6/me/messages?access_token="+self.token
self.text_uri = 'https://graph.facebook.com/v9.0/me/messages?access_token='+self.token
self.template_uri = 'https://graph.facebook.com/v9.0/me/messages?access_token='+self.token
self.button_template_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token
self.typing_on_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token
self.mark_seen_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token
def quickreply(self, text, title, payload):
"""Posts a list of quickreply buttons.
        Args:
            text: quickreply prompt text
            title: list of button titles
            payload: list of button payloads
        Returns:
            post request containing quickreply json and quickreply uri
        """
        replies = []
        for i, button_title in enumerate(title):
            replies.append({
                "content_type": "text",
                "title": button_title,
                "payload": payload[i]
            })
quick_reply = {
"recipient": {
"id": self.user_id
},
"messaging_type": "RESPONSE",
"message":{
"text": text,
"quick_replies":replies
}
}
return requests.post(self.quick_reply_uri, json=quick_reply).json()
def create_buttons(self, options):
"""Creates a list of buttons.
Args:
options: structs with values
Returns:
list of buttons
"""
buttons = []
for option in options:
if option['button_type'] == "postback":
buttons.append(
{"type": option['button_type'], "title": option['title'], "payload": option['payload']}
)
if option['button_type'] == "web_url":
buttons.append(
{"type": option['button_type'], "title": option['title'], "url": option['url']}
)
return buttons
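    # Example options (hypothetical values) accepted by create_buttons:
    #
    #     [{"button_type": "postback", "title": "More info", "payload": "INFO"},
    #      {"button_type": "web_url", "title": "Open page", "url": "https://example.com"}]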
def url_button(self, title, options):
"""Posts a button template of type url_button.
Args:
title: button title
options: structs with values
Returns:
post request containing url_button json and url_button uri
"""
buttons = self.create_buttons(options)
template = \
{
"recipient":{
"id": self.user_id
},
"message":{
"attachment":{
"type":"template",
"payload":{
"template_type":"button",
"text": title,
"buttons": buttons
}
}
}
}
return requests.post(self.url_button_uri, json=template).json()
def typing_on(self):
"""Displays typing bubble."""
typing = {
"recipient":{"id": self.user_id},
"sender_action": "typing_on"
}
return requests.post(self.typing_on_uri, json=typing).json()
def mark_seen(self):
"""Displays mark seen icon until new reply is recieved."""
mark_seen = {
"recipient": {"id": self.user_id},
"sender_action": "mark_seen"
}
return requests.post(self.mark_seen_uri, json=mark_seen).json()
def text(self, message):
"""Sends text response.
Args:
message: string
Returns:
post request with text json and text uri
"""
text = {
'recipient': {'id': self.user_id},
'message': {'text': message}
}
return requests.post(self.text_uri, json=text).json()
def template(self, buttons, image, url, subtitle, title):
"""Sends a template response.
Args:
buttons: list of buttons
image: image url
url: url
subtitle: text below title
title: template title
Returns:
post request with template json and template uri
"""
template = {
"recipient":{ "id": self.user_id},
"message":{
"attachment":{
"type":"template",
"payload":{
"template_type":"generic",
"elements":[
{
"title":title,
"image_url":image,
"subtitle":subtitle,
"default_action": {
"type": "web_url",
"url": url,
"webview_height_ratio": "full",
},
"buttons": buttons
}
]
}
}
}
}
return requests.post(self.template_uri, json=template).json()
def buttons_template(self, buttons, text):
"""Sends a button template with different button types.
Args:
buttons: list of buttons
text: template title
Returns:
post request with button template json and button template uri
"""
template = {
"recipient":{ "id": self.user_id},
"message":{
"attachment":{
"type":"template",
"payload":{
"template_type":"button",
"text":text,
"buttons":buttons
}
}
}
}
return requests.post(self.button_template_uri, json=template).json()
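# A minimal usage sketch (hypothetical user id and page token):
#
#     messenger = Messenger(user_id="<PSID>", token="<PAGE_ACCESS_TOKEN>")
#     messenger.mark_seen()
#     messenger.typing_on()
#     messenger.quickreply("Pick a genre:", ["Action", "Comedy"], ["GENRE_ACTION", "GENRE_COMEDY"])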
|
<filename>workchains/wc_phonon.py<gh_stars>0
# Works run by the daemon (using submit)
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
from aiida.work.workchain import WorkChain, ToContext
from aiida.work.workfunction import workfunction
from aiida.work.run import run, submit, async
from aiida.orm import Code, CalculationFactory
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.array import ArrayData
from aiida.orm.data.structure import StructureData
from aiida.orm.data.array.kpoints import KpointsData
from aiida.orm.data.upf import UpfData
from aiida.orm.data.base import Str, Float, Bool
#from aiida.orm.calculation.job.quantumespresso.pw import PwCalculation
#from aiida.orm.calculation.job.vasp.vasp import VaspCalculation
from aiida.work.workchain import if_
PwCalculation = CalculationFactory('quantumespresso.pw')
PhonopyCalculation = CalculationFactory('phonopy')
import numpy as np
from generate_inputs import *
# Function obtained from aiida's quantumespresso plugin. Copied here for convenience.
def get_pseudos(structure, family_name):
"""
Set the pseudo to use for all atomic kinds, picking pseudos from the
family with name family_name.
:note: The structure must already be set.
:param family_name: the name of the group containing the pseudos
"""
from collections import defaultdict
from aiida.orm.data.upf import get_pseudos_from_structure
# A dict {kind_name: pseudo_object}
kind_pseudo_dict = get_pseudos_from_structure(structure, family_name)
# We have to group the species by pseudo, I use the pseudo PK
# pseudo_dict will just map PK->pseudo_object
pseudo_dict = {}
# Will contain a list of all species of the pseudo with given PK
pseudo_species = defaultdict(list)
for kindname, pseudo in kind_pseudo_dict.iteritems():
pseudo_dict[pseudo.pk] = pseudo
pseudo_species[pseudo.pk].append(kindname)
pseudos = {}
for pseudo_pk in pseudo_dict:
pseudo = pseudo_dict[pseudo_pk]
kinds = pseudo_species[pseudo_pk]
for kind in kinds:
pseudos[kind] = pseudo
return pseudos
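# For example (hypothetical family name 'pbe_ps'): get_pseudos(structure, 'pbe_ps')
# returns a dict mapping each kind name in the structure to its UpfData pseudo,
# ready to be attached to a calculation's inputs.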
def generate_phonopy_params(code, structure, parameters, machine, data_sets):
"""
Generate inputs parameters needed to do a remote phonopy calculation
:param code: Aiida Code object
:param structure: Aiida StructureData Object
    :param parameters: Aiida ParameterData object containing a dictionary with the data needed to run a phonopy
    calculation: supercell matrix, primitive matrix, displacement distance and mesh (others may be included in the future)
    :param machine: Aiida ParameterData object containing a dictionary with the computational resources information
    :param data_sets: Aiida ParameterData object containing the collected forces and displacement information of all the supercells
:return: Calculation process object, input dictionary
"""
# The inputs
inputs = PhonopyCalculation.process().get_inputs_template()
# code
inputs.code = code
# structure
inputs.structure = structure
# parameters
inputs.parameters = parameters
# resources
inputs._options.resources = machine.dict.resources
inputs._options.max_wallclock_seconds = machine.dict.max_wallclock_seconds
# data_sets
inputs.data_sets = data_sets
return PhonopyCalculation.process(), inputs
@workfunction
def create_supercells_with_displacements_using_phonopy(structure, phonopy_input):
"""
Create the supercells with the displacements to use the finite displacements methodology to calculate the
force constants
:param structure: Aiida StructureData Object
    :param phonopy_input: Aiida ParameterData object containing a dictionary with the data needed to run phonopy:
    supercell matrix, primitive matrix and displacement distance.
:return: dictionary of Aiida StructureData Objects containing the cells with displacements
"""
from phonopy.structure.atoms import Atoms as PhonopyAtoms
from phonopy import Phonopy
import numpy as np
# Generate phonopy phonon object
bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
positions=[site.position for site in structure.sites],
cell=structure.cell)
phonopy_input = phonopy_input.get_dict()
phonon = Phonopy(bulk,
phonopy_input['supercell'],
primitive_matrix=phonopy_input['primitive'])
phonon.generate_displacements(distance=phonopy_input['distance'])
cells_with_disp = phonon.get_supercells_with_displacements()
# Transform cells to StructureData and set them ready to return
data_sets = phonon.get_displacement_dataset()
data_sets_object = ArrayData()
for i, first_atoms in enumerate(data_sets['first_atoms']):
data_sets_array = np.array([first_atoms['direction'], first_atoms['number'], first_atoms['displacement']])
data_sets_object.set_array('data_sets_{}'.format(i), data_sets_array)
disp_cells = {'data_sets':data_sets_object}
for i, phonopy_supercell in enumerate(cells_with_disp):
supercell = StructureData(cell=phonopy_supercell.get_cell())
for symbol, position in zip(phonopy_supercell.get_chemical_symbols(),
phonopy_supercell.get_positions()):
supercell.append_atom(position=position, symbols=symbol)
disp_cells["structure_{}".format(i)] = supercell
return disp_cells
@workfunction
def create_forces_set(**kwargs):
    # Build data_sets from forces of supercells with displacements
import numpy as np
data_set = kwargs.pop('data_sets')
force_sets = ArrayData()
for i in data_set.get_arraynames():
force_array = kwargs.pop(i.replace('data_sets', 'forces')).get_array('forces')[0]
data_set_array = np.array([data_set.get_array(i)[0], data_set.get_array(i)[1], data_set.get_array(i)[2], force_array])
force_sets.set_array(i, data_set_array)
return {'force_sets': force_sets}
@workfunction
def get_force_constants_from_phonopy(**kwargs):
"""
Calculate the force constants using phonopy
:param kwargs:
:return:
"""
from phonopy.structure.atoms import Atoms as PhonopyAtoms
from phonopy import Phonopy
import numpy as np
# print 'function',kwargs
structure = kwargs.pop('structure')
phonopy_input = kwargs.pop('phonopy_input').get_dict()
# Generate phonopy phonon object
bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
positions=[site.position for site in structure.sites],
cell=structure.cell)
phonon = Phonopy(bulk,
phonopy_input['supercell'],
primitive_matrix=phonopy_input['primitive'],
distance=phonopy_input['distance'])
phonon.generate_displacements(distance=phonopy_input['distance'])
    # Build data_sets from forces of supercells with displacements
data_sets = phonon.get_displacement_dataset()
for i, first_atoms in enumerate(data_sets['first_atoms']):
forces = kwargs.pop('forces_{}'.format(i)).get_array('forces')[0]
first_atoms['forces'] = np.array(forces, dtype='double', order='c')
# LOCAL calculation
# Calculate and get force constants
phonon.set_displacement_dataset(data_sets)
phonon.produce_force_constants()
force_constants = phonon.get_force_constants()
array_data = ArrayData()
array_data.set_array('force_constants', force_constants)
return {'array_data': array_data}
@workfunction
def get_properties_from_phonopy(structure, phonopy_input, force_constants):
"""
Calculate DOS and thermal properties using phonopy (locally)
:param structure: Aiida StructureData Object
    :param phonopy_input: Aiida ParameterData object containing a dictionary with the data needed to run phonopy:
    supercell matrix, primitive matrix and q-points mesh.
:param force_constants:
:return:
"""
from phonopy.structure.atoms import Atoms as PhonopyAtoms
from phonopy import Phonopy
# Generate phonopy phonon object
bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
positions=[site.position for site in structure.sites],
cell=structure.cell)
phonopy_input = phonopy_input.get_dict()
force_constants = force_constants.get_array('force_constants')
phonon = Phonopy(bulk,
phonopy_input['supercell'],
primitive_matrix=phonopy_input['primitive'])
phonon.set_force_constants(force_constants)
    # Normalization factor from primitive cell to unit cell
normalization_factor = phonon.unitcell.get_number_of_atoms()/phonon.primitive.get_number_of_atoms()
phonon.set_mesh(phonopy_input['mesh'], is_eigenvectors=True, is_mesh_symmetry=False)
phonon.set_total_DOS()
phonon.set_partial_DOS()
# get DOS (normalized to unit cell)
total_dos = phonon.get_total_DOS()*normalization_factor
partial_dos = phonon.get_partial_DOS()*normalization_factor
# Stores DOS data in DB as a workflow result
dos = ArrayData()
dos.set_array('frequency',total_dos[0])
dos.set_array('total_dos',total_dos[1])
dos.set_array('partial_dos',partial_dos[1])
    # THERMAL PROPERTIES (per primitive cell)
phonon.set_thermal_properties()
t, free_energy, entropy, cv = phonon.get_thermal_properties()
# Stores thermal properties (per unit cell) data in DB as a workflow result
thermal_properties = ArrayData()
thermal_properties.set_array('temperature', t)
thermal_properties.set_array('free_energy', free_energy*normalization_factor)
thermal_properties.set_array('entropy', entropy*normalization_factor)
thermal_properties.set_array('cv', cv*normalization_factor)
return {'thermal_properties': thermal_properties, 'dos': dos}
class FrozenPhonon(WorkChain):
"""
Workflow to calculate the force constants and phonon properties using phonopy
"""
@classmethod
def define(cls, spec):
super(FrozenPhonon, cls).define(spec)
spec.input("structure", valid_type=StructureData)
spec.input("machine", valid_type=ParameterData)
spec.input("ph_settings", valid_type=ParameterData)
spec.input("es_settings", valid_type=ParameterData)
# Should be optional
spec.input("optimize", valid_type=Bool)
spec.input("pressure", valid_type=Float)
# spec.dynamic_input("optimize")
#spec.outline(cls.create_displacement_calculations,
# if_(cls.remote_phonopy)(cls.get_force_constants_remote,
# cls.collect_phonopy_data).else_(
# cls.get_force_constants))
spec.outline(cls.create_displacement_calculations, cls.get_force_constants)
#spec.outline(cls.create_displacement_calculations, cls.get_force_constants_remote, cls.collect_phonopy_data)
# spec.dynamic_output()
#spec.outline(cls.test1, cls.test2)
def remote_phonopy(self):
return 'code' in self.inputs.ph_settings.get_dict()
def create_displacement_calculations(self):
print 'test2!', self.ctx
structures = create_supercells_with_displacements_using_phonopy(self.inputs.structure,
self.inputs.ph_settings)
print 'test!'
self.ctx.data_sets = structures.pop('data_sets')
self.ctx.number_of_displacements = len(structures)
generate_inputs = { 'quantumespresso.pw' : generate_qe_params,
'vasp.vasp': generate_vasp_params}
############### FOR TESTING ###############
# 1) Load data from nodes
if False: #For test
from aiida.orm import load_node
nodes = [461200, 461205, 461210, 461215] # VASP
labels = ['structure_1', 'structure_0', 'structure_3', 'structure_2']
for pk, label in zip(nodes, labels):
future = load_node(pk)
self.ctx._content[label] = future
return
calcs = {}
for label, structure in structures.iteritems():
print label, structure
print self.inputs.es_settings.dict.code
# plugin = self.inputs.code.get_attr('input_plugin')
try:
plugin = Code.get_from_string(self.inputs.es_settings.dict.code).get_attr('input_plugin')
# plugin = self.inputs.es_settings.dict.code.get_attr('input_plugin')
except:
plugin = Code.get_from_string(self.inputs.es_settings.dict.code_forces).get_attr('input_plugin')
# plugin = self.inputs.es_settings.dict.code_forces.get_attr('input_plugin')
JobCalculation, calculation_input = generate_inputs[plugin](structure,
self.inputs.machine,
self.inputs.es_settings)
calculation_input._label = label
future = submit(JobCalculation, **calculation_input)
calcs[label] = future
return ToContext(**calcs)
def get_force_constants(self):
#print self.ctx
wf_inputs = {}
#for key, calc in self.ctx._get_dict().iteritems():
# if key.startswith('structure_'):
# wf_inputs[key.replace('structure', 'forces')] = calc.get_outputs('output_array')
print 'DISP', self.ctx.number_of_displacements
for i in range(self.ctx.number_of_displacements):
print 'forces_{}'.format(i), self.ctx.get('structure_{}'.format(i))
wf_inputs['forces_{}'.format(i)] = self.ctx.get('structure_{}'.format(i)).get_outputs_dict()['output_array']
wf_inputs['structure'] = self.inputs.structure
wf_inputs['phonopy_input'] = self.inputs.ph_settings
wf_inputs['machine'] = self.inputs.machine
phonopy_output = get_force_constants_from_phonopy(**wf_inputs)
force_constants = phonopy_output['array_data']
phonon_properties = get_properties_from_phonopy(self.inputs.structure,
self.inputs.ph_settings,
force_constants)
self.out('force_constants', force_constants)
self.out('phonon_properties', phonon_properties['thermal_properties'])
self.out('dos', phonon_properties['dos'])
return
def get_force_constants_remote(self):
wf_inputs = {}
for key, value in self.ctx._get_dict().iteritems():
if key.startswith('structure_'):
wf_inputs[key.replace('structure', 'forces')] = value['output_array']
wf_inputs['data_sets'] = self.ctx.data_sets
force_sets = create_forces_set(**wf_inputs)['force_sets']
code_label = self.inputs.ph_settings.get_dict()['code']
JobCalculation, calculation_input = generate_phonopy_params(Code.get_from_string(code_label),
self.inputs.structure,
self.inputs.ph_settings,
self.inputs.machine,
force_sets)
future = submit(JobCalculation, **calculation_input)
calcs = {'phonopy_results': future}
return ToContext(**calcs)
def collect_phonopy_data(self):
force_constants = self.ctx.phonopy_results['array_data']
phonon_properties = get_properties_from_phonopy(self.inputs.structure,
self.inputs.ph_settings,
force_constants)
self.out('force_constants', force_constants)
self.out('phonon_properties', phonon_properties['thermal_properties'])
self.out('dos', phonon_properties['dos'])
return
################### EXAMPLE INPUT FOR VASP AND QUANTUM ESPRESSO ###################
if __name__ == "__main__":
# Define structure
import numpy as np
cell = [[ 3.1900000572, 0, 0],
[-1.5950000286, 2.762621076, 0],
[ 0.0, 0, 5.1890001297]]
structure = StructureData(cell=cell)
scaled_positions=[(0.6666669, 0.3333334, 0.0000000),
(0.3333331, 0.6666663, 0.5000000),
(0.6666669, 0.3333334, 0.3750000),
(0.3333331, 0.6666663, 0.8750000)]
symbols=['Ga', 'Ga', 'N', 'N']
positions = np.dot(scaled_positions, cell)
for i, scaled_position in enumerate(scaled_positions):
structure.append_atom(position=np.dot(scaled_position, cell).tolist(),
symbols=symbols[i])
# PHONOPY settings
ph_settings = ParameterData(dict={'supercell': [[2,0,0],
[0,2,0],
[0,0,2]],
'primitive': [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]],
'distance': 0.01,
'mesh': [40, 40, 40],
# 'code': 'phonopy@stern_outside' # comment to use local phonopy
})
# VASP SPECIFIC
if True: # Set TRUE to use VASP or FALSE to use Quantum Espresso
incar_dict = {
# 'PREC' : 'Accurate',
'EDIFF' : 1e-08,
'NELMIN' : 5,
'NELM' : 100,
'ENCUT' : 400,
'ALGO' : 38,
'ISMEAR' : 0,
'SIGMA' : 0.01,
'GGA' : 'PS'
}
        # es_settings = ParameterData(dict=incar_dict)  # superseded by settings_dict below
from pymatgen.io import vasp as vaspio
#kpoints
#kpoints_pg = vaspio.Kpoints.monkhorst_automatic(
# kpts=[2, 2, 2],
# shift=[0.0, 0.0, 0.0])
#kpoints = ParameterData(dict=kpoints_pg.as_dict())
potcar = vaspio.Potcar(symbols=['Ga', 'N'],
functional='PBE')
settings_dict = {'code': 'vasp541mpi@boston',
'parameters': incar_dict,
'kpoints_per_atom': 1000, # k-point density
'pseudos': potcar.as_dict()}
# pseudos = ParameterData(dict=potcar.as_dict())
es_settings = ParameterData(dict=settings_dict)
# QE SPECIFIC
if False:
parameters_dict = {
'CONTROL': {'calculation': 'scf',
'tstress': True, # Important that this stays to get stress
'tprnfor': True,},
'SYSTEM': {'ecutwfc': 30.,
'ecutrho': 200.,},
'ELECTRONS': {'conv_thr': 1.e-6,}
}
# Kpoints
#kpoints_mesh = 2
#kpoints = KpointsData()
#kpoints.set_kpoints_mesh([kpoints_mesh, kpoints_mesh, kpoints_mesh])
#code = Code.get_from_string('pw@stern_outside')
pseudos = Str('pbe_ps')
settings_dict = {'code': 'pw@stern_outside',
'parameters': parameters_dict,
'kpoints_per_atom': 1000, # k-point density
'pseudos_family': 'pbe_ps'}
es_settings = ParameterData(dict=settings_dict)
# LAMMPS SPECIFIC
if False:
# GaN Tersoff
tersoff_gan = {
'Ga Ga Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 1.0 1.44970 410.132 2.87 0.15 1.60916 535.199',
'N N N': '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 1.0 2.38426 423.769 2.20 0.20 3.55779 1044.77',
'Ga Ga N': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N N': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga N ': '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 0.0 0.00000 0.00000 2.20 0.20 0.00000 0.00000',
'N N Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 0.0 0.00000 0.00000 2.87 0.15 0.00000 0.00000'}
# Silicon(C) Tersoff
# tersoff_si = {'Si Si Si ': '3.0 1.0 1.7322 1.0039e5 16.218 -0.59826 0.78734 1.0999e-6 1.7322 471.18 2.85 0.15 2.4799 1830.8'}
potential = {'pair_style': 'tersoff',
'data': tersoff_gan}
parameters = {'relaxation': 'tri', # iso/aniso/tri
'pressure': 0.0, # kbars
'vmax': 0.000001, # Angstrom^3
'energy_tolerance': 1.0e-25, # eV
                      'force_tolerance': 1.0e-25,  # eV/angstrom
'max_evaluations': 1000000,
'max_iterations': 500000}
settings_dict = {'code_forces': 'lammps_force@stern',
'code_optimize': 'lammps_optimize@stern',
'parameters': parameters,
'potential': potential}
es_settings = ParameterData(dict=settings_dict)
# CODE INDEPENDENT
machine_dict = {'resources': {'num_machines': 1,
'parallel_env': 'mpi*',
'tot_num_mpiprocs': 16},
'max_wallclock_seconds': 30 * 60,
}
machine = ParameterData(dict=machine_dict)
results = run(FrozenPhonon,
structure=structure,
machine=machine,
es_settings=es_settings,
ph_settings=ph_settings,
# Optional settings
pressure=Float(10),
optimize=Bool(0)
)
# Check results
print results
print results['force_constants'].get_array('force_constants')
print results['force_constants'].pk
print results['phonon_properties'].pk
print results['dos'].pk
|
<filename>json_settings/number_setting.py<gh_stars>0
from numpy import linspace
import json_settings as js
class NumberSetting(js.TerminusSetting):
"""The special Terminus variant that is for numerical values.
This class support range values in the form of arrays of min/max/num
definitions.
Attributes
----------
value : :obj:`Union`[:obj:`float`, :obj:`int`, :obj:`List`]
The value or list of values stored.
"""
@property
def is_range(self):
""":obj:`bool` : True if the instance contains a range of values.
"""
return self._range
@property
def match(self):
""":obj:`Union`[None, :obj:`str`] : The match parameter.
"""
return self._match
def distribute(self, value):
"""Method called by the decorator :meth:`Terminus.assign` that
tries to assign the values passed to the constructor of the
:class:`Number` derived class.
Parameters
----------
value : :obj:`Union`[:obj:`float`, :obj:`int`, :obj:`dict`]
Raises
------
:class:`~.TypeAttributeNotImplementedError`
If the type attribute has not been defined in the derived class
constructor.
:class:`~.TypeAttributeTypeError`
If the :attr:`type` is not of type :obj:`type`
:class:`~.SettingTypeError`
If `values` is not a number or valid range dictionary.
"""
if not hasattr(self, "type"):
raise js.TypeAttributeNotImplementedError(self.__class__)
if not isinstance(self.type, type):
raise js.TypeAttributeTypeError(self.__class__)
if type(value) is self.type:
self.__value(value)
elif type(value) is dict and "array" in value:
self.__array(value)
elif type(value) is dict:
self.__range(value)
else:
raise js.SettingTypeError(
f"{self.type} || {{'array': [{self.type}]}} || "
f"{{'min': {self.type}, 'max': {self.type}, 'num': {int}}}",
type(value))
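    # A minimal dispatch sketch (hypothetical derived class; assumes the
    # js.Terminus machinery wires ``distribute`` up as documented above):
    #
    #   class CountSetting(NumberSetting):
    #       type = int
    #
    # ``distribute`` then accepts 5, {"array": [1, 2, 3]} or
    # {"min": 0, "max": 10, "num": 3}; anything else raises SettingTypeError.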
def __value(self, value):
"""The method that assigns the attributes if a single value is passed.
Parameters
----------
value : :obj:`Union`[:obj:`float`, :obj:`int`]
The value to be stored.
"""
self.value = value
self._range = False
self._match = None
def __array(self, value: dict):
"""The method that assigns the attributes if a array of value is passed.
Parameters
----------
value : :obj:`dict`[:obj:`str`, :obj:`list`]
The dictionary with "array": listofvalues.
Raises
------
:class:`~.SettingTypeError`
If the values in the array are not the same type as :attr:`type`
"""
for item in value["array"]:
if not isinstance(item, self.type):
raise js.SettingTypeError(self.type, type(item))
self.value = value["array"]
self._range = True
try:
self._match = value["match"]
except KeyError:
self._match = None
def __range(self, value: dict):
"""The method that assigns the attributes if a array of value is passed.
Parameters
----------
value : :obj:`dict`
The dictionary with the range definition. Must be of the form::
{
"max": value,
"min": value,
"num": int
}
Where value is of the defined type.
Raises
------
:class:`~.SettingRangeTypeError`
If type(`value`["min"]) is not :attr`type`.
:class:`~.SettingRangeKeyError`
If `value`["min"] does not exist.
:class:`~.SettingRangeTypeError`
If type(`value`["max"]) is not :attr`type`.
:class:`~.SettingRangeKeyError`
If `value`["max"] does not exist.
:class:`~.SettingRangeTypeError`
If type(`value`["num"]) is not :obj:`int`.
:class:`~.SettingRangeKeyError`
If `value`["num"] does not exist.
"""
try:
if not isinstance(value["min"], self.type):
raise js.SettingRangeTypeError("min", self.type)
except KeyError:
raise js.SettingRangeKeyError("min")
try:
if not isinstance(value["max"], self.type):
raise js.SettingRangeTypeError("max", self.type)
except KeyError:
raise js.SettingRangeKeyError("max")
try:
if not isinstance(value["num"], int):
raise js.SettingRangeTypeError('num', int)
except KeyError:
raise js.SettingRangeKeyError("num")
self.value = linspace(value['min'], value['max'], abs(value['num']))
self.value = [self.type(item) for item in self.value]
self._range = True
try:
self._match = value["match"]
except KeyError:
self._match = None
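    # Worked example of the range expansion above (values are illustrative):
    # {"min": 0.0, "max": 1.0, "num": 5} with type == float yields
    # linspace(0.0, 1.0, 5) -> [0.0, 0.25, 0.5, 0.75, 1.0] and is_range == True.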
def lower_bound(self, value):
"""Checks if values are above or equal to a lower bound.
Helper function to be called in derived class check implementation.
Parameters
----------
value : :obj:`Union`[:obj:`float`, :obj:`int`]
The lower bound.
Raises
------
:obj:`ValueError`
            If any values are less than the provided bound.
"""
if isinstance(self.value, self.type):
if self.value < value:
raise ValueError(f"must be >= {value}")
elif isinstance(self.value, list):
for item in self.value:
if item < value:
raise ValueError(f"must be >= {value}")
elif isinstance(self.value, dict):
if self.value["min"] < value:
raise ValueError(f"must be >= {value}")
if self.value["max"] < value:
raise ValueError(f"must be >= {value}")
def upper_bound(self, value):
"""Checks if values are below or equal to an upper bound.
Helper function to be called in derived class check implementation.
Parameters
----------
value : :obj:`Union`[:obj:`float`, :obj:`int`]
The upper bound.
Raises
------
:obj:`ValueError`
            If any values are greater than the provided bound.
"""
if isinstance(self.value, self.type):
if self.value > value:
raise ValueError(f"must be <= {value}")
elif isinstance(self.value, list):
for item in self.value:
if item > value:
raise ValueError(f"must be <= {value}")
elif isinstance(self.value, dict):
if self.value["min"] > value:
raise ValueError(f"must be <= {value}")
if self.value["max"] > value:
raise ValueError(f"must be <= {value}")
def lower_bound_exclusive(self, value):
"""Checks if values are above an upper bound.
Helper function to be called in derived class check implementation.
Parameters
----------
value : :obj:`Union`[:obj:`float`, :obj:`int`]
The lower bound.
Raises
------
:obj:`ValueError`
            If any values are less than or equal to the provided bound.
"""
if isinstance(self.value, self.type):
if self.value <= value:
raise ValueError(f"must be > {value}")
elif isinstance(self.value, list):
for item in self.value:
if item <= value:
raise ValueError(f"must be > {value}")
elif isinstance(self.value, dict):
if self.value["min"] <= value:
raise ValueError(f"must be > {value}")
if self.value["max"] <= value:
raise ValueError(f"must be > {value}")
def upper_bound_exclusive(self, value):
"""Checks if values are above an lower bound.
Helper function to be called in derived class check implementation.
Parameters
----------
value : :obj:`Union`[:obj:`float`, :obj:`int`]
            The upper bound.
Raises
------
:obj:`ValueError`
            If any values are greater than or equal to the provided bound.
"""
if isinstance(self.value, self.type):
if self.value >= value:
raise ValueError(f"must be < {value}")
elif isinstance(self.value, list):
for item in self.value:
if item >= value:
raise ValueError(f"must be < {value}")
elif isinstance(self.value, dict):
if self.value["min"] >= value:
raise ValueError(f"must be < {value}")
if self.value["max"] >= value:
raise ValueError(f"must be < {value}")
|
import pandas as pd
from settings.language_strings import LANGUAGE_RECOMMENDER_ALGORITHMS_STOP, \
LANGUAGE_RECOMMENDER_ALGORITHMS_START
from posprocessing.distributions import multiprocess_get_distribution
from processing.multiprocessing_recommender import all_recommenders_multiprocessing
from processing.singleprocessing_recommender import item_knn_recommender, user_knn_recommender, svd_recommender, \
svdpp_recommender, nmf_recommender, slope_one_recommender
# #################################################################################################################### #
# ################################################# Single Process ################################################### #
# #################################################################################################################### #
def collaborative_filtering_singleprocess(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping):
evaluation_results_df = pd.DataFrame()
# # Item KNN
recommender_results_df = item_knn_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # User KNN
recommender_results_df = user_knn_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # SVD
recommender_results_df = svd_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # SVDpp
recommender_results_df = svdpp_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # NMF
recommender_results_df = nmf_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
    # # Slope One
recommender_results_df = slope_one_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df,
item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
return evaluation_results_df
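# The six calls above share one signature, so the same result can be built
# with a loop (an equivalent sketch, not a behavioural change):
#
#   def collaborative_filtering_singleprocess(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping):
#       recommenders = [item_knn_recommender, user_knn_recommender, svd_recommender,
#                       svdpp_recommender, nmf_recommender, slope_one_recommender]
#       results = [rec(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
#                  for rec in recommenders]
#       return pd.concat(results, sort=False)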
# #################################################################################################################### #
# ################################################# Multi Process #################################################### #
# #################################################################################################################### #
# #################################################################################################################### #
# ############################################### Recommender Caller ################################################# #
# #################################################################################################################### #
def recommender_algorithms(trainset, trainset_df, testset_df, item_mapping):
# users_prefs_distr_df = get_distribution(trainset_df, item_mapping)
users_prefs_distr_df = multiprocess_get_distribution(trainset_df, item_mapping)
print(LANGUAGE_RECOMMENDER_ALGORITHMS_START)
# evaluation_results_df = collaborative_filtering_singleprocess(trainset, users_prefs_distr_df, trainset_df,
# testset_df, item_mapping)
# evaluation_results_df = collaborative_filtering_multiprocess(trainset, users_prefs_distr_df, trainset_df,
# testset_df, item_mapping)
evaluation_results_df = all_recommenders_multiprocessing(trainset, users_prefs_distr_df, trainset_df, testset_df,
item_mapping)
print(LANGUAGE_RECOMMENDER_ALGORITHMS_STOP)
return evaluation_results_df
|
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Run QTL analysis given genotype, phenotype, and annotation.')
parser.add_argument('--bgen','-bg',required=False)
parser.add_argument('--plink','-pg',required=False)
parser.add_argument('--annotation_file','-af', required=True)
parser.add_argument('--phenotype_file','-pf', required=True)
parser.add_argument('--output_directory','-od', required=True)
parser.add_argument('--window','-w', required=False,
help=
'The size of the cis window to take SNPs from.'
'The window will extend between: '
' (feature_start - (window)) '
' and: '
' (feature_end + (window)) ',default=250000)
parser.add_argument('--genomic_range','-gr',required=False,
help=
                        'A genomic range used to select features to be considered in the analysis. '
                        'Available options: all (default), a chromosome or chromosome:start-end.',default='all')
parser.add_argument('--covariates_file','-cf',required=False,default=None)
parser.add_argument('--randomeff_files','-rf',required=False,default=None,
help = """The random effect files divided by a comma. The first one should be the kinship file and the second one should contain the read depth. You can also
just input only one of them but the order is important""")
parser.add_argument('--sample_mapping_file','-smf',required=False,default=None)
parser.add_argument('--minor_allel_frequency','-maf',required=False,default=0.05)
parser.add_argument('--hardy_weinberg_equilibrium','-hwe',required=False,default=0.0001)
parser.add_argument('--call_rate','-cr',required=False,default=0.95)
parser.add_argument('--block_size','-bs',required=False,default=1500)
parser.add_argument('--number_of_permutations','-np',required=False,default=10)
parser.add_argument('--variant_filter','-vf',required=False,default=None)
parser.add_argument('--feature_variant_covariate','-fvc',required=False,default=None)
parser.add_argument('--feature_variant_filter','-fvf',required=False,default=None)
parser.add_argument('--feature_filter','-ff',required=False,default=None)
parser.add_argument('--seed','-s',required=False)
parser.add_argument('--extended_annotation_file','-eaf',
help=
                        'Secondary annotation file, to add multiple locations to one feature. '
'This can be used to either link multiple test regions to one feature or exclude multiple regions while testing a feature.', required=False)
parser.add_argument('--relatedness_score','-rs',required=False,default=None)
parser.add_argument('--write_permutations','-wp',action="store_true",required=False,default=False)
parser.add_argument('--write_feature_top_permutations','-wftp',action='store_true',required=False,default=False)
parser.add_argument('--minimum_test_samples','-mts',
help="The minimal number of samples with non-NA values to consider a feature for a QTL test, if covariates are used the number of covariates is added to this value.",required=False,default=10)
parser.add_argument("--gaussianize_method","-gm",
help="Force normal distribution on phenotypes.", default=None)
parser.add_argument("--cis","-c",
action="store_true",
help="Run cis analysis.", default=False)
parser.add_argument("--trans","-t",
action="store_true",
help="Run trans analysis.", default=False)
parser.add_argument("--no_chromosome_filter","-ncf",
action="store_true",
help="Don't filter on autosomes. By default only autosomes are selected, this is where the defaults are designed for."
"When running on X/Y/MT please be aware that these defaults might not be appropriate.", default=False)
parser.add_argument("--regress_covariates","-rc",
action="store_true",
help="Regress-out covariates, using a LMM, before running the QTL mapping.", default=False)
parser.add_argument("--debugger","-d",
action="store_true",
help="Print the time needed for each step", default=False)
args = parser.parse_args()
return args
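# Example invocation (a sketch; the script and file names are hypothetical):
#
#   python run_qtl.py -pg genotypes_plink -af annotation.tsv -pf phenotypes.tsv \
#       -od results/ -w 500000 -gr 1:1000000-2000000 --cis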
def get_interaction_args():
parser = argparse.ArgumentParser(description='Run QTL analysis given genotype, phenotype, and annotation.')
parser.add_argument('--bgen','-bg',required=False)
parser.add_argument('--plink','-pg',required=False)
parser.add_argument('--annotation_file','-af', required=True)
parser.add_argument('--phenotype_file','-pf', required=True)
parser.add_argument('--output_directory','-od', required=True)
parser.add_argument('--interaction_term','-it',
help=
'Term to use for the interaction analysis, values are extracted from the covariate matrix.'
'The interaction term is also taken along in the covariate matrix.',required=True,default=None)
parser.add_argument('--window','-w', required=False,
help=
'The size of the cis window to take SNPs from.'
'The window will extend between: '
' (feature_start - (window)) '
' and: '
' (feature_end + (window)) ',default=250000)
parser.add_argument('--genomic_range','-gr',required=False,
help=
                        'A genomic range used to select features to be considered in the analysis. '
                        'Available options: all (default), a chromosome or chromosome:start-end.',default='all')
parser.add_argument('--covariates_file','-cf',required=False,default=None)
parser.add_argument('--kinship_file','-kf',required=False,default=None)
parser.add_argument('--sample_mapping_file','-smf',required=False,default=None)
parser.add_argument('--minor_allel_frequency','-maf',required=False,default=0.05)
parser.add_argument('--hardy_weinberg_equilibrium','-hwe',required=False,default=0.0001)
parser.add_argument('--call_rate','-cr',required=False,default=0.95)
parser.add_argument('--block_size','-bs',required=False,default=1500)
parser.add_argument('--number_of_permutations','-np',required=False,default=10)
parser.add_argument('--variant_filter','-vf',required=False,default=None)
parser.add_argument('--feature_variant_covariate','-fvc',required=False,default=None)
parser.add_argument('--feature_variant_filter','-fvf',required=False,default=None)
parser.add_argument('--feature_filter','-ff',required=False,default=None)
parser.add_argument('--seed','-s',required=False)
parser.add_argument('--extended_annotation_file','-eaf',
help=
                        'Secondary annotation file, to add multiple locations to one feature. '
'This can be used to either link multiple test regions to one feature or exclude multiple regions while testing a feature.', required=False)
parser.add_argument('--regress_snp_interaction','-rsi', action="store_true",required=False,default=False)
parser.add_argument("--regress_covariates","-rc",
action="store_true",
help="Regress-out covariates, using a LMM, before running the QTL mapping.", default=False)
parser.add_argument('--relatedness_score','-rs',required=False,default=None)
parser.add_argument('--write_permutations','-wp',action="store_true",required=False,default=False)
parser.add_argument('--minimum_test_samples','-mts',
help="The minimal number of samples with non-NA values to consider a feature for a QTL test, if covariates are used the number of covariates is added to this value.",required=False,default=10)
parser.add_argument("--gaussianize_method","-gm",
help="Force normal distribution on phenotypes.", default=None)
parser.add_argument("--cis","-c",
action="store_true",
help="Run cis analysis.", default=False)
parser.add_argument("--trans","-t",
action="store_true",
help="Run trans analysis.", default=False)
parser.add_argument("--no_chromosome_filter","-ncf",
action="store_true",
help="Don't filter on autosomes. By default only autosomes are selected, this is where the defaults are designed for."
"When running on X/Y/MT please be aware that these defaults might not be appropriate.", default=False)
args = parser.parse_args()
return args
def get_struct_args():
parser = argparse.ArgumentParser(description='Run structLMM QTL analysis given genotype, phenotype, environments and annotation.')
parser.add_argument('--bgen','-bg', required=False)
parser.add_argument('--plink','-pg', required=False)
parser.add_argument('--annotation_file','-af', required=True)
parser.add_argument('--phenotype_file','-pf', required=True)
parser.add_argument('--output_directory','-od', required=True)
parser.add_argument('--environment_file','-ef', required=True)
parser.add_argument('--window','-w', required=False,
help=
'The size of the cis window to take SNPs from.'
'The window will extend between: '
' (feature_start - (window)) '
' and: '
' (feature_end + (window)) ', default=250000)
parser.add_argument('--genomic_range','-gr', required=False,
help=
                        'A genomic range used to select features to be considered in the analysis. '
                        'Available options: all (default), a chromosome or chromosome:start-end.', default='all')
parser.add_argument('--covariates_file','-cf', required=False, default=None)
parser.add_argument('--kinship_file','-kf', required=False, default=None)
parser.add_argument('--sample_mapping_file','-smf', required=False, default=None)
parser.add_argument('--minor_allel_frequency','-maf', required=False, default=0.05)
parser.add_argument('--hardy_weinberg_equilibrium','-hwe', required=False, default=0.0001)
parser.add_argument('--call_rate','-cr', required=False, default=0.95)
parser.add_argument('--block_size','-bs', required=False, default=1500)
parser.add_argument('--number_of_permutations','-np', required=False, default=10)
parser.add_argument('--variant_filter','-vf', required=False, default=None)
parser.add_argument('--feature_variant_covariate','-fvc', required=False, default=None)
parser.add_argument('--feature_variant_filter','-fvf', required=False, default=None)
parser.add_argument('--feature_filter','-ff', required=False, default=None)
parser.add_argument('--seed','-s', required=False)
parser.add_argument('--extended_annotation_file','-eaf',
help=
                        'Secondary annotation file, to add multiple locations to one feature. '
'This can be used to either link multiple test regions to one feature or exclude multiple regions while testing a feature.', required=False)
parser.add_argument('--relatedness_score','-rs', required=False, default=None)
parser.add_argument('--write_permutations','-wp', action="store_true", required=False, default=False)
parser.add_argument('--minimum_test_samples','-mts',
help="The minimal number of samples with non-NA values to consider a feature for a QTL test, if covariates are used the number of covariates is added to this value.", required=False, default=10)
parser.add_argument("--gaussianize_method","-gm",
help="Force normal distribution on phenotypes.", default=None)
parser.add_argument("--cis","-c",
action="store_true",
help="Run cis analysis.", default=False)
parser.add_argument("--trans","-t",
action="store_true",
help="Run trans analysis.", default=False)
parser.add_argument("--interaction_mode","-im",
action="store_true",
help="Run structLMM in interaction mode.", default=False)
parser.add_argument("--association_mode","-am",
action="store_true",
help="Run structLMM in association mode.", default=False)
parser.add_argument("--no_chromosome_filter","-ncf",
action="store_true",
help="Don't filter on autosomes. By default only autosomes are selected, this is where the defaults are designed for."
"When running on X/Y/MT please be aware that these defaults might not be appropriate.", default=False)
args = parser.parse_args()
return args
def get_grsQtl_args():
    parser = argparse.ArgumentParser(description='Run GRS QTL analysis given genotype, phenotype, and annotation.')
parser.add_argument('--genetic_risk_scores','-grs',required=True)
parser.add_argument('--annotation_file','-af', required=True)
parser.add_argument('--phenotype_file','-pf', required=True)
parser.add_argument('--output_directory','-od', required=True)
parser.add_argument('--genomic_range','-gr',required=False,
help=
                        'A genomic range used to select features to be considered in the analysis. '
                        'Available options: all (default), a chromosome or chromosome:start-end.',default='all')
parser.add_argument('--covariates_file','-cf',required=False,default=None)
parser.add_argument('--kinship_file','-kf',required=False,default=None)
parser.add_argument('--sample_mapping_file','-smf',required=False,default=None)
parser.add_argument('--call_rate','-cr',required=False,default=0.95)
parser.add_argument('--block_size','-bs',required=False,default=1500)
parser.add_argument('--number_of_permutations','-np',required=False,default=10)
parser.add_argument('--variant_filter','-vf',required=False,default=None)
parser.add_argument('--feature_variant_covariate','-fvc',required=False,default=None)
parser.add_argument('--feature_variant_filter','-fvf',required=False,default=None)
parser.add_argument('--feature_filter','-ff',required=False,default=None)
parser.add_argument('--seed','-s',required=False)
parser.add_argument('--relatedness_score','-rs',required=False,default=None)
parser.add_argument('--write_permutations','-wp',action="store_true",required=False,default=False)
parser.add_argument('--minimum_test_samples','-mts',
help="The minimal number of samples with non-NA values to consider a feature for a QTL test, if covariates are used the number of covariates is added to this value.",required=False,default=10)
parser.add_argument("--gaussianize_method","-gm",
help="Force normal distribution on phenotypes.", default=None)
parser.add_argument("--no_chromosome_filter","-ncf",
action="store_true",
help="Don't filter on autosomes. By default only autosomes are selected, this is where the defaults are designed for."
"When running on X/Y/MT please be aware that these defaults might not be appropriate.", default=False)
parser.add_argument("--regress_covariates","-rc",
action="store_true",
help="Regress-out covariates, using a LMM, before running the QTL mapping.", default=False)
args = parser.parse_args()
return args
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import keystoneclient.v2_0.client as ksclient
import glanceclient.v2.client as glclient
from glanceclient.exc import HTTPNotFound
from novaclient import client
from datetime import datetime
##
#Max old snapshot count
snap_max = 2
def get_keystone_creds():
try:
d = {}
d['version'] = "2.0"
d['username'] = os.environ['OS_USERNAME']
        d['password'] = os.environ['OS_PASSWORD']
d['auth_url'] = os.environ['OS_AUTH_URL']
d['tenant_name'] = os.environ['OS_TENANT_NAME']
except KeyError:
print "Credentials error. Run source user-operc.sh"
sys.exit(1)
return d
def get_nova_creds():
try:
d = {}
d['version'] = "2.0"
d['username'] = os.environ['OS_USERNAME']
d['api_key'] = os.environ['OS_PASSWORD']
d['auth_url'] = os.environ['OS_AUTH_URL']
d['project_id'] = os.environ['OS_TENANT_NAME']
except KeyError:
print "Credentials error. Run source user-operc.sh"
sys.exit(1)
return d
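# Example environment (hypothetical values), typically exported by an
# OpenStack RC file before running this script:
#
#   export OS_USERNAME=operator
#   export OS_PASSWORD=secret
#   export OS_AUTH_URL=http://keystone.example.com:5000/v2.0
#   export OS_TENANT_NAME=admin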
creds = get_keystone_creds()
keystone = ksclient.Client(**creds)
glance_endpoint = keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
glance = glclient.Client(glance_endpoint, token=keystone.auth_token)
nvcreds = get_nova_creds()
nova = client.Client(**nvcreds)
servers = nova.servers.list()
def get_servers_list():
uid_server = []
for server in servers:
if (server.image != ""):
uid_server.append(server.id)
return uid_server
def get_servers_snap_list(server_uuid):
images = glance.images.list()
snaps = []
x = 1
for image in images:
        if 'instance_uuid' in image and image['instance_uuid'] == server_uuid and 'image_state' in image:
snapd = {'id':image['id'],'created':image['created_at'],'status':image['image_state'],'order':x}
snaps.append(snapd)
x = (x + 1)
snaps.sort(key=lambda a: a['created'])
return snaps
def get_new_snap_name(server_uuid):
srv_name = nova.servers.find(id=server_uuid)
snap_name = "snap_" + srv_name.name + "_" + datetime.now().strftime('%d%m%Y-%H%M%S')
return snap_name
def main(argv):
uuids = get_servers_list()
for uuid in uuids:
snapshots = get_servers_snap_list(uuid)
c = len(snapshots)
##
#remove old snapshots
for x in snapshots:
            if x['order'] > snap_max:
print 'Delete ' + x['id'] + ' - ' + x['created']
try:
glance.images.delete(x['id'])
except HTTPNotFound:
print "Could not find image " + img
##
#Create new snapshot
s_name = get_new_snap_name(uuid)
print "Server:" + uuid + "snapshot. name:" + s_name
srv = nova.servers.find(id=uuid)
try:
srv.create_image(s_name)
        except Exception as e:
            print "Snapshot failed for " + uuid + ": " + str(e)
if __name__ == "__main__":
main(sys.argv)
|
<reponame>constantinpape/cluster_tools
#! /usr/bin/python
import os
import sys
import json
import numpy as np
import luigi
import nifty.tools as nt
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
class MergeRegionFeaturesBase(luigi.Task):
""" Merge edge feature base class
"""
task_name = 'merge_region_features'
src_file = os.path.abspath(__file__)
    # retry is too complicated for now ...
allow_retry = False
# input and output volumes
output_path = luigi.Parameter()
output_key = luigi.Parameter()
number_of_labels = luigi.IntParameter()
dependency = luigi.TaskParameter()
prefix = luigi.Parameter(default='')
def requires(self):
return self.dependency
def run_impl(self):
# get the global config and init configs
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
self.init(shebang)
# load the task config
config = self.get_task_config()
chunk_size = min(10000, self.number_of_labels)
# temporary output dataset
tmp_path = os.path.join(self.tmp_folder, 'region_features_tmp.n5')
tmp_key = 'block_feats'
with vu.file_reader(tmp_path, 'r') as f:
ds_tmp = f[tmp_key]
n_features = len(ds_tmp.attrs['feature_names'])
# require the output dataset
with vu.file_reader(self.output_path) as f:
f.require_dataset(self.output_key, dtype='float32', shape=(self.number_of_labels, n_features),
chunks=(chunk_size, 1), compression='gzip')
# update the task config
config.update({'output_path': self.output_path, 'output_key': self.output_key,
'tmp_path': tmp_path, 'tmp_key': tmp_key,
'node_chunk_size': chunk_size})
node_block_list = vu.blocks_in_volume([self.number_of_labels], [chunk_size])
n_jobs = min(len(node_block_list), self.max_jobs)
# prime and run the jobs
self.prepare_jobs(n_jobs, node_block_list, config, consecutive_blocks=True,
job_prefix=self.prefix)
self.submit_jobs(n_jobs, self.prefix)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs, self.prefix)
# part of the luigi API
def output(self):
return luigi.LocalTarget(os.path.join(self.tmp_folder,
self.task_name + '_%s.log' % self.prefix))
class MergeRegionFeaturesLocal(MergeRegionFeaturesBase, LocalTask):
""" MergeRegionFeatures on local machine
"""
pass
class MergeRegionFeaturesSlurm(MergeRegionFeaturesBase, SlurmTask):
""" MergeRegionFeatures on slurm cluster
"""
pass
class MergeRegionFeaturesLSF(MergeRegionFeaturesBase, LSFTask):
""" MergeRegionFeatures on lsf cluster
"""
pass
#
# Implementation
#
def merge_feats(feat_name, this_feats, prev_feats,
this_counts, prev_counts, tot_counts):
assert len(this_feats) == len(prev_feats) == len(this_counts)
if feat_name == 'count':
return tot_counts
elif feat_name == 'mean':
return (this_counts * this_feats + prev_counts * prev_feats) / tot_counts
elif feat_name == 'minimum':
return np.minimum(this_feats, prev_feats)
elif feat_name == 'maximum':
return np.maximum(this_feats, prev_feats)
else:
raise ValueError("Invalid feature name %s" % feat_name)
def _extract_and_merge_region_features(blocking, ds_in, ds, node_begin, node_end, feature_names):
fu.log("processing node range %i to %i" % (node_begin, node_end))
n_nodes_chunk = node_end - node_begin
n_features = len(feature_names)
features = np.zeros((n_nodes_chunk, n_features), dtype='float32')
chunks = ds_in.chunks
for block_id in range(blocking.numberOfBlocks):
block = blocking.getBlock(block_id)
chunk_id = tuple(beg // ch for beg, ch in zip(block.begin, chunks))
# load the data
data = ds_in.read_chunk(chunk_id)
if data is None:
continue
# extract the ids from the serialization
n_cols = len(feature_names) + 1
ids = data[::n_cols].astype('uint64')
# check if any ids overlap with our id range
overlap_mask = np.logical_and(ids >= node_begin,
ids < node_end)
if overlap_mask.sum() == 0:
continue
# extract the region features from the serialization
feats = {}
for feat_id, feat_name in enumerate(feature_names, 1):
feats[feat_name] = data[feat_id::n_cols]
# normalize the ids to the chunk
overlapping_ids = ids[overlap_mask]
overlapping_ids -= node_begin
# compute the count features
this_counts = feats['count'][overlap_mask]
prev_counts = features[overlapping_ids, 0]
assert len(this_counts) == len(prev_counts)
tot_counts = prev_counts + this_counts
# update all features
for feat_id, feat_name in enumerate(feature_names):
features[overlapping_ids, feat_id] = merge_feats(feat_name,
feats[feat_name][overlap_mask],
features[overlapping_ids, feat_id],
this_counts, prev_counts, tot_counts)
features[np.isnan(features)] = 0.
ds[node_begin:node_end, :] = features
def merge_region_features(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
# get the config
with open(config_path, 'r') as f:
config = json.load(f)
output_path = config['output_path']
output_key = config['output_key']
tmp_path = config['tmp_path']
tmp_key = config['tmp_key']
node_block_list = config['block_list']
node_chunk_size = config['node_chunk_size']
with vu.file_reader(output_path) as f,\
vu.file_reader(tmp_path) as f_in:
ds_in = f_in[tmp_key]
feature_names = ds_in.attrs['feature_names']
assert feature_names[0] == 'count'
ds = f[output_key]
n_nodes = ds.shape[0]
node_blocking = nt.blocking([0], [n_nodes], [node_chunk_size])
node_begin = node_blocking.getBlock(node_block_list[0]).begin[0]
node_end = node_blocking.getBlock(node_block_list[-1]).end[0]
shape = list(ds_in.shape)
chunks = list(ds_in.chunks)
blocking = nt.blocking([0, 0, 0], shape, chunks)
_extract_and_merge_region_features(blocking, ds_in, ds,
node_begin, node_end, feature_names)
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
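    # Filename convention assumed above (illustrative): a config path like
    # '/tmp/merge_region_features_3.config' parses to job_id == 3.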
merge_region_features(job_id, path)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright 2022, TheCodingJ's"
__credits__: "list[str]" = ["<NAME>"]
__license__ = "MIT"
__name__ = "HBNI Audio Stream Listener"
__version__ = "v1.3.0"
__updated__ = '2022-02-20 14:50:16'
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import json
import os
import re
import sys
import threading
import urllib
import urllib.request
import webbrowser
from datetime import datetime
from functools import partial
import miniaudio
import qdarktheme
import requests
from PyQt5 import uic
from PyQt5.QtCore import (QCoreApplication, QProcess, QRunnable, QSettings, Qt,
QThreadPool, QTimer, pyqtSignal, pyqtSlot)
from PyQt5.QtGui import QFont, QIcon, QPalette, QPixmap
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QDialog,
QGroupBox, QLabel, QLineEdit, QMainWindow, QMenu,
QMessageBox, QPushButton, QScrollArea, QStyle,
QSystemTrayIcon, QTabWidget, QToolButton,
QVBoxLayout, QWidget, qApp)
from win10toast import ToastNotifier
toaster = ToastNotifier()
class Worker(QRunnable):
    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
    @pyqtSlot()
    def run(self):
        self.fn(*self.args, **self.kwargs)
class ScrollLabel(QScrollArea):
def __init__(self, *args, **kwargs):
QScrollArea.__init__(self, *args, **kwargs)
self.setWidgetResizable(True)
content = QWidget(self)
self.setWidget(content)
lay = QVBoxLayout(content)
self.label = QLabel(content)
self.label.setAlignment(Qt.AlignCenter | Qt.AlignTop)
self.label.setWordWrap(True)
lay.addWidget(self.label)
def setText(self, text):
self.label.setText(text)
class Button(QPushButton):
entered = pyqtSignal()
leaved = pyqtSignal()
def enterEvent(self, event):
super().enterEvent(event)
self.entered.emit()
def leaveEvent(self, event):
super().leaveEvent(event)
self.leaved.emit()
class licensewindowUI(QDialog):
def __init__(self):
super(licensewindowUI, self).__init__()
uic.loadUi('license.ui', self)
self.setWindowTitle(__name__)
self.settings = QSettings("A", "B")
self.setWindowIcon(QIcon('icons/icon.png'))
self.icon = self.findChild(QLabel, 'lblIcon')
self.icon.setFixedSize(128, 128)
pixmap = QPixmap('icons/icon.png')
myScaledPixmap = pixmap.scaled(self.icon.size(), Qt.KeepAspectRatio)
self.icon.setPixmap(myScaledPixmap)
        self.licenseText = self.findChild(QLabel, 'label_2')
with open('LICENSE', 'r') as f:
            self.licenseText.setText(f.read())
self.btnClose = self.findChild(QPushButton, 'btnClose')
self.btnClose.clicked.connect(self.close)
self.setFixedSize(780, 470)
if self.settings.contains("Dark theme") \
and self.settings.value("Dark theme") == 'true':
self.toggle_darktheme()
else:
self.toggle_lighttheme()
def toggle_darktheme(self):
self.setStyleSheet(qdarktheme.load_stylesheet())
def toggle_lighttheme(self):
self.setStyleSheet(qdarktheme.load_stylesheet("light"))
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.hbni_html: str
self.active_events: str
self.active_listeners: str
self.active_streams: str
        self.threadpool: QThreadPool = QThreadPool()
        self.threadpool.setMaxThreadCount(12)
        self.startTime: datetime = datetime.now()
        self.currentTime: datetime = datetime.now()
self.settings = QSettings("A", "B")
self.streamPlaying: bool = False
self.streamsOnline: bool = False
self.streamsForceStop: bool = False
self.enabledNotifications: bool = True
self.darkThemeEnabled: bool = False
self.isFullScreen: bool = False
        self.tabWidget: QTabWidget = QTabWidget()
self.tabWidget.tabBarClicked.connect(self.loadArchive)
self.tabWidget.setMovable(True)
self.tabWidget.setStyleSheet("QTabBar{font-size: 12pt} QTabBar::tab { width: 150px; height: 25px} QTabWidget::tab-bar{alignment: center}")
self.streamsTab = QWidget()
self.archivesTab = QWidget()
self.tabWidget.addTab(self.streamsTab, "Streams/Events Tab")
self.tabWidget.addTab(self.archivesTab, "Archives Tab")
self.loadStreamsLayoutTab()
self.loadArchivesLayoutTab()
        self._set_light_logo()
self.loadFileMenu()
self.loadTrayMenu()
self.setCentralWidget(self.tabWidget)
self.check_for_updates(on_start_up=True)
self.setMinimumSize(400, 700)
def loadStreamsLayoutTab(self):
        streamsLayout: QVBoxLayout = QVBoxLayout()
        self.layoutStreams: QVBoxLayout = QVBoxLayout()
self.setWindowTitle(__name__)
self.setWindowIcon(QIcon('icons/icon.png'))
        self.hbnilogo: QLabel = QLabel()
self.hbnilogo.setAlignment(Qt.AlignCenter | Qt.AlignTop)
streamsLayout.addWidget(self.hbnilogo)
        header: QLabel = QLabel(f"<h1>{__name__}</h1>")
header.setAlignment(Qt.AlignCenter | Qt.AlignTop)
streamsLayout.addWidget(header)
        self.lblCallBack: QLabel = QLabel()
self.lblCallBack.setAlignment(Qt.AlignCenter | Qt.AlignTop)
streamsLayout.addWidget(self.lblCallBack)
        self.lblActiveListeners: QLabel = QLabel('Loading...')
self.lblActiveListeners.setAlignment(Qt.AlignCenter | Qt.AlignTop)
streamsLayout.addLayout(self.layoutStreams)
streamsLayout.addWidget(self.lblActiveListeners)
self.btnKillAllStreams: Button = Button(' Stop')
self.btnKillAllStreams.clicked.connect(partial(self.kill_all_threads,
True))
self.btnKillAllStreams.setFixedSize(200, 60)
self.btnKillAllStreams.setVisible(False)
self.btnKillAllStreams.entered.connect(self.handle_entered)
self.btnKillAllStreams.leaved.connect(self.handle_leaved)
self.btnKillAllStreams.setStyleSheet('font-size: 22px')
streamsLayout.addWidget(self.btnKillAllStreams, alignment=Qt.AlignCenter)
self.streamsTab.setLayout(streamsLayout)
def loadArchivesLayoutTab(self):
        archivesLayout: QVBoxLayout = QVBoxLayout()
self.setWindowTitle(__name__)
self.setWindowIcon(QIcon('icons/icon.png'))
        self.hbnilogo2: QLabel = QLabel()
self.hbnilogo2.setAlignment(Qt.AlignCenter | Qt.AlignTop)
archivesLayout.addWidget(self.hbnilogo2)
        header: QLabel = QLabel("<h1>HBNI Audio Streaming Archive</h1>")
header.setAlignment(Qt.AlignCenter | Qt.AlignTop)
archivesLayout.addWidget(header)
        lblSearch: QLabel = QLabel("Search:")
        archivesLayout.addWidget(lblSearch)
        self.inputArchiveSearch: QLineEdit = QLineEdit(self)
self.inputArchiveSearch.returnPressed.connect(self.loadArchive)
self.inputArchiveSearch.setFont(QFont('Arial', 14))
archivesLayout.addWidget(self.inputArchiveSearch)
scroll = QScrollArea(self)
scroll.setWidgetResizable(True)
scrollContent = QWidget(scroll)
        self.layoutArchive: QVBoxLayout = QVBoxLayout(scrollContent)
self.layoutArchive.setStretch(11, 1)
scrollContent.setLayout(self.layoutArchive)
scroll.setWidget(scrollContent)
archivesLayout.addWidget(scroll)
self.archivesTab.setLayout(archivesLayout)
def loadFileMenu(self):
fileMenu = QMenu("File", self)
actionExit = QAction('Exit', self)
actionExit.triggered.connect(self.close)
fileMenu.addAction(actionExit)
settingsMenu = QMenu("Settings", self)
checkBox = QAction('Auto start stream', self, checkable=True)
if self.settings.contains("Auto start stream"):
checkBox.setChecked(self.settings.value("Auto start stream") == 'true')
checkBox.toggled.connect(partial(self.saved_toggle_menu_settings,
checkBox))
settingsMenu.addAction(checkBox)
checkBox = QAction('Enable notifications', self, checkable=True)
checkBox.toggled.connect(partial(self.saved_toggle_menu_settings,
checkBox))
if self.settings.contains("Enable notifications"):
checkBox.setChecked(self.settings.value("Enable notifications") == 'true')
self.enabledNotifications = self.settings.value("Enable notifications") == 'true'
else:
checkBox.setChecked(True)
self.enabledNotifications = True
settingsMenu.addAction(checkBox)
checkBox = QAction('Dark theme', self, checkable=True)
if self.settings.contains("Dark theme"):
checkBox.setChecked(self.settings.value("Dark theme") == 'true')
if checkBox.isChecked():
self.darkThemeEnabled = True
self.toggle_darktheme()
elif not checkBox.isChecked():
self.darkThemeEnabled = False
self.toggle_lighttheme()
else:
self.toggle_lighttheme()
checkBox.toggled.connect(partial(self.saved_toggle_menu_settings,
checkBox))
settingsMenu.addAction(checkBox)
helpMenu = QMenu("Help", self)
actionAbout_Qt = QAction('About Qt', self)
actionAbout_Qt.triggered.connect(qApp.aboutQt)
helpMenu.addAction(actionAbout_Qt)
actionLicense = QAction('View License', self)
actionLicense.triggered.connect(self.open_license_window)
helpMenu.addAction(actionLicense)
helpMenu.addSeparator()
actionCheckForUpdates = QAction('Check for Updates...', self)
actionCheckForUpdates.triggered.connect(self.check_for_updates)
helpMenu.addAction(actionCheckForUpdates)
helpMenu.addSeparator()
actionAbout = QAction('About', self)
actionAbout.triggered.connect(self.open_about_window)
helpMenu.addAction(actionAbout)
ViewMenu = QMenu("View", self)
actionFullScreen = QAction('Toggle Fullscreen', self)
actionFullScreen.triggered.connect(self.toggle_fullscreen)
ViewMenu.addAction(actionFullScreen)
actionHide = QAction('Hide', self)
actionHide.triggered.connect(self.hide)
ViewMenu.addAction(actionHide)
self.menuBar().addMenu(fileMenu)
self.menuBar().addMenu(settingsMenu)
self.menuBar().addMenu(helpMenu)
self.menuBar().addMenu(ViewMenu)
def loadTrayMenu(self):
self.tray_icon = QSystemTrayIcon(self)
self.tray_icon.setIcon(QIcon("icons/icon.png"))
show_action = QAction("Show", self)
quit_action = QAction("Exit", self)
show_action.triggered.connect(self.show)
quit_action.triggered.connect(qApp.quit)
tray_menu = QMenu()
tray_menu.addAction(show_action)
tray_menu.addAction(quit_action)
self.tray_icon.setContextMenu(tray_menu)
self.tray_icon.show()
self.check_for_website_changes()
self.timerCheckForStreams = QTimer()
self.timerCheckForStreams.setInterval(5000)
self.timerCheckForStreams.timeout.connect(self.check_for_website_changes)
self.timerCheckForStreams.start()
self.timerUpdateTimer = QTimer()
self.timerUpdateTimer.setInterval(1000)
self.timerUpdateTimer.timeout.connect(self.update_timer)
self.timerUpdateTimer.start()
if not self.settings.contains("fullscreen"):
self.isFullScreen = False
self.settings.setValue("fullscreen", False)
else:
self.isFullScreen = self.settings.value("fullscreen") == 'true'
if self.isFullScreen:
self.show()
self.showFullScreen()
else:
self.load_geometry()
self.show()
def closeEvent(self, event):
try:
self.timerUpdateTimer.stop()
self.timerCheckForStreams.stop()
self.device.stop()
except AttributeError:
pass
self.timerUpdateTimer.stop()
self.timerCheckForStreams.stop()
self.save_geometry()
super().closeEvent(event)
def save_geometry(self):
self.settings.setValue("geometry", self.saveGeometry())
def load_geometry(self):
if self.settings.contains("geometry"):
self.restoreGeometry(self.settings.value("geometry"))
else:
self.setGeometry(100, 100, 480, 740)
def open_about_window(self) -> None:
fmt = '%Y-%m-%d %H:%M:%S'
d1 = datetime.strptime(__updated__, fmt)
        days_ago: int = (datetime.now() - d1).days
QMessageBox.information(self,
__name__,
f"Developed by: TheCodingJ's\nVersion: {__version__}\nDate: {__updated__} ({days_ago} days ago)",
QMessageBox.Ok,
QMessageBox.Ok)
def check_for_updates(self, on_start_up: bool = False) -> None:
try:
response = requests.get("https://api.github.com/repos/thecodingjsoftware/HBNI-Audio-Stream-Listener/releases/latest")
version: str = response.json()["name"].replace(' ', '')
if version != __version__:
QMessageBox.information(self,
__name__,
"There is a new update available",
QMessageBox.Ok,
QMessageBox.Ok)
elif not on_start_up:
QMessageBox.information(self,
__name__,
"There are currently no updates available.",
QMessageBox.Ok,
QMessageBox.Ok)
except Exception as e:
if not on_start_up:
QMessageBox.information(
self,
__name__,
f'Error!\n\n{e}',
QMessageBox.Ok,
QMessageBox.Ok,
)
def open_license_window(self) -> None:
self.licenseUI = licensewindowUI()
self.licenseUI.show()
    def saved_toggle_menu_settings(self, checkBox: QAction) -> None:
self.settings.setValue(checkBox.text(), checkBox.isChecked())
self.btnKillAllStreams.setIcon(QIcon('icons/stop_black.png'))
if checkBox.text() == 'Dark theme':
if checkBox.isChecked():
self.toggle_darktheme()
elif not checkBox.isChecked():
self.toggle_lighttheme()
def toggle_fullscreen(self):
if self.isFullScreen:
self.showNormal()
else:
self.showFullScreen()
self.isFullScreen = not self.isFullScreen
self.settings.setValue("fullscreen", self.isFullScreen)
def toggle_darktheme(self) -> None:
self.darkThemeEnabled = True
logo: QPixmap = QPixmap('icons/hbni_logo_dark.png')
self.btnKillAllStreams.setIcon(QIcon('icons/stop_white.png'))
        self._apply_logo(logo)
self.setStyleSheet(qdarktheme.load_stylesheet())
def toggle_lighttheme(self) -> None:
self.darkThemeEnabled = False
        self._set_light_logo()
self.setStyleSheet(qdarktheme.load_stylesheet("light"))
    def _set_light_logo(self):
        logo: QPixmap = QPixmap('icons/hbni_logo_light.png')
        self._apply_logo(logo)
    def _apply_logo(self, logo):
logo = logo.scaled(200, 200, Qt.KeepAspectRatio)
self.hbnilogo.setPixmap(logo)
self.hbnilogo2.setPixmap(logo)
def clearLayout(self, layout) -> None:
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearLayout(item.layout())
def play_stream(self, stream_link: str) -> None:
try:
with miniaudio.IceCastClient(stream_link) as source:
stream = miniaudio.stream_any(source, source.audio_format)
with miniaudio.PlaybackDevice() as self.device:
self.device.start(stream)
input()
except Exception:
pass
def listen_to_stream(self, stream_link: str) -> None:
self.streamsForceStop = False
self.startTime = datetime.now().replace(microsecond=0)
self.streamPlaying = True
self.btnKillAllStreams.setVisible(True)
self.worker = Worker(partial(self.play_stream, stream_link))
self.threadpool.start(self.worker)
@pyqtSlot()
def kill_all_threads(self, pressed_by_button: bool = False) -> None:
try:
self.device.close()
self.device.stop()
except AttributeError:
pass
if pressed_by_button:
self.streamsForceStop = True
self.btnKillAllStreams.setVisible(False)
if self.streamPlaying:
self.streamPlaying = False
# restart()
def find_active_events(self, html: str) -> str:
        regex: str = r'(?=(<div class="event">))(\w|\W)*(?<=<\/div>)'
matches = re.finditer(regex, html, re.MULTILINE)
for match in matches:
if 'no schedule' in match[0].lower() or 'no upcoming events' in match[0].lower() or 'no events' in match[0].lower():
return ''
return match[0]
return ''
    def find_active_listeners(self, html: str) -> str:
        regex: str = r'Current Number of Listeners: ([0-9]*)'
matches = re.finditer(regex, html, re.MULTILINE)
for match in matches:
return match[0]
return ''
def find_active_streams(self, tag: str, html: str,
replace_text: bool = True) -> 'list[str]':
        '''
        find_active_streams finds the strings attached to a tag using regex matching
        Args:
            tag (str): a tag in the html such as "data-mnt" or "data-stream"
            html (str): the html string to parse
            replace_text (bool): if True, strip slashes and title-case each match
        Returns:
            list[str]: the strings attached to the tag.
        '''
regex = r"{}=([\"'])((?:(?=(?:\\)*)\\.|.)*?)\1".format(tag)
matches = re.finditer(regex, html, re.MULTILINE)
list_matches: list[str] = []
for match in matches:
m = match.group()
m = m.replace(tag, '').replace('=', '').replace('\'', '')
if replace_text:
m = m.replace('/', '').title()
list_matches.append(m)
return list_matches
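    # Example (made-up markup): with tag='data-mnt' and
    # html="<div data-mnt='/john_doe'>", the regex captures
    # "data-mnt='/john_doe'"; stripping the tag, '=' and quotes leaves
    # '/john_doe', and replace_text=True turns it into 'John_Doe'.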
def update_timer(self) -> None:
try:
if self.streamPlaying:
self.currentTime = datetime.now().replace(microsecond=0)
                timeDifference = self.currentTime - self.startTime
self.lblActiveListeners.setText(f'{self.active_listeners}\nStreaming for:\n{timeDifference}')
else:
self.lblActiveListeners.setText(f'{self.active_listeners}')
except AttributeError:
pass
def loadArchive(self) -> None:
self.downloadArchiveDatabase()
data = self.loadJson()
self.clearLayout(self.layoutArchive)
allFileNames = [
fileName
for fileName in data
if self.inputArchiveSearch.text().lower() in fileName.lower()
]
allFileNames.reverse()
for fileName in allFileNames:
text = fileName.replace('_', ':').replace('.mp3', '')
btnDownloadArchive: Button = Button(text)
btnDownloadArchive.setStyleSheet('font-size: 18px')
btnDownloadArchive.setFixedHeight(50)
btnDownloadArchive.entered.connect(self.handle_entered)
btnDownloadArchive.leaved.connect(self.handle_leaved)
if self.darkThemeEnabled:
btnDownloadArchive.setIcon(QIcon('icons/download_white.png'))
else:
btnDownloadArchive.setIcon(QIcon('icons/download_black.png'))
btnDownloadArchive.clicked.connect(partial(self.open_website, self.getDownloadLink(fileName=fileName)))
self.layoutArchive.addWidget(btnDownloadArchive)
def downloadArchiveDatabase(self) -> None:
url = "https://raw.githubusercontent.com/TheCodingJsoftware/HBNI-Audio-Stream-Recorder/master/downloadLinks.json"
req = requests.get(url)
if req.status_code == requests.codes.ok:
data = dict(req.json()) # the response is a JSON
with open("/websiteDownloadLinks.json", "w+") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
else:
print("Content was not found.")
def loadJson(self) -> dict:
with open("/websiteDownloadLinks.json", "r") as f:
data = json.load(f)
return data
def getDownloadLink(self, fileName: str) -> str:
data = self.loadJson()
try:
return data[fileName]["downloadLink"]
except KeyError:
return None
def update_ui(self) -> None:
self.clearLayout(self.layoutStreams)
self.active_events = self.find_active_events(html=self.hbni_html)
self.active_events = self.active_events.replace('h3', 'h1').replace('<p>', '<h2>').replace('</p>', '</h2>').replace('<p class="date">', '<h2>').replace('</div>', '</div><br>')
if self.active_events != '' and 'No streams currently online.' in self.hbni_html:
self.lblCallBack.setText('<h2>Upcoming Events:</h2>')
            lblEvents: ScrollLabel = ScrollLabel()
lblEvents.setText(self.active_events)
self.layoutStreams.addWidget(lblEvents)
self.lblActiveListeners.setText('')
self.kill_all_threads()
if 'No streams currently online.' not in self.hbni_html:
self.clearLayout(self.layoutStreams)
self.lblCallBack.setText('<h2>Streams currently online:</h2>')
titles: list[str] = self.find_active_streams(tag='data-mnt', html=self.hbni_html)
bodies: list[str] = self.find_active_streams(tag='data-stream', html=self.hbni_html)
host_addresses: list[str] = self.find_active_streams(tag='data-mnt', html=self.hbni_html, replace_text=False)
for title, body, host_address in zip(titles, bodies, host_addresses):
btnStream: Button = Button(f' {title} - {body}')
btnStream.setToolTip(f'http://hbniaudio.hbni.net:8000{host_address}')
if self.darkThemeEnabled:
btnStream.setIcon(QIcon('icons/play_white.png'))
else:
btnStream.setIcon(QIcon('icons/play_black.png'))
btnStream.setStyleSheet('font-size: 18px')
btnStream.setEnabled(not self.streamPlaying)
btnStream.clicked.connect(
partial(
self.listen_to_stream,
f'http://hbniaudio.hbni.net:8000{host_address}',
)
)
self.layoutStreams.addWidget(btnStream, alignment=Qt.AlignCenter)
if not self.streamsOnline and self.settings.contains("Auto start stream") and self.settings.value("Auto start stream") != 'true' and self.enabledNotifications:
toaster.show_toast(__name__,
f'{titles[0]} just started a stream.',
icon_path='icons/icon.ico',
duration=3,
threaded=True)
self.streamsOnline = True
if self.settings.contains("Auto start stream") and self.settings.value("Auto start stream") == 'true' and not self.streamPlaying and not self.streamsForceStop:
if self.enabledNotifications:
try:
toaster.show_toast(__name__,
f'Autoplaying currently active stream.\n{titles[0]} - {bodies[0]}',
icon_path='icons/icon.ico',
duration=3,
threaded=True)
except IndexError:
pass
self.listen_to_stream(f'http://hbniaudio.hbni.net:8000{host_addresses[0]}')
elif self.active_events == '':
self.lblCallBack.setText('<h2>No streams currently online or events scheduled</h2>')
self.kill_all_threads()
if 'No streams currently online.' in self.hbni_html:
self.streamsOnline = False
self.active_listeners = self.find_active_lisenters(html=self.hbni_html)
def handle_entered(self):
QApplication.setOverrideCursor(Qt.PointingHandCursor)
def handle_leaved(self):
QApplication.restoreOverrideCursor()
def open_website(self, website: str) -> None:
webbrowser.open(website)
def check_for_website_changes(self) -> None:
try:
fp = urllib.request.urlopen("http://hbniaudio.hbni.net", timeout=3)
html_bytes: bytes = fp.read()
self.hbni_html = html_bytes.decode("utf8")
fp.close()
self.update_ui()
except Exception:  # urllib.error.URLError, ConnectionResetError, TimeoutError, etc. -- any failure is treated as a network error
if self.streamPlaying:
self.kill_all_threads()
self.clearLayout(self.layoutStreams)
self.lblCallBack.setText('<h2>Network error</h2>')
self.lblActiveListeners.setText('Network error')
lblEvents: ScrollLabel = ScrollLabel()
lblEvents.setText('<h3>Check if you are connected to the internet or logged into your network.</h3>')
self.layoutStreams.addWidget(lblEvents)
def restart():
os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)
def main():
app: QApplication = QApplication([])
app.setAttribute(Qt.ApplicationAttribute.AA_UseHighDpiPixmaps)
MainWindow()
app.exec_()
if __name__ == '__main__':
    main()
|
<reponame>CornellDataScience/Deep-Learning-Crash-Course<filename>03-Convolutional-Neural-Networks/cnn_train.py<gh_stars>10-100
"""
Train a CNN on the CIFAR-10 dataset.
Adapted from https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10
"""
import argparse
import os
import numpy as np
import tarfile
import tensorflow as tf
import time
import sys
from six.moves import urllib
from cnn_model import CNN
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
TRAIN_FILES = ['../data/cifar-10-batches-bin/data_batch_%d.bin' % i for i in np.arange(1, 6, dtype=int)]
TEST_FILES = ['../data/cifar-10-batches-bin/test_batch.bin']
def train(args):
"""Train model"""
data = CIFAR10(args.batch_size, TRAIN_FILES)
# create save directory if it does not already exist
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
print('Initializing model...')
images = tf.placeholder(tf.float32, [None, 32, 32, 3], 'input_images')
distorted = distort_images(images)
model = CNN(distorted, learning_rate=args.learning_rate)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
print('Starting training...')
for n in range(args.num_epochs):
for i in range(data.n_batches):
start = time.time()
x, y = data.next_batch()
loss, _ = sess.run([model.loss, model.train_step], feed_dict={images: x, model.labels: y})
end = time.time()
print('{}/{} (epoch {}), train_loss={:.3f}, time/batch={:.3f}'
.format(n * data.n_batches + i, args.num_epochs * data.n_batches, n, loss, end - start))
checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=n * data.n_batches)
print("model saved to {}".format(checkpoint_path))
class CIFAR10:
"""Object representing dataset"""
def __init__(self, batch_size, files):
self.batch_size = batch_size
self.images, self.labels = None, None
self.n_batches = 0
self.x_batches, self.y_batches = None, None
self.pointer = 0
if files is None:
download_cifar('../data/')
self.files = TRAIN_FILES
else:
self.files = files
self.pre_process()
self.create_batches()
def pre_process(self):
"""Load and pre-process data"""
print('Pre-processing data...')
batches = ()
binary = tf.placeholder(tf.string)
for file in self.files:
with open(file, 'rb') as f:
byte_string = f.read()
decoded = tf.decode_raw(binary, tf.uint8)
with tf.Session() as sess:
vectors = sess.run(decoded, feed_dict={binary: byte_string})
batches += (np.reshape(vectors, [-1, 3073]), )
data = np.vstack(batches)
self.images = data[:, 1:].reshape(-1, 3, 32, 32).transpose([0, 2, 3, 1])
self.labels = data[:, 0]
def create_batches(self):
"""Split data into training mini-batches"""
self.n_batches = int(self.labels.size / self.batch_size)
self.x_batches = np.array_split(self.images, self.n_batches)
self.y_batches = np.array_split(self.labels, self.n_batches)
def next_batch(self):
"""Return current batch, increment pointer by 1 (modulo n_batches)"""
x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
self.pointer = (self.pointer + 1) % self.n_batches
return x, y
def distort_images(images):
"""Randomly distort a batch of images"""
with tf.variable_scope('data_augmentation'):
# Randomly crop a [height, width] section of the image
distorted = tf.map_fn(lambda img: tf.random_crop(img, [24, 24, 3]), images)
# Randomly flip the image horizontally
distorted = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), distorted)
# Subtract off the mean and divide by the variance of the pixels
normalized = tf.map_fn(lambda img: tf.image.per_image_standardization(img), distorted)
return normalized
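# Note: the random crop reduces the spatial size from 32x32 to 24x24, so the
# CNN defined in cnn_model.py is assumed to accept 24x24x3 inputs.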
def download_cifar(save_dir):
"""Download and extract CIFAR-10 dataset"""
if not os.path.exists(save_dir):
os.makedirs(save_dir)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(save_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, (count * block_size) / total_size * 100))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
stat_info = os.stat(filepath)
print('\nSuccessfully downloaded {} {} bytes.'.format(filename, stat_info.st_size))
extracted_dir_path = os.path.join(save_dir, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(save_dir)
def parse_arguments(argv):
"""Parse command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='../data/', help='path to save/load CIFAR-10 data to/from.')
parser.add_argument('--save_dir', type=str, default='../models/cnn/', help='directory to save trained models.')
parser.add_argument('--batch_size', type=int, default=100, help='mini-batch size.')
parser.add_argument('--num_epochs', type=int, default=100, help='number of epochs')
parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate for Adam optimizer.')
args = parser.parse_args(argv)
return args
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
download_cifar(args.data_dir)
train(args)
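# Usage sketch (flags as defined in parse_arguments above):
#   python cnn_train.py --batch_size 128 --num_epochs 10 --learning_rate 0.001
# The CIFAR-10 binaries are downloaded to --data_dir on first run and a
# checkpoint is written to --save_dir after every epoch.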
|
from skmultiflow.data import MultilabelGenerator
from skmultiflow.meta.classifier_chains import ClassifierChain, MCC, ProbabilisticClassifierChain
from skmultiflow.data import make_logical
from sklearn.linear_model import SGDClassifier
import numpy as np
def test_classifier_chains():
stream = MultilabelGenerator(random_state=112, n_targets=3, n_samples=5150)
stream.prepare_for_use()
estimator = SGDClassifier(random_state=112, max_iter=10)
learner = ClassifierChain(base_estimator=estimator, random_state=112)
X, y = stream.next_sample(150)
learner.partial_fit(X, y)
cnt = 0
max_samples = 5000
predictions = []
true_labels = []
wait_samples = 100
correct_predictions = 0
while cnt < max_samples:
X, y = stream.next_sample()
# Test every n samples
if (cnt % wait_samples == 0) and (cnt != 0):
predictions.append(learner.predict(X)[0])
true_labels.append(y[0])
if np.array_equal(y[0], predictions[-1]):
correct_predictions += 1
learner.partial_fit(X, y)
cnt += 1
expected_predictions = [[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
[1.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0]]
expected_correct_predictions = 21
assert np.array_equal(predictions, expected_predictions)
assert correct_predictions == expected_correct_predictions
assert isinstance(learner.predict(X), np.ndarray)
# assert type(learner.predict_proba(X)) == np.ndarray Not available because default loss is set to 'hinge'
def test_classifier_chains_all():
seed = 1
X, Y = make_logical(random_state=seed)
# CC
cc = ClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=seed))
cc.fit(X, Y)
y_predicted = cc.predict(X)
y_expected = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 0]]
assert np.alltrue(y_predicted == y_expected)
# RCC
rcc = ClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=seed), order='random', random_state=seed)
rcc.fit(X, Y)
rcc.fit(X, Y)
y_predicted = rcc.predict(X)
y_expected = [[1, 1, 0], [1, 1, 0], [1, 1, 0], [1, 1, 0]]
assert np.alltrue(y_predicted == y_expected)
# MCC
mcc = MCC(SGDClassifier(max_iter=100, loss='log', random_state=seed), M=1000)
mcc.fit(X, Y)
mcc.fit(X, Y)
y_predicted = mcc.predict(X)
y_expected = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 0]]
assert np.alltrue(y_predicted == y_expected)
# PCC
pcc = ProbabilisticClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=seed))
pcc.fit(X, Y)
pcc.fit(X, Y)
y_predicted = pcc.predict(X)
y_expected = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 0]]
assert np.alltrue(y_predicted == y_expected)
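# Both tests are deterministic because every generator and estimator above is
# seeded with a fixed random_state, so the hard-coded expected values are
# reproducible. Assuming this file is saved as test_classifier_chains.py, it
# can be run with:
#   pytest test_classifier_chains.py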
|
"""
mfsub module. Contains the ModflowSub class. Note that the user can access
the ModflowSub class as `flopy.modflow.ModflowSub`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/sub.htm>`_.
"""
import sys
import numpy as np
from ..pakbase import Package
from ..utils import Util2d, Util3d, read1d
class ModflowSub(Package):
"""
MODFLOW SUB Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is 0).
isuboc : int
isuboc is a flag used to control output of information generated by the
SUB Package. (default is 0).
idsave : int
idsave is a flag and a unit number on which restart records for delay
interbeds will be saved at the end of the simulation. (default is 0).
idrest : int
idrest is a flag and a unit number on which restart records for delay
interbeds will be read in at the start of the simulation (default is 0).
nndb : int
nndb is the number of systems of no-delay interbeds. (default is 1).
ndb : int
ndb is the number of systems of delay interbeds. (default is 1).
nmz : int
nmz is the number of material zones that are needed to define the
hydraulic properties of systems of delay interbeds. Each material zone
is defined by a combination of vertical hydraulic conductivity, elastic
specific storage, and inelastic specific storage. (default is 1).
nn : int
nn is the number of nodes used to discretize the half space to approximate
the head distributions in systems of delay interbeds. (default is 20).
ac1 : float
ac1 is an acceleration parameter. This parameter is used to predict the
aquifer head at the interbed boundaries on the basis of the head change
computed for the previous iteration. A value of 0.0 results in the use
of the aquifer head at the previous iteration. Limited experience indicates
that optimum values may range from 0.0 to 0.6. (default is 0).
ac2 : float
ac2 is an acceleration parameter. This acceleration parameter is a multiplier
for the head changes to compute the head at the new iteration. Values
are normally between 1.0 and 2.0, but the optimum is probably closer to 1.0
than to 2.0. However this parameter also can be used to help convergence of
the iterative solution by using values between 0 and 1. (default is 1.0).
itmin : int
ITMIN is the minimum number of iterations for which one-dimensional equations
will be solved for flow in interbeds when the Strongly Implicit Procedure (SIP)
is used to solve the ground-water flow equations. If the current iteration
level is greater than ITMIN and the SIP convergence criterion for head
closure (HCLOSE) is met at a particular cell, the one-dimensional equations
for that cell will not be solved. The previous solution will be used. The value
of ITMIN is not used if a solver other than SIP is used to solve the
ground-water flow equations. (default is 5).
ln : int or array of ints (nndb)
ln is a one-dimensional array specifying the model layer assignments for each
system of no-delay interbeds. (default is 0).
ldn : int or array of ints (ndb)
ldn is a one-dimensional array specifying the model layer assignments for each
system of delay interbeds.(default is 0).
rnb : float or array of floats (ndb, nrow, ncol)
rnb is an array specifying the factor nequiv at each cell for each system of
delay interbeds. The array also is used to define the areal extent of each
system of interbeds. For cells beyond the areal extent of the system of
interbeds, enter a number less than 1.0 in the corresponding element of
this array. (default is 1).
hc : float or array of floats (nndb, nrow, ncol)
hc is an array specifying the preconsolidation head or preconsolidation stress
in terms of head in the aquifer for systems of no-delay interbeds. For any
model cells in which specified HC is greater than the corresponding value of
starting head, the value of HC will be set to that of starting head.
(default is 100000).
sfe : float or array of floats (nndb, nrow, ncol)
sfe is an array specifying the dimensionless elastic skeletal storage
coefficient for systems of no-delay interbeds. (default is 1.e-4).
sfv : float or array of floats (nndb, nrow, ncol)
sfv is an array specifying the dimensionless inelastic skeletal storage
coefficient for systems of no-delay interbeds. (default is 1.e-3).
com : float or array of floats (nndb, nrow, ncol)
com is an array specifying the starting compaction in each system of
no-delay interbeds. Compaction values computed by the package are added to
values in this array so that printed or stored values of compaction and land
subsidence may include previous components. Values in this array do not
affect calculations of storage changes or resulting compaction. For simulations
in which output values are to reflect compaction and subsidence since the start
of the simulation, enter zero values for all elements of this array. (default is 0).
dp : list or array of floats (nmz, 3)
Data item includes nmz records, each with a value of vertical hydraulic
conductivity, elastic specific storage, and inelastic specific storage.
(default is [1.e-6, 6.e-6, 6.e-4]).
dstart : float or array of floats (ndb, nrow, ncol)
dstart is an array specifying starting head in interbeds for systems of delay
interbeds. For a particular location in a system of interbeds, the starting head
is applied to every node in the string of nodes that approximates flow in half
of a doubly draining interbed. (default is 1).
dhc : float or array of floats (ndb, nrow, ncol)
dhc is an array specifying the starting preconsolidation head in interbeds for
systems of delay interbeds. For a particular location in a system of interbeds,
the starting preconsolidation head is applied to every node in the string of
nodes that approximates flow in half of a doubly draining interbed. For any
location at which specified starting preconsolidation head is greater than the
corresponding value of the starting head, Dstart, the value of the starting
preconsolidation head will be set to that of the starting head. (default is 100000).
dcom : float or array of floats (ndb, nrow, ncol)
dcom is an array specifying the starting compaction in each system of delay interbeds.
Compaction values computed by the package are added to values in this array so that
printed or stored values of compaction and land subsidence may include previous
components. Values in this array do not affect calculations of storage changes or
resulting compaction. For simulations in which output values are to reflect compaction
and subsidence since the start of the simulation, enter zero values for all elements
of this array. (default is 0).
dz : float or array of floats (ndb, nrow, ncol)
dz is an array specifying the equivalent thickness for a system of delay interbeds.
(default is 1).
nz : int or array of ints (ndb, nrow, ncol)
nz is an array specifying the material zone numbers for systems of delay interbeds.
The zone number for each location in the model grid selects the hydraulic conductivity,
elastic specific storage, and inelastic specific storage of the interbeds.
(default is 1).
ids15 : list or array of ints (12)
Format codes and unit numbers for subsidence, compaction by model layer, compaction
by interbed system, vertical displacement, no-delay preconsolidation, and delay
preconsolidation will be printed. If ids15 is None and isuboc>0 then print code 0
will be used for all data which is output to the binary subsidence output file
(unit=1051). The 12 entries in ids15 correspond to ifm1, iun1, ifm2, iun2, ifm3,
iun3, ifm4, iun4, ifm5, iun5, ifm6, and iun6 variables. (default is None).
ids16 : list or array of ints (isuboc, 17)
Stress period and time step range and print and save flags used to control printing
and saving of information generated by the SUB Package during program execution. Each
row of ids16 corresponds to isp1, isp2, its1, its2, ifl1, ifl2, ifl3, ifl4, ifl5,
ifl6, ifl7, ifl8, ifl9, ifl10, ifl11, ifl12, and ifl13 variables for isuboc entries.
isp1, isp2, its1, and its2 are stress period and time step ranges. ifl1 and ifl2
control subsidence printing and saving. ifl3 and ifl4 control compaction by model
layer printing and saving. ifl5 and ifl6 control compaction by interbed system
printing and saving. ifl7 and ifl8 control vertical displacement printing and
saving. ifl9 and ifl10 control critical head for no-delay interbeds printing and saving.
ifl11 and ifl12 control critical head for delay interbeds printing and saving. ifl13
controls volumetric budget for delay interbeds printing. If ids16 is None and isuboc>0
then all available subsidence output will be printed and saved to the binary
subsidence output file (unit=1051). (default is None).
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name and other sub output
files will be created using the model name and .cbc and swt output
extensions (for example, modflowtest.cbc), if ipakcb and other
sub output files (dataset 15) are numbers greater than zero.
If a single string is passed the package name will be set to the
string and other sub output files will be set to the model name with
the appropriate output file extensions. To define the names for all
package files (input and output) the length of the list of strings
should be 9. Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are supported in Flopy only when reading in existing models.
Parameter values are converted to native values in Flopy and the
connection to "parameters" is thus nonexistent. Parameters are not supported in the SUB Package.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> sub = flopy.modflow.ModflowSub(m)
"""
def __init__(self, model, ipakcb=None, isuboc=0, idsave=None, idrest=None,
nndb=1, ndb=1, nmz=1, nn=20, ac1=0., ac2=1.0, itmin=5,
ln=0, ldn=0, rnb=1,
hc=100000., sfe=1.e-4, sfv=1.e-3, com=0.,
dp=[[1.e-6, 6.e-6, 6.e-4]],
dstart=1., dhc=100000., dcom=0., dz=1., nz=1,
ids15=None, ids16=None,
extension='sub', unitnumber=None,
filenames=None):
"""
Package constructor.
"""
# set default unit number if one is not specified
if unitnumber is None:
unitnumber = ModflowSub.defaultunit()
# set filenames
if filenames is None:
filenames = [None for x in range(9)]
elif isinstance(filenames, str):
filenames = [filenames] + [None for x in range(8)]
elif isinstance(filenames, list):
if len(filenames) < 9:
n = 9 - len(filenames)
filenames = filenames + [None for x in range(n)]
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(ipakcb, fname=fname,
package=ModflowSub.ftype())
else:
ipakcb = 0
if idsave is not None:
fname = filenames[2]
model.add_output_file(idsave, fname=fname, extension='rst',
package=ModflowSub.ftype())
else:
idsave = 0
if idrest is None:
idrest = 0
item15_extensions = ["subsidence.hds", "total_comp.hds",
"inter_comp.hds", "vert_disp.hds",
"nodelay_precon.hds", "delay_precon.hds"]
item15_units = [2052 + i for i in range(len(item15_extensions))]
if isuboc > 0:
idx = 0
for k in range(1, 12, 2):
ext = item15_extensions[idx]
if ids15 is None:
iu = item15_units[idx]
else:
iu = ids15[k]
fname = filenames[idx+3]
model.add_output_file(iu, fname=fname, extension=ext,
package=ModflowSub.ftype())
idx += 1
extensions = [extension]
name = [ModflowSub.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extensions, name=name,
unit_number=units, extra=extra, filenames=fname)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'sub.htm'
self.ipakcb = ipakcb
self.isuboc = isuboc
self.idsave = idsave
self.idrest = idrest
self.nndb = nndb
self.ndb = ndb
self.nmz = nmz
self.nn = nn
self.ac1 = ac1
self.ac2 = ac2
self.itmin = itmin
# no-delay bed data
self.ln = None
self.hc = None
self.sfe = None
self.sfv = None
if nndb > 0:
self.ln = Util2d(model, (nndb,), np.int, ln, name='ln')
self.hc = Util3d(model, (nndb, nrow, ncol), np.float32, hc,
name='hc',
locat=self.unit_number[0])
self.sfe = Util3d(model, (nndb, nrow, ncol), np.float32, sfe,
name='sfe',
locat=self.unit_number[0])
self.sfv = Util3d(model, (nndb, nrow, ncol), np.float32, sfv,
name='sfv',
locat=self.unit_number[0])
self.com = Util3d(model, (nndb, nrow, ncol), np.float32, com,
name='com',
locat=self.unit_number[0])
# delay bed data
self.ldn = None
self.rnb = None
self.dstart = None
self.dhc = None
self.dz = None
self.nz = None
if ndb > 0:
self.ldn = Util2d(model, (ndb,), np.int, ldn, name='ldn')
self.rnb = Util3d(model, (ndb, nrow, ncol), np.float32, rnb,
name='rnb',
locat=self.unit_number[0])
self.dstart = Util3d(model, (ndb, nrow, ncol), np.float32, dstart,
name='dstart',
locat=self.unit_number[0])
self.dhc = Util3d(model, (ndb, nrow, ncol), np.float32, dhc,
name='dhc',
locat=self.unit_number[0])
self.dcom = Util3d(model, (ndb, nrow, ncol), np.float32, dcom,
name='dcom',
locat=self.unit_number[0])
self.dz = Util3d(model, (ndb, nrow, ncol), np.float32, dz,
name='dz',
locat=self.unit_number[0])
self.nz = Util3d(model, (ndb, nrow, ncol), np.int, nz, name='nz',
locat=self.unit_number[0])
# material zone data
if isinstance(dp, list):
dp = np.array(dp)
self.dp = dp
# output data
if isuboc > 0:
if ids15 is None:
ids15 = np.zeros(12, dtype=np.int)
iu = 0
for i in range(1, 12, 2):
ids15[i] = item15_units[iu]
iu += 1
self.ids15 = ids15
else:
if isinstance(ids15, list):
ids15 = np.array(ids15)
self.ids15 = ids15
if ids16 is None:
self.isuboc = 1
# save and print everything
ids16 = np.ones((1, 17), dtype=np.int)
ids16[0, 0] = 0
ids16[0, 1] = nper - 1
ids16[0, 2] = 0
ids16[0, 3] = 9999
else:
if isinstance(ids16, list):
ids16 = np.array(ids16)
if len(ids16.shape) == 1:
ids16 = np.reshape(ids16, (1, ids16.shape[0]))
self.ids16 = ids16
# add package to model
self.parent.add_package(self)
def write_file(self, check=False, f=None):
"""
Write the package file.
Returns
-------
None
"""
if check:
print("warning: check not implemented for sub")
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# Open file for writing
if f is None:
f = open(self.fn_path, 'w')
# First line: heading
f.write('{}\n'.format(self.heading))
# write dataset 1
f.write(
'{} {} {} {} {} {} '.format(self.ipakcb, self.isuboc, self.nndb,
self.ndb, self.nmz, self.nn))
f.write('{} {} {} {} {}\n'.format(self.ac1, self.ac2,
self.itmin, self.idsave,
self.idrest))
if self.nndb > 0:
t = self.ln.array
for tt in t:
f.write('{} '.format(tt + 1))
f.write('\n')
if self.ndb > 0:
t = self.ldn.array
for tt in t:
f.write('{} '.format(tt + 1))
f.write('\n')
# write dataset 4
if self.ndb > 0:
for k in range(self.ndb):
f.write(self.rnb[k].get_file_entry())
# write dataset 5 to 8
if self.nndb > 0:
for k in range(self.nndb):
f.write(self.hc[k].get_file_entry())
f.write(self.sfe[k].get_file_entry())
f.write(self.sfv[k].get_file_entry())
f.write(self.com[k].get_file_entry())
# write dataset 9
if self.ndb > 0:
for k in range(self.nmz):
f.write(
'{:15.6g} {:15.6g} {:15.6g} #material zone {} data\n'.format(
self.dp[k, 0], self.dp[k, 1],
self.dp[k, 2], k + 1))
# write dataset 10 to 14
if self.ndb > 0:
for k in range(self.ndb):
f.write(self.dstart[k].get_file_entry())
f.write(self.dhc[k].get_file_entry())
f.write(self.dcom[k].get_file_entry())
f.write(self.dz[k].get_file_entry())
f.write(self.nz[k].get_file_entry())
# write dataset 15 and 16
if self.isuboc > 0:
# dataset 15
for i in self.ids15:
f.write('{} '.format(i))
f.write(' #dataset 15\n')
# dataset 16
for k in range(self.isuboc):
t = self.ids16[k, :].copy()  # copy so repeated write_file calls do not keep incrementing ids16
t[0:4] += 1
for i in t:
f.write('{} '.format(i))
f.write(' #dataset 16 isuboc {}\n'.format(k + 1))
# close sub file
f.close()
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
sub : ModflowSub object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> sub = flopy.modflow.ModflowSub.load('test.sub', m)
"""
if model.verbose:
sys.stdout.write('loading sub package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# determine problem dimensions
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
# read dataset 1
if model.verbose:
sys.stdout.write(' loading sub dataset 1\n')
t = line.strip().split()
ipakcb, isuboc, nndb, ndb, nmz, nn = int(t[0]), int(t[1]), int(t[2]), \
int(t[3]), int(t[4]), int(t[5])
ac1, ac2 = float(t[6]), float(t[7])
itmin, idsave, idrest = int(t[8]), int(t[9]), int(t[10])
# if ipakcb > 0:
# ipakcb = 53
# if idsave > 0:
# idsave = 2052
# if idrest > 0:
# ext_unit_dict[2053] = ext_unit_dict.pop(idrest)
# idrest = 2053
ln = None
if nndb > 0:
if model.verbose:
sys.stdout.write(' loading sub dataset 2\n')
ln = np.empty((nndb), dtype=np.int)
ln = read1d(f, ln) - 1
ldn = None
if ndb > 0:
if model.verbose:
sys.stdout.write(' loading sub dataset 3\n')
ldn = np.empty((ndb), dtype=np.int)
ldn = read1d(f, ldn) - 1
rnb = None
if ndb > 0:
if model.verbose:
sys.stdout.write(' loading sub dataset 4\n')
rnb = [0] * ndb
for k in range(ndb):
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'rnb delay bed {}'.format(k + 1),
ext_unit_dict)
rnb[k] = t
hc = None
sfe = None
sfv = None
com = None
if nndb > 0:
hc = [0] * nndb
sfe = [0] * nndb
sfv = [0] * nndb
com = [0] * nndb
for k in range(nndb):
kk = ln[k] + 1
# hc
if model.verbose:
sys.stdout.write(
' loading sub dataset 5 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'hc layer {}'.format(kk),
ext_unit_dict)
hc[k] = t
# sfe
if model.verbose:
sys.stdout.write(
' loading sub dataset 6 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'sfe layer {}'.format(kk),
ext_unit_dict)
sfe[k] = t
# sfv
if model.verbose:
sys.stdout.write(
' loading sub dataset 7 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'sfv layer {}'.format(kk),
ext_unit_dict)
sfv[k] = t
# com
if model.verbose:
sys.stdout.write(
' loading sub dataset 8 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'com layer {}'.format(kk),
ext_unit_dict)
com[k] = t
# dp
dp = None
if ndb > 0:
dp = np.zeros((nmz, 3), dtype=np.float32)
for k in range(nmz):
if model.verbose:
sys.stdout.write(
' loading sub dataset 9 for material zone {}\n'.format(
k + 1))
line = f.readline()
t = line.strip().split()
dp[k, :] = float(t[0]), float(t[1]), float(t[2])
dstart = None
dhc = None
dcom = None
dz = None
nz = None
if ndb > 0:
dstart = [0] * ndb
dhc = [0] * ndb
dcom = [0] * ndb
dz = [0] * ndb
nz = [0] * ndb
for k in range(ndb):
kk = ldn[k] + 1
# dstart
if model.verbose:
sys.stdout.write(
' loading sub dataset 10 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'dstart layer {}'.format(kk),
ext_unit_dict)
dstart[k] = t
# dhc
if model.verbose:
sys.stdout.write(
' loading sub dataset 11 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'dhc layer {}'.format(kk),
ext_unit_dict)
dhc[k] = t
# dcom
if model.verbose:
sys.stdout.write(
' loading sub dataset 12 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'dcom layer {}'.format(kk),
ext_unit_dict)
dcom[k] = t
# dz
if model.verbose:
sys.stdout.write(
' loading sub dataset 13 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'dz layer {}'.format(kk),
ext_unit_dict)
dz[k] = t
# nz
if model.verbose:
sys.stdout.write(
' loading sub dataset 14 for layer {}\n'.format(kk))
t = Util2d.load(f, model, (nrow, ncol), np.int,
'nz layer {}'.format(kk),
ext_unit_dict)
nz[k] = t
ids15 = None
ids16 = None
if isuboc > 0:
# dataset 15
if model.verbose:
sys.stdout.write(
' loading sub dataset 15\n')
ids15 = np.empty(12, dtype=np.int)
ids15 = read1d(f, ids15)
#iu = 1
#for k in range(1, 12, 2):
# model.add_pop_key_list(ids15[k])
# ids15[k] = 2051 + iu # all subsidence data sent to unit 2051
# iu += 1
# dataset 16
ids16 = [0] * isuboc
for k in range(isuboc):
if model.verbose:
sys.stdout.write(
' loading sub dataset 16 for isuboc {}\n'.format(
k + 1))
t = np.empty(17, dtype=np.int)
t = read1d(f, t)
t[0:4] -= 1
ids16[k] = t
model.add_pop_key_list(2051)
# close file
f.close()
# determine specified unit number
unitnumber = None
filenames = [None for x in range(9)]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowSub.ftype())
if ipakcb > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
if idsave > 0:
iu, filenames[2] = \
model.get_ext_dict_attr(ext_unit_dict, unit=idsave)
if isuboc > 0:
ipos = 3
for k in range(1, 12, 2):
unit = ids15[k]
if unit > 0:
iu, filenames[ipos] = \
model.get_ext_dict_attr(ext_unit_dict, unit=unit)
model.add_pop_key_list(unit)
ipos += 1
# create sub instance
sub = ModflowSub(model, ipakcb=ipakcb, isuboc=isuboc, idsave=idsave,
idrest=idrest,
nndb=nndb, ndb=ndb, nmz=nmz, nn=nn, ac1=ac1, ac2=ac2,
itmin=itmin,
ln=ln, ldn=ldn, rnb=rnb,
hc=hc, sfe=sfe, sfv=sfv, com=com, dp=dp,
dstart=dstart, dhc=dhc, dcom=dcom, dz=dz, nz=nz,
ids15=ids15, ids16=ids16, unitnumber=unitnumber,
filenames=filenames)
# return sub instance
return sub
@staticmethod
def ftype():
return 'SUB'
@staticmethod
def defaultunit():
return 32
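# Minimal usage sketch (illustrative values, not a calibrated model; assumes
# a discretization package is defined before the SUB package is added):
if __name__ == '__main__':
    import flopy
    m = flopy.modflow.Modflow(modelname='subtest')
    dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10, nper=1)
    sub = ModflowSub(m, nndb=1, ndb=1, isuboc=1)
    m.write_input()  # writes subtest.sub alongside the other package files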
|
from posixpath import join as urljoin
import requests
from ._meta import __project_link__, __project_name__, __version__
from .models import Category, Page, Post, PostRevision, PostStatus, Tag
class WordPress(object):
def __init__(self, url, verify_ssl=True):
"""
WordPress Library.
Arguments
---------
url : str
The WordPress URL (e.g. https://example.org/).
verify_ssl : bool
Should we verify that the WordPress site is using a good SSL cert.
"""
self.url = self._get_wp_api_url(url)
self.version = 'v2'
self.headers = {
'User-Agent': '{0}/{1} +{2}'.format(
__project_name__,
__version__,
__project_link__
)
}
# Private Methods
def _get_wp_api_url(self, url):
"""
Private function for finding the WP-API URL.
Arguments
---------
url : str
WordPress instance URL.
"""
resp = requests.head(url)
# Search the Links for rel="https://api.w.org/".
wp_api_rel = resp.links.get('https://api.w.org/')
if wp_api_rel:
return wp_api_rel['url']
else:
# TODO: Raise a more specific exception type here.
raise Exception('No https://api.w.org/ link relation found at {0}'.format(url))
def _get(self, endpoint, params={}):
"""
Private function for making GET requests.
Arguments
---------
endpoint : str
WordPress endpoint.
params : dict
HTTP parameters when making the connection.
Returns
-------
dict/list
Returns the data from the endpoint.
"""
url = urljoin(self.url, 'wp', self.version, endpoint)
resp = requests.get(url, params=params, headers=self.headers)
if not resp.status_code == 200:
msg = ('WordPress REST API returned the status code '
'{0}.'.format(resp.status_code))
raise Exception(msg)
return resp.json()
def _post(self, endpoint, data={}, params={}):
"""
Private function for making POST requests.
Arguments
---------
endpoint : str
WordPress endpoint.
data : dict
Data to send.
params : dict
HTTP parameters to use when making the connection.
Returns
-------
dict/list
Returns the data from the endpoint.
"""
url = urljoin(self.url, 'wp', self.version, endpoint)
resp = requests.post(url, data=data, params=params,
headers=self.headers)
if resp.status_code not in (200, 201):  # 201 Created is returned for new resources
msg = ('WordPress REST API returned the status code '
'{0}.'.format(resp.status_code))
raise Exception(msg)
return resp.json()
def _delete(self, endpoint, params={}):
"""
Private function for making DELETE requests.
Arguments
---------
endpoint : str
WordPress endpoint.
params : dict
HTTP parameters when making the connection.
Returns
-------
dict/list
Returns the data from the endpoint.
"""
url = urljoin(self.url, 'wp', self.version, endpoint)
resp = requests.delete(url, params=params, headers=self.headers)
if not resp.status_code == 200:
msg = ('WordPress REST API returned the status code '
'{0}.'.format(resp.status_code))
raise Exception(msg)
return resp.json()
# Post Methods
def list_posts(self, context='view', page=1, per_page=10, search=None,
after=None, author=None, author_exclude=None, before=None,
exclude=None, include=None, offset=None, order='desc',
orderby='date', slug=None, status='publish',
categories=None, categories_exclude=None, tags=None,
tags_exclude=None, sticky=None):
"""
Get a list of posts.
Arguments
---------
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
page : int
Current page of the collection.
per_page : int
Maximum number of items to be returned in result set.
search : str
Limit results to those matching a string.
after : datetime
Limit response to posts published after a given date.
author : int
Limit result set to posts assigned to specific authors.
author_exclude : int
Ensure result set excludes posts assigned to specific authors.
before : datetime
Limit response to posts published before a given date.
exclude : int
Ensure result set excludes specific IDs.
include : int
Limit result set to specific IDs.
offset : int
Offset the result set by a specific number of items.
order : str
Order sort attribute ascending or descending.
Default: desc
One of: asc, desc
orderby : str
Sort collection by object attribute.
Default: date
One of: date, relevance, id, include, title, slug
slug : str
Limit result set to posts with one or more specific slugs.
status : str
Limit result set to posts assigned one or more statuses.
Default: publish
One of: publish, future, draft, pending, private
categories : str
Limit result set to all items that have the specified term assigned
in the categories taxonomy.
categories_exclude : str
Limit result set to all items except those that have the specified
term assigned in the categories taxonomy.
tags : str
Limit result set to all items that have the specified term assigned
in the tags taxonomy.
tags_exclude : str
Limit result set to all items except those that have the specified
term assigned in the tags taxonomy.
sticky : bool
Limit result set to items that are sticky.
Returns
-------
list
A list of wordpress.models.Post.
"""
if context not in ['view', 'embed', 'edit']:
raise ValueError('The context {0} is not allowed.'.format(context))
if after:
after = after.isoformat()
if before:
before = before.isoformat()
if order not in ['asc', 'desc']:
raise ValueError("You can't order {0}.".format(order))
if orderby not in ['date', 'relevance', 'id', 'include', 'title',
'slug']:
raise ValueError("You can't order by {0}.".format(orderby))
posts = self._get('posts', params=locals())
return Post.parse_list(self, posts)
def get_post(self, pk, context='view', password=None):
"""
Retrieve a Post.
Arguments
---------
pk : int
The post id you want to retrieve.
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
password : str
The password for the post if it is password protected.
Returns
-------
wordpress.models.Post
"""
post = self._get('posts/{0}'.format(pk), params=locals())
return Post.parse(self, post)
def create_post(self, date=None, date_gmt=None, slug=None, status=None,
password=None, title=None, content=None, author=None,
excerpt=None, featured_media=None, comment_status=None,
ping_status=None, format=None, meta=None, sticky=None,
template=None, categories=None, tags=None,
liveblog_likes=None):
"""
Create a Post.
Arguments
---------
date : datetime
The date the object was published, in the site’s timezone.
date_gmt : datetime
The date the object was published, as GMT.
slug : str
An alphanumeric identifier for the object unique to its type.
status : str
A named status for the object.
One of: publish, future, draft, pending, private
password : str
A password to protect access to the content and excerpt.
title : str
The title for the object.
content : str
The content for the object.
author : id
The ID for the author of the object.
excerpt : str
The excerpt for the object.
featured_media : int
The ID of the featured media for the object.
comment_status : str
Whether or not comments are open on the object.
One of: open, closed
ping_status : str
Whether or not the object can be pinged.
One of: open, closed
format : str
The format for the object.
One of: standard
meta : dict
Meta fields.
sticky : bool
Whether or not the object should be treated as sticky.
template : str
The theme file to use to display the object.
One of:
categories : str
The terms assigned to the object in the category taxonomy.
tags : str
The terms assigned to the object in the post_tag taxonomy.
liveblog_likes : str
The number of Liveblog Likes the post has.
"""
post = self._post('posts', data=locals())
return Post.parse(self, post)
def update_post(self, pk, date=None, date_gmt=None, slug=None, status=None,
password=None, title=None, content=None, author=None,
excerpt=None, featured_media=None, comment_status=None,
ping_status=None, format=None, meta=None, sticky=None,
template=None, categories=None, tags=None,
liveblog_likes=None):
"""
Update a Post.
Arguments
---------
pk : int
The ID of the post you want to update.
date : datetime
The date the object was published, in the site’s timezone.
date_gmt : datetime
The date the object was published, as GMT.
slug : str
An alphanumeric identifier for the object unique to its type.
status : str
A named status for the object.
One of: publish, future, draft, pending, private
password : str
A password to protect access to the content and excerpt.
title : str
The title for the object.
content : str
The content for the object.
author : id
The ID for the author of the object.
excerpt : str
The excerpt for the object.
featured_media : int
The ID of the featured media for the object.
comment_status : str
Whether or not comments are open on the object.
One of: open, closed
ping_status : str
Whether or not the object can be pinged.
One of: open, closed
format : str
The format for the object.
One of: standard
meta : dict
Meta fields.
sticky : bool
Whether or not the object should be treated as sticky.
template : str
The theme file to use to display the object.
One of:
categories : str
The terms assigned to the object in the category taxonomy.
tags : str
The terms assigned to the object in the post_tag taxonomy.
liveblog_likes : str
The number of Liveblog Likes the post has.
"""
post = self._post('posts/{0}'.format(pk), data=locals())
return Post.parse(self, post)
def delete_post(self, pk, force=False):
"""
Delete a Post.
Arguments
---------
pk : int
The post id you want to delete.
force : bool
Whether to bypass trash and force deletion.
"""
# _delete already raises on non-200 responses, so reaching this point means success
self._delete('posts/{0}'.format(pk), params=locals())
return True
# Post Revision Methods
def list_post_revisions(self, parent, context='view'):
"""
List Post Revisions.
Arguments
---------
parent : int/wordpress.models.Post/wordpress.models.Page
The id for the parent of the object.
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view
Returns
-------
list
A list of wordpress.models.PostRevision.
"""
if isinstance(parent, int):
parent_id = parent
elif isinstance(parent, (Page, Post)):
parent_id = parent.id
revisions = self._get('posts/{0}/revisions'.format(parent_id),
params=locals())
return PostRevision.parse_list(self, revisions)
def get_post_revision(self, parent, pk, context='view'):
"""
Get a Post Revision.
Arguments
---------
parent : int/wordpress.models.Post/wordpress.models.Page
The id for the parent of the object.
pk : int
Unique identifier for the object.
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view
Returns
-------
wordpress.models.PostRevision
"""
if isinstance(parent, int):
parent_id = parent
elif isinstance(parent, (Page, Post)):
parent_id = parent.id
revision = self._get('posts/{0}/revisions/{1}'.format(parent_id, pk),
params=locals())
return PostRevision.parse(self, revision)
def delete_post_revision(self, parent, pk):
"""
Delete Post Revision.
Arguments
---------
parent : int/wordpress.models.Post/wordpress.models.Page
The id for the parent of the object.
pk : int
Unique identifier for the object.
Returns
-------
dict
"""
if isinstance(parent, int):
parent_id = parent
elif isinstance(parent, (Page, Post)):
parent_id = parent.id
deleted = self._delete('posts/{0}/revisions/{1}'.format(parent_id, pk))
return PostRevision.parse(self, deleted)
# Category Methods
def list_categories(self, context='view', page=1, per_page=10, search=None,
exclude=None, include=None, order='asc',
orderby='name', hide_empty=False, parent=None,
post=None, slug=None):
"""
Get a list of categories.
Arguments
---------
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
page : int
Current page of the collection.
Default: 1
per_page : int
Maximum number of items to be returned in result set.
Default: 10
search : str
Limit results to those matching a string.
exclude : int
Ensure result set excludes specific IDs.
Default:
include : int
Limit result set to specific IDs.
Default:
order : str
Order sort attribute ascending or descending.
Default: asc
One of: asc, desc
orderby : str
Sort collection by term attribute.
Default: name
One of: id, include, name, slug, term_group, description, count
hide_empty : bool
Whether to hide terms not assigned to any posts.
parent : int/wordpress.models.Category
Limit result set to terms assigned to a specific parent.
post : int/wordpress.models.Post
Limit result set to terms assigned to a specific post.
slug : str
Limit result set to terms with a specific slug.
Returns
-------
list
A list of wordpress.models.Category.
"""
if context not in ['view', 'embed', 'edit']:
raise ValueError('The context {0} is not allowed.'.format(context))
if order not in ['asc', 'desc']:
raise ValueError('The order {0} is not allowed.'.format(order))
if orderby not in ['id', 'include', 'name', 'slug', 'term_group',
'description', 'count']:
raise ValueError('The order by {0} is not '
'allowed.'.format(orderby))
if isinstance(parent, Category):
parent_id = parent.id
elif isinstance(parent, int):
parent_id = parent
if isinstance(post, Post):
post_id = post.id
elif isinstance(post, int):
post_id = post
category_list = self._get('categories', params=locals())
return Category.parse_list(self, category_list)
def get_category(self, pk, context='view'):
"""
Retrieve a Category.
Arguments
---------
pk : int
The category id you want to retrieve.
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
Returns
-------
wordpress.models.Category
"""
if context not in ['view', 'embed', 'edit']:
raise ValueError('The context {0} is not allowed.'.format(context))
category = self._get('categories/{0}'.format(pk), params=locals())
return Category.parse(self, category)
# Tag Methods
def list_tags(self, context='view', page=1, per_page=10, search=None,
include=[], offset=0, order='asc', orderby='name',
hide_empty=False, post=None, slug=None):
"""
Get a list of tags.
Arguments
---------
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
page : int
Current page of the collection.
Default: 1
per_page : int
Maximum number of items to be returned in result set.
Default: 10
search : str
Limit results to those matching a string.
exclude : str
Ensure result set excludes specific IDs.
Default:
include : list
Limit result set to specific IDs.
Default:
offset : int
Offset the result set by a specific number of items.
order : str
Order sort attribute ascending or descending.
Default: asc
One of: asc, desc
orderby : str
Sort collection by term attribute.
Default: name
One of: id, include, name, slug, term_group, description, count
hide_empty : bool
Whether to hide terms not assigned to any posts.
post : int
Limit result set to terms assigned to a specific post.
slug : str
Limit result set to terms with a specific slug.
Returns
-------
list
A list of wordpress.models.Tag.
"""
if context not in ['view', 'embed', 'edit']:
raise ValueError('The context {0} is not allowed.'.format(context))
if order not in ['asc', 'desc']:
raise ValueError("You can't order {0}.".format(order))
if orderby not in ['id', 'include', 'name', 'slug', 'term_group',
'description', 'count']:
raise ValueError("You can't order by {0}.".format(orderby))
tag_list = self._get('tags', params=locals())
return Tag.parse_list(self, tag_list)
def get_tag(self, pk, context='view'):
"""
Retrieve a Tag.
Arguments
---------
pk : int
The tag id you want to retrieve.
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
Returns
-------
wordpress.models.Tag
"""
if context not in ['view', 'embed', 'edit']:
raise ValueError('The context {0} is not allowed.'.format(context))
tag = self._get('tags/{0}'.format(pk), params=locals())
return Tag.parse(self, tag)
def create_tag(self, **kwargs):
raise NotImplementedError
def update_tag(self, **kwargs):
raise NotImplementedError
def delete_tag(self, **kwargs):
raise NotImplementedError
# Page Methods
def list_pages(self, **kwargs):
raise NotImplementedError
def get_page(self, **kwargs):
raise NotImplementedError
def create_page(self, **kwargs):
raise NotImplementedError
def update_page(self, **kwargs):
raise NotImplementedError
def delete_page(self, **kwargs):
raise NotImplementedError
# Comment Methods
def list_comments(self, **kwargs):
raise NotImplementedError
def get_comment(self, **kwargs):
raise NotImplementedError
def create_comment(self, **kwargs):
raise NotImplementedError
def update_comment(self, **kwargs):
raise NotImplementedError
def delete_comment(self, **kwargs):
raise NotImplementedError
# Taxonomy Methods
def list_taxonomies(self, **kwargs):
raise NotImplementedError
def get_taxonomy(self, **kwargs):
raise NotImplementedError
# Media Methods
def list_media(self, **kwargs):
raise NotImplementedError
def get_media(self, **kwargs):
raise NotImplementedError
def create_media(self, **kwargs):
raise NotImplementedError
def update_media(self, **kwargs):
raise NotImplementedError
def delete_media(self, **kwargs):
raise NotImplementedError
# User Methods
def list_users(self, **kwargs):
raise NotImplementedError
def get_user(self, **kwargs):
raise NotImplementedError
def create_user(self, **kwargs):
raise NotImplementedError
def update_user(self, **kwargs):
raise NotImplementedError
def delete_user(self, **kwargs):
raise NotImplementedError
# Post Type Methods
def list_post_types(self, **kwargs):
raise NotImplementedError
def get_post_type(self, **kwargs):
raise NotImplementedError
# Post Status Methods
def list_post_statuses(self, context='view'):
"""
Get a list of post statuses.
Arguments
---------
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
Returns
-------
list
A list of wordpress.models.PostStatus
"""
if context not in ['view', 'embed', 'edit']:
raise ValueError('The context {0} is not allowed.'.format(context))
post_status_list = self._get('statuses', params=locals())
return PostStatus.parse_list(self, post_status_list)
def get_post_status(self, slug, context='view'):
"""
Retrieve a Post status.
Arguments
---------
slug : str
The name of the status.
context : str
Scope under which the request is made; determines fields present in
response.
Default: view
One of: view, embed, edit
Returns
-------
wordpress.models.PostStatus
"""
if context not in ['view', 'embed', 'edit']:
raise ValueError('The context {0} is not allowed.'.format(context))
post_status = self._get('statuses/{0}'.format(slug), params=locals())
return PostStatus.parse(self, post_status)
# Setting Methods
def update_setting(self, title=None, description=None, url=None,
email=None, timezone=None, date_format=None,
time_format=None, start_of_week=None, language=None,
use_smilies=None, default_category=None,
default_post_format=None, posts_per_page=None):
"""
Update WordPress settings.
Arguments
---------
title : str
Site title.
description : str
Site description.
url : str
Site URL.
email : str
This address is used for admin purposes. If you change this we will
send you an email at your new address to confirm it. The new
address will not become active until confirmed.
timezone : str
A city in the same timezone as you.
date_format : str
A date format for all date strings.
time_format : str
A time format for all time strings.
start_of_week : int
A day number of the week that the week should start on.
language : str
WordPress locale code.
use_smilies : bool
Convert emoticons like :-) and :-P to graphics on display.
default_category : int
Default category.
default_post_format : str
Default post format.
posts_per_page : int
Blog pages show at most.
"""
return self._post('settings', data=locals())
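# Usage sketch (https://example.org/ is a placeholder; requires a reachable
# WordPress site that advertises its REST API via the https://api.w.org/
# link relation):
if __name__ == '__main__':
    wp = WordPress('https://example.org/')
    for post in wp.list_posts(per_page=5):
        print(post)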
|
import os
import configparser
import functools
from click.globals import get_current_context
import click
import keyring
from ..api import APIClient, API_URL
from ..core import MWDB
from ..exc import MWDBError
class MwdbAuthenticator(object):
CONFIG_PATH = os.path.expanduser("~/.mwdb")
CONFIG_FIELDS = [
"username",
"api_url",
"verify_ssl",
"obey_ratelimiter"
]
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read(['mwdb.cfg', self.CONFIG_PATH])
def get_authenticated_mwdb(self, api_url=None):
"""
Gets pre-authenticated MWDB object based on local configuration
:param api_url: Alternative API url provided explicitly by user
:rtype: MWDB
"""
api_url = api_url or self.config.get("mwdb", "api_url", fallback=API_URL)
api = APIClient(api_url=api_url,
verify_ssl=self.config.getboolean("mwdb", "verify_ssl", fallback=True),
obey_ratelimiter=self.config.getboolean("mwdb", "obey_ratelimiter", fallback=True))
username = self.config.get("mwdb", "username", fallback=None)
if username is not None:
api_key = keyring.get_password("mwdb-apikey", username)
if api_key is not None:
api.set_api_key(api_key)
else:
password = keyring.get_password("mwdb", username)
api.login(username, password, warn=False)
mwdb = MWDB(api=api)
# If not authenticated: ask for credentials
if mwdb.api.api_key is None:
mwdb.login(warn=False)
return mwdb
def store_login(self, username, password, api_key, api_url=None):
"""
Sets credentials into user configuration file and keyring
:param username: Username to store
:param password: Password to store
:param api_key: API key to store
:param api_url: Alternative API url to store
"""
if api_key is not None:
api = APIClient(api_key=api_key)
username = api.logged_user
self.set_config("username", username)
keyring.set_password("mwdb-apikey", username, api_key)
else:
self.set_config("username", username)
keyring.set_password("mwdb", username, password)
if api_url is not None:
self.set_config("api_url", api_url)
def reset_login(self):
"""
Removes credentials from user configuration file and keyring
"""
username = self.config.get("mwdb", "username", fallback=None)
if username is None:
return
self.set_config("username", None)
if keyring.get_password("mwdb-apikey", username):
keyring.delete_password("mwdb-apikey", username)
if keyring.get_password("mwdb", username):
keyring.delete_password("mwdb", username)
def set_config(self, field, value):
"""
Sets provided field in user configuration file
:param field: Field name
:param value: New value or None if field should be erased from configuration
"""
if field not in self.CONFIG_FIELDS:
raise ValueError("Incorrect field '{}'".format(field))
if not self.config.has_section("mwdb"):
self.config.add_section("mwdb")
if value is not None:
self.config.set("mwdb", field, value)
elif self.config.has_option("mwdb", field):
self.config.remove_option("mwdb", field)
with open(self.CONFIG_PATH, "w") as f:
self.config.write(f)
def pass_mwdb(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
ctx = get_current_context()
authenticator = MwdbAuthenticator()
mwdb = authenticator.get_authenticated_mwdb(ctx.obj.get("api_url", None))
try:
return fn(mwdb=mwdb, *args, **kwargs)
except MWDBError as error:
click.echo("{}: {}".format(error.__class__.__name__, error.args[0]), err=True)
ctx.abort()
return wrapper
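# Usage sketch: pass_mwdb injects an authenticated MWDB client into a click
# command. The command below is illustrative, not part of this module:
#
#     @click.command()
#     @pass_mwdb
#     def whoami(mwdb):
#         click.echo(mwdb.api.logged_user)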
|
<filename>batch_processing_solution.py
# coding: utf-8
# Image Analysis with Python - Solution for Batch Processing
# The following is the script version of the tutorial's solution pipeline, where all the code
# has been wrapped in a single function that can be called many times for many images.
# Please refer to the jupyter notebooks ('image_analysis_tutorial[_solutions].ipynb') for
# more information, including detailed comments on every step.
## Importing Modules & Packages
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as ndi
## Defining the pipeline function
def run_pipeline(dirpath, filename):
"""Run 2D single-cell segmentation pipeline optimized for membrane-labeled
spinning-disk confocal images of membrane markers in zebrafish early embryos.
Parameters
----------
dirpath : string
Path to the directory containing the input image.
filename : string
Name of the input file, including file ending (should be .tif).
Returns
-------
clean_ws : 3D numpy array of same shape as input image
The single-cell segmentation. Every cell is labeled with a unique
integer ID. Background is 0.
results : dict
A number of measurements extracted from each cell. The dict keys
name the type of measurement. The dict values are lists containing
the measured values. The order of all lists is the same and relates
to the segmentation IDs through the list in results['cell_id'].
"""
## Importing & Handling Image Data
from os.path import join
filepath = join(dirpath, filename)
from skimage.io import imread
img = imread(filepath)
## Preprocessing
sigma = 3
img_smooth = ndi.filters.gaussian_filter(img, sigma)
## Adaptive Thresholding
i = 31
SE = (np.mgrid[:i,:i][0] - np.floor(i/2))**2 + (np.mgrid[:i,:i][1] - np.floor(i/2))**2 <= np.floor(i/2)**2
from skimage.filters import rank
bg = rank.mean(img_smooth, selem=SE)
mem = img_smooth > bg
## Improving Masks with Binary Morphology
mem_holefilled = ~ndi.binary_fill_holes(~mem) # Short form
i = 15
SE = (np.mgrid[:i,:i][0] - np.floor(i/2))**2 + (np.mgrid[:i,:i][1] - np.floor(i/2))**2 <= np.floor(i/2)**2
pad_size = i+1
mem_padded = np.pad(mem_holefilled, pad_size, mode='reflect')
mem_final = ndi.binary_closing(mem_padded, structure=SE)
mem_final = mem_final[pad_size:-pad_size, pad_size:-pad_size]
## Cell Segmentation by Seeding & Expansion
### Seeding by Distance Transform
dist_trans = ndi.distance_transform_edt(~mem_final)
dist_trans_smooth = ndi.filters.gaussian_filter(dist_trans, sigma=5)
from skimage.feature import peak_local_max
seeds = peak_local_max(dist_trans_smooth, indices=False, min_distance=10)
seeds_labeled = ndi.label(seeds)[0]
### Expansion by Watershed
from skimage.morphology import watershed
ws = watershed(img_smooth, seeds_labeled)
## Postprocessing: Removing Cells at the Image Border
    border_mask = np.zeros(ws.shape, dtype=bool)
border_mask = ndi.binary_dilation(border_mask, border_value=1)
clean_ws = np.copy(ws)
for cell_ID in np.unique(ws):
cell_mask = ws==cell_ID
cell_border_overlap = np.logical_and(cell_mask, border_mask)
total_overlap_pixels = np.sum(cell_border_overlap)
if total_overlap_pixels > 0:
clean_ws[cell_mask] = 0
for new_ID, cell_ID in enumerate(np.unique(clean_ws)[1:]):
clean_ws[clean_ws==cell_ID] = new_ID+1
## Identifying Cell Edges
edges = np.zeros_like(clean_ws)
for cell_ID in np.unique(clean_ws)[1:]:
cell_mask = clean_ws==cell_ID
eroded_cell_mask = ndi.binary_erosion(cell_mask, iterations=1)
edge_mask = np.logical_xor(cell_mask, eroded_cell_mask)
edges[edge_mask] = cell_ID
## Extracting Quantitative Measurements
results = {"cell_id" : [],
"int_mean" : [],
"int_mem_mean" : [],
"cell_area" : [],
"cell_edge" : []}
for cell_id in np.unique(clean_ws)[1:]:
cell_mask = clean_ws==cell_id
edge_mask = edges==cell_id
results["cell_id"].append(cell_id)
results["int_mean"].append(np.mean(img[cell_mask]))
results["int_mem_mean"].append(np.mean(img[edge_mask]))
results["cell_area"].append(np.sum(cell_mask))
results["cell_edge"].append(np.sum(edge_mask))
## Returning the results
return clean_ws, results
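## Example batch driver (a minimal sketch; the 'data' directory and the .tif
## filename filter are assumptions about how the input images are organized)
if __name__ == '__main__':
    from os import listdir
    for fname in sorted(listdir('data')):
        if fname.endswith('.tif'):
            seg, res = run_pipeline('data', fname)
            print(fname, '-> segmented', len(res['cell_id']), 'cells')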
|
""" Allows people in Dublin to ask the Google assistant when their bus is coming to a particular bus stop.
This module provides a web service using the falcon framework which receives and responds to requests from
Google's Dialogflow. The actual information is obtained by doing some good old scrapin' of the RTPI.ie site
Ireland, March 2020.
"""
import json
import logging
import re
from enum import Enum, IntEnum
import messages as msgs
import pandas
import requests
from falcon import API, MEDIA_JSON, HTTP_200
from pydialogflow_fulfillment import DialogflowResponse, DialogflowRequest, SimpleResponse
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.DEBUG)
BUS_SERVICE_API = BUS_APP = API()
BUS_STOP_ROUTE = '/busstop'
class APIException(Exception):
""" Generic Exception for this API"""
class Availability(Enum):
"""Represents the state of the bus availability"""
ONE_BUS = 1
MANY_BUSES = 2
NO_BUSES = 3
class Constants(IntEnum):
"""Limits in this module"""
MAX_BUSES = 5
class BusStopRequest():
"""Represents a request to this API"""
def on_post(self, req, resp):
"""Handles POST requests"""
try:
LOGGER.info('new request received')
json_request = json.load(req.bounded_stream)
google_request = DialogflowRequest(json.dumps(json_request))
bus_stop = self.get_bus_stop(google_request)
if bus_stop:
LOGGER.info("Resolved bus stop: {}".format(bus_stop))
query_response = self.query_bus_stop(bus_stop)
bus_times_response_state = self.deserialize_response(query_response)
resp.body = BusStopResponse(bus_times_response_state).provide_good_response()
resp.content_type = MEDIA_JSON
resp.status = HTTP_200
elif google_request.get_action() == 'call_busstop_api':
                LOGGER.info('bus stop request did not contain valid stop parameter')
resp.body = BusStopResponse().request_stop_response()
resp.content_type = MEDIA_JSON
resp.status = HTTP_200
else:
raise APIException()
except Exception as error:
LOGGER.error('There was an error processing this request')
LOGGER.error(str(error))
resp.body = BusStopResponse().provide_error_response()
resp.content_type = MEDIA_JSON
resp.status = HTTP_200
def query_bus_stop(self, stop_number):
""" Build full query string to send to RTPI site"""
return self.send_request(self.get_rtpi_site() + '?stopRef=' + str(stop_number))
@staticmethod
def get_bus_stop(google_request):
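        """Extract and normalize the bus stop number from the Dialogflow
        request parameters; returns None if no usable stop was supplied."""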
bus_stop = None
stop_param = '' if 'stop' not in google_request.get_parameters() else \
google_request.get_parameters().get('stop')
if not stop_param:
return None
stop_param = stop_param.replace('/', '') # convert 24/72 to 2472
        match_numbers = re.findall(r'\d+', stop_param)
if google_request.get_action() == 'call_busstop_api' and stop_param and match_numbers:
LOGGER.info('stop parameter is {}'.format(stop_param))
if ' to ' in stop_param:
# This converts '70 to 94' to '7294'
index = stop_param.index(' to ')
stop_param = stop_param[:index - 1] + '2' + stop_param[index + 4:]
else:
stop_param = match_numbers[0] # This will grab the digits in the string
stop_param = stop_param.replace(' ', '') # ensure we don't have spaces
bus_stop = int(stop_param.split('.', 1)[0])
return bus_stop
@staticmethod
def get_rtpi_site():
""" Get the actual RTPI site"""
return 'http://rtpi.ie/Text/WebDisplay.aspx'
@staticmethod
def send_request(full_query):
"""
        Send a request to the RTPI site using the full query URL
        for the stop we are looking for
"""
return requests.get(full_query)
@staticmethod
def deserialize_response(raw_response):
"""
Parse the response into a Pandas dataframe
"""
body = raw_response.content.decode('utf-8')
tables = pandas.read_html(body) # Returns list of all tables on page
html_table = tables[0] # Grab the first table we are interested in
service_times = html_table[['Service', 'Time']]
return service_times
class BusStopResponse():
"""
Represents a response to the client from this API
"""
def __init__(self, bus_response=None):
self.bus_response = bus_response
self.availability = None
self.response_message = ''
self.set_availability()
self.set_message()
def request_stop_response(self):
return self.create_google_response(msgs.get_greeting_with_question(), True)
def provide_error_response(self):
return self.create_google_response(msgs.get_error_message() + msgs.get_goodbye_message())
def provide_good_response(self):
final_response = self.response_message + msgs.get_goodbye_message()
LOGGER.info('Setting response to \n {}'.format(final_response))
return self.create_google_response(final_response)
def create_google_response(self, message, expect_user_response=False):
""" Build the dialogflow response in the correct format"""
google_response = DialogflowResponse()
google_response.expect_user_response = expect_user_response
google_response.add(SimpleResponse(message, msgs.text_to_ssml(message)))
return google_response.get_final_response()
def set_message(self):
"""Sets the correct message"""
if self.availability == Availability.MANY_BUSES:
self.response_message = msgs.get_many_buses_initial_greeting()
elif self.availability == Availability.ONE_BUS:
self.response_message = msgs.get_single_bus_message_initial_greeting()
else:
self.response_message = msgs.get_random_message('no_buses')
if self.availability == Availability.MANY_BUSES or self.availability == Availability.ONE_BUS:
LOGGER.info('Buses are available. Building message')
bus_details_message = '\n '.join(self.get_incoming_buses_message(self.bus_response))
self.response_message = self.response_message + bus_details_message
def set_availability(self):
""" Set the type of bus availability based on response from RTPI """
try:
if self.bus_response.Service.size > 1:
self.availability = Availability.MANY_BUSES
if self.bus_response.Service.size > Constants.MAX_BUSES:
self.bus_response = self.bus_response.head(Constants.MAX_BUSES)
elif self.bus_response.Service.size == 1:
self.availability = Availability.ONE_BUS
else:
self.availability = Availability.NO_BUSES
except Exception:
self.availability = Availability.NO_BUSES
@staticmethod
def get_incoming_buses_message(bus_response):
""" Gets a user friendly message with bus service information from source system"""
def is_time(time_value):
"""
Check if the reply had a time-like value such as 22:05
"""
return ":" in str(time_value)
def is_due(time_value):
"""
            Check if the value is 'due'
"""
return str(time_value).lower() == 'due'
def prepare_message(service_time):
"""
Prepare a user-friendly message
"""
message = str(service_time[0]) + ' '
if is_due(service_time[1]):
message += ' is due'
elif is_time(service_time[1]):
message += ' is coming at ' + service_time[1]
else:
message += service_time[1].replace('Mins', 'minutes')
return message
resp = bus_response
service_time_message = [prepare_message((service, time))
for service, time in zip(resp['Service'], resp['Time'])]
return service_time_message
BUS_APP.add_route(BUS_STOP_ROUTE, BusStopRequest())
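# The WSGI app can be served with any WSGI server, e.g. (module name here is
# an assumption): gunicorn busstop_service:BUS_APP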
|
# -*- coding: utf-8 -*-
###########
# IMPORTS #
###########
# Libraries
import numpy as _np
import numpy.testing as _npt
import pytest as _pt
# Internal
from pydtmc import (
MarkovChain as _MarkovChain
)
#########
# TESTS #
#########
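# Note: 'p' (the transition matrix) and the expected-value arguments of the
# tests below are assumed to be injected as parametrized pytest fixtures
# from an external conftest.py that is not part of this file.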
def test_absorption_probabilities(p, absorption_probabilities):
mc = _MarkovChain(p)
actual = mc.absorption_probabilities()
expected = absorption_probabilities
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
def test_committor_probabilities(p, states1, states2, value_backward, value_forward):
mc = _MarkovChain(p)
actual = mc.committor_probabilities('backward', states1, states2)
expected = value_backward
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
actual = mc.committor_probabilities('forward', states1, states2)
expected = value_forward
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
def test_expected_rewards(p, steps, rewards, value):
mc = _MarkovChain(p)
actual = mc.expected_rewards(steps, rewards)
expected = _np.asarray(value)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_expected_transitions(p, steps, initial_distribution, value):
mc = _MarkovChain(p)
actual = mc.expected_transitions(steps, initial_distribution)
expected = _np.asarray(value)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_first_passage_probabilities(p, steps, initial_state, first_passage_states, value):
mc = _MarkovChain(p)
actual = mc.first_passage_probabilities(steps, initial_state, first_passage_states)
expected = _np.asarray(value)
if first_passage_states is not None:
assert actual.size == steps
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_first_passage_reward(p, steps, initial_state, first_passage_states, rewards, value):
mc = _MarkovChain(p)
if mc.size <= 2:
        _pt.skip('Markov chain size is less than or equal to 2.')
else:
actual = mc.first_passage_reward(steps, initial_state, first_passage_states, rewards)
expected = value
assert _np.isclose(actual, expected)
def test_hitting_probabilities(p, targets, value):
mc = _MarkovChain(p)
actual = mc.hitting_probabilities(targets)
expected = _np.asarray(value)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
if mc.is_irreducible:
expected = _np.ones(mc.size, dtype=float)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_hitting_times(p, targets, value):
mc = _MarkovChain(p)
actual = mc.hitting_times(targets)
expected = _np.asarray(value)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_mean_first_passage_times_between(p, origins, targets, value):
mc = _MarkovChain(p)
actual = mc.mean_first_passage_times_between(origins, targets)
expected = value
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
def test_mean_first_passage_times_to(p, targets, value):
mc = _MarkovChain(p)
actual = mc.mean_first_passage_times_to(targets)
expected = value
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
if targets is None:
expected = _np.dot(mc.p, expected) + _np.ones((mc.size, mc.size), dtype=float) - _np.diag(mc.mean_recurrence_times())
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
def test_mean_absorption_times(p, mean_absorption_times):
mc = _MarkovChain(p)
actual = mc.mean_absorption_times()
expected = mean_absorption_times
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
if mc.is_absorbing and len(mc.transient_states) > 0:
actual = actual.size
expected = mc.size - len(mc.absorbing_states)
assert actual == expected
def test_mean_number_visits(p, mean_number_visits):
mc = _MarkovChain(p)
actual = mc.mean_number_visits()
expected = _np.asarray(mean_number_visits)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_mean_recurrence_times(p, mean_recurrence_times):
mc = _MarkovChain(p)
actual = mc.mean_recurrence_times()
expected = mean_recurrence_times
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
if mc.is_ergodic:
actual = _np.nan_to_num(actual**-1.0)
expected = _np.dot(actual, mc.p)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_mixing_time(p, initial_distribution, jump, cutoff_type, value):
mc = _MarkovChain(p)
actual = mc.mixing_time(initial_distribution, jump, cutoff_type)
expected = value
assert actual == expected
def test_sensitivity(p, state, value):
mc = _MarkovChain(p)
actual = mc.sensitivity(state)
expected = value
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
def test_time_correlations(p, walk1, walk2, time_points, value):
mc = _MarkovChain(p)
actual = _np.asarray(mc.time_correlations(walk1, walk2, time_points))
expected = value
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
def test_time_relaxations(p, walk, initial_distribution, time_points, value):
mc = _MarkovChain(p)
actual = _np.asarray(mc.time_relaxations(walk, initial_distribution, time_points))
expected = value
if actual is not None and expected is not None:
expected = _np.asarray(expected)
_npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
|
<gh_stars>10-100
from sys import argv
import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from tensorflow import ConfigProto, Session
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dropout, Dense, Input, Add, Activation, AlphaDropout
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import binary_crossentropy, categorical_crossentropy
from tensorflow.keras.utils import multi_gpu_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
TRAIN_FILE = argv[1]
TEST_FILE = argv[2]
CLASS_OPTIONS = argv[3].split(',')
NUM_CORES = argv[4]
VERBOSE = argv[5] == 'true'
ALGORITHM_ARGS = argv[6].split(";")
LAYER_WIDTH = int(ALGORITHM_ARGS[0])
NUM_LAYERS = int(ALGORITHM_ARGS[1])
DROPOUT_RATE = float(ALGORITHM_ARGS[2])
REGULARIZATION = float(ALGORITHM_ARGS[3])
ACTIVATION = ALGORITHM_ARGS[4]
LEARNING_RATE = float(ALGORITHM_ARGS[5])
EPOCHS = int(ALGORITHM_ARGS[6])
# KEY_FILE = ALGORITHM_ARGS[7]
train_df = pd.read_csv(TRAIN_FILE, sep='\t', index_col=0)
x_train = train_df.drop('Class', axis=1).values
y_train = np.array([CLASS_OPTIONS.index(str(y[0])) for y in train_df.loc[:, ["Class"]].values.tolist()])
y_train = y_train.reshape(-1, 1)
y_train = OneHotEncoder(categories='auto').fit_transform(y_train).toarray()
x_test = pd.read_csv(TEST_FILE, sep='\t', index_col=0)
# y_test = pd.read_csv(KEY_FILE, sep='\t', index_col=0)
# y_test = np.array([CLASS_OPTIONS.index(str(y[0])) for y in y_test.loc[:, ["Class"]].values.tolist()])
# y_test = y_test.reshape(-1, 1)
# y_test = OneHotEncoder().fit_transform(y_test).toarray()
def gpu_setup():
cfg = ConfigProto()
cfg.gpu_options.allow_growth = True
K.set_session(Session(config=cfg))
def resnet(x, y, test):
dropout = AlphaDropout if ACTIVATION == 'snn' else Dropout
n_inputs = x.shape[1]
n_outputs = y.shape[1]
input_layer = Input(shape=(n_inputs,), name='input')
input = Dense(LAYER_WIDTH,
kernel_regularizer=l2(REGULARIZATION),
kernel_initializer='lecun_normal',
bias_initializer='zeros',
activation=ACTIVATION,
name='reinjected_input')(input_layer)
layer = None
for i in range(NUM_LAYERS):
layer = Dense(LAYER_WIDTH,
kernel_regularizer=l2(REGULARIZATION),
kernel_initializer='lecun_normal',
bias_initializer='zeros',
activation=ACTIVATION,
name='dense{}_1'.format(i + 1))(input if i == 0 else layer)
layer = dropout(DROPOUT_RATE, name='dropout{}_1'.format(i + 1))(layer)
layer = Dense(LAYER_WIDTH,
kernel_regularizer=l2(REGULARIZATION),
kernel_initializer='lecun_normal',
bias_initializer='zeros',
name='dense{}_2'.format(i + 1))(layer)
layer = Add(name='reinjection_{}'.format(i + 1))([input, layer])
layer = Activation(ACTIVATION, name='block_activation_{}'.format(i + 1))(layer)
layer = dropout(DROPOUT_RATE, name='dropout{}_2'.format(i + 1))(layer)
logits = Dense(n_outputs,
kernel_initializer='lecun_normal',
bias_initializer='zeros',
name='logits')(layer)
probabilities = Activation('softmax', name='output')(logits)
if n_outputs == 2:
loss = binary_crossentropy
else:
loss = categorical_crossentropy
model = Model(input_layer, probabilities)
try:
gpu_setup()
model = multi_gpu_model(model, gpus=2)
except:
pass
model.compile(optimizer=Adam(LEARNING_RATE), loss=loss)
model.fit(x, y, verbose=0, epochs=EPOCHS, batch_size=200)
predictions = model.predict(test, verbose=False)
for prediction in predictions:
probs = [str(prob) for prob in list(prediction)]
print('{}\t{}'.format(CLASS_OPTIONS[np.argmax(prediction)], '\t'.join(probs)))
resnet(x_train, y_train, x_test)
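# Example invocation matching the argv layout above (the script and file names
# and the hyper-parameter values are illustrative assumptions):
#   python resnet_classifier.py train.tsv test.tsv classA,classB 4 true \
#       "128;3;0.1;0.001;selu;0.001;50"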
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import logging
import re
from typing import List, NamedTuple, Optional
from magma.common.job import Job
from magma.magmad.check import subprocess_workflow
from magma.pipelined.metrics import (
GTP_PORT_USER_PLANE_DL_BYTES,
GTP_PORT_USER_PLANE_UL_BYTES,
)
OVSDBDumpCommandParams = NamedTuple('OVSDBCommandParams',
[('table', str), ('columns', List[str])])
ParsedInterfaceStats = NamedTuple('ParsedInterfaceStats', [
('Interface', str),
('rx_bytes', str),
('tx_bytes', str),
('remote_ip', str),
])
OVSDBCommandResult = NamedTuple('OVSDBCommandResult',
[('out', List[ParsedInterfaceStats]),
('err', Optional[str])])
interface_group = r"(?P<Interface>\w+)"
remote_ip_group = r"(?P<remote_ip>.*)"
rx_bytes_group = r"(?P<rx_bytes>\d+)"
tx_bytes_group = r"(?P<tx_bytes>\d+)"
stats_re_str = r'"{}"(.*)(remote_ip="{}")(.*)?rx_bytes={}.+tx_bytes={}'
interface_tx_rx_stats_re = re.compile(
stats_re_str.format(interface_group, remote_ip_group, rx_bytes_group,
tx_bytes_group))
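# Illustrative shape of an ovsdb-client dump line this regex is meant to match
# (values are made up; real output formatting may differ):
#   "g_abcd" {} {..., remote_ip="10.0.0.1"} {..., rx_bytes=1234, ..., tx_bytes=5678}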
MIN_OVSDB_DUMP_POLLING_INTERVAL = 60
GTP_IP_INTERFACE_PREFIX = 'g_'
GTP_INTERFACE_PREFIX = 'gtp0'
class GTPStatsCollector(Job):
def __init__(self, polling_interval: int, service_loop):
self._polling_interval = max(polling_interval,
MIN_OVSDB_DUMP_POLLING_INTERVAL)
super().__init__(interval=self._polling_interval, loop=service_loop)
self._loop = service_loop
logging.info("Running GTP stats collector...")
@asyncio.coroutine
def _ovsdb_dump_async(self, table: str, columns: List[str]):
"""
Execute ovsdb-client dump command asynchronously and parse stdout
results.
"""
params = [OVSDBDumpCommandParams(table=table, columns=columns)]
return subprocess_workflow.exec_and_parse_subprocesses_async(
params,
_get_ovsdb_dump_params,
_parse_ovsdb_dump_output,
self._loop,
)
async def _run(self) -> None:
dump_stats_results = await self._ovsdb_dump_async(
'Interface', ['name', 'statistics', 'options'])
for r in list(dump_stats_results)[0].out:
if GTP_IP_INTERFACE_PREFIX in r.Interface or \
r.Interface == GTP_INTERFACE_PREFIX:
GTP_PORT_USER_PLANE_DL_BYTES.labels(r.remote_ip).set(
float(r.tx_bytes))
GTP_PORT_USER_PLANE_UL_BYTES.labels(r.remote_ip).set(
float(r.rx_bytes))
def _get_ovsdb_dump_params(params: OVSDBDumpCommandParams) -> List[str]:
params_list = ['ovsdb-client', 'dump', params.table]
params_list.extend(params.columns)
return params_list
def _parse_ovsdb_dump_output(stdout: str, stderr: str,
_) -> OVSDBCommandResult:
"""
Parse stdout output from ovsdb-client dump command.
Raises:
ValueError: If any errors are encountered while parsing output.
"""
def create_error_result(error_msg):
return OVSDBCommandResult(
out='',
err=error_msg,
)
def find_header_line_idx(lines):
line_re = re.compile('^---.+$')
for i, line in enumerate(lines):
if line_re.match(line):
return i
raise ValueError('Could not find header in ovsdb-client output')
def match_gtp_lines(lines):
line_matches = []
for line in lines:
line_remote_ip_match = interface_tx_rx_stats_re.match(line)
if line_remote_ip_match:
match_dict = line_remote_ip_match.groupdict()
                if 'remote_ip' not in match_dict:
match_dict['remote_ip'] = ""
line_matches.append(match_dict)
return line_matches
if stderr:
return create_error_result(stderr)
try:
stdout_lines = stdout.decode('ascii').split('\n')
header_line_idx = find_header_line_idx(stdout_lines)
gtp_matches = match_gtp_lines(stdout_lines[header_line_idx + 1:])
results = []
for m in gtp_matches:
results.append(ParsedInterfaceStats(**m))
return OVSDBCommandResult(
out=results,
err=None,
)
except ValueError as e:
return create_error_result(str(e.args[0]))
|
"""
Stochastic Shortest Paths - Dynamic
Static Model
The code implementing the basic model for the Static
Version. This implements the class, do not try to run
this code. Run the DynamicModel_main instead.
Author: <NAME>
"""
from collections import (namedtuple, defaultdict)
import math
import numpy as np
import pandas as pd
import xlrd
from Policy import LookaheadPolicy
class StaticModel():
"""
Base class for the static model
"""
def __init__(self, state_names, x_names, s_0, params, G):
"""
Initializes the model
:param state_names: list(str) - state variable dimension names
:param x_names: list(str) - decision variable dimension names
        :param s_0: dict - contains the initial state information
        :param s_0[meanCosts]: dict - meanCosts[k][l] is the mean of the cost on the link k-l
        :param s_0[spreads]: dict - spreads[k][l] represents the spread of the distribution of
        cost on link k-l
        :param Horizon: int - the horizon over which we are looking ahead
        :param vertexCount: int - the number of nodes in our network
:param seed: int - seed for random number generator
"""
self.init_args = params
self.init_state = s_0
self.state_names = state_names
self.State = namedtuple('State', state_names)
self.state = self.build_state(self.init_state)
self.x_names = x_names
self.Decision = namedtuple('Decision', x_names)
self.G = G
self.theta = 0.5
self.n = 0
self.time = 1
self.obs = 1
self.estimated_costs = defaultdict(dict)
self.prng = np.random.RandomState(params['seed'])
def start_new_theta(self,theta):
self.theta = theta
self.estimated_costs = defaultdict(dict)
self.n = 0
self.obs = 1
self.prng = np.random.RandomState(self.init_args['seed'])
print("*****************Reseting model for theta {:.2f}".format(self.theta))
def update_estimated_costs(self):
for k in range(self.G.vertexCount):
for l in self.G.neighbors[k]:
m_hat = self.sample_from_uniform(k,l)
alpha = self.get_step_size()
if alpha < 1:
self.estimated_costs[k][l] = (1-alpha)* self.estimated_costs[k][l] + alpha * m_hat
else:
self.estimated_costs[k][l] = m_hat
self.estimated_costs[self.G.end_node][self.G.end_node] = 0
def sample_from_uniform(self,fromNode,toNode):
spread = self.G.spreads[fromNode][toNode]
deviation = self.prng.uniform(- spread, spread) * self.G.meanCosts[fromNode][toNode]
m_hat = self.G.meanCosts[fromNode][toNode] + deviation
return m_hat
def get_step_size(self):
#alpha = 1/self.n
#alpha = 1./self.time
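        # A harmonic 1/obs step size makes the recursive update equivalent to
        # taking the plain sample average of all observations so far.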
alpha = 1./self.obs
return alpha
def build_state(self, info):
return self.State(*[info[k] for k in self.state_names])
def build_decision(self, info):
return self.Decision(*[info[k] for k in self.x_names])
# exog_info_fn: function - returns the real experienced cost of traversing a link
# from 'fromNode' to 'toNode'
def exog_info_fn(self, fromNode, toNode):
cost_hat = self.sample_from_uniform(fromNode,toNode)
return cost_hat
# transition_fn: function - updates the state within the model and returns new state
def transition_fn(self, decision):
self.state = self.build_state({'node':decision})
self.time += 1
self.obs += 1
# :param objective_fn: function - returns the cost we would experience by taking 'decision'
# as our next node from the current state 'state'
def objective_fn(self, decision):
cost = self.exog_info_fn(self.state.node, decision)
return cost
    '''
    The function for running trials: it simulates solving the problem nbTrials
    times, accumulates the cost, the lateness penalty, and the number of steps
    of each trial, and returns their averages.
    '''
def runTrials(self,nbTrials,deadline):
# variables to store values along iterations
totalPenalty = 0.0
totalCost = 0.0
totalTime = 0.0
for i in range(nbTrials):
self.state = self.build_state(self.init_state)
self.time = 1
self.n += 1
cost=0.0
#print("Theta {:.2f} Iteration {}".format(self.theta,self.n))
#Following a path - the policy function is a lookahead
while self.state.node != self.G.end_node:
self.update_estimated_costs()
P = LookaheadPolicy(self)
decision = P.get_decision("PERCENTILE")
#self.build_decision({'nextNode':decision})
stepCost = self.objective_fn(decision)
cost += stepCost
#print("\t Theta {:.2f}, Iteration {}, Time {}, CurrNode {}, Decision {}, Step Cost {:.2f} Cum Cost {:.2f}".format(self.theta,self.n,self.time,self.state.node,decision,stepCost,cost))
self.transition_fn(decision)
#end of path calculations
totalCost += cost
if cost > deadline:
#latenessSquared = (cost - deadline) ** 2
latenessSquared = 1
totalPenalty += latenessSquared
else:
latenessSquared=0
totalTime += self.time-1
print("End of Theta {:.2f}, Iteration {}. Cost: {:.2f}, Lateness: {:.2f} and number of steps {}".format(self.theta,self.n,cost,math.sqrt(latenessSquared),self.time-1))
#end of trials
avgCost = totalCost/nbTrials
avgPenalty = totalPenalty / nbTrials
avgTime = totalTime / nbTrials
return avgCost,avgPenalty,avgTime
|
<gh_stars>1-10
from __future__ import annotations # for python 3.8
from pathlib import Path
from PyQt6 import QtCore, QtGui, QtWidgets
from PyQt6.QtWidgets import QWidget, QFileDialog, QStyle, QFrame
from agstoolbox.core.settings import ConstSettings
from agstoolbox.core.utils.file import dir_is_valid
class DirListWidget(QWidget):
default_dirs_value = None
def is_selection_valid(self) -> bool:
if self.list.count() == 0:
return False
itm = self.getSelectedItem()
if itm is None:
return False
row = self.list.row(itm)
if row >= self.list.count():
return False
return True
def appendDir(self, d):
itm = QtWidgets.QListWidgetItem(d, parent=self.list)
self.list.addItem(itm)
def setDirs(self, dirs: list[str]):
self.list.clear()
for d in dirs:
self.appendDir(d)
def getSelectedItem(self) -> QtWidgets.QListWidgetItem:
sel = self.list.selectedItems()
i = self.list.count() - 1
itm = self.list.item(i)
if sel is not None and len(sel) > 0:
itm = sel[0]
return itm
def getDirs(self) -> list[str]:
dirs = []
for i in range(self.list.count()):
itm = self.list.item(i)
d = itm.text()
dirs.append(d)
return dirs
def btn_new_clicked(self):
dir_path = QFileDialog.getExistingDirectory(
self, 'Select Folder',
options=QFileDialog.Option.ShowDirsOnly,
directory=ConstSettings().user_docs
)
if dir_path is None or len(dir_path) <= 1:
return
if not dir_is_valid(dir_path):
return
dir_path = str(Path(str(dir_path)).as_posix())
self.appendDir(dir_path)
def btn_edit_clicked(self):
if self.list.count() == 0:
return
itm = self.getSelectedItem()
dir_path = itm.text()
if not dir_is_valid(dir_path):
dir_path = ConstSettings().user_docs
dir_path = QFileDialog.getExistingDirectory(
self, 'Select Folder',
options=QFileDialog.Option.ShowDirsOnly,
directory=dir_path)
if not dir_is_valid(dir_path):
return
dir_path = str(Path(str(dir_path)).as_posix())
itm.setText(dir_path)
def btn_del_clicked(self):
if not self.is_selection_valid():
return
itm = self.getSelectedItem()
row = self.list.row(itm)
self.list.takeItem(row)
del itm
def btn_defaults_clicked(self):
self.setDirs(self.default_dirs_value)
def btn_move_up_clicked(self):
if not self.is_selection_valid():
return
if self.list.count() <= 1:
return
itm = self.getSelectedItem()
row = self.list.row(itm)
if row > 0:
self.list.takeItem(row)
target_row = row - 1
self.list.insertItem(target_row, itm)
self.list.setCurrentRow(target_row)
def btn_move_down_clicked(self):
if not self.is_selection_valid():
return
if self.list.count() <= 1:
return
itm = self.getSelectedItem()
row = self.list.row(itm)
if row < self.list.count() - 1:
self.list.takeItem(row)
target_row = row + 1
self.list.insertItem(target_row, itm)
self.list.setCurrentRow(target_row)
def __init__(self, dirs: list[str], default_dirs: list[str], parent: QWidget = None):
QWidget.__init__(self, parent)
self.setObjectName("dir_list_widget")
self.default_dirs_value = default_dirs
self.list = QtWidgets.QListWidget(self)
self.list.setObjectName("list")
self.list.setFrameStyle(QFrame.Shape.NoFrame)
self.list.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.SingleSelection)
self.setDirs(dirs)
self.push_button_new = QtWidgets.QPushButton(self)
self.push_button_new.setObjectName("push_button_new")
self.push_button_edit = QtWidgets.QPushButton(self)
self.push_button_edit.setObjectName("push_button_edit")
self.push_button_del = QtWidgets.QPushButton(self)
self.push_button_del.setObjectName("push_button_del")
self.push_button_move_up = QtWidgets.QPushButton(self)
self.push_button_move_up.setObjectName("push_button_move_up")
icon = self.style().standardIcon(QStyle.StandardPixmap.SP_ArrowUp)
self.push_button_move_up.setIcon(icon)
self.push_button_move_down = QtWidgets.QPushButton(self)
self.push_button_move_down.setObjectName("push_button_move_down")
icon = self.style().standardIcon(QStyle.StandardPixmap.SP_ArrowDown)
self.push_button_move_down.setIcon(icon)
self.push_button_defaults = QtWidgets.QPushButton(self)
self.push_button_defaults.setObjectName("push_button_defaults")
# Do Layout
spacer_item_fixed = QtWidgets.QSpacerItem(
20, 16, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)
spacer_item_expanding = QtWidgets.QSpacerItem(
20, 16, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout.addWidget(self.push_button_new)
self.verticalLayout.addWidget(self.push_button_edit)
self.verticalLayout.addWidget(self.push_button_del)
self.verticalLayout.addItem(spacer_item_fixed)
self.h_move_layout = QtWidgets.QHBoxLayout()
self.h_move_layout.setObjectName("h_move_layout")
self.h_move_layout.addWidget(self.push_button_move_up)
self.h_move_layout.addWidget(self.push_button_move_down)
self.verticalLayout.addLayout(self.h_move_layout)
self.verticalLayout.addWidget(self.push_button_defaults)
self.verticalLayout.addItem(spacer_item_expanding)
self.horizontalLayout = QtWidgets.QHBoxLayout(self)
self.horizontalLayout.setObjectName("horizontalLayout")
self.horizontalLayout.addWidget(self.list)
self.horizontalLayout.addLayout(self.verticalLayout)
self.setLayout(self.horizontalLayout)
self.retranslateUi()
# Connect events
self.push_button_new.clicked.connect(self.btn_new_clicked)
self.push_button_del.clicked.connect(self.btn_del_clicked)
self.push_button_edit.clicked.connect(self.btn_edit_clicked)
self.push_button_defaults.clicked.connect(self.btn_defaults_clicked)
self.push_button_move_up.clicked.connect(self.btn_move_up_clicked)
self.push_button_move_down.clicked.connect(self.btn_move_down_clicked)
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
parent = "dir_list_widget"
if self.parent() is not None:
parent = self.parent().objectName()
self.push_button_new.setText(_translate(parent, "New"))
self.push_button_edit.setText(_translate(parent, "Edit"))
self.push_button_del.setText(_translate(parent, "Delete"))
self.push_button_defaults.setText(_translate(parent, "Set Defaults"))
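# Minimal usage sketch (the directory paths are illustrative assumptions):
# from PyQt6.QtWidgets import QApplication
# app = QApplication([])
# widget = DirListWidget(dirs=['/home/user/ags'], default_dirs=['/home/user/ags'])
# widget.show()
# app.exec()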
|
'''Database Models for the Kookboek application'''
from home_portal.extensions import db_kookboek as db
class RecipesIngredients(db.Model):
'''
    The RecipesIngredients class defines the attributes
required to create a many-to-many relationship between recipes
and ingredients.
It also contains the amount and unit information for the required ingredients
related to the recipe.
'''
__bind_key__ = '<KEY>'
__tablename__ = 'RecipesIngredients'
recipe_id = db.Column(
db.Integer,
db.ForeignKey('Recipes.id'),
primary_key=True
)
ingredient_id = db.Column(
db.Integer,
db.ForeignKey('Ingredients.id'),
primary_key=True
)
amount = db.Column(
db.Numeric,
nullable=False
)
unit = db.Column(
db.String(64),
unique=False,
nullable=False
)
unit_description = db.Column(
db.Text(),
unique=False,
nullable=True
)
class Recipe(db.Model):
'''
The recipe class defines the attributes of the
Recipes table
'''
__bind_key__ = '<KEY>'
__tablename__ = 'Recipes'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(128),
unique=True,
nullable=False
)
category = db.Column(
db.String(64),
nullable=False
)
preparation = db.Column(
db.Text(),
nullable=True,
unique=False
)
picture = db.Column(
db.LargeBinary,
nullable=True
)
ingredients = db.relationship('Ingredient',
secondary=lambda: RecipesIngredients.__table__,
backref='Recipe')
def __repr__(self):
return "testeke {} {}".format(self.id, self.name)
@property
def get_recipe_id(self):
if self.id:
return self.id
else:
return 0
@property
def get_ingredients_list(self):
'''
        Returns the ingredients related to this recipe.
        The recipe is identified via its ID.
'''
ingredients = Ingredient.query.join(
RecipesIngredients,
RecipesIngredients.recipe_id == self.id)\
.filter(RecipesIngredients.ingredient_id == Ingredient.id)\
.all()
return ingredients
class Unit(db.Model):
'''
The Unit class defines the attributes of the unit
table.
'''
__bind_key__ = '<KEY>'
__tablename__ = 'Units'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(64),
unique=True,
nullable=False
)
description = db.Column(
db.Text(),
unique=False,
nullable=True
)
class Category(db.Model):
'''
The Category class defines the attributes of the category
table.
'''
__bind_key__ = '<KEY>'
__tablename__ = 'Categories'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(64),
unique=True,
nullable=False
)
description = db.Column(
db.Text(),
unique=False,
nullable=True
)
class Ingredient(db.Model):
'''
The Ingredient class defines the attributes of the ingredient
table.
'''
__bind_key__ = '<KEY>'
__tablename__ = 'Ingredients'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(64),
unique=True,
nullable=False
)
picture = db.Column(
db.LargeBinary,
nullable=True
)
default_unit = db.Column(
db.String(32),
nullable=False
)
unit_description = db.Column(
db.String(128),
nullable=True
)
default_amount = db.Column(
db.Numeric,
nullable=True
)
recipes = db.relationship('Recipe',
secondary=lambda: RecipesIngredients.__table__,
backref='Ingredient'
)
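# Minimal usage sketch inside an application context (the recipe name is an
# illustrative assumption):
# recipe = Recipe.query.filter_by(name='Pannenkoeken').first()
# for ingredient in recipe.get_ingredients_list:
#     print(ingredient.name, ingredient.default_unit)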
|
from .testdefs import *
name_tests(
# syntax
syntax = cmp("func f(): {}"),
syntax_args = cmp("func f(a, b:Int=2)->T: {}"),
# format
format_enter = cmp("func f():\n\t1\n\t2"),
format_single = cmp("func f(): 1"),
format_strip = cmp("func f(): {;0;}", "func f(): 0"),
format_anon = cmp("func(): {}"),
# stmt
stmt = cmp("var f = func(): {}"),
# arrow
arrow_no_args = cmp("() -> 2", "func()->Dynamic: return 2"),
arrow_single_arg = cmp("x -> 2", "func(x)->Dynamic: return 2"),
arrow_parens = cmp("(x) -> 2", "func(x)->Dynamic: return 2"),
arrow_multiple_args = cmp("(a, b) -> 2", "func(a, b)->Dynamic: return 2"),
# arrow_sign=cmp"()",
# ?, *, :T=val
# arrow_sign=evals("(a:Int, b:Int) -> 0", "<func (Int,Int)->Dynamic>")
pack_defaults = fails(),
pack_opt = fails(),
# macro
macro_syntax = cmp("func(macro node): {}"),
macro_optional = evals("((?macro node)->node)()", "null"),
macro_default_null = evals("((macro node=null)->node)()", "null"),
macro_decor = evals("@((macro node)->node) a + b", "macro a + b"),
macro_pack = evals("((*macro args, **macro kwargs) -> args, kwargs)(a.b, key=c * d)", "(macro a.b, macro c * d)"), # prec, interaction with sent *args?
macro_arg_type = fails("macro node:Type"),
# wrong miss illegal error syntax
macro_func_type = evals("Code->"),
macro_func_repr = evals("without Code"),
# pack
pack_syntax = cmp("func(*args, **kwargs): {}"),
pack_args_type = evals("((*args)->args)(1, 2)", "[1, 2]"),
pack_kwargs_type = evals("((**kwargs)->kwargs)(key=2)", '["key": 2]'),
# *args:List<T>, conversion
# optional
optional_syntax = cmp("func(?x): {}"),
optional_converts = evals("((?x)->x).args", '[Arg("x", default_=macro null)]'),
optional_repr = evals("(func(?x): {}).source()", '"func(?x): {}"'),
optional_default = fails("(?x=1)->x", "SyntaxError: optional argument with default value"),
# object
object_type = evals("(x->x).__type__", "<class Func>"),
object_repr = evals("x->x", "<func Dynamic->Dynamic>"), #
object_source = evals("(func(x): return x).source()", '"func(x): return x"'),
object_source_bound = evals('(func(this:T): {}).source_bound(T, "call")', '"func call(): {}"'),
object_code = evals("(x->x).code", "macro return x"),
object_signature = evals("(x->x).signature()", '"(x)->Dynamic"'),
# object_type = evals("(x->x).type()", '"Dynamic->Dynamic"'),
# type
type_map = evals("(func(obj:T)->T: {}).type()", '"T->T"'),
type_script = evals("(func(): {}).type()", '"()->()"'),
type_args = evals("(func(a:T, b:T)->T: {}).type()", '"(T,T)->T"'),
object_return_type = evals("@qualname (func()->T: {}).return_type", '"T"'),
sign_no_return = evals("(func(): {})"),
# return_type of a thing that does not spec it
# format, string
# repr
edit_repr = evals("func(a:Float, b:Float)->Float: {}", "<func (Float,Float)->Float>"),
edit_code = evals("(x -> x + 2).code", "macro x + 2"),
edit_args = evals("((a, b)->{}).args", '[Arg("a"), Arg("b")]'),
edit_return_type = evals("(func()->Float:{}).return_type", "<class Float>"),
arrow_id = evals("a->"),
arrow_arg = evals("(x, y:Float=2)->"),
arrow_tuple = fails("a,b->a"),
complicated_arg = evals("(macro ?a:Float=2)->"),
# keyword
keyword_only_syntax = cmp("func(*args, key, **kwargs): {}"),
keyword_only_star = cmp("func(*, type): {}"),
keyword_only_miss = evals("((*args, ?key)->key)(1)", "null"),
keyword_only_pass = evals("((*args, ?key)->key)(key=1)", "1"),
keyword_star_extra = fails("((*, ?key)->key)(1)"),
# a char into *
# sign
# names
mismatch_less = fails("(a->a)()"),
mismatch_more = fails("(()->a)(1)"),
mismatch_keyword = fails("((*, key)->2)()"),
pack_keyword = evals("(x->x)(x=2)", "2"),
)
# >>> f()
# <Module object>
# TypeError: <lambda>() missing 1 required positional argument: 'x'
# positional, keyword works
# unpack list, dict work
# they do
# * can bypass a macro arg
# ** too
# *2 items into macros?
# * accepts any iterable thing
# what are iterable things?
# actual list
# something that converts to a list?
# iterator
# iterable
# a thing that converts to a list is NOT an iterable
# but, if you want a list, and list has FROM iterable, and a thing has FROM list?
# sign tests
"""
func_type_str
>>> a -> b
<func Object->Object>
>>> func(x:Float)->Float: {}
<func Float->Float>
>>> ff(f)
<func ()->()>
<func Float->Float>
func f(macro node)
(macro node)->
"""
"""
# int? dunno
# keyword-only
keyword_only_syntax
keyword_only_wrong = fails("((*, key=null)->key)(1)"),
# all named after * are required to be keyword only
# fails - func(*args, **kwargs, key): {}
# fails - ()(2)
# miss keyword
pack = evals("((*args, **kwargs) -> args, kwargs)(0, a=1)", '[0], ["a": 1]'),
keyword_arg = evals("((a, b) -> a, b)(0, b=1)", "(0, 1)"),
unpack = evals("((a, b) -> a, b)(*[0], **Map(b=1))", "(0, 1)"),
pack_multiple_args = fails("* *"),
pack_multiple_kwargs = fails("** **"),
keyword_only_after_kwargs = fails("**kwargs, key", "SyntaxError: invalid syntax"),
"""
# the "automacro" stuff... or evals? should be ().source()
# unpack_args
# pack_args
# term_call
# TypeError: <lambda>() missing 1 required keyword-only argument: 'key'
# if
# to validators!
# can't @functools.wraps(f) before a lambda. can you in twocode?
# f(x, y, /) - no args
# create partial cast
# thats codebase shit
# precedence so we can safely a -> a + b |
import re
from datetime import datetime
from time import mktime
from discord import Embed, Forbidden, HTTPException
from discord.ext import commands, tasks
from discord.ext.commands import BadArgument
from discord_slash import SlashContext, cog_ext, SlashCommandOptionType
from discord_slash.utils import manage_commands
from feedparser import parse
import db
from administrator import slash
from administrator.check import is_enabled
from administrator.logger import logger
extension_name = "tomuss"
logger = logger.getChild(extension_name)
url_re = re.compile(r"https://tomuss\.univ-lyon1\.fr/S/[0-9]{4}/[a-zA-Z]+/rss/.+")
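# Example of a URL shape accepted by this pattern (year/section/token are
# illustrative): https://tomuss.univ-lyon1.fr/S/2020/Etudiant/rss/abc123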
class Tomuss(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.tomuss_loop.start()
slash.get_cog_commands(self)
def description(self):
return "PCP Univ Lyon 1"
@cog_ext.cog_subcommand(base="tomuss", name="set", description="Set your tomuss RSS feed", options=[
manage_commands.create_option("url", "The RSS URL", SlashCommandOptionType.STRING, True)])
async def tomuss_set(self, ctx: SlashContext, url: str):
if not url_re.fullmatch(url):
raise BadArgument()
entries = parse(url).entries
if not entries:
raise BadArgument()
last = datetime.fromtimestamp(mktime(sorted(entries, key=lambda e: e.published_parsed)[0].published_parsed))
s = db.Session()
t = s.query(db.Tomuss).get(ctx.author.id)
if t:
t.url = url
t.last = last
else:
t = db.Tomuss(ctx.author.id, url, last)
s.add(t)
s.commit()
s.close()
await ctx.channel.send(f"Tomuss RSS set for {ctx.author.mention} \U0001f44d")
@cog_ext.cog_subcommand(base="tomuss", name="unset", description="Unset your tomuss RSS feed")
async def tomuss_unset(self, ctx: SlashContext):
s = db.Session()
t = s.query(db.Tomuss).get(ctx.author.id)
if not t:
raise BadArgument()
s.delete(t)
s.commit()
s.close()
await ctx.send(content="\U0001f44d")
@tasks.loop(minutes=5)
async def tomuss_loop(self):
s = db.Session()
for t in s.query(db.Tomuss).all():
u = await self.bot.fetch_user(t.user_id)
if not u:
s.delete(t)
s.commit()
continue
last = t.last.utctimetuple()
entries = list(filter(lambda e: e.published_parsed > last,
sorted(parse(t.url).entries, key=lambda e: e.published_parsed)))
if entries:
embed = Embed(title="Tomuss update !")
for e in entries:
if len(e.title) > 256:
title = e.title[:253] + "..."
else:
title = e.title
summary = e.summary.replace("<br />", "\n").replace("<b>", "**").replace("</b>", "**")
if len(summary) > 1024:
summary = summary[:1021] + "..."
embed.add_field(name=title, value=summary)
try:
await u.send(embed=embed)
except Forbidden:
s.delete(t)
s.commit()
continue
except HTTPException:
await u.send("Too much to send, I can't handle it sorry...")
finally:
t.last = datetime.fromtimestamp(mktime(entries[-1].published_parsed))
s.add(t)
s.commit()
s.close()
def cog_unload(self):
self.tomuss_loop.stop()
def setup(bot):
logger.info(f"Loading...")
try:
bot.add_cog(Tomuss(bot))
except Exception as e:
logger.error(f"Error loading: {e}")
else:
logger.info(f"Load successful")
def teardown(bot):
logger.info(f"Unloading...")
try:
bot.remove_cog("Tomuss")
except Exception as e:
logger.error(f"Error unloading: {e}")
else:
logger.info(f"Unload successful")
|
#!/usr/bin/env python
"""
matrix_utils.py: utilities for matrix conversion
This file defines the to_matrix() function, which can be used to convert Pandas
dataframes or other types of array-like objects to numpy ndarrays for use in
mlpack bindings.
mlpack is free software; you may redistribute it and/or modify it under the
terms of the 3-clause BSD license. You should have received a copy of the
3-clause BSD license along with mlpack. If not, see
http://www.opensource.org/licenses/BSD-3-Clause for more information.
"""
import numpy as np
import pandas as pd
# The CategoricalDtype class has moved multiple times, so this insanity is
# necessary to import the right version.
if int(pd.__version__.split('.')[0]) > 0 or \
int(pd.__version__.split('.')[1]) >= 20:
from pandas.api.types import CategoricalDtype
elif int(pd.__version__.split('.')[1]) >= 18:
from pandas.types.dtypes import CategoricalDtype
elif int(pd.__version__.split('.')[1]) == 17:
from pandas.core.dtypes import CategoricalDtype
elif int(pd.__version__.split('.')[1]) >= 15:
from pandas.core.common import CategoricalDtype
# We need a unicode type, but on python3 we don't have it.
try:
UNICODE_EXISTS = bool(type(unicode))
except NameError:
unicode = str
# We also need a buffer type.
try:
BUFFER_EXISTS = bool(type(buffer))
except NameError:
buffer = memoryview
def to_matrix(x, dtype=np.double, copy=False):
"""
Given some array-like X, return a numpy ndarray of the same type.
"""
# Make sure it's array-like at all.
if not hasattr(x, '__len__') and \
not hasattr(x, 'shape') and \
not hasattr(x, '__array__'):
raise TypeError("given argument is not array-like")
if (isinstance(x, np.ndarray) and x.dtype == dtype and x.flags.c_contiguous):
if copy: # Copy the matrix if required.
return x.copy("C"), True
else:
return x, False
else:
if isinstance(x, pd.core.series.Series):
x = pd.DataFrame(x)
return np.array(x, copy=True, dtype=dtype, order='C'), True
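# Example behavior (a sketch): to_matrix(pd.DataFrame([[1.0, 2.0]])) copies the
# data and returns (ndarray, True), while a C-contiguous double ndarray is
# passed through unchanged as (same_array, False) when copy=False.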
def to_matrix_with_info(x, dtype, copy=False):
"""
Given some array-like X (which should be either a numpy ndarray or a pandas
DataFrame), convert it into a numpy matrix of the given dtype.
"""
# Make sure it's array-like at all.
if not hasattr(x, '__len__') and \
not hasattr(x, 'shape') and \
not hasattr(x, '__array__'):
raise TypeError("given argument is not array-like")
if isinstance(x, np.ndarray):
# It is already an ndarray, so the vector of info is all 0s (all numeric).
        d = np.zeros([x.shape[1]], dtype=bool)
# Copy the matrix if needed.
if copy:
return (x.copy(order="C"), True, d)
else:
return (x, False, d)
if isinstance(x, pd.DataFrame) or isinstance(x, pd.Series):
# It's a pandas dataframe. So we need to see if any of the dtypes are
# categorical or object, and if so, we need to convert them. First see if
# we can take a shortcut without copying.
dtype_array = x.dtypes.values if len(x.dtypes) > 0 else [x.dtypes]
if not any(isinstance(t, CategoricalDtype)
for t in dtype_array) and \
not np.dtype(object) in dtype_array and \
not np.dtype(str) in dtype_array and \
not np.dtype(unicode) in dtype_array:
# We can just return the matrix as-is; it's all numeric.
t = to_matrix(x, dtype=dtype, copy=copy)
            d = np.zeros([x.shape[1]], dtype=bool)
return (t[0], t[1], d)
if np.dtype(str) in dtype_array or np.dtype(unicode) in dtype_array:
raise TypeError('cannot convert matrices with string types')
if np.dtype(buffer) in dtype_array:
raise TypeError("'buffer' dtype not supported")
# If we get to here, then we are going to need to do some type conversion,
# so go ahead and copy the dataframe and we'll work with y to make
# modifications.
y = x
        d = np.zeros([x.shape[1]], dtype=bool)
# Convert any 'object', 'str', or 'unicode' types to categorical.
convertColumns = x.select_dtypes(['object'])
if not convertColumns.empty:
y[convertColumns] = y[convertColumns].astype('category')
catColumns = x.select_dtypes(['category']).columns
if len(catColumns) > 0:
# Do actual conversion to numeric types. This converts to an int type.
y = x # Copy it... not great...
# Note that this will map NaNs (missing values or unknown categories) to
# -1, so we will have to convert those back to NaN.
y[catColumns] = y[catColumns].apply(
lambda c: c.cat.codes).astype('double')
            y[catColumns] = y[catColumns].replace(to_replace=[-1], value=float('NaN'))
# Construct dataset information: 1s represent categorical data, 0s
# represent otherwise.
catColumnIndices = [y.columns.get_loc(i) for i in catColumns]
d[catColumnIndices] = 1
# We'll have to force the second part of the tuple (whether or not to take
# ownership) to true.
t = to_matrix(y.apply(pd.to_numeric), dtype=dtype)
return (t[0], True, d)
if isinstance(x, list):
# Get the number of dimensions.
dims = 0
if isinstance(x[0], list):
dims = len(x[0])
else:
dims = len(x)
d = np.zeros([dims])
out = np.array(x, dtype=dtype, copy=copy) # Try to avoid copy...
# Since we don't have a great way to check if these are using the same
# memory location, we will probe manually (ugh).
oldval = x[0]
x[0] *= 2
alias = False
if out[0] == x[0]:
alias = True
x[0] = oldval
return (out, not alias, d)
# If we got here, the type is not known.
raise TypeError("given matrix is not a numpy ndarray or pandas DataFrame or "\
"Python array; not supported at this time");
|
<gh_stars>0
# Copyright (c) 2018, Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import Schema, fields
from webargs.fields import DelimitedList
class Organism(Schema):
id = fields.Integer(required=True)
project_id = fields.Integer(required=True)
name = fields.String(required=True)
class Strain(Schema):
id = fields.Integer(required=True)
project_id = fields.Integer(required=True)
name = fields.String(required=True)
parent_id = fields.Integer(required=True, allow_none=True)
genotype = fields.String(required=True)
organism_id = fields.Integer(required=True)
class Experiment(Schema):
id = fields.Integer(required=True)
project_id = fields.Integer(required=True)
name = fields.String(required=True)
description = fields.String(required=True)
class Medium(Schema):
id = fields.Integer(required=True)
project_id = fields.Integer(required=True)
name = fields.String(required=True)
class MediumCompound(Schema):
id = fields.Integer(required=True)
medium_id = fields.Integer(required=True)
compound_name = fields.String(required=True)
compound_identifier = fields.String(required=True)
compound_namespace = fields.String(required=True)
mass_concentration = fields.Float(
required=True, allow_none=True
) # unit: mmol/l
class Condition(Schema):
id = fields.Integer(required=True)
experiment_id = fields.Integer(required=True)
strain_id = fields.Integer(required=True)
medium_id = fields.Integer(required=True)
name = fields.String(required=True)
class Sample(Schema):
id = fields.Integer(required=True)
condition_id = fields.Integer(required=True)
name = fields.String(required=True)
start_time = fields.DateTime(required=True)
end_time = fields.DateTime(required=True, allow_none=True)
class Fluxomics(Schema):
id = fields.Integer(required=True)
sample_id = fields.Integer(required=True)
reaction_name = fields.String(required=True)
reaction_identifier = fields.String(required=True)
reaction_namespace = fields.String(required=True)
measurement = fields.Float(required=True) # unit: mmol/gDW/h
uncertainty = fields.Float(
required=True, allow_none=True
) # unit: mmol/gDW/h
class FluxomicsBatchRequest(Schema):
body = DelimitedList(fields.Nested(Fluxomics(exclude=("id",))))
class Metabolomics(Schema):
id = fields.Integer(required=True)
sample_id = fields.Integer(required=True)
compound_name = fields.String(required=True)
compound_identifier = fields.String(required=True)
compound_namespace = fields.String(required=True)
measurement = fields.Float(required=True) # unit: mmol/l
uncertainty = fields.Float(required=True, allow_none=True) # unit: mmol/l
class MetabolomicsBatchRequest(Schema):
body = DelimitedList(fields.Nested(Metabolomics(exclude=("id",))))
class Proteomics(Schema):
id = fields.Integer(required=True)
sample_id = fields.Integer(required=True)
identifier = fields.String(required=True)
name = fields.String(required=True)
full_name = fields.String(required=True)
gene = fields.Dict(required=True)
measurement = fields.Float(required=True) # unit: mmol/gDW
uncertainty = fields.Float(required=True, allow_none=True) # unit: mmol/gDW
class ProteomicsBatchRequest(Schema):
body = DelimitedList(fields.Nested(Proteomics(exclude=("id",))))
class UptakeSecretionRates(Schema):
id = fields.Integer(required=True)
sample_id = fields.Integer(required=True)
compound_name = fields.String(required=True)
compound_identifier = fields.String(required=True)
compound_namespace = fields.String(required=True)
measurement = fields.Float(required=True) # unit: mmol/gDW/h
uncertainty = fields.Float(
required=True, allow_none=True
) # unit: mmol/gDW/h
class MolarYields(Schema):
id = fields.Integer(required=True)
sample_id = fields.Integer(required=True)
product_name = fields.String(required=True)
product_identifier = fields.String(required=True)
product_namespace = fields.String(required=True)
substrate_name = fields.String(required=True)
substrate_identifier = fields.String(required=True)
substrate_namespace = fields.String(required=True)
# Both in mmol-product / mmol-substrate
measurement = fields.Float(required=True)
uncertainty = fields.Float(required=True, allow_none=True)
class GrowthRate(Schema):
id = fields.Integer(required=True)
sample_id = fields.Integer(required=True)
measurement = fields.Float(required=True) # unit: 1/h
# unit: 1/h; 0 if no uncertainty or unknown
uncertainty = fields.Float(required=True)
# Schemas below include full relation objects across foreign keys in the models.
class SampleData(Sample):
fluxomics = fields.Nested(Fluxomics, many=True, required=True)
metabolomics = fields.Nested(Metabolomics, many=True, required=True)
proteomics = fields.Nested(Proteomics, many=True, required=True)
uptake_secretion_rates = fields.Nested(
UptakeSecretionRates, many=True, required=True
)
molar_yields = fields.Nested(MolarYields, many=True, required=True)
growth_rate = fields.Nested(GrowthRate, required=True)
class MediumData(Medium):
compounds = fields.Nested(MediumCompound, many=True, required=True)
class ConditionData(Condition):
strain = fields.Nested(Strain, required=True)
medium = fields.Nested(MediumData, required=True)
samples = fields.Nested(SampleData, many=True, required=True)
class ExperimentData(Experiment):
conditions = fields.Nested(ConditionData, many=True, required=True)
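# Minimal usage sketch (field values are illustrative assumptions; marshmallow 3
# semantics, where load() returns the validated dict or raises ValidationError):
# data = Organism().load({"id": 1, "project_id": 2, "name": "E. coli"})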
|
<reponame>Media-Smart/volkstuner
import json
import logging
import multiprocessing as mp
import os
import pickle
import threading
import time
import copy
from collections import OrderedDict
from tqdm.auto import tqdm
from .reporter import DistStatusReporter, FakeReporter
from .resource import DistributedResource
from .scheduler import TaskScheduler
from ..core import Task
from ..core.decorator import _autogluon_method
from ..searcher import BaseSearcher
from ..searcher.searcher_factory import searcher_factory
from ..utils import save, load, mkdir
__all__ = ['FIFOScheduler']
logger = logging #.getLogger(__name__)
class FIFOScheduler(TaskScheduler):
r"""Simple scheduler that just runs trials in submission order.
Parameters
----------
train_fn : callable
A task launch function for training. Note: please add the `@autogluon_method` decorator to the original function.
args : object (optional)
Default arguments for launching train_fn.
resource : dict
Computation resources. For example, `{'num_cpus':2, 'num_gpus':1}`
searcher : str or object
Autogluon searcher. For example, autogluon.searcher.RandomSearcher
time_attr : str
A training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_epoch` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
reward_attr : str
The training result objective value attribute. As with `time_attr`, this may refer to any objective value.
Stopping procedures will use this attribute.
dist_ip_addrs : list of str
IP addresses of remote machines.
Examples
--------
>>> import numpy as np
>>> import autogluon as ag
>>> @ag.args(
... lr=ag.space.Real(1e-3, 1e-2, log=True),
... wd=ag.space.Real(1e-3, 1e-2))
>>> def train_fn(args, reporter):
... print('lr: {}, wd: {}'.format(args.lr, args.wd))
... for e in range(10):
... dummy_accuracy = 1 - np.power(1.8, -np.random.uniform(e, 2*e))
... reporter(epoch=e, accuracy=dummy_accuracy, lr=args.lr, wd=args.wd)
>>> scheduler = ag.scheduler.FIFOScheduler(train_fn,
... resource={'num_cpus': 2, 'num_gpus': 0},
... num_trials=20,
... reward_attr='accuracy',
... time_attr='epoch')
>>> scheduler.run()
>>> scheduler.join_jobs()
>>> scheduler.get_training_curves(plot=True)
"""
def __init__(self, train_fn, args=None, resource=None,
searcher=None, search_options=None,
checkpoint=None,
resume=False, num_trials=None,
time_out=None, max_reward=1.0, time_attr='epoch',
reward_attr='accuracy', dist_ip_addrs=None):
super().__init__(dist_ip_addrs)
if resource is None:
resource = {'num_cpus': 1, 'num_gpus': 0}
self.resource = resource
if searcher is None:
searcher = 'random' # Default: Random searcher
if isinstance(searcher, str):
kwargs = search_options.copy() if search_options else dict()
kwargs['configspace'] = train_fn.cs
self.searcher: BaseSearcher = searcher_factory(searcher, **kwargs)
else:
assert isinstance(searcher, BaseSearcher)
self.searcher: BaseSearcher = searcher
assert isinstance(train_fn, _autogluon_method)
self.train_fn = train_fn
self.args = args if args else train_fn.args
# meta data
self.metadata = {
'search_space': train_fn.kwspaces,
'search_strategy': searcher,
'stop_criterion': {'time_limits': time_out, 'max_reward': max_reward},
'resources_per_trial': resource
}
self.num_trials = num_trials
self.time_out = time_out
self.max_reward = max_reward
self._checkpoint = checkpoint
self._time_attr = time_attr
self._reward_attr = reward_attr
self.log_lock = mp.Lock()
self.training_history = OrderedDict()
self.config_history = OrderedDict()
if resume:
if os.path.isfile(checkpoint):
self.load_state_dict(load(checkpoint))
else:
msg = f'checkpoint path {checkpoint} is not available for resume.'
logger.exception(msg)
raise FileNotFoundError(msg)  # a missing checkpoint cannot be resumed from
def run(self, **kwargs):
"""Run multiple number of trials
"""
start_time = time.time()
num_trials = kwargs.get('num_trials', self.num_trials)
time_out = kwargs.get('time_out', self.time_out)
logger.info('Starting Experiments')
logger.info(f'Num of Finished Tasks is {self.num_finished_tasks}')
logger.info(f'Num of Pending Tasks is {num_trials - self.num_finished_tasks}')
tbar = tqdm(range(self.num_finished_tasks, num_trials))
for _ in tbar:
if (time_out and time.time() - start_time >= time_out) \
or (self.max_reward and self.get_best_reward() >= self.max_reward):
break
self.schedule_next()
def save(self, checkpoint=None):
"""Save Checkpoint
"""
if checkpoint is None:
if self._checkpoint is None:
logger.warning("Checkpointing is disabled")
else:
checkpoint = self._checkpoint
if checkpoint is not None:
with self.LOCK:
mkdir(os.path.dirname(checkpoint))
save(self.state_dict(), checkpoint)
def schedule_next(self):
"""Schedule next searcher suggested task
"""
# Allow for the promotion of a previously chosen config. Also,
# extra_kwargs contains extra info passed to both add_job and to
# get_config (if no config is promoted)
# request resource first
resources = DistributedResource(**self.resource)
FIFOScheduler.RESOURCE_MANAGER._request(resources)
config, extra_kwargs = self._promote_config()
if config is None:
# No config to promote: Query next config to evaluate from searcher
config = self.searcher.get_config(**extra_kwargs)
extra_kwargs['new_config'] = True
else:
# This is not a new config, but a paused one which is now promoted
extra_kwargs['new_config'] = False
task = Task(
self.train_fn,
{'args': self.args, 'config': config},
resources
)
self.add_job(task, **extra_kwargs)
def run_with_config(self, config):
"""Run with config for final fit.
It launches a single training trial with fixed hyperparameter values.
For example, after HPO has identified the best hyperparameter values based on a hold-out dataset,
one can use this function to retrain a model with the same hyperparameters on all the available labeled data
(including the hold-out set). It can also return other objects or states.
"""
task = Task(
self.train_fn,
{'args': self.args, 'config': config},
DistributedResource(**self.resource)
)
reporter = FakeReporter()
task.args['reporter'] = reporter
return self.run_job(task)
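# Sketch of the intended final-fit flow (assuming HPO has already finished,
# as in the class docstring example):
#
#     scheduler.run()
#     scheduler.join_jobs()
#     best_config = scheduler.get_best_config()
#     scheduler.run_with_config(best_config)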
def _dict_from_task(self, task):
if isinstance(task, Task):
return {'TASK_ID': task.task_id, 'Config': task.args['config']}
else:
assert isinstance(task, dict)
return {'TASK_ID': task['TASK_ID'], 'Config': task['Config']}
def add_job(self, task, **kwargs):
"""Adding a training task to the scheduler.
Args:
task (:class:`autogluon.scheduler.Task`): a new training task
Relevant entries in kwargs:
- bracket: HB bracket to be used. Has been sampled in _promote_config
- new_config: If True, task starts new config eval, otherwise it promotes
a config (only if type == 'promotion')
Only if new_config == False:
- config_key: Internal key for config
- resume_from: config promoted from this milestone
- milestone: config promoted to this milestone (next from resume_from)
"""
cls = FIFOScheduler
if not task.resources.is_ready:
cls.RESOURCE_MANAGER._request(task.resources)
# reporter
reporter = DistStatusReporter(remote=task.resources.node)
task.args['reporter'] = reporter
# Register pending evaluation
self.searcher.register_pending(task.args['config'])
# main process
job = cls._start_distributed_job(task, cls.RESOURCE_MANAGER)
# reporter thread
rp = threading.Thread(
target=self._run_reporter,
args=(task, job, reporter, self.searcher),
daemon=False
)
rp.start()
task_dict = self._dict_from_task(task)
task_dict.update({'Task': task, 'Job': job, 'ReporterThread': rp})
# checkpoint thread
if self._checkpoint is not None:
def _save_checkpoint_callback(fut):
self._cleaning_tasks()
self.save()
job.add_done_callback(_save_checkpoint_callback)
with self.LOCK:
self.scheduled_tasks.append(task_dict)
def _clean_task_internal(self, task_dict):
task_dict['ReporterThread'].join()
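# Reporter handshake used below: the remote trial pushes intermediate results
# through DistStatusReporter; fetch() waits for the next result and move_on()
# acknowledges it so the trial can continue. The last result received is
# forwarded to the searcher as the trial's final reward.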
def _run_reporter(self, task, task_job, reporter, searcher):
last_result = None
while not task_job.done():
reported_result = reporter.fetch()
if 'traceback' in reported_result:
logger.exception(reported_result['traceback'])
reporter.move_on()
break
if reported_result.get('done', False):
reporter.move_on()
break
logger.info('Task %d: %s' % (task.task_id, reported_result))
self._add_training_result(
task.task_id, reported_result, config=task.args['config'])
reporter.move_on()
last_result = reported_result
if last_result is not None:
last_result['done'] = True
config = copy.deepcopy(task.args['config'])
config['task_id'] = task.task_id
searcher.update(
config=config,
reward=last_result[self._reward_attr],
**last_result
)
def _promote_config(self):
"""
Provides a hook in schedule_next which allows promoting a config
that was selected and partially evaluated previously.
:return: config, extra_args
"""
config = None
extra_args = dict()
return config, extra_args
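# FIFOScheduler itself never promotes a paused config (this hook always
# returns None); schedulers with pause/resume semantics, e.g. Hyperband-style
# subclasses, are expected to override _promote_config.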
def get_best_config(self):
"""Get the best configuration from the finished jobs.
"""
return self.searcher.get_best_config()
def get_best_reward(self):
"""Get the best reward from the finished jobs.
"""
return self.searcher.get_best_reward()
def _add_training_result(self, task_id, reported_result, config=None):
with self.log_lock:
# Note: We store all of reported_result in training_history[task_id],
# not just the reward value.
if task_id in self.training_history:
self.training_history[task_id].append(reported_result)
else:
self.training_history[task_id] = [reported_result]
if config:
self.config_history[task_id] = config
def get_training_curves(self, filename=None, plot=False, use_legend=True):
"""Get Training Curves
Parameters
----------
filename : str
plot : bool
use_legend : bool
Examples
--------
>>> scheduler.run()
>>> scheduler.join_jobs()
>>> scheduler.get_training_curves(plot=True)
.. image:: https://github.com/zhanghang1989/AutoGluonWebdata/blob/master/doc/api/autogluon.1.png?raw=true
"""
if filename is None and not plot:
logger.warning('Please either provide filename or allow plot in get_training_curves')
import matplotlib.pyplot as plt
plt.ylabel(self._reward_attr)
plt.xlabel(self._time_attr)
plt.title("Performance vs Training-Time in each HPO Trial")
with self.log_lock:
for task_id, task_res in self.training_history.items():
rewards = [x[self._reward_attr] for x in task_res]
x = list(range(len(task_res)))
plt.plot(x, rewards, label=f'task {task_id}')
if use_legend:
plt.legend(loc='best')
if filename:
logger.info(f'Saving Training Curve in {filename}')
plt.savefig(filename)
if plot:
plt.show()
def state_dict(self, destination=None):
"""Returns a dictionary containing a whole state of the Scheduler
Examples
--------
>>> ag.save(scheduler.state_dict(), 'checkpoint.ag')
"""
destination = super().state_dict(destination)
destination['searcher'] = pickle.dumps(self.searcher)
with self.log_lock:
destination['training_history'] = json.dumps(self.training_history)
return destination
def load_state_dict(self, state_dict):
"""Load from the saved state dict.
Examples
--------
>>> scheduler.load_state_dict(ag.load('checkpoint.ag'))
"""
super().load_state_dict(state_dict)
self.searcher = pickle.loads(state_dict['searcher'])
with self.log_lock:
self.training_history = json.loads(state_dict['training_history'])
logger.debug(f'Loading Searcher State {self.searcher}')
|
<filename>buzzard/_env.py<gh_stars>0
""">>> help(buzz.env)
>>> help(buzz.Env)
"""
import threading
from collections import namedtuple
import cv2
from osgeo import gdal, ogr, osr
from buzzard._tools import conv, Singleton
try:
from collections import ChainMap
except ImportError:
# https://pypi.python.org/pypi/chainmap
from chainmap import ChainMap
# Sanitization ********************************************************************************** **
# _RASTER_DRIVERS = {gdal.GetDriver(i).GetDescription() for i in range(gdal.GetDriverCount())}
# def _sanitize_raster_driver(val):
# if isinstance(val, gdal.Driver):
# return val
# val = str(val)
# if val not in _RASTER_DRIVERS:
# raise ValueError('Unknown raster driver')
# return gdal.GetDriverByName(val)
# _VECTOR_DRIVERS = {ogr.GetDriver(i).GetDescription() for i in range(ogr.GetDriverCount())}
# def _sanitize_vector_driver(val):
# if isinstance(val, ogr.Driver):
# return val
# val = str(val)
# if val not in _VECTOR_DRIVERS:
# raise ValueError('Unknown vector driver')
# return ogr.GetDriverByName(val)
# _CV2_INTERPOLATIONS = [
# (cv2.INTER_NEAREST, 'nearest'),
# (cv2.INTER_LINEAR, 'linear'),
# (cv2.INTER_AREA, 'area'),
# (cv2.INTER_CUBIC, 'cubic'),
# (cv2.INTER_LANCZOS4, 'lanczos4'),
# ]
# def _sanitize_raster_interpolation(val):
# for v, s in _CV2_INTERPOLATIONS:
# if val == v or val == s:
# return v
# raise ValueError('Unknown cv2 interpolation')
_INDEX_DTYPES = list(conv.DTYPE_OF_NAME.keys())
def _sanitize_index_dtype(val):
val = conv.dtype_of_any_downcast(val)
if val not in _INDEX_DTYPES:
raise ValueError('%s cannot be used as an index dtype' % val)
return val
def _sanitize_significant(val):
val = float(val)
if val <= 0:
raise ValueError('Significant should be greater than 0')
return val
# Set up **************************************************************************************** **
def _set_up_osgeo_use_exception(new, _):
if new:
gdal.UseExceptions()
osr.UseExceptions()
ogr.UseExceptions()
else:
gdal.DontUseExceptions()
osr.DontUseExceptions()
ogr.DontUseExceptions()
# def _set_up_check_with_invert_proj(new, _):
# if new:
# gdal.SetConfigOption('CHECK_WITH_INVERT_PROJ', 'ON')
# else:
# gdal.SetConfigOption('CHECK_WITH_INVERT_PROJ', 'OFF')
# def _set_up_buzz_trusted(new, _):
# conf = gdal.GetConfigOption('GDAL_VRT_PYTHON_TRUSTED_MODULES') or ''
# conf = conf.split(',')
# conf = [elt for elt in conf if elt not in {'buzzard._raster_recipe', ''}]
# if new:
# conf.append('buzzard._raster_recipe')
# gdal.SetConfigOption(
# 'GDAL_VRT_PYTHON_TRUSTED_MODULES',
# ','.join(conf)
# )
# else:
# pass
# Do not unset because it is not safe in multithreaded environment
# gdal.SetConfigOption(
# 'GDAL_VRT_PYTHON_TRUSTED_MODULES',
# ','.join(conf) if conf else None
# )
# Options declaration *************************************************************************** **
_EnvOption = namedtuple('_Option', 'sanitize, set_up, bottom_value')
_OPTIONS = {
'significant': _EnvOption(_sanitize_significant, None, 8.0),
'default_index_dtype': _EnvOption(_sanitize_index_dtype, None, 'int32'),
'warnings': _EnvOption(bool, None, True),
'allow_complex_footprint': _EnvOption(bool, None, False),
'_osgeo_use_exceptions': _EnvOption(bool, _set_up_osgeo_use_exception, gdal.GetUseExceptions()),
# '_gdal_trust_buzzard': _EnvOption(bool, _set_up_buzz_trusted, False),
# 'check_with_invert_proj': _EnvOption(
# bool, _set_up_check_with_invert_proj,
# gdal.GetConfigOption('CHECK_WITH_INVERT_PROJ') == 'ON'
# ),
# 'default_raster_driver': _EnvOption(_sanitize_raster_driver, None, 'GTiff'),
# 'default_vector_driver': _EnvOption(_sanitize_vector_driver, None, 'ESRI Shapefile'),
# 'raster_interpolation': _EnvOption(_sanitize_raster_interpolation, None, 'area'),
}
# Storage *************************************************************************************** **
class _GlobalMapStack(Singleton):
"""ChainMap updated to behave like a singleton stack"""
_main_storage = None
def __init__(self, bottom=None):
if bottom is not None:
self._mapping = ChainMap(bottom)
self.__class__._main_storage = self
else:
# Copy the main thread's mapping so this thread sees a stable snapshot,
# immune to later updates made on the main side while it is running.
self._mapping = self._main_storage._mapping.copy()
def push(self, mapping):
self._mapping = self._mapping.new_child(mapping)
def remove_top(self):
assert len(self._mapping.parents) > 1
self._mapping = self._mapping.parents
def __getitem__(self, k):
return self._mapping[k]
class _Storage(threading.local):
"""Thread local storage for the GlobalMapStack instance"""
def __init__(self):
if threading.current_thread() is threading.main_thread():
self._mapstack = _GlobalMapStack({
k: v.sanitize(v.bottom_value) for (k, v) in _OPTIONS.items()
})
else:
self._mapstack = _GlobalMapStack()
threading.local.__init__(self)
_LOCAL = _Storage()
# Env update ************************************************************************************ **
class Env(object):
"""Context manager to update buzzard's states
Parameters
----------
significant: float
Number of significant digits for floating point comparisons
Initialized to `8.0`
see: https://github.com/airware/buzzard/wiki/Precision-system
see: https://github.com/airware/buzzard/wiki/Floating-Point-Considerations
default_index_dtype: convertible to np.dtype
Default numpy return dtype for array indices.
Initialized to `np.int32` (signed to allow negative indices by default)
allow_complex_footprint: bool
Whether to allow non north-up / west-left Footprints
Initialized to `False`
warnings: bool
Initialized to `True`
Example
-------
>>> import buzzard as buzz
>>> with buzz.Env(default_index_dtype='uint64'):
...     ds = buzz.DataSource()
...     dsm = ds.aopen_raster('dsm', 'path/to/dsm.tif')
...     x, y = dsm.meshgrid_raster
...     print(x.dtype)
uint64
"""
def __init__(self, **kwargs):
self._mapping = {}
for k, v in kwargs.items():
if k not in _OPTIONS: # pragma: no cover
raise ValueError('Unknown env key: %s' % k)
v = _OPTIONS[k].sanitize(v)
self._mapping[k] = v
def __enter__(self):
for k, newv in self._mapping.items():
if _OPTIONS[k].set_up is not None:
oldv = _LOCAL._mapstack[k]
_OPTIONS[k].set_up(newv, oldv)
_LOCAL._mapstack.push(self._mapping)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
_LOCAL._mapstack.remove_top()
for k, oldv in self._mapping.items():
if _OPTIONS[k].set_up is not None:
newv = _LOCAL._mapstack[k]
_OPTIONS[k].set_up(newv, oldv)
# Value retrieval ******************************************************************************* **
class _ThreadMapStackGetter(object):
"""Getter for env attribute"""
def __init__(self, key):
self.key = key
def __call__(self, current_env_self):
return _LOCAL._mapstack[self.key]
class _CurrentEnv(Singleton):
"""Namespace to access current values of buzzard's environment variable (see buzz.Env)
Example
-------
>>> buzz.env.significant
8.0
"""
pass
for k in _OPTIONS.keys():
setattr(_CurrentEnv, k, property(_ThreadMapStackGetter(k)))
env = _CurrentEnv() # pylint: disable=invalid-name
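# Usage sketch (assuming the package exposes this module's `env` and `Env`
# as buzz.env / buzz.Env, as the module docstring suggests):
#
#     import buzzard as buzz
#     print(buzz.env.significant)        # 8.0 by default
#     with buzz.Env(significant=10):
#         print(buzz.env.significant)    # 10.0 inside the context
#     print(buzz.env.significant)        # back to 8.0 on exit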
|
#!/usr/bin/python3
# tic-tac-toe.py
from random import choice  # only choice is used
play = True
def printGrid():
grid = "-------------" + "\n" + \
"| " + a + " | " + b + " | " + c + " | " + "\n" + \
"| " + d + " | " + e + " | " + f + " | " + "\n" + \
"| " + g + " | " + h + " | " + i + " | " + "\n" + \
"-------------"
return grid
def init():
global play, not_used, player_symbol, computer_symbol, a, b, c, d, e, f, g, h, i
a = "A"
b = "B"
c = "C"
d = "D"
e = "E"
f = "F"
g = "G"
h = "H"
i = "I"
play = True
not_used = ["a", "b", "c", "d", "e", "f", "g", "h", "i"]
print("* Tic-Tac-Toe *")
print("What do you want to be?")
player_symbol = ""
while player_symbol not in ["X", "O"]:
player_symbol = input("X or O? ")
if player_symbol == "X":
computer_symbol = "O"
else:
computer_symbol = "X"
def tick(field):
global player_symbol, not_used, a, b, c, d, e, f, g, h, i
field = field.lower()
if field in not_used:
not_used.remove(str(field))
if field == "a":
a = player_symbol
elif field == "b":
b = player_symbol
elif field == "c":
c = player_symbol
elif field == "d":
d = player_symbol
elif field == "e":
e = player_symbol
elif field == "f":
f = player_symbol
elif field == "g":
g = player_symbol
elif field == "h":
h = player_symbol
elif field == "i":
i = player_symbol
else:
print("This field is already ticked")
def computer_choose():
global not_used, computer_symbol, play, a, b, c, d, e, f, g, h, i
if not not_used:  # board is full with no winner: end the round as a draw
print(printGrid())
print("It's a draw.")
play = False
return
choose = choice(not_used)
print("Computer: " + choose.upper())
not_used.remove(str(choose))
if choose == "a":
a = computer_symbol
elif choose == "b":
b = computer_symbol
elif choose == "c":
c = computer_symbol
elif choose == "d":
d = computer_symbol
elif choose == "e":
e = computer_symbol
elif choose == "f":
f = computer_symbol
elif choose == "g":
g = computer_symbol
elif choose == "h":
h = computer_symbol
elif choose == "i":
i = computer_symbol
def check():
global a, b, c, d, e, f, g, h, i
for element in ["X", "Y"]:
if a == element and b == element and c == element:
result(element)
if d == element and e == element and f == element:
result(element)
if g == element and h == element and i == element:
result(element)
if a == element and d == element and g == element:
result(element)
if b == element and e == element and h == element:
result(element)
if c == element and f == element and i == element:
result(element)
if a == element and e == element and i == element:
result(element)
if c == element and e == element and g == element:
result(element)
def result(who):
global player_symbol, computer_symbol, play
print(printGrid())
if who == player_symbol:
print("Congratulations. You're the winner.")
else:
print("Dammage. The computer won.")
print("One more time?")
again = "XyZ"
while again not in ["Y", "N", "y", "n"]:
again = input("Y/N: ")
if again == "Y" or again == "y":
play = True
else:
play = False
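# NOTE: answering "Y" above only keeps the main loop running; the grid is not
# reset between rounds, so a full restart would need another call to init().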
if __name__ == "__main__":
init()
while play:
print(printGrid())
check()
tick(input("Letter: "))
check()
computer_choose()
|
<gh_stars>0
# coding: utf-8
from __future__ import unicode_literals
try:
import urllib2
except ImportError:
import urllib3
import requests
from taggit.managers import TaggableManager
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.encoding import python_2_unicode_compatible
except ImportError:
from six import python_2_unicode_compatible
from django.urls import reverse
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
try:
from wagtail.admin.utils import get_object_usage
except ImportError:
from wagtail.admin.models import get_object_usage
from wagtail.images.models import Image as WagtailImage
from wagtail.images import get_image_model
from wagtail.search import index
from wagtail.search.queryset import SearchableQuerySetMixin
from embed_video.fields import EmbedVideoField
from embed_video.backends import detect_backend
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
try:
image_model_name = settings.WAGTAILIMAGES_IMAGE_MODEL
except AttributeError:
image_model_name = 'wagtailimages.Image'
def checkUrl(url):
r = requests.head(url)
return int(r.status_code) < 400
YOUTUBE_RESOLUTIONS = [
'maxresdefault.jpg',
'sddefault.jpg',
'mqdefault.jpg'
]
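# Preference order for YouTube thumbnails: embed_video's backend reports
# hqdefault.jpg by default, and create_thumbnail() below probes these
# alternatives in descending resolution, keeping the first URL that responds
# with a status code below 400.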
def create_thumbnail(model_instance):
# Create a Wagtail Image from the video provider's thumbnail.
backend = detect_backend(model_instance.url)
thumbnail_url = backend.get_thumbnail_url()
if backend.__class__.__name__ == 'YoutubeBackend':
if thumbnail_url.endswith('hqdefault.jpg'):
for resolution in YOUTUBE_RESOLUTIONS:
temp_thumbnail_url = thumbnail_url.replace(
'hqdefault.jpg', resolution)
if checkUrl(temp_thumbnail_url):
thumbnail_url = temp_thumbnail_url
break
img_temp = NamedTemporaryFile()
try:
img_temp.write(urllib2.urlopen(thumbnail_url).read())
except Exception:  # urllib2 absent on Python 3, or the download failed
http = urllib3.PoolManager()
img_temp.write(http.request('GET', thumbnail_url).data)
img_temp.flush()
image = get_image_model()(title=model_instance.title)
image.file.save(model_instance.title + '.jpg', File(img_temp))
model_instance.thumbnail = image
model_instance.thumbnail.tags.add('video-thumbnail')
model_instance.save()
class EmbedVideoQuerySet(SearchableQuerySetMixin, models.QuerySet):
pass
@python_2_unicode_compatible
class AbstractEmbedVideo(index.Indexed, models.Model):
title = models.CharField(max_length=255, verbose_name=_('Title'))
url = EmbedVideoField()
thumbnail = models.ForeignKey(
image_model_name,
verbose_name="Thumbnail",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
created_at = models.DateTimeField(auto_now_add=True)
uploaded_by_user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, blank=True, editable=False, on_delete=models.SET_NULL)
tags = TaggableManager(help_text=None, blank=True, verbose_name=_('Tags'))
objects = EmbedVideoQuerySet.as_manager()
def get_usage(self):
return get_object_usage(self)
@property
def usage_url(self):
return reverse('wagtail_embed_videos_video_usage',
args=(self.id,))
search_fields = [
index.SearchField('title', partial_match=True, boost=10),
index.RelatedFields('tags', [
index.SearchField('name', partial_match=True, boost=10),
]),
index.FilterField('uploaded_by_user'),
]
def __str__(self):
return self.title
def __init__(self, *args, **kwargs):
super(AbstractEmbedVideo, self).__init__(*args, **kwargs)
if args:
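# Fragile assumption: positional construction where args[3] is the thumbnail
# column; when it is None, regenerate the thumbnail eagerly.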
if args[3] is None:
create_thumbnail(self)
def save(self, *args, **kwargs):
super(AbstractEmbedVideo, self).save(*args, **kwargs)
if not self.thumbnail:
create_thumbnail(self)
@property
def default_alt_text(self):
return self.title
def is_editable_by_user(self, user):
if user.has_perm('wagtail_embed_videos.change_embedvideo'):
# user has global permission to change videos
return True
elif user.has_perm('wagtail_embed_videos.add_embedvideo') and\
self.uploaded_by_user == user:
# user has video add permission, which also implicitly provides
# permission to edit their own videos
return True
else:
return False
class Meta:
abstract = True
class EmbedVideo(AbstractEmbedVideo):
admin_form_fields = (
'title',
'url',
'thumbnail',
'tags',
)
def get_embed_video_model():
try:
app_label, model_name =\
settings.WAGTAILEMBEDVIDEO_VIDEO_MODEL.split('.')
except AttributeError:
return EmbedVideo
except ValueError:
raise ImproperlyConfigured(
"WAGTAILEMBEDVIDEO_VIDEO_MODEL must be of the form \
'app_label.model_name'")
embed_video_model = get_model(app_label, model_name)
if embed_video_model is None:
raise ImproperlyConfigured(
"WAGTAILEMBEDVIDEO_VIDEO_MODEL refers to model '%s' that has not \
been installed" % settings.WAGTAILEMBEDVIDEO_VIDE_MODEL)
return embed_video_model
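# Usage sketch (assumption: a project may override the model via
# settings.WAGTAILEMBEDVIDEO_VIDEO_MODEL = 'myapp.CustomEmbedVideo'):
#
#     VideoModel = get_embed_video_model()  # falls back to EmbedVideo
#     video = VideoModel(title='Demo', url='https://youtu.be/<video-id>')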
|
<filename>sunpy/net/dataretriever/sources/tests/test_goes_suvi.py
import tempfile
import pytest
from hypothesis import given
import astropy.units as u
import sunpy.net.dataretriever.sources.goes as goes
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.net.dataretriever.client import QueryResponse
from sunpy.net.tests.strategies import time_attr
from sunpy.time import TimeRange, parse_time
@pytest.fixture
def suvi_client():
return goes.SUVIClient()
@given(time_attr())
def test_can_handle_query(time):
# Don't use the fixture, as hypothesis complains
suvi_client = goes.SUVIClient()
ans1 = suvi_client._can_handle_query(time, a.Instrument.suvi)
assert ans1 is True
ans2 = suvi_client._can_handle_query(time, a.Instrument.suvi,
a.Wavelength(131 * u.Angstrom))
assert ans2 is True
ans3 = suvi_client._can_handle_query(time, a.Instrument.suvi,
a.Wavelength(131 * u.Angstrom),
a.Level.two)
assert ans3 is True
ans4 = suvi_client._can_handle_query(time)
assert ans4 is False
ans5 = suvi_client._can_handle_query(time, a.Instrument.aia)
assert ans5 is False
ans6 = suvi_client._can_handle_query(time, a.Instrument.suvi,
a.goes.SatelliteNumber(16))
assert ans6 is True
def mock_query_object(suvi_client):
"""
Create a QueryResponse object prefilled with known values for comparison.
"""
# Creating a Query Response Object
start = '2019/05/25 00:50'
end = '2019/05/25 00:52'
wave = 94 * u.Angstrom
obj = {
'Time': TimeRange(parse_time(start), parse_time(end)),
'Start Time': parse_time(start),
'End Time': parse_time(end),
'Instrument': 'SUVI',
'Physobs': 'flux',
'Source': 'GOES',
'Provider': 'NOAA',
'Level': '2',
'Wavelength': wave,
'url': ('https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites'
'/goes/goes16/l2/data/suvi-l2-ci094/2019/05/25/'
'dr_suvi-l2-ci094_g16_s20190525T005200Z_e20190525T005600Z_v1-0-0.fits')
}
results = QueryResponse([obj], client=suvi_client)
return results
def test_attr_reg():
# These attributes are registered by the client; assert them, don't assign.
assert a.Instrument.suvi == a.Instrument("SUVI")
assert a.goes.SatelliteNumber.A16 == a.goes.SatelliteNumber("16")
@pytest.mark.remote_data
def test_fetch_working(suvi_client):
"""
Tests if the online server for goes_suvi is working.
This also checks if the mock is working well.
"""
start = '2019/05/25 00:50'
end = '2019/05/25 00:52'
wave = 94 * u.Angstrom
goes_sat = a.goes.SatelliteNumber.sixteen
tr = a.Time(start, end)
qr1 = suvi_client.search(tr, a.Instrument.suvi, a.Wavelength(wave), goes_sat, a.Level(2))
# Mock QueryResponse object
mock_qr = mock_query_object(suvi_client)
# Compare if two objects have the same attribute
mock_qr = mock_qr.blocks[0]
qr = qr1.blocks[0]
assert mock_qr['Source'] == qr['Source']
assert mock_qr['Provider'] == qr['Provider']
assert mock_qr['Physobs'] == qr['Physobs']
assert mock_qr['Instrument'] == qr['Instrument']
assert mock_qr['url'] == qr['url']
assert qr1.time_range() == TimeRange("2019-05-25T00:52:00.000",
"2019-05-25T00:56:00.000")
with tempfile.TemporaryDirectory() as tmpdirname:
download_list = suvi_client.fetch(qr1, path=tmpdirname)
assert len(download_list) == len(qr1)
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, wave, expected_num_files",
[('2019/05/25 00:50', '2019/05/25 00:52', 94, 1),
('2019/05/25 00:50', '2019/05/25 00:52', 131, 1),
('2019/05/25 00:50', '2019/05/25 00:52', 171, 1),
('2019/05/25 00:50', '2019/05/25 00:52', 195, 1),
('2019/05/25 00:50', '2019/05/25 00:52', 284, 1),
('2019/05/25 00:50', '2019/05/25 00:52', 304, 1)]
)
def test_get_url_for_time_range_level2(suvi_client, start, end, wave, expected_num_files):
goes_sat = a.goes.SatelliteNumber.sixteen
qresponse = suvi_client.search(a.Time(start, end), a.Wavelength(wave * u.Angstrom), goes_sat, a.Level(2))
urls = [i['url'] for i in qresponse]
assert isinstance(urls, list)
assert len(urls) == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, expected_num_files",
[('2019/05/25 00:50', '2019/05/25 00:52', 6)]
)
def test_get_url_for_time_range_level2_allwave(suvi_client, start, end, expected_num_files):
"""check that we get all wavelengths if no wavelength is given"""
goes_sat = a.goes.SatelliteNumber.sixteen
qresponse = suvi_client.search(a.Time(start, end), goes_sat, a.Level(2))
urls = [i['url'] for i in qresponse]
assert isinstance(urls, list)
assert len(urls) == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end ,wave, expected_num_files",
[('2019/05/25 00:50', '2019/05/25 00:54', 94, 6),
('2019/05/25 00:50', '2019/05/25 00:54', 131, 3),
('2019/05/25 00:50', '2019/05/25 00:54', 171, 2),
('2019/05/25 00:50', '2019/05/25 00:54', 195, 7),
('2019/05/25 00:50', '2019/05/25 00:54', 284, 2),
('2019/05/25 00:50', '2019/05/25 00:54', 304, 4)]
)
def test_get_url_for_time_range_level1b(suvi_client, start, end, wave, expected_num_files):
"""check that we get all wavelengths if no wavelength is given"""
goes_sat = a.goes.SatelliteNumber.sixteen
qresponse = suvi_client.search(a.Time(start, end), a.Wavelength(wave * u.Angstrom), goes_sat, a.Level('1b'))
urls = [i['url'] for i in qresponse]
assert isinstance(urls, list)
assert len(urls) == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end ,wave, expected_num_files",
[('2019/05/25 00:50', '2019/05/25 00:54', 94, 6),
('2019/05/25 00:50', '2019/05/25 00:54', 131, 3),
('2019/05/25 00:50', '2019/05/25 00:54', 171, 2),
('2019/05/25 00:50', '2019/05/25 00:54', 195, 7),
('2019/05/25 00:50', '2019/05/25 00:54', 284, 2),
('2019/05/25 00:50', '2019/05/25 00:54', 304, 4)]
)
def test_fido_onewave_level1b(start, end, wave, expected_num_files):
goes_sat = a.goes.SatelliteNumber.sixteen
result = Fido.search(a.Time(start, end), a.Instrument.suvi, goes_sat,
a.Wavelength(wave * u.Angstrom), a.Level('1b'))
assert result.file_num == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, wave1, wave2, expected_num_files",
[('2019/05/25 00:50', '2019/05/25 00:54', 1, 100, 6),
('2019/05/25 00:50', '2019/05/25 00:54', 1, 150, 9),
('2019/05/25 00:50', '2019/05/25 00:54', 1, 180, 11),
('2019/05/25 00:50', '2019/05/25 00:54', 1, 200, 18),
('2019/05/25 00:50', '2019/05/25 00:54', 1, 300, 20),
('2019/05/25 00:50', '2019/05/25 00:54', 1, 310, 24)]
)
def test_fido_waverange_level1b(start, end, wave1, wave2, expected_num_files):
"""check that we get all wavelengths if no wavelength is given"""
goes_sat = a.goes.SatelliteNumber.sixteen
result = Fido.search(a.Time(start, end), a.Instrument.suvi, goes_sat,
a.Wavelength(wave1 * u.Angstrom, wave2 * u.Angstrom),
a.Level('1b'))
assert result.file_num == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, expected_num_files",
[('2019/05/25 00:50', '2019/05/25 00:52', 6)]
)
def test_query(suvi_client, start, end, expected_num_files):
goes_sat = a.goes.SatelliteNumber.sixteen
qr1 = suvi_client.search(a.Time(start, end), a.Instrument.suvi, goes_sat, a.Level.two)
assert isinstance(qr1, QueryResponse)
assert len(qr1) == expected_num_files
assert qr1.time_range().start == parse_time('2019/05/25 00:52')
assert qr1.time_range().end == parse_time('2019/05/25 00:56')
def test_show(suvi_client):
mock_qr = mock_query_object(suvi_client)
qrshow0 = mock_qr.show()
qrshow1 = mock_qr.show('Start Time', 'Instrument')
allcols = ['Start Time', 'End Time', 'Instrument', 'Physobs', 'Source',
'Provider', 'Level', 'Wavelength']
assert qrshow0.colnames == allcols
assert qrshow1.colnames == ['Start Time', 'Instrument']
assert qrshow0['Instrument'][0] == 'SUVI'
|
<filename>Code/ml_pipeline/model/Classification.py
import os
import pandas as pd
import numpy as np
from numpy import mean
from numpy import std
import random
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score
import pickle
import MLJobConfig as mlconfig
import MLPipeline
import AppConfig as app_config
import ml_pipeline.utils.Helper as helper
import Evaluation
DATA_FLD_NAME = app_config.CLF_FLD_NAME
DATA_FILE_NAME_PRFX = app_config.CLF_FLD_PREFIX
class Classification:
def __init__(self, ml_pipeline: MLPipeline):
self.ml_pipeline = ml_pipeline
self.jlogger = self.ml_pipeline.jlogger
self.jlogger.info(
"Inside Classification initialization with status {}".format(self.ml_pipeline.status))
step5 = os.path.join(self.ml_pipeline.job_data['job_data_path'], DATA_FLD_NAME)
os.makedirs(step5, exist_ok=True)
if self.ml_pipeline.status == app_config.STEP4_STATUS: # resuming at step 5
self.apply_on_all_fg()
def apply_on_all_fg(self):
if self.ml_pipeline.config.fg_padelpy_flg:
self.jlogger.info("Started classification of preprocessed PaDEL features")
job_fld_path = self.ml_pipeline.job_data['job_fld_path']
pp_padel_fld_path = os.path.join(
*[job_fld_path, app_config.TEMP_TTS_FLD_NAME, app_config.FG_PADEL_FLD_NAME])
padel_xtrain_fp = os.path.join(pp_padel_fld_path, app_config.TEMP_XTRAIN_FNAME)
padel_ytrain_fp = os.path.join(pp_padel_fld_path, app_config.TEMP_YTRAIN_FNAME)
padel_xtest_fp = os.path.join(pp_padel_fld_path, app_config.TEMP_XTEST_FNAME)
padel_ytest_fp = os.path.join(pp_padel_fld_path, app_config.TEMP_YTEST_FNAME)
self.ml_pipeline.x_train = pd.read_csv(padel_xtrain_fp)
self.ml_pipeline.y_train = pd.read_csv(padel_ytrain_fp)
self.ml_pipeline.y_train = self.ml_pipeline.y_train.values.ravel()
self.ml_pipeline.x_test = pd.read_csv(padel_xtest_fp)
self.ml_pipeline.y_test = pd.read_csv(padel_ytest_fp)
self.ml_pipeline.y_test = self.ml_pipeline.y_test.values.ravel()
# folder path to save output of preprocessed padel features classification data
clf_padel_fld_path = os.path.join(*[self.ml_pipeline.job_data['job_data_path'], DATA_FLD_NAME,
app_config.FG_PADEL_FLD_NAME])
self.ml_pipeline.fg_clf_fld_path = clf_padel_fld_path
os.makedirs(self.ml_pipeline.fg_clf_fld_path, exist_ok=True)
self.apply_classification_models()
if self.ml_pipeline.config.fg_mordered_flg:
self.jlogger.info("Started classification of preprocessed mordred features")
job_fld_path = self.ml_pipeline.job_data['job_fld_path']
pp_mordred_fld_path = os.path.join(
*[job_fld_path, app_config.TEMP_TTS_FLD_NAME, app_config.FG_MORDRED_FLD_NAME])
mordred_xtrain_fp = os.path.join(pp_mordred_fld_path, app_config.TEMP_XTRAIN_FNAME)
mordred_ytrain_fp = os.path.join(pp_mordred_fld_path, app_config.TEMP_YTRAIN_FNAME)
mordred_xtest_fp = os.path.join(pp_mordred_fld_path, app_config.TEMP_XTEST_FNAME)
mordred_ytest_fp = os.path.join(pp_mordred_fld_path, app_config.TEMP_YTEST_FNAME)
self.ml_pipeline.x_train = pd.read_csv(mordred_xtrain_fp)
self.ml_pipeline.y_train = pd.read_csv(mordred_ytrain_fp)
self.ml_pipeline.y_train = self.ml_pipeline.y_train.values.ravel()
self.ml_pipeline.x_test = pd.read_csv(mordred_xtest_fp)
self.ml_pipeline.y_test = pd.read_csv(mordred_ytest_fp)
self.ml_pipeline.y_test = self.ml_pipeline.y_test.values.ravel()
# folder path to save output of preprocessed mordred features classification data
clf_mordred_fld_path = os.path.join(*[self.ml_pipeline.job_data['job_data_path'], DATA_FLD_NAME,
app_config.FG_MORDRED_FLD_NAME])
self.ml_pipeline.fg_clf_fld_path = clf_mordred_fld_path
os.makedirs(self.ml_pipeline.fg_clf_fld_path, exist_ok=True)
self.apply_classification_models()
updated_status = app_config.STEP5_STATUS
job_oth_config_fp = self.ml_pipeline.job_data['job_oth_config_path']
helper.update_job_status(job_oth_config_fp, updated_status)
self.ml_pipeline.status = updated_status
self.jlogger.info("Classification completed successfully")
def apply_classification_models(self):
self.jlogger.info("Inside Classification, Train Shape: {}".format(self.ml_pipeline.x_train.shape))
self.jlogger.info("Inside Classification, Test Shape: {}".format(self.ml_pipeline.x_test.shape))
self.apply_gbm()
self.apply_svm()
self.apply_rf()
self.apply_lr()
self.apply_gnb()
self.apply_et()
self.apply_mlp()
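# The apply_* methods below share one nested cross-validation pattern: an
# inner 5-fold StratifiedKFold drives the hyperparameter search, while an
# outer 10-fold StratifiedKFold feeds cross_val_score to estimate the F1 of
# the whole search procedure. The model that is finally evaluated is the
# inner search's best_estimator_, refit on the full training set.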
def apply_gbm(self):
if self.ml_pipeline.config.clf_gbm_flg:
if self.ml_pipeline.config.clf_gbm_auto:
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=43)
cv_outer = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
# clf = GradientBoostingClassifier(n_estimators=50, random_state=None, max_depth=2)
grid_search_model = self.gbm_grid_search()
grid_search_model.cv = cv_inner
grid_search_model.fit(x_train, y_train)
chosen_model = grid_search_model.best_estimator_
scores = cross_val_score(grid_search_model, x_train, y_train, scoring='f1', cv=cv_outer, n_jobs=1)
self.jlogger.info(str(chosen_model))
self.jlogger.info("MEAN F1 scores after nested CV {} ".format(mean(scores)))
self.jlogger.info("Standard Deviation for F1 scores after nested CV {} ".format(std(scores)))
evalclf = Evaluation.Evaluation(self.ml_pipeline)
evalclf.evaluate_and_save_results(chosen_model, "gbm")
if self.ml_pipeline.config.clf_bagging_gbm:
n = self.ml_pipeline.config.clf_bag_gbm_n
evalclf.evaluate_bagging_model(chosen_model, n, "gbm_bagging")
def apply_et(self):
if self.ml_pipeline.config.clf_et_flg:
if self.ml_pipeline.config.clf_et_auto:
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=43)
cv_outer = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
# clf = ExtraTreesClassifier(n_estimators=200, random_state=42, max_depth=10, n_jobs=-1)
grid_search_model = self.et_grid_search()
grid_search_model.cv = cv_inner
grid_search_model.fit(x_train, y_train)
chosen_model = grid_search_model.best_estimator_
scores = cross_val_score(grid_search_model, x_train, y_train, scoring='f1', cv=cv_outer, n_jobs=1)
self.jlogger.info(str(chosen_model))
self.jlogger.info("MEAN F1 scores after nested CV {} ".format(mean(scores)))
self.jlogger.info("Standard Deviation for F1 scores after nested CV {} ".format(std(scores)))
else:
# Assumed name: clf_et_manual (the original read the GBM config here by
# copy-paste). Note chosen_model is never built on this manual path.
manual_params = self.ml_pipeline.config.clf_et_manual
evalclf = Evaluation.Evaluation(self.ml_pipeline)
evalclf.evaluate_and_save_results(chosen_model, "et")
if self.ml_pipeline.config.clf_bagging_et:
n = self.ml_pipeline.config.clf_bag_et_n
evalclf.evaluate_bagging_model(chosen_model, n, "et_bagging")
def apply_svm(self):
if self.ml_pipeline.config.clf_svm_flg:
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=43)
cv_outer = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
if self.ml_pipeline.config.clf_svm_auto:
grid_search_model = self.SVM_GridSearch()
grid_search_model.cv = cv_inner
grid_search_model.fit(x_train, y_train)
chosen_model = grid_search_model.best_estimator_
scores = cross_val_score(grid_search_model, x_train, y_train, scoring='f1', cv=cv_outer, n_jobs=1)
self.jlogger.info(str(chosen_model))
self.jlogger.info("MEAN F1 scores after nested CV {} ".format(mean(scores)))
self.jlogger.info("Standard Deviation for F1 scores after nested CV {} ".format(std(scores)))
evalclf = Evaluation.Evaluation(self.ml_pipeline)
evalclf.evaluate_and_save_results(chosen_model, "svm")
if self.ml_pipeline.config.clf_bagging_svm:
n = self.ml_pipeline.config.clf_bag_svm_n
evalclf.evaluate_bagging_model(chosen_model, n, "svm_bagging")
def apply_rf(self):
if self.ml_pipeline.config.clf_rf_flg:
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
if self.ml_pipeline.config.clf_rf_auto:
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=43)
cv_outer = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
grid_search_model = self.RF_GridSearch()
grid_search_model.cv = cv_inner
grid_search_model.fit(x_train, y_train)
chosen_model = grid_search_model.best_estimator_
scores = cross_val_score(grid_search_model, x_train, y_train, scoring='f1', cv=cv_outer, n_jobs=1)
self.jlogger.info(str(chosen_model))
self.jlogger.info("MEAN F1 scores after nested CV {} ".format(mean(scores)))
self.jlogger.info("Standard Deviation for F1 scores after nested CV {} ".format(std(scores)))
evalclf = Evaluation.Evaluation(self.ml_pipeline)
evalclf.evaluate_and_save_results(chosen_model, "rf")
if self.ml_pipeline.config.clf_bagging_rf:
n = self.ml_pipeline.config.clf_bag_rf_n
evalclf.evaluate_bagging_model(chosen_model, n, "rf_bagging")
def apply_lr(self):
if self.ml_pipeline.config.clf_lr_flg:
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
if self.ml_pipeline.config.clf_lr_auto:
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=43)
cv_outer = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
grid_search_model = self.lr_grid_search()
grid_search_model.cv = cv_inner
grid_search_model.fit(x_train, y_train)
chosen_model = grid_search_model.best_estimator_
scores = cross_val_score(grid_search_model, x_train, y_train, scoring='f1', cv=cv_outer, n_jobs=1)
self.jlogger.info(str(chosen_model))
self.jlogger.info("MEAN F1 scores after nested CV {} ".format(mean(scores)))
self.jlogger.info("Standard Deviation for F1 scores after nested CV {} ".format(std(scores)))
else:
# Assumed name: clf_lr_manual (the original read the SVM config here by
# copy-paste). Note chosen_model is never built on this manual path.
manual_params = self.ml_pipeline.config.clf_lr_manual
evalclf = Evaluation.Evaluation(self.ml_pipeline)
evalclf.evaluate_and_save_results(chosen_model, "lr")
if self.ml_pipeline.config.clf_bagging_lr:
n = self.ml_pipeline.config.clf_bag_lr_n
evalclf.evaluate_bagging_model(chosen_model, n, "lr_bagging")
def apply_gnb(self):
if self.ml_pipeline.config.clf_gnb_flg:
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
if self.ml_pipeline.config.clf_gnb_auto:
model = GaussianNB()
chosen_model = model.fit(x_train, y_train)
self.jlogger.info(str(chosen_model))
else:
# Assumed name: clf_gnb_manual (the original read the SVM config here by
# copy-paste). Note chosen_model is never built on this manual path.
manual_params = self.ml_pipeline.config.clf_gnb_manual
evalclf = Evaluation.Evaluation(self.ml_pipeline)
evalclf.evaluate_and_save_results(chosen_model, "gnb")
if self.ml_pipeline.config.clf_bagging_gnb:
n = self.ml_pipeline.config.clf_bag_gnb_n
evalclf.evaluate_bagging_model(chosen_model, n, "gnb_bagging")
def apply_mlp(self):
if self.ml_pipeline.config.clf_mlp_flg:
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
if self.ml_pipeline.config.clf_mlp_auto:
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=43)
cv_outer = StratifiedKFold(n_splits=10, shuffle=True, random_state=43)
grid_search_model = self.MLP_GridSearch()
grid_search_model.cv = cv_inner
grid_search_model.fit(x_train, y_train)
chosen_model = grid_search_model.best_estimator_
scores = cross_val_score(grid_search_model, x_train, y_train, scoring='f1', cv=cv_outer, n_jobs=1)
self.jlogger.info(str(chosen_model))
self.jlogger.info("MEAN F1 scores after nested CV {} ".format(mean(scores)))
self.jlogger.info("Standard Deviation for F1 scores after nested CV {} ".format(std(scores)))
else:
# Assumed name: clf_mlp_manual (the original read the SVM config here by
# copy-paste). Note chosen_model is never built on this manual path.
manual_params = self.ml_pipeline.config.clf_mlp_manual
evalclf = Evaluation.Evaluation(self.ml_pipeline)
evalclf.evaluate_and_save_results(chosen_model, "mlp")
if self.ml_pipeline.config.clf_bagging_mlp:
n = self.ml_pipeline.config.clf_bag_mlp_n
evalclf.evaluate_bagging_model(chosen_model, n, "mlp_bagging")
def SVM_GridSearch(self):
random.seed(50)
if self.ml_pipeline.config.clf_hyp_man_c_svm:
Cs = self.ml_pipeline.config.svm_C
if self.ml_pipeline.config.clf_hyp_man_gamma_svm:
gammas = self.ml_pipeline.config.svm_gamma
if self.ml_pipeline.config.clf_hyp_man_kernel_svm:
kernel = self.ml_pipeline.config.svm_kernels
else:
Cs = [0.0001, 0.001, 0.01, 0.1, 1, 10]
gammas = [0.000001, 0.0001, 0.001, 0.01, 0.1, 1, 10]
kernel = ['rbf', 'poly', 'linear']
param_grid = {'C': Cs, 'gamma': gammas, 'kernel': kernel}
svm_clf = svm.SVC(probability=True)
clf = GridSearchCV(svm_clf, param_grid, cv=5, n_jobs=-1, scoring='f1', verbose=3)
return clf
def MLP_GridSearch(self):
if self.ml_pipeline.config.clf_hyp_man_layers_mlp:
hidden_layers_mlp = self.ml_pipeline.config.mlp_hidden_layers_list
else:
hidden_layers_mlp = [(5, 5, 5), (20, 30, 50), (50, 50, 50), (50, 100, 50), (100,), (100, 100, 100),
(5, 2)]
if self.ml_pipeline.config.clf_hyp_man_activation_mlp:
activation_mlp = self.ml_pipeline.config.mlp_activation
else:
activation_mlp = ['tanh', 'relu']
if self.ml_pipeline.config.clf_hyp_man_solver_mlp:
solvers = self.ml_pipeline.config.mlp_solver
else:
solvers = ['sgd', 'adam']
if self.ml_pipeline.config.clf_hyp_man_alpha_mlp:
alphas = self.ml_pipeline.config.clf_hyp_alphas
else:
alphas = [0.0001, 0.05, 0.001, 0.01]
if self.ml_pipeline.config.clf_hyp_man_lr_rate_mlp:
learning_rates = self.ml_pipeline.config.mlp_lr
else:
learning_rates = ['constant', 'adaptive']
parameter_space = {
'hidden_layer_sizes': hidden_layers_mlp, 'activation': activation_mlp,
'solver': solvers,
'alpha': alphas,
'learning_rate': learning_rates}
mlp = MLPClassifier(max_iter=1000, random_state=50)
clf = GridSearchCV(mlp, parameter_space, cv=5, scoring='f1', verbose=2)
return clf
def RF_GridSearch(self):
if self.ml_pipeline.config.clf_hyp_man_estimate_oth_rf:  # the estimators flag guards n_estimators (flags were swapped)
estimators = self.ml_pipeline.config.clf_hyp_man_estimate_oth_rf
n_estimators = [int(x) for x in np.linspace(start=2, stop=estimators, num=2)]
else:
estimators = 100
n_estimators = [int(x) for x in np.linspace(start=2, stop=estimators, num=10)]
if self.ml_pipeline.config.clf_hyp_man_depth_oth_rf:  # the depth flag guards max_depth (flags were swapped)
depth = self.ml_pipeline.config.clf_hyp_man_depth_oth_rf
max_depth = [int(x) for x in np.arange(start=1, stop=depth, step=1)]
else:
depth = 110
max_depth = [int(x) for x in np.linspace(10, depth, num=11)]
if self.ml_pipeline.config.clf_hyp_man_sample_split_rf:
sample_split_size = self.ml_pipeline.config.rf_sample_spit
else:
sample_split_size = [2, 5, 10]
if self.ml_pipeline.config.clf_hyp_man_sample_leaf_rf:
min_sample_leaf = self.ml_pipeline.config.rf_leaf
else:
min_sample_leaf = [1, 2, 4]
if self.ml_pipeline.config.clf_hyp_man_bootstrap_rf:
boot_strap = self.ml_pipeline.config.rf_bootstrap
else:
boot_strap = [True, False]
if self.ml_pipeline.config.clf_hyp_man_features_rf:
max_features = self.ml_pipeline.config.max_feat_rf
else:
max_features = ['auto', 'sqrt']
max_depth.append(None)
min_samples_split = sample_split_size
min_samples_leaf = min_sample_leaf
bootstrap = boot_strap
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
rf = RandomForestClassifier()
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=100, cv=5, verbose=2,
random_state=50, n_jobs=-1, scoring="f1")
return rf_random
def gbm_grid_search(self):
if self.ml_pipeline.config.clf_hyp_man_estimate_oth_gbm:
estimators = self.ml_pipeline.config.clf_hyp_man_estimate_oth_gbm
self.jlogger.info("GBM n_estimators upper bound: {}".format(estimators))
n_estimators = [int(x) for x in np.arange(start=2, stop=estimators, step=1)]
else:
estimators = 510
n_estimators = [int(x) for x in np.arange(start=10, stop=estimators, step=10)]
if self.ml_pipeline.config.clf_hyp_man_depth_oth_gbm:
depth_param = self.ml_pipeline.config.clf_hyp_man_depth_oth_gbm
max_depth = [int(x) for x in np.arange(start=1, stop=depth_param, step=1)]
else:
depth_param = 20
max_depth = [int(x) for x in np.arange(start=5, stop=depth_param, step=5)]
param_grid = {'n_estimators': n_estimators, 'max_depth': max_depth}
gbm = GradientBoostingClassifier(random_state=50)
clf = GridSearchCV(gbm, param_grid, cv=5, scoring='f1', verbose=3, n_jobs=-1)
return clf
def et_grid_search(self):
if self.ml_pipeline.config.clf_hyp_man_estimator_et:
estimators = self.ml_pipeline.config.clf_hyp_man_estimate_oth_et
n_estimators = [int(x) for x in np.arange(start=2, stop=estimators, step=1)]
else:
estimators = 510
n_estimators = [int(x) for x in np.arange(start=10, stop=estimators, step=10)]
if self.ml_pipeline.config.clf_hyp_man_depth_et:
depth = self.ml_pipeline.config.clf_hyp_man_depth_oth_et
max_depth = [int(x) for x in np.arange(start=1, stop=depth, step=1)]
else:
depth = 20
max_depth = [int(x) for x in np.arange(start=2, stop=depth, step=2)]
param_grid = {'n_estimators': n_estimators, 'max_depth': max_depth}
et = ExtraTreesClassifier(random_state=42, n_jobs=-1)
clf = GridSearchCV(et, param_grid, cv=5, scoring='f1', verbose=3)
return clf
def lr_grid_search(self):
if self.ml_pipeline.config.clf_hyp_man_lr:
lr_cs = self.ml_pipeline.config.clf_lr_list
else:
lr_cs = [1, 0.1, 0.001, 0.0001, 0.00001]
penalty = ["l2"]
grid = {"C": lr_cs, "penalty": penalty} #l2 ridge
logistic_reg = LogisticRegression()
logistic_reg_cv = GridSearchCV(logistic_reg, grid)
return logistic_reg_cv
|
<filename>cogs/botbrain/help.py<gh_stars>1-10
import logging
from typing import List, Union
import discord
from discord.ext import commands
from naotimes.bot import naoTimesBot, naoTimesContext
from naotimes.helpgenerator import HelpField, HelpOption
POSISI_TEXT = ", ".join(["TL", "TLC", "ENC", "ED", "TM", "TS", "QC"])
ANIMANGAVN_HELP = r"""```
<judul>: Judul anime ataupun manga yang ada di Anilist.co atau VN yang ada di vndb.org
```
""" # noqa: E501
class BotBrainHelper(commands.Cog):
"""A custom !help command for all of my bot command"""
def __init__(self, bot: naoTimesBot):
self.bot = bot
self.logger = logging.getLogger("BotBrain.Helper")
@staticmethod
def _owner_only_command(command: commands.Command):
if command.checks:
for check in command.checks:
fn_primitive_name = str(check)
if "is_owner" in fn_primitive_name:
return True
return False
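# NOTE: matching "is_owner" in the check's string representation is a
# heuristic; it works for discord.py's commands.is_owner() closure but could
# misfire for a custom check whose repr contains the same substring.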
async def _fallback_help(self, ctx: naoTimesContext):
msg = ctx.message
split_message: List[str] = msg.clean_content.split(" ")
if len(split_message) < 2:
return None
cmd_info: Union[commands.Command, None] = self.bot.get_command(split_message[1])
if cmd_info is None:
return None
is_owner = await self.bot.is_owner(ctx.author)
if self._owner_only_command(cmd_info) and not is_owner:
return None
cmd_opts = []
for key, val in cmd_info.clean_params.items():
anotasi = val.annotation if val.annotation is not val.empty else None
if anotasi is not None:
anotasi = anotasi.__name__
cmd_sample = {"name": key}
if val.default is val.empty:
cmd_sample["type"] = "r"
cmd_sample["desc"] = f"Parameter `{key}` dibutuhkan untuk menjalankan perintah ini!"
else:
cmd_sample["type"] = "o"
cmd_sample["desc"] = f"Parameter `{key}` opsional dan bisa diabaikan!"
if anotasi is not None and "desc" in cmd_sample:
cmd_sample["desc"] += f"\n`{key}` akan dikonversi ke format `{anotasi}` nanti."
cmd_opts.append(HelpOption.from_dict(cmd_sample))
extra_kwargs = {"cmd_name": cmd_info.qualified_name}
if cmd_info.description:
extra_kwargs["desc"] = cmd_info.description
helpcmd = ctx.create_help(**extra_kwargs)
if len(cmd_opts) > 0:
helpcmd.add_field(HelpField(cmd_info.qualified_name, options=cmd_opts))
else:
helpcmd.add_field(HelpField(cmd_info.qualified_name, "Cukup jalankan perintah ini!"))
helpcmd.add_aliases(cmd_info.aliases)
return helpcmd.get()
@commands.command(name="help", aliases=["bantuan"])
async def _bbhelp_original_main(self, ctx: naoTimesContext):
new_h = "Dokumentasi telah dipindah ke website baru!\n"
new_h += "Silakan kunjungi <https://naoti.me/docs> untuk melihat "
new_h += "bantuan dan dokumentasi bot!\n\n"
new_h += f"Untuk melihat bantuan lama, gunakan {self.bot.prefix}oldhelp di DM Bot"
await ctx.send(new_h)
@commands.group(name="oldhelp", aliases=["bantuanlama"])
@commands.dm_only()
async def _bbhelp(self, ctx: naoTimesContext):
is_nsfw = False
if isinstance(ctx.channel, discord.TextChannel):
is_nsfw = ctx.channel.is_nsfw()
if ctx.invoked_subcommand is None:
if not ctx.empty_subcommand(2):
gen_help = await self._fallback_help(ctx)
if isinstance(gen_help, discord.Embed):
return await ctx.send(embed=gen_help)
return await ctx.send("Tidak dapat menemukan bantuan perintah tersebut.")
is_owner = await self.bot.is_owner(ctx.author)
helpcmd = ctx.create_help(desc=f"Versi {self.bot.semver}")
helpcmd.add_field(HelpField("help", "Munculkan bantuan perintah"))
helpcmd.add_field(HelpField("oldhelp", "Munculkan bantuan perintah ini"))
helpcmd.add_field(
HelpField("oldhelp showtimes", "Munculkan bantuan perintah berkaitan dengan Showtimes")
)
helpcmd.add_field(
HelpField("oldhelp weebs", "Munculkan bantuan perintah berkaitan dengan Anime/VN/VTuber")
)
helpcmd.add_field(HelpField("oldhelp kutubuku", "Munculkan bantuan perintah berkaitan KBBI"))
helpcmd.add_field(HelpField("oldhelp fun", "Munculkan bantuan yang *menyenangkan*"))
helpcmd.add_field(
HelpField(
"oldhelp peninjau", "Munculkan berbagai macam perintah yang mengambil data dari Internet."
)
)
helpcmd.add_field(HelpField("oldhelp moderasi", "Munculkan semua perintah moderasi naoTimes."))
helpcmd.add_field(
HelpField("oldhelp vote", "Munculkan bantuan perintah untuk voting dan giveaway")
)
helpcmd.add_field(HelpField("oldhelp mod", "Munculkan bantuan perintah untuk moderasi peladen"))
if is_nsfw:
helpcmd.add_field(HelpField("oldhelp nsfw", "Munculkan bantuan perintah untuk hal NSFW"))
if is_owner:
helpcmd.add_field(HelpField("oldhelp owner", "Munculkan bantuan perintah khusus Owner Bot"))
helpcmd.generate_aliases(["bantuanlama"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.error
async def _bboldhelp_error(self, ctx: naoTimesContext, error: Exception):
if isinstance(error, commands.PrivateMessageOnly):
await ctx.send("Mohon gunakan perintah ini di DM Bot!")
"""
Owner extensions
"""
@_bbhelp.command(name="owner")
@commands.is_owner()
async def _bbhelp_owner(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("Admin[*]", desc=f"Versi {self.bot.semver}")
helpcmd.add_field(
HelpField("load", "Load sebuah module yang ada di Bot", [HelpOption("module", required=True)])
)
helpcmd.add_field(
HelpField("unload", "Unload module yang ada di Bot", [HelpOption("module", required=True)])
)
helpcmd.add_field(
HelpField("reload", "Reload module yang ada di Bot", [HelpOption("module", required=True)])
)
helpcmd.add_field(
HelpField(
"gprefix",
"Ubah prefix utama bot",
[HelpOption(name="prefix", description="Prefix baru untuk bot")],
)
)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="load")
@commands.is_owner()
async def _bbhelp_owner_load(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("load", desc="Load sebuah module yang ada di Bot")
helpcmd.add_field(
HelpField(
"load",
options=[HelpOption("module", "`<module>` yang akan di load", required=True)],
examples=["kutubuku.kbbi", "cogs.kutubuku.kbbi"],
)
)
helpcmd.generate_aliases(add_note=False)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="unload")
@commands.is_owner()
async def _bbhelp_owner_unload(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("unload", desc="Unload sebuah module yang ada di Bot")
helpcmd.add_field(
HelpField(
"unload",
options=[HelpOption("module", "`<module>` yang akan di unload", required=True)],
examples=["kutubuku.kbbi", "cogs.kutubuku.kbbi"],
)
)
helpcmd.generate_aliases(add_note=False)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="reload")
@commands.is_owner()
async def _bbhelp_owner_reload(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("reload", desc="Reload sebuah module yang ada di Bot")
helpcmd.add_field(
HelpField(
"reload",
options=[HelpOption("module", "`<module>` yang akan di reload", required=True)],
examples=["kutubuku.kbbi", "cogs.kutubuku.kbbi"],
)
)
helpcmd.generate_aliases(add_note=False)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="gprefix")
@commands.is_owner()
async def _bbhelp_owner_gprefix(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("gprefix", desc="Ubah prefix utama bot")
helpcmd.add_field(
HelpField(
"prefix",
options=[
HelpOption("prefix", "`<prefix>` baru yang akan digunakan untuk Bot", required=True)
],
examples=["n!", "c!"],
)
)
helpcmd.generate_aliases(add_note=False)
await ctx.send(embed=helpcmd.get())
"""
Showtimes extensions
"""
@_bbhelp.group(name="showtimes")
async def _bbhelp_showtimes(self, ctx: naoTimesContext):
if not ctx.invoked_subcommand:
if not ctx.empty_subcommand():
return await ctx.send("Tidak dapat menemukan bantuan perintah tersebut.")
helpcmd = ctx.create_help("Showtimes[*]", desc=f"Versi {self.bot.prefix}")
helpcmd.add_field(
HelpField("oldhelp showtimes user", "Munculkan bantuan perintah Showtimes untuk pengguna")
)
helpcmd.add_field(
HelpField("oldhelp showtimes staff", "Munculkan bantuan perintah Showtimes untuk staff")
)
helpcmd.add_field(
HelpField(
"oldhelp showtimes admin", "Munculkan bantuan perintah Showtimes untuk admin peladen"
)
)
helpcmd.add_field(
HelpField("oldhelp showtimes alias", "Munculkan bantuan perintah Showtimes untuk alias anime")
)
helpcmd.add_field(
HelpField(
"oldhelp showtimes kolaborasi",
"Munculkan bantuan perintah Showtimes untuk kolaborasi proyek",
)
)
helpcmd.add_field(
HelpField(
"oldhelp showtimes fansubdb",
"Munculkan bantuan perintah Showtimes untuk integrasi FansubDB",
)
)
is_owner = await self.bot.is_owner(ctx.author)
if is_owner:
helpcmd.add_field(
HelpField(
"oldhelp showtimes owner", "Munculkan bantuan perintah Showtimes untuk Owner Bot"
)
)
helpcmd.add_field(HelpField("oldhelp fansubrss", "Munculkan bantuan perintah untuk FansubRSS"))
await ctx.send(embed=helpcmd.get())
@staticmethod
def _showtimes_get_text(switch: str):
judul_info = "`<judul>` adalah garapan yang "
judul_info += "terdaftar di database naoTimes."
judul_info += "\n`<judul>` dapat disingkat sesingkat mungkin."
posisi_info = "`<posisi>` merupakan salah satu dari 7 posisi ini:\n"
posisi_info += "```\ntl, tlc, enc, ed, tm, ts, atau qc\n"
posisi_info += "(Translator, Translation Checker, Encoder, Editor, "
posisi_info += "Timer, Typesetter, Quality Checker)\n```"
jumlah_info = "`<jumlah>` adalah total episode yang mau dirilis (dari episode yang terakhir dirilis)\n" # noqa: E501
jumlah_info += "Misalkan lagi ngerjain Episode 4, terus mau rilis sampe episode 7\n" # noqa: E501
jumlah_info += "Total dari Episode 4 sampai 7 ada 4 (4, 5, 6, 7)\n"
jumlah_info += "Maka tulis jumlahnya 4"
switches = {
"judul": judul_info,
"posisi": posisi_info,
"jumlah": jumlah_info,
}
return switches.get(switch, "")
# Showtimes user extensions
@_bbhelp_showtimes.command(name="user", aliases=["pengguna"])
async def _bbhelp_showtimes_user(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"Showtimes User[*]", desc="Perintah-perintah yang dapat digunakan oleh semua pengguna."
)
helpcmd.add_field(
HelpField("tagih", "Melihat progres garapan untuk sebuah anime.", [HelpOption("judul")])
)
helpcmd.add_field(HelpField("jadwal", "Melihat jadwal untuk episode selanjutnya untuk musim ini"))
helpcmd.add_field(
HelpField(
"staff",
"Melihat informasi staff untuk sebuah garapan",
[HelpOption("judul")],
)
)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("tagih", aliases=["blame", "mana"])
async def _bbhelp_showtimes_user_tagih(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("tagih", desc="Melihat progres garapan untuk sebuah anime.")
extra_info = self._showtimes_get_text("judul") + "\n"
extra_info += "Jika tidak diberikan, akan dilist semua garapan"
helpcmd.add_field(
HelpField("tagih", options=[HelpOption("judul", extra_info)], examples=["hitori", "hitoribocchi"])
)
helpcmd.generate_aliases(["blame", "mana"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("jadwal", aliases=["airing"])
async def _bbhelp_showtimes_user_jadwal(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("jadwal", desc="Melihat jadwal untuk episode selanjutnya untuk musim ini")
helpcmd.add_field(HelpField("jadwal"))
helpcmd.generate_aliases(["airing"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("staff", aliases=["tukangdelay", "pendelay", "staf"])
async def _bbhelp_showtimes_user_staff(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("staff", desc="Melihat informasi staff untuk sebuah garapan")
extra_info = self._showtimes_get_text("judul") + "\n"
extra_info += "Jika tidak diberikan, akan dilist semua garapan"
helpcmd.add_field(
HelpField("staff", options=[HelpOption("judul", extra_info)], examples=["hitori", "hitoribocchi"])
)
helpcmd.generate_aliases(["tukangdelay", "pendelay", "staf"])
await ctx.send(embed=helpcmd.get())
# Showtimes staff extensions
@_bbhelp_showtimes.command(name="staff")
async def _bbhelp_showtimes_staff(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"Showtimes Staff[*]", desc="Perintah-perintah yang dapat digunakan oleh staff."
)
helpcmd.add_field(
HelpField(
"beres",
"Menandakan posisi garapan episode menjadi beres",
[HelpOption("posisi", required=True), HelpOption("judul", required=True)],
)
)
helpcmd.add_field(
HelpField(
"gakjadi",
"Menandakan posisi garapan episode mejadi belum selesai",
[HelpOption("posisi", required=True), HelpOption("judul", required=True)],
)
)
helpcmd.add_field(
HelpField(
"tandakan",
"Mengubah status posisi sebuah garapan menjadi beres atau belum beres",
[
HelpOption("posisi", required=True),
HelpOption("episode", required=True),
HelpOption("judul", required=True),
],
)
)
helpcmd.add_field(
HelpField(
"rilis",
"Merilis garapan!\n*Hanya bisa dipakai oleh Admin atau QCer*",
[HelpOption("...", required=True)],
)
)
helpcmd.add_field(
HelpField(
"batalrilis",
"Membatalkan rilisan garapan!\n*Hanya bisa dipakai oleh Admin atau QCer*",
[HelpOption("judul", required=True)],
)
)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("beres", aliases=["done"])
async def _bbhelp_showtimes_staff_beres(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("beres", desc="Menandakan posisi garapan episode menjadi beres")
helpcmd.add_field(
HelpField(
"beres",
options=[
HelpOption("posisi", self._showtimes_get_text("posisi"), required=True),
HelpOption("judul", self._showtimes_get_text("judul"), required=True),
],
examples=["enc hitoribocchi", "ts hitoribocchi"],
)
)
helpcmd.generate_aliases(["done"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("gakjadi", aliases=["undone", "cancel"])
async def _bbhelp_showtimes_staff_gakjadi(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("gakjadi", desc="Menandakan posisi garapan episode mejadi belum selesai")
helpcmd.add_field(
HelpField(
"gakjadi",
options=[
HelpOption("posisi", self._showtimes_get_text("posisi"), required=True),
HelpOption("judul", self._showtimes_get_text("judul"), required=True),
],
examples=["enc hitoribocchi", "ts hitoribocchi"],
)
)
helpcmd.generate_aliases(["undone", "cancel"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("tandakan", aliases=["mark"])
async def _bbhelp_showtimes_staff_tandakan(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"tandakan", desc="Mengubah status posisi sebuah garapan menjadi beres atau belum beres"
)
helpcmd.add_field(
HelpField(
"tandakan",
options=[
HelpOption("posisi", self._showtimes_get_text("posisi"), required=True),
HelpOption("episode", "Episode yang ingin ditandakan", required=True),
HelpOption("judul", self._showtimes_get_text("judul"), required=True),
],
examples=["enc hitoribocchi", "ts hitoribocchi"],
)
)
helpcmd.generate_aliases(["mark"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("rilis", aliases=["release"])
async def _bbhelp_showtimes_staff_rilis(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("rilis", desc="Merilis garapan!\n*Hanya bisa dipakai oleh Admin atau QCer*")
helpcmd.add_field(
HelpField(
"rilis",
"Merilis episode garapan yang sedang dikerjakan",
[HelpOption("judul", self._showtimes_get_text("judul"), True)],
["hitoribocchi"],
)
)
helpcmd.add_field(
HelpField(
"rilis batch",
"Merilis beberapa episode sekaligus (dimulai dari episode yang dikerjakan)",
[
HelpOption("jumlah", self._showtimes_get_text("jumlah"), True),
HelpOption("judul", self._showtimes_get_text("judul"), True),
],
["4 hitoribocchi"],
)
)
helpcmd.add_field(
HelpField(
"rilis semua",
"Merilis semua episode yang ada",
[HelpOption("judul", self._showtimes_get_text("judul"), True)],
["hitoribocchi"],
)
)
helpcmd.generate_aliases(["release"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("batalrilis", aliases=["gakjadirilis", "revert"])
async def _bbhelp_showtimes_staff_batalrilis(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"batalrilis", desc="Membatalkan rilisan garapan!\n*Hanya bisa dipakai oleh Admin atau QCer*"
)
helpcmd.add_field(
HelpField(
"batalrilis",
options=[HelpOption("judul", self._showtimes_get_text("judul"), required=True)],
examples=["hitoribocchi"],
)
)
helpcmd.generate_aliases(["gakjadirilis", "revert"])
await ctx.send(embed=helpcmd.get())
# Showtimes admin extension
@_bbhelp_showtimes.command("admin")
async def _bbhelp_showtimes_admin(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"Showtimes Admin[*]", desc="Perintah-perintah yang dapat digunakan oleh admin."
)
helpcmd.add_field(
HelpField("ubahdata", "Ubah berbagai macam informasi dan data garapan", [HelpOption("judul")])
)
helpcmd.add_field(HelpField("tambahutang", "Tambah garapan baru"))
helpcmd.add_field(HelpField("showui", "Lihat informasi untuk ShowtimesUI atau naoTimesUI (WebUI)"))
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("ubahdata")
async def _bbhelp_showtimes_admin_ubahdata(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("ubahdata", desc="Ubah berbagai macam informasi dan data garapan")
extra_info = self._showtimes_get_text("judul") + "\n"
extra_info += "Jika tidak diberikan, akan dilist semua garapan"
helpcmd.add_field(
HelpField(
"ubahdata",
"Anda dapat menambah/menghapus episode, mengubah staff, atau drop garapan",
[HelpOption("judul", extra_info)],
["hitoribocchi"],
)
)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("tambahutang", aliases=["addnew"])
async def _bbhelp_showtimes_admin_tambahutang(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("tambahutang", desc="Tambah garapan baru ke database Showtimes!")
helpcmd.add_field(HelpField("tambahutang"))
helpcmd.add_aliases(["addnew"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command("showui")
async def _bbhelp_showtimes_admin_showui(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("showui", desc="Tambah garapan baru ke database Showtimes!")
help_info = "*Perintah ini akan memperlihatkan password untuk naoTimesUI, hati-hati!*\n"
help_info += "Anda juga dapat menggunakan via DM bot"
helpcmd.add_field(
HelpField(
"showui",
help_info,
[HelpOption("guild_id", "ID peladen, hanya dibutuhkan jika digunakan via DM bot")],
)
)
await ctx.send(embed=helpcmd.get())
# Showtimes alias extension
@_bbhelp_showtimes.command("alias")
async def _bbhelp_showtimes_alias(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"alias", desc="Perintah-perintah yang digunakan untuk menambah/menghapus alias!"
)
helpcmd.add_field(HelpField("alias", "Tambah alias baru untuk sebuah garapan"))
helpcmd.add_field(
HelpField(
"alias list",
"Lihat alias yang terdaftar untuk garapan",
[HelpOption("judul", self._showtimes_get_text("judul"), True)],
["hitoribocchi"],
)
)
helpcmd.add_field(
HelpField(
"alias hapus",
"Hapus alias untuk sebuah garapan",
[HelpOption("judul", self._showtimes_get_text("judul"), True)],
["hitoribocchi"],
)
)
helpcmd.add_aliases(["alias remove (alias hapus)"])
await ctx.send(embed=helpcmd.get())
    # Showtimes kolaborasi extension
@_bbhelp_showtimes.group("kolaborasi", aliases=["joint", "join", "koleb"])
async def _bbhelp_showtimes_kolaborasi(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"Showtimes Kolaborasi[*]", desc="Perintah-perintah untuk melakukan kolaborasi dengan peladen lain"
)
helpcmd.add_field(HelpField("kolaborasi", "Memunculkan bantuan perintah"))
helpcmd.add_field(
HelpField(
"kolaborasi dengan",
"Inisiasi kolaborasi dengan peladen lain",
[HelpOption("server_id", required=True), HelpOption("judul", required=True)],
use_fullquote=True,
)
)
helpcmd.add_field(
HelpField(
"kolaborasi konfirmasi",
"Konfirmasi sebuah ajakan kolaborasi",
[HelpOption("kode", required=True)],
use_fullquote=True,
)
)
helpcmd.add_field(
HelpField(
"kolaborasi putus",
"Putuskan kolaborasi yang sedang berlangsung",
[HelpOption("judul", required=True)],
use_fullquote=True,
)
)
helpcmd.add_field(
HelpField(
"kolaborasi batalkan",
"Batalkan ajakan konfirmasi",
[HelpOption("server_id", required=True), HelpOption("kode", required=True)],
use_fullquote=True,
)
)
helpcmd.add_aliases(["joint", "join", "koleb"])
await ctx.send(embed=helpcmd.get())
@_bbhelp_showtimes_kolaborasi.command("dengan", aliases=["with"])
async def _bbhelp_showtimes_kolaborasi_dengan(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"kolaborasi dengan", desc="Inisiasi kolaborasi dengan peladen lain untuk sebuah garapan"
)
helpcmd.add_field(
HelpField(
"kolaborasi dengan",
options=[
HelpOption("server_id", "ID peladen yang ingin anda ajak kolaborasi", True),
HelpOption("judul", self._showtimes_get_text("judul"), True),
],
examples=["472705451117641729 hitoribocchi"],
)
)
helpcmd.add_aliases(["kolaborasi with", "joint with", "join with", "koleb with"])
        await ctx.send(embed=helpcmd.get())
@_bbhelp_showtimes_kolaborasi.command("konfirmasi", aliases=["confirm"])
async def _bbhelp_showtimes_kolaborasi_konfirmasi(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"kolaborasi konfirmasi", desc="Konfirmasi sebuah ajakan kolaborasi dari peladen lain"
)
helpcmd.add_field(
HelpField(
"kolaborasi konfirmasi",
options=[HelpOption("kode", "Kode unik yang dibuat dengan `!kolaborasi dengan`", True)],
examples=["abc123xyz"],
)
)
helpcmd.add_aliases(["kolaborasi confirm", "joint confirm", "join confirm", "koleb confirm"])
        await ctx.send(embed=helpcmd.get())
@_bbhelp_showtimes_kolaborasi.command("batalkan")
async def _bbhelp_showtimes_kolaborasi_batalkan(self, ctx: naoTimesContext):
helpcmd = ctx.create_help(
"kolaborasi batalkan", desc="Batalkan sebuah ajakan kolaborasi sebuah garapan"
)
helpcmd.add_field(
HelpField(
"kolaborasi batalkan",
options=[
HelpOption("server_id", "ID peladen yang ingin anda ajak kolaborasi", True),
HelpOption("kode", "Kode unik yang dibuat dengan `!kolaborasi dengan`", True),
],
examples=["472705451117641729 abc123xyz"],
)
)
await ctx.send(embed=helpcmd.get())
@_bbhelp_showtimes_kolaborasi.command("putus")
async def _bbhelp_showtimes_kolaborasi_putus(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("kolaborasi putus", desc="Putuskan kolaborasi sebuah garapan")
helpcmd.add_field(
HelpField(
"kolaborasi putus",
options=[HelpOption("judul", self._showtimes_get_text("judul"), True)],
examples=["hitoribocchi"],
)
)
await ctx.send(embed=helpcmd.get())
"""
Weebs command
"""
@_bbhelp.command(name="weebs", aliases=["ayaya"])
async def _bbhelp_weebs(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("Weebs[*]", desc=f"Versi {self.bot.semver}")
# animanga.py
helpcmd.add_fields(
[
HelpField("anime", "Melihat informasi sebuah Anime", HelpOption("judul", required=True)),
HelpField("manga", "Melihat informasi sebuah Manga", HelpOption("judul", required=True)),
HelpField("tayang", "Melihat jadwal tayang Anime musim ini."),
]
)
# visualnovel.py
helpcmd.add_fields(
[
HelpField("vn", "Melihat informasi sebuah Visual Novel", HelpOption("judul", required=True)),
HelpField("randomvn", "Melihat informasi sebuah Visual Novel random"),
]
)
# vtuber.py
helpcmd.add_fields(
[
HelpField("vtuber", "Melihat bantuan perintah VTuber"),
HelpField("vtuber live", "Melihat VTuber yang sedang live"),
HelpField("vtuber jadwal", "Melihat jadwal stream VTuber"),
HelpField("vtuber channel", "Melihat informasi sebuah channel"),
HelpField("vtuber grup", "Melihat list grup atau organisasi yang terdaftar"),
]
)
helpcmd.add_aliases(["ayaya"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="anime", aliases=["animu", "kartun", "ani"])
async def _bbhelp_anime(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("anime", "Cari informasi judul Anime melalui Anilist")
helpcmd.add_field(
HelpField(
"anime",
options=HelpOption(
"judul",
ANIMANGAVN_HELP,
True,
),
examples=["hitoribocchi"],
)
)
helpcmd.embed.add_field(
name="*Tambahan*",
value="⏪ **(Selanjutnya)** ⏩ **(Sebelumnya)** "
"✅ **(Selesai melihat)**\n⏳ **(Waktu Episode selanjutnya)** "
"👍 **(Melihat Info kembali)**\n"
"📺 **(Melihat tempat streaming legal)**",
inline=False,
)
helpcmd.add_aliases(["animu", "kartun", "ani"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="manga", aliases=["komik", "mango"])
async def _bbhelp_manga(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("manga", "Cari informasi judul Manga melalui Anilist")
helpcmd.add_field(
HelpField(
"manga",
options=HelpOption(
"judul",
ANIMANGAVN_HELP,
True,
),
examples=["hitoribocchi"],
)
)
helpcmd.embed.add_field(
name="*Tambahan*",
value="⏪ **(Selanjutnya)** ⏩ **(Sebelumnya)** "
"✅ **(Selesai melihat)**\n"
"👍 **(Melihat Info kembali)**",
inline=False,
)
helpcmd.add_aliases(["komik", "mango"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="tayang")
async def _bbhelp_tayang(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("tayang", "Melihat informasi jadwal tayang untuk musim ini.")
helpcmd.add_field(
HelpField(
"tayang",
"Melihat jadwal tayang dengan listing per sisa hari menuju episode selanjutnya.",
examples=[""],
)
)
helpcmd.embed.add_field(
name="*Tamabahan*",
value="0️⃣ - 🇭 **(Melihat listing per sisa hari)**\n✅ **(Selesai melihat)**",
inline=False,
)
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="vn", aliases=["visualnovel", "eroge", "vndb"])
async def _bbhelp_vnmain(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("vn", "Melihat informasi sebuah VN melalui VNDB.")
helpcmd.add_field(
HelpField(
"vn",
options=HelpOption(
"judul",
ANIMANGAVN_HELP,
True,
),
examples=["steins;gate", "ao no kana"],
)
)
helpcmd.embed.add_field(
name="*Tambahan*",
value="⏪ **(Selanjutnya)** ⏩ **(Sebelumnya)** 📸 "
"**(Melihat screenshot)**\n✅ **(Melihat Info kembali)**",
inline=False,
)
helpcmd.add_aliases(["visualnovel", "eroge", "vndb"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.command(name="randomvn", aliases=["randomvisualnovel", "randomeroge", "vnrandom"])
async def _bbhelp_vnrandom(self, ctx: naoTimesContext):
helpcmd = ctx.create_help("vn", "Melihat informasi sebuah VN random melalui VNDB.")
helpcmd.add_field(HelpField("vn", "VN akan dicari dipilih secara random oleh bot menggunakan RNG."))
helpcmd.embed.add_field(
name="*Tambahan*",
value="📸 **(Melihat screenshot)** ✅ **(Melihat Info kembali)**",
inline=False,
)
helpcmd.add_aliases(["randomvisualnovel", "randomeroge", "vnrandom"])
await ctx.send(embed=helpcmd.get())
@_bbhelp.group(name="vtuber")
async def _bbhelp_vtuber(self, ctx: naoTimesContext):
if ctx.invoked_subcommand is None:
if not ctx.empty_subcommand():
return await ctx.send("Tidak dapat menemukan bantuan perintah tersebut.")
# Return later
def setup(bot: naoTimesBot):
bot.add_cog(BotBrainHelper(bot))
|
<gh_stars>100-1000
from __future__ import unicode_literals
import mock
import unittest
import pytest
import pytz
from django.utils import timezone
from nose.tools import * # noqa
from framework.auth import Auth
from addons.osfstorage.models import OsfStorageFile, OsfStorageFileNode, OsfStorageFolder
from osf.models import BaseFileNode
from osf.exceptions import ValidationError
from osf.utils.permissions import WRITE, ADMIN
from osf_tests.factories import ProjectFactory, UserFactory, PreprintFactory, RegionFactory, NodeFactory
from addons.osfstorage.tests import factories
from addons.osfstorage.tests.utils import StorageTestCase
from addons.osfstorage.listeners import delete_files_task
import datetime
from osf import models
from addons.osfstorage import utils
from addons.osfstorage import settings
from website.files.exceptions import FileNodeCheckedOutError, FileNodeIsPrimaryFile
@pytest.mark.django_db
class TestOsfstorageFileNode(StorageTestCase):
def test_root_node_exists(self):
assert_true(self.node_settings.root_node is not None)
def test_root_node_has_no_parent(self):
assert_true(self.node_settings.root_node.parent is None)
def test_node_reference(self):
assert_equal(self.project, self.node_settings.root_node.target)
# def test_get_folder(self):
# file = models.OsfStorageFile(name='MOAR PYLONS', node=self.node)
# folder = models.OsfStorageFolder(name='MOAR PYLONS', node=self.node)
# _id = folder._id
# file.save()
# folder.save()
# assert_equal(folder, models.OsfStorageFileNode.get_folder(_id, self.node_settings))
# def test_get_file(self):
# file = models.OsfStorageFile(name='MOAR PYLONS', node=self.node)
# folder = models.OsfStorageFolder(name='MOAR PYLONS', node=self.node)
# file.save()
# folder.save()
# _id = file._id
# assert_equal(file, models.OsfStorageFileNode.get_file(_id, self.node_settings))
def test_serialize(self):
file = OsfStorageFile(name='<NAME>', target=self.node_settings.owner)
file.save()
assert_equals(file.serialize(), {
u'id': file._id,
u'path': file.path,
u'created': None,
u'name': u'<NAME>',
u'kind': 'file',
u'version': 0,
u'downloads': 0,
u'size': None,
u'modified': None,
u'contentType': None,
u'checkout': None,
u'md5': None,
u'sha256': None,
})
version = file.create_version(
self.user,
{
u'service': u'cloud',
settings.WATERBUTLER_RESOURCE: u'osf',
u'object': u'06d80e',
}, {
u'size': 1234,
u'contentType': u'text/plain'
})
assert_equals(file.serialize(), {
u'id': file._id,
u'path': file.path,
u'created': version.created.isoformat(),
u'name': u'<NAME>',
u'kind': u'file',
u'version': 1,
u'downloads': 0,
u'size': 1234,
u'modified': version.created.isoformat(),
u'contentType': u'text/plain',
u'checkout': None,
u'md5': None,
u'sha256': None,
})
date = timezone.now()
version.update_metadata({
u'modified': date.isoformat()
})
assert_equals(file.serialize(), {
u'id': file._id,
u'path': file.path,
u'created': version.created.isoformat(),
u'name': u'<NAME>',
u'kind': u'file',
u'version': 1,
u'downloads': 0,
u'size': 1234,
# modified date is the creation date of latest version
# see https://github.com/CenterForOpenScience/osf.io/pull/7155
u'modified': version.created.isoformat(),
u'contentType': u'text/plain',
u'checkout': None,
u'md5': None,
u'sha256': None,
})
def test_get_child_by_name(self):
child = self.node_settings.get_root().append_file('Test')
assert_equal(child, self.node_settings.get_root().find_child_by_name('Test'))
def test_root_node_path(self):
assert_equal(self.node_settings.get_root().name, '')
def test_folder_path(self):
path = '/{}/'.format(self.node_settings.root_node._id)
assert_equal(self.node_settings.get_root().path, path)
def test_file_path(self):
file = OsfStorageFile(name='MOAR PYLONS', target=self.node)
file.save()
assert_equal(file.name, 'MOAR PYLONS')
assert_equal(file.path, '/{}'.format(file._id))
def test_append_folder(self):
child = self.node_settings.get_root().append_folder('Test')
children = self.node_settings.get_root().children
assert_equal(child.kind, 'folder')
assert_equal([child], list(children))
def test_append_file(self):
child = self.node_settings.get_root().append_file('Test')
children = self.node_settings.get_root().children
assert_equal(child.kind, 'file')
assert_equal([child], list(children))
def test_append_to_file(self):
child = self.node_settings.get_root().append_file('Test')
with assert_raises(AttributeError):
child.append_file('Cant')
def test_children(self):
kids = [
self.node_settings.get_root().append_file('Foo{}Bar'.format(x))
for x in range(100)
]
assert_equals(sorted(kids, key=lambda kid: kid.name), list(self.node_settings.get_root().children.order_by('name')))
def test_download_count_file_defaults(self):
child = self.node_settings.get_root().append_file('Test')
assert_equals(child.get_download_count(), 0)
@mock.patch('framework.sessions.session')
def test_download_count_file(self, mock_session):
mock_session.data = {}
child = self.node_settings.get_root().append_file('Test')
utils.update_analytics(self.project, child, 0)
utils.update_analytics(self.project, child, 1)
utils.update_analytics(self.project, child, 2)
assert_equals(child.get_download_count(), 3)
assert_equals(child.get_download_count(0), 1)
assert_equals(child.get_download_count(1), 1)
assert_equals(child.get_download_count(2), 1)
@unittest.skip
def test_create_version(self):
pass
@unittest.skip
def test_update_version_metadata(self):
pass
def test_delete_folder(self):
parent = self.node_settings.get_root().append_folder('Test')
kids = []
for x in range(10):
kid = parent.append_file(str(x))
kid.save()
kids.append(kid)
count = OsfStorageFileNode.objects.count()
tcount = models.TrashedFileNode.objects.count()
parent.delete()
assert_is(OsfStorageFileNode.load(parent._id), None)
assert_equals(count - 11, OsfStorageFileNode.objects.count())
assert_equals(tcount + 11, models.TrashedFileNode.objects.count())
for kid in kids:
assert_is(
OsfStorageFileNode.load(kid._id),
None
)
def test_delete_root_node(self):
root = self.node_settings.get_root()
folder = root.append_folder('Test')
file = folder.append_file('test_file')
# If the top-level item is a root, it is not deleted
root.delete()
root.reload()
assert root.type == 'osf.osfstoragefolder'
assert BaseFileNode.objects.get(_id=folder._id).type == 'osf.trashedfolder'
assert BaseFileNode.objects.get(_id=file._id).type == 'osf.trashedfile'
def test_delete_file(self):
child = self.node_settings.get_root().append_file('Test')
field_names = [f.name for f in child._meta.get_fields() if not f.is_relation and f.name not in ['id', 'content_type_pk']]
child_data = {f: getattr(child, f) for f in field_names}
child.delete()
assert_is(OsfStorageFileNode.load(child._id), None)
trashed = models.TrashedFileNode.load(child._id)
child_storage = dict()
trashed_storage = dict()
trashed_storage['parent'] = trashed.parent._id
child_storage['materialized_path'] = child.materialized_path
assert_equal(trashed.path, '/' + child._id)
trashed_field_names = [f.name for f in child._meta.get_fields() if not f.is_relation and
f.name not in ['id', '_materialized_path', 'content_type_pk', '_path', 'deleted', 'deleted_on', 'deleted_by', 'type', 'modified']]
for f, value in child_data.items():
if f in trashed_field_names:
assert_equal(getattr(trashed, f), value)
def test_delete_preprint_primary_file(self):
user = UserFactory()
preprint = PreprintFactory(creator=user)
preprint.save()
file = preprint.files.all()[0]
with assert_raises(FileNodeIsPrimaryFile):
file.delete()
def test_delete_file_no_guid(self):
child = self.node_settings.get_root().append_file('Test')
assert_is(OsfStorageFileNode.load(child._id).guids.first(), None)
with mock.patch('osf.models.files.apps.get_model') as get_model:
child.delete()
assert_is(get_model.called, False)
assert_is(OsfStorageFileNode.load(child._id), None)
def test_delete_file_guids(self):
child = self.node_settings.get_root().append_file('Test')
guid = child.get_guid(create=True)
assert_is_not(OsfStorageFileNode.load(child._id).guids.first(), None)
with mock.patch('osf.models.files.apps.get_model') as get_model:
child.delete()
assert_is(get_model.called, True)
assert_is(get_model('osf.Comment').objects.filter.called, True)
assert_is(OsfStorageFileNode.load(child._id), None)
@mock.patch('addons.osfstorage.listeners.enqueue_postcommit_task')
def test_file_deleted_when_node_deleted(self, mock_enqueue):
child = self.node_settings.get_root().append_file('Test')
self.node.remove_node(auth=Auth(self.user))
mock_enqueue.assert_called_with(delete_files_task, (self.node._id, ), {}, celery=True)
def test_materialized_path(self):
child = self.node_settings.get_root().append_file('Test')
assert_equals('/Test', child.materialized_path)
def test_materialized_path_folder(self):
child = self.node_settings.get_root().append_folder('Test')
assert_equals('/Test/', child.materialized_path)
def test_materialized_path_nested(self):
child = self.node_settings.get_root().append_folder('Cloud').append_file('Carp')
assert_equals('/Cloud/Carp', child.materialized_path)
def test_copy(self):
to_copy = self.node_settings.get_root().append_file('Carp')
copy_to = self.node_settings.get_root().append_folder('Cloud')
version = to_copy.create_version(
self.user,
{
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '06d80e',
}, {
'sha256': 'existing',
'vault': 'the cloud',
'archive': 'erchiv'
})
assert_equal(to_copy.versions.first().get_basefilenode_version(to_copy).version_name, 'Carp')
copied = to_copy.copy_under(copy_to)
assert_not_equal(copied, to_copy)
assert_equal(copied.parent, copy_to)
assert_equal(copied.versions.first().get_basefilenode_version(copied).version_name, 'Carp')
assert_equal(to_copy.parent, self.node_settings.get_root())
def test_copy_node_file_to_preprint(self):
user = UserFactory()
preprint = PreprintFactory(creator=user)
preprint.save()
to_copy = self.node_settings.get_root().append_file('Carp')
copy_to = preprint.root_folder
copied = to_copy.copy_under(copy_to)
assert_equal(copied.parent, copy_to)
assert_equal(copied.target, preprint)
def test_move_nested(self):
new_project = ProjectFactory()
other_node_settings = new_project.get_addon('osfstorage')
move_to = other_node_settings.get_root().append_folder('Cloud')
to_move = self.node_settings.get_root().append_folder('Carp')
child = to_move.append_file('A dee um')
moved = to_move.move_under(move_to)
child.reload()
assert_equal(moved, to_move)
assert_equal(new_project, to_move.target)
assert_equal(new_project, move_to.target)
assert_equal(new_project, child.target)
def test_move_nested_between_regions(self):
canada = RegionFactory()
new_component = NodeFactory(parent=self.project)
component_node_settings = new_component.get_addon('osfstorage')
component_node_settings.region = canada
component_node_settings.save()
move_to = component_node_settings.get_root()
to_move = self.node_settings.get_root().append_folder('Aaah').append_folder('Woop')
child = to_move.append_file('There it is')
for _ in range(2):
version = factories.FileVersionFactory(region=self.node_settings.region)
child.add_version(version)
child.save()
moved = to_move.move_under(move_to)
child.reload()
assert new_component == child.target
versions = child.versions.order_by('-created')
assert versions.first().region == component_node_settings.region
assert versions.last().region == self.node_settings.region
def test_copy_rename(self):
to_copy = self.node_settings.get_root().append_file('Carp')
copy_to = self.node_settings.get_root().append_folder('Cloud')
version = to_copy.create_version(
self.user,
{
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '06d80e',
}, {
'sha256': 'existing',
'vault': 'the cloud',
'archive': 'erchiv'
})
assert_equal(to_copy.versions.first().get_basefilenode_version(to_copy).version_name, 'Carp')
copied = to_copy.copy_under(copy_to, name='But')
assert_equal(copied.versions.first().get_basefilenode_version(copied).version_name, 'But')
assert_equal(copied.name, 'But')
assert_not_equal(copied, to_copy)
assert_equal(to_copy.name, 'Carp')
assert_equal(copied.parent, copy_to)
assert_equal(to_copy.parent, self.node_settings.get_root())
def test_move(self):
to_move = self.node_settings.get_root().append_file('Carp')
move_to = self.node_settings.get_root().append_folder('Cloud')
moved = to_move.move_under(move_to)
assert_equal(to_move, moved)
assert_equal(moved.parent, move_to)
def test_move_and_rename(self):
to_move = self.node_settings.get_root().append_file('Carp')
version = to_move.create_version(
self.user,
{
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '06d80e',
}, {
'sha256': 'existing',
'vault': 'the cloud',
'archive': 'erchiv'
})
move_to = self.node_settings.get_root().append_folder('Cloud')
assert_equal(to_move.versions.first().get_basefilenode_version(to_move).version_name, 'Carp')
moved = to_move.move_under(move_to, name='Tuna')
assert_equal(to_move, moved)
assert_equal(to_move.name, 'Tuna')
assert_equal(moved.versions.first().get_basefilenode_version(moved).version_name, 'Tuna')
assert_equal(moved.parent, move_to)
def test_move_preprint_primary_file_to_node(self):
user = UserFactory()
preprint = PreprintFactory(creator=user)
preprint.save()
to_move = preprint.files.all()[0]
assert_true(to_move.is_preprint_primary)
move_to = self.node_settings.get_root().append_folder('Cloud')
with assert_raises(FileNodeIsPrimaryFile):
moved = to_move.move_under(move_to, name='Tuna')
def test_move_preprint_primary_file_within_preprint(self):
user = UserFactory()
preprint = PreprintFactory(creator=user)
preprint.save()
folder = OsfStorageFolder(name='foofolder', target=preprint)
folder.save()
to_move = preprint.files.all()[0]
assert_true(to_move.is_preprint_primary)
moved = to_move.move_under(folder, name='Tuna')
assert preprint.primary_file == to_move
assert to_move.parent == folder
assert folder.target == preprint
@unittest.skip
def test_move_folder(self):
pass
@unittest.skip
def test_move_folder_and_rename(self):
pass
@unittest.skip
def test_rename_folder(self):
pass
@unittest.skip
def test_rename_file(self):
pass
@unittest.skip
def test_move_across_nodes(self):
pass
@unittest.skip
def test_move_folder_across_nodes(self):
pass
@unittest.skip
def test_copy_across_nodes(self):
pass
@unittest.skip
def test_copy_folder_across_nodes(self):
pass
def test_get_file_guids_for_live_file(self):
node = self.node_settings.owner
file = OsfStorageFile(name='foo', target=node)
file.save()
file.get_guid(create=True)
guid = file.get_guid()._id
assert guid is not None
assert guid in OsfStorageFileNode.get_file_guids(
'/' + file._id, provider='osfstorage', target=node)
def test_get_file_guids_for_live_folder(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
files[-1].get_guid(create=True)
guids = [file.get_guid()._id for file in files]
assert len(guids) == len(files)
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert sorted(guids) == sorted(all_guids)
def test_get_file_guids_for_trashed_file(self):
node = self.node_settings.owner
file = OsfStorageFile(name='foo', target=node)
file.save()
file.get_guid(create=True)
guid = file.get_guid()._id
file.delete()
assert guid is not None
assert guid in OsfStorageFileNode.get_file_guids(
'/' + file._id, provider='osfstorage', target=node)
def test_get_file_guids_for_trashed_folder(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
files[-1].get_guid(create=True)
guids = [file.get_guid()._id for file in files]
assert len(guids) == len(files)
folder.delete()
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert sorted(guids) == sorted(all_guids)
def test_get_file_guids_live_file_wo_guid(self):
node = self.node_settings.owner
file = OsfStorageFile(name='foo', target=node)
file.save()
assert [] == OsfStorageFileNode.get_file_guids(
'/' + file._id, provider='osfstorage', target=node)
def test_get_file_guids_for_live_folder_wo_guids(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert [] == all_guids
def test_get_file_guids_trashed_file_wo_guid(self):
node = self.node_settings.owner
file = OsfStorageFile(name='foo', target=node)
file.save()
file.delete()
assert [] == OsfStorageFileNode.get_file_guids(
'/' + file._id, provider='osfstorage', target=node)
def test_get_file_guids_for_trashed_folder_wo_guids(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
folder.delete()
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert [] == all_guids
def test_get_file_guids_for_live_folder_recursive(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
files[-1].get_guid(create=True)
subfolder = folder.append_folder('subfoo')
for i in range(1, 4):
files.append(subfolder.append_file('subfoo.{}'.format(i)))
files[-1].get_guid(create=True)
guids = [file.get_guid()._id for file in files]
assert len(guids) == len(files)
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert sorted(guids) == sorted(all_guids)
def test_get_file_guids_for_trashed_folder_recursive(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
files[-1].get_guid(create=True)
subfolder = folder.append_folder('subfoo')
for i in range(1, 4):
files.append(subfolder.append_file('subfoo.{}'.format(i)))
files[-1].get_guid(create=True)
guids = [file.get_guid()._id for file in files]
assert len(guids) == len(files)
folder.delete()
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert sorted(guids) == sorted(all_guids)
def test_get_file_guids_for_live_folder_recursive_wo_guids(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
subfolder = folder.append_folder('subfoo')
for i in range(1, 4):
files.append(subfolder.append_file('subfoo.{}'.format(i)))
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert [] == all_guids
def test_get_file_guids_for_trashed_folder_recursive_wo_guids(self):
node = self.node_settings.owner
folder = OsfStorageFolder(name='foofolder', target=node)
folder.save()
files = []
for i in range(1, 4):
files.append(folder.append_file('foo.{}'.format(i)))
subfolder = folder.append_folder('subfoo')
for i in range(1, 4):
files.append(subfolder.append_file('subfoo.{}'.format(i)))
folder.delete()
all_guids = OsfStorageFileNode.get_file_guids(
'/' + folder._id, provider='osfstorage', target=node)
assert [] == all_guids
@pytest.mark.django_db
class TestNodeSettingsModel:
@pytest.fixture()
def region(self):
return RegionFactory()
@pytest.fixture()
def region2(self):
return RegionFactory()
@pytest.fixture()
def user(self, region):
user = UserFactory()
user_settings = user.get_addon('osfstorage')
user_settings.default_region = region
user_settings.save()
return user
@pytest.fixture()
def user2(self, region2):
user = UserFactory()
user_settings = user.get_addon('osfstorage')
user_settings.default_region = region2
user_settings.save()
return user
@pytest.fixture()
def node(self, user):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture()
def child_node_with_different_region(self, user, node, region2):
child = NodeFactory(parent=node, creator=user, is_public=True)
child_settings = child.get_addon('osfstorage')
child_settings.region_id = region2.id
child_settings.save()
return child
@pytest.fixture()
    def node_settings(self, node):
return node.get_addon('osfstorage')
@pytest.fixture()
    def user_settings(self, user):
return user.get_addon('osfstorage')
@pytest.fixture()
def auth_obj(self, node):
return Auth(user=node.creator)
def test_fields(self, node_settings):
assert node_settings._id
assert node_settings.has_auth is True
assert node_settings.complete is True
def test_after_fork_copies_versions(self, node, node_settings, auth_obj):
num_versions = 5
path = 'jazz/dreamers-ball.mp3'
record = node_settings.get_root().append_file(path)
for _ in range(num_versions):
version = factories.FileVersionFactory()
record.add_version(version)
fork = node.fork_node(auth_obj)
fork_node_settings = fork.get_addon('osfstorage')
fork_node_settings.reload()
cloned_record = fork_node_settings.get_root().find_child_by_name(path)
assert list(cloned_record.versions.all()) == list(record.versions.all())
assert fork_node_settings.root_node
def test_fork_reverts_to_node_storage_region(self, user2, region, region2, node, child_node_with_different_region):
"""
        Despite different user region defaults, the forked node always stays in the same region as its original node.
"""
fork = node.fork_node(Auth(user2))
assert fork.get_addon('osfstorage').region_id == region.id
# don't inherit or override region
child_fork = models.Node.objects.get_children(fork).first()
assert child_fork.forked_from == child_node_with_different_region
assert child_fork.get_addon('osfstorage').region_id == region2.id
def test_region_wb_url_from_creators_defaults(self, user, region, user_settings, node):
user_settings.default_region = region
user_settings.save()
node_settings = node.get_addon('osfstorage')
assert node_settings.region_id == region.id
def test_encrypted_json_field(self, region):
new_test_creds = {
'storage': {
'go': 'science',
'hey': ['woo', 'yeah', 'great']
}
}
region.waterbutler_credentials = new_test_creds
region.save()
assert region.waterbutler_credentials == new_test_creds
@pytest.mark.django_db
@pytest.mark.enable_implicit_clean
class TestOsfStorageFileVersion(StorageTestCase):
def setUp(self):
super(TestOsfStorageFileVersion, self).setUp()
self.user = factories.AuthUserFactory()
self.mock_date = datetime.datetime(1991, 10, 31, tzinfo=pytz.UTC)
def test_fields(self):
version = factories.FileVersionFactory(
size=1024,
content_type='application/json',
modified=timezone.now(),
)
retrieved = models.FileVersion.load(version._id)
assert_true(retrieved.creator)
assert_true(retrieved.location)
assert_true(retrieved.size)
# sometimes identifiers are strings, so this always has to be a string, sql is funny about that.
assert_equal(retrieved.identifier, u'0')
assert_true(retrieved.content_type)
assert_true(retrieved.modified)
def test_is_duplicate_true(self):
version1 = factories.FileVersionFactory()
version2 = factories.FileVersionFactory()
assert_true(version1.is_duplicate(version2))
assert_true(version2.is_duplicate(version1))
def test_is_duplicate_false(self):
version1 = factories.FileVersionFactory(
location={
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'd077f2',
},
)
version2 = factories.FileVersionFactory(
location={
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '06d80e',
},
)
assert_false(version1.is_duplicate(version2))
assert_false(version2.is_duplicate(version1))
def test_validate_location(self):
creator = factories.AuthUserFactory()
version = factories.FileVersionFactory.build(creator=creator, location={'invalid': True})
with assert_raises(ValidationError):
version.save()
version.location = {
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'object',
}
version.save()
def test_update_metadata(self):
version = factories.FileVersionFactory()
version.update_metadata(
{'archive': 'glacier', 'size': 123, 'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'})
version.reload()
assert_in('archive', version.metadata)
assert_equal(version.metadata['archive'], 'glacier')
def test_matching_archive(self):
version = factories.FileVersionFactory(
location={
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'd077f2',
},
metadata={'sha256': 'existing'}
)
factories.FileVersionFactory(
location={
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '06d80e',
},
metadata={
'sha256': 'existing',
'vault': 'the cloud',
'archive': 'erchiv'
}
)
assert_is(version._find_matching_archive(), True)
assert_is_not(version.archive, None)
assert_equal(version.metadata['vault'], 'the cloud')
assert_equal(version.metadata['archive'], 'erchiv')
    def test_archive_exists(self):
node_addon = self.project.get_addon('osfstorage')
fnode = node_addon.get_root().append_file('MyCoolTestFile')
version = fnode.create_version(
self.user,
{
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '06d80e',
}, {
'sha256': 'existing',
'vault': 'the cloud',
'archive': 'erchiv'
})
assert_equal(version.archive, 'erchiv')
version2 = fnode.create_version(
self.user,
{
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '07d80a',
}, {
'sha256': 'existing',
})
assert_equal(version2.archive, 'erchiv')
def test_no_matching_archive(self):
models.FileVersion.objects.all().delete()
assert_is(False, factories.FileVersionFactory(
location={
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'd077f2',
},
metadata={'sha256': 'existing'}
)._find_matching_archive())
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestOsfStorageCheckout(StorageTestCase):
def setUp(self):
super(TestOsfStorageCheckout, self).setUp()
self.user = factories.AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.osfstorage = self.node.get_addon('osfstorage')
self.root_node = self.osfstorage.get_root()
self.file = self.root_node.append_file('3005')
def test_checkout_logs(self):
non_admin = factories.AuthUserFactory()
self.node.add_contributor(non_admin, permissions=WRITE)
self.node.save()
self.file.check_in_or_out(non_admin, non_admin, save=True)
self.file.reload()
self.node.reload()
assert_equal(self.file.checkout, non_admin)
assert_equal(self.node.logs.latest().action, 'checked_out')
assert_equal(self.node.logs.latest().user, non_admin)
self.file.check_in_or_out(self.user, None, save=True)
self.file.reload()
self.node.reload()
assert_equal(self.file.checkout, None)
assert_equal(self.node.logs.latest().action, 'checked_in')
assert_equal(self.node.logs.latest().user, self.user)
self.file.check_in_or_out(self.user, self.user, save=True)
self.file.reload()
self.node.reload()
assert_equal(self.file.checkout, self.user)
assert_equal(self.node.logs.latest().action, 'checked_out')
assert_equal(self.node.logs.latest().user, self.user)
with assert_raises(FileNodeCheckedOutError):
self.file.check_in_or_out(non_admin, None, save=True)
with assert_raises(FileNodeCheckedOutError):
self.file.check_in_or_out(non_admin, non_admin, save=True)
def test_delete_checked_out_file(self):
self.file.check_in_or_out(self.user, self.user, save=True)
self.file.reload()
assert_equal(self.file.checkout, self.user)
with assert_raises(FileNodeCheckedOutError):
self.file.delete()
def test_delete_folder_with_checked_out_file(self):
folder = self.root_node.append_folder('folder')
self.file.move_under(folder)
self.file.check_in_or_out(self.user, self.user, save=True)
self.file.reload()
assert_equal(self.file.checkout, self.user)
with assert_raises(FileNodeCheckedOutError):
folder.delete()
def test_move_checked_out_file(self):
self.file.check_in_or_out(self.user, self.user, save=True)
self.file.reload()
assert_equal(self.file.checkout, self.user)
folder = self.root_node.append_folder('folder')
with assert_raises(FileNodeCheckedOutError):
self.file.move_under(folder)
def test_checked_out_merge(self):
user = factories.AuthUserFactory()
node = ProjectFactory(creator=user)
osfstorage = node.get_addon('osfstorage')
root_node = osfstorage.get_root()
file = root_node.append_file('test_file')
user_merge_target = factories.AuthUserFactory()
file.check_in_or_out(user, user, save=True)
file.reload()
assert_equal(file.checkout, user)
user_merge_target.merge_user(user)
file.reload()
assert_equal(user_merge_target.id, file.checkout.id)
def test_remove_contributor_with_checked_file(self):
user = factories.AuthUserFactory()
models.Contributor.objects.create(
node=self.node,
user=user,
visible=True
)
self.node.add_permission(user, ADMIN)
self.file.check_in_or_out(self.user, self.user, save=True)
self.file.reload()
assert_equal(self.file.checkout, self.user)
self.file.target.remove_contributors([self.user], save=True)
self.file.reload()
assert_equal(self.file.checkout, None)
|
#!/usr/bin/env python
##############################################################
# universal core routines for processing SAR images with GAMMA
# <NAME> 2014-2019
##############################################################
"""
This module is intended as a set of generalized processing routines for modularized GAMMA work flows.
The function parametrization is intended to be applicable to any kind of situation and input data set.
Thus, instead of choosing a specific parametrization for the data at hand,
core parameters are iterated over a set of values in order to find the one best suited for the task.
The approach of the single routines is likely to still have drawbacks and might fail in certain situations.
Testing and suggestions on improvements are very welcome.
"""
import os
import re
import sys
import shutil
import zipfile as zf
from datetime import datetime
if sys.version_info >= (3, 0):
from urllib.error import URLError
else:
from urllib2 import URLError
from spatialist import haversine
from spatialist.ancillary import union, finder
from ..S1 import OSV
from ..drivers import ID, CEOS_ERS, CEOS_PSR, ESA, SAFE, TSX, identify
from . import ISPPar, Namespace, par2hdr
from ..ancillary import multilook_factors, hasarg
from pyroSAR.examine import ExamineSnap
try:
from .api import diff, disp, isp, lat
except ImportError:
pass
def calibrate(id, directory, replace=False, logpath=None, outdir=None, shellscript=None):
"""
Parameters
----------
id: ~pyroSAR.drivers.ID
an SAR scene object of type pyroSAR.ID or any subclass
directory: str
the directory to search for Gamma calibration candidates
replace: bool
replace the input images by the new files? If True, the input images will be deleted.
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
Returns
-------
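    Examples
    --------
    a minimal, hypothetical sketch; the scene is assumed to be an ERS or PALSAR
    product that was already unpacked and converted with :func:`convert2gamma`,
    and `calibrate` is assumed to be importable from `pyroSAR.gamma` like its
    siblings:

    >>> from pyroSAR import identify
    >>> from pyroSAR.gamma import calibrate
    # identify the (already unpacked) SAR scene; the path is a placeholder
    >>> scene = identify('/home/test/scene')
    # radiometrically calibrate the converted GAMMA images found in the directory
    >>> calibrate(id=scene, directory=scene.scene)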
"""
if isinstance(id, CEOS_PSR):
for image in id.getGammaImages(directory):
if image.endswith('_slc'):
isp.radcal_SLC(SLC=image,
SLC_par=image + '.par',
CSLC=image + '_cal',
CSLC_par=image + '_cal.par',
K_dB=id.meta['k_dB'],
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
par2hdr(image + '_cal.par', image + '_cal.hdr')
elif isinstance(id, ESA):
k_db = {'ASAR': 55., 'ERS1': 58.24, 'ERS2': 59.75}[id.sensor]
inc_ref = 90. if id.sensor == 'ASAR' else 23.
candidates = [x for x in id.getGammaImages(directory) if re.search('_pri$', x)]
for image in candidates:
out = image.replace('pri', 'grd')
isp.radcal_PRI(PRI=image,
PRI_par=image + '.par',
GRD=out,
GRD_par=out + '.par',
K_dB=k_db,
inc_ref=inc_ref,
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
par2hdr(out + '.par', out + '.hdr')
if replace:
for item in [image, image + '.par', image + '.hdr']:
if os.path.isfile(item):
os.remove(item)
elif isinstance(id, SAFE):
print('calibration already performed during import')
else:
raise NotImplementedError('calibration for class {} is not implemented yet'.format(type(id).__name__))
def convert2gamma(id, directory, S1_noiseremoval=True, basename_extensions=None,
logpath=None, outdir=None, shellscript=None):
"""
general function for converting SAR images to GAMMA format
Parameters
----------
id: ~pyroSAR.drivers.ID
an SAR scene object of type pyroSAR.ID or any subclass
directory: str
the output directory for the converted images
S1_noiseremoval: bool
only Sentinel-1: should noise removal be applied to the image?
basename_extensions: list of str
names of additional parameters to append to the basename, e.g. ['orbitNumber_rel']
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
Returns
-------
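    Examples
    --------
    a minimal sketch, reusing the scene from the worked example in
    :func:`correctOSV` (filename and unpack directory are placeholders):

    >>> from pyroSAR import identify
    >>> from pyroSAR.gamma import convert2gamma
    # identify the SAR scene
    >>> scene = identify('S1A_IW_GRDH_1SDV_20150222T170750_20150222T170815_004739_005DD8_3768.zip')
    # unpack the zipped scene to an arbitrary directory
    >>> scene.unpack('/home/test')
    # convert the unpacked scene to GAMMA format
    >>> convert2gamma(id=scene, directory=scene.scene)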
"""
if not isinstance(id, ID):
raise IOError('id must be of type pyroSAR.ID')
if id.compression is not None:
raise RuntimeError('scene is not yet unpacked')
if not os.path.isdir(directory):
os.makedirs(directory)
if isinstance(id, CEOS_ERS):
if id.sensor in ['ERS1', 'ERS2']:
if id.product == 'SLC' \
and id.meta['proc_system'] in ['PGS-ERS', 'VMP-ERS', 'SPF-ERS']:
outname_base = id.outname_base(extensions=basename_extensions)
outname_base = '{}_{}_{}'.format(outname_base,
id.polarizations[0],
id.product.lower())
outname = os.path.join(directory, outname_base)
if not os.path.isfile(outname):
lea = id.findfiles('LEA_01.001')[0]
dat = id.findfiles('DAT_01.001')[0]
title = re.sub(r'\.PS$', '', os.path.basename(id.file))
isp.par_ESA_ERS(CEOS_SAR_leader=lea,
SLC_par=outname + '.par',
CEOS_DAT=dat,
SLC=outname,
inlist=[title],
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
else:
print('scene already converted')
else:
raise NotImplementedError('ERS {} product of {} processor in CEOS format not implemented yet'
.format(id.product, id.meta['proc_system']))
else:
raise NotImplementedError('sensor {} in CEOS format not implemented yet'.format(id.sensor))
elif isinstance(id, CEOS_PSR):
images = id.findfiles('^IMG-')
if id.product == '1.0':
raise RuntimeError('PALSAR level 1.0 products are not supported')
for image in images:
polarization = re.search('[HV]{2}', os.path.basename(image)).group(0)
outname_base = id.outname_base(extensions=basename_extensions)
if id.product == '1.1':
outname_base = '{}_{}_slc'.format(outname_base, polarization)
outname = os.path.join(directory, outname_base)
isp.par_EORC_PALSAR(CEOS_leader=id.file,
SLC_par=outname + '.par',
CEOS_data=image,
SLC=outname,
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
else:
outname_base = '{}_{}_mli_geo'.format(outname_base, polarization)
outname = os.path.join(directory, outname_base)
diff.par_EORC_PALSAR_geo(CEOS_leader=id.file,
MLI_par=outname + '.par',
DEM_par=outname + '_dem.par',
CEOS_data=image,
MLI=outname,
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
par2hdr(outname + '.par', outname + '.hdr')
elif isinstance(id, ESA):
"""
the command par_ASAR also accepts a K_dB argument for calibration
in which case the resulting image names will carry the suffix grd;
this is not implemented here but instead in function calibrate
"""
outname = os.path.join(directory, id.outname_base(extensions=basename_extensions))
if not id.is_processed(directory):
isp.par_ASAR(ASAR_ERS_file=os.path.basename(id.file),
output_name=outname,
outdir=os.path.dirname(id.file),
logpath=logpath,
shellscript=shellscript)
os.remove(outname + '.hdr')
for item in finder(directory, [os.path.basename(outname)], regex=True):
ext = '.par' if item.endswith('.par') else ''
                outname_base = os.path.basename(item)
                # str.strip(ext) would strip any of the characters '.', 'p', 'a', 'r'
                # from both ends, so remove the extension as a suffix instead
                if ext and outname_base.endswith(ext):
                    outname_base = outname_base[:-len(ext)]
                outname_base = outname_base\
                    .replace('.', '_')\
                    .replace('PRI', 'pri')\
                    .replace('SLC', 'slc')
outname = os.path.join(directory, outname_base + ext)
os.rename(item, outname)
if outname.endswith('.par'):
par2hdr(outname, outname.replace('.par', '.hdr'))
else:
raise IOError('scene already processed')
elif isinstance(id, SAFE):
if id.product == 'OCN':
raise IOError('Sentinel-1 OCN products are not supported')
if id.meta['category'] == 'A':
raise IOError('Sentinel-1 annotation-only products are not supported')
for xml_ann in finder(os.path.join(id.scene, 'annotation'), [id.pattern_ds], regex=True):
base = os.path.basename(xml_ann)
match = re.compile(id.pattern_ds).match(base)
tiff = os.path.join(id.scene, 'measurement', base.replace('.xml', '.tiff'))
xml_cal = os.path.join(id.scene, 'annotation', 'calibration', 'calibration-' + base)
product = match.group('product')
# specify noise calibration file
# L1 GRD product: thermal noise already subtracted, specify xml_noise to add back thermal noise
# SLC products: specify noise file to remove noise
# xml_noise = '-': noise file not specified
if (S1_noiseremoval and product == 'slc') or (not S1_noiseremoval and product == 'grd'):
xml_noise = os.path.join(id.scene, 'annotation', 'calibration', 'noise-' + base)
else:
xml_noise = '-'
fields = (id.outname_base(extensions=basename_extensions),
match.group('pol').upper(),
product)
outname = os.path.join(directory, '_'.join(fields))
pars = {'GeoTIFF': tiff,
'annotation_XML': xml_ann,
'calibration_XML': xml_cal,
'noise_XML': xml_noise,
'logpath': logpath,
'shellscript': shellscript,
'outdir': outdir}
if product == 'slc':
swath = match.group('swath').upper()
old = '{:_<{length}}'.format(id.acquisition_mode, length=len(swath))
outname = outname.replace(old, swath)
pars['SLC'] = outname
pars['SLC_par'] = outname + '.par'
pars['TOPS_par'] = outname + '.tops_par'
isp.par_S1_SLC(**pars)
else:
pars['MLI'] = outname
pars['MLI_par'] = outname + '.par'
isp.par_S1_GRD(**pars)
par2hdr(outname + '.par', outname + '.hdr')
elif isinstance(id, TSX):
images = id.findfiles(id.pattern_ds)
pattern = re.compile(id.pattern_ds)
for image in images:
pol = pattern.match(os.path.basename(image)).group('pol')
outname_base = id.outname_base(extensions=basename_extensions)
outname = os.path.join(directory, outname_base + '_' + pol)
pars = {'annotation_XML': id.file,
'pol': pol,
'logpath': logpath,
'shellscript': shellscript,
'outdir': outdir}
if id.product == 'SSC':
outname += '_slc'
pars['COSAR'] = image
pars['SLC_par'] = outname + '.par'
pars['SLC'] = outname
isp.par_TX_SLC(**pars)
elif id.product == 'MGD':
outname += '_mli'
pars['GeoTIFF'] = image
pars['GRD_par'] = outname + '.par'
pars['GRD'] = outname
isp.par_TX_GRD(**pars)
elif id.product in ['GEC', 'EEC']:
outname += '_mli_geo'
pars['GeoTIFF'] = image
pars['MLI_par'] = outname + '.par'
pars['DEM_par'] = outname + '_dem.par'
pars['GEO'] = outname
diff.par_TX_geo(**pars)
else:
raise RuntimeError('unknown product: {}'.format(id.product))
par2hdr(outname + '.par', outname + '.hdr')
else:
raise NotImplementedError('conversion for class {} is not implemented yet'.format(type(id).__name__))
def correctOSV(id, osvdir=None, osvType='POE', logpath=None, outdir=None, shellscript=None):
"""
correct GAMMA parameter files with orbit state vector information from dedicated OSV files;
OSV files are downloaded automatically to either the defined `osvdir` or a sub-directory `osv` of the scene directory
Parameters
----------
id: ~pyroSAR.drivers.ID
the scene to be corrected
osvdir: str
the directory of OSV files; subdirectories POEORB and RESORB are created automatically
osvType: {'POE', 'RES'}
the OSV type to be used
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
Returns
-------
Examples
--------
>>> from pyroSAR import identify
>>> from pyroSAR.gamma import correctOSV, convert2gamma
>>> filename = 'S1A_IW_GRDH_1SDV_20150222T170750_20150222T170815_004739_005DD8_3768.zip'
# identify the SAR scene
>>> scene = identify(filename)
# unpack the zipped scene to an arbitrary directory
>>> scene.unpack('/home/test')
>>> print(scene.scene)
/home/test/S1A_IW_GRDH_1SDV_20150222T170750_20150222T170815_004739_005DD8_3768.SAFE
# convert the unpacked scene to GAMMA format
>>> convert2gamma(id=scene, directory=scene.scene)
# correct the OSV information of the converted GAMMA images
>>> correctOSV(id=scene, osvdir='/home/test/osv')
See Also
--------
:meth:`pyroSAR.drivers.SAFE.getOSV`
"""
if not isinstance(id, ID):
raise IOError('id must be of type pyroSAR.ID')
if id.sensor not in ['S1A', 'S1B']:
raise IOError('this method is currently only available for Sentinel-1. Please stay tuned...')
if logpath is not None and not os.path.isdir(logpath):
os.makedirs(logpath)
if osvdir is None:
try:
auxdatapath = ExamineSnap().auxdatapath
except AttributeError:
auxdatapath = os.path.join(os.path.expanduser('~'), '.snap', 'auxdata')
osvdir = os.path.join(auxdatapath, 'Orbits', 'Sentinel-1')
try:
id.getOSV(osvdir, osvType)
except URLError:
print('..no internet access')
images = id.getGammaImages(id.scene)
# read parameter file entries into object
with ISPPar(images[0] + '.par') as par:
# extract acquisition time stamp
timestamp = datetime.strptime(par.date, '%Y-%m-%dT%H:%M:%S.%f').strftime('%Y%m%dT%H%M%S')
# find an OSV file matching the time stamp and defined OSV type(s)
with OSV(osvdir) as osv:
osvfile = osv.match(sensor=id.sensor, timestamp=timestamp, osvtype=osvType)
if not osvfile:
raise RuntimeError('no Orbit State Vector file found')
if osvfile.endswith('.zip'):
osvdir = os.path.join(id.scene, 'osv')
with zf.ZipFile(osvfile) as zip:
zip.extractall(path=osvdir)
osvfile = os.path.join(osvdir, os.path.basename(osvfile).replace('.zip', ''))
# update the GAMMA parameter file with the selected orbit state vectors
print('correcting state vectors with file {}'.format(osvfile))
for image in images:
isp.S1_OPOD_vec(SLC_par=image + '.par',
OPOD=osvfile,
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
def geocode(scene, dem, tempdir, outdir, targetres, scaling='linear', func_geoback=1,
func_interp=2, nodata=(0, -99), sarSimCC=False, osvdir=None, allow_RES_OSV=False,
cleanup=True, normalization_method=2, export_extra=None, basename_extensions=None,
removeS1BorderNoise=True, removeS1BorderNoiseMethod='pyroSAR'):
"""
general function for geocoding SAR images with GAMMA
Parameters
----------
scene: str or ~pyroSAR.drivers.ID
the SAR scene to be processed
dem: str
the reference DEM in GAMMA format
tempdir: str
a temporary directory for writing intermediate files
outdir: str
the directory for the final GeoTiff output files
targetres: int
the target resolution in meters
scaling: {'linear', 'db'} or list
the value scaling of the backscatter values; either 'linear', 'db' or a list of both, i.e. ['linear', 'db']
func_geoback: {0, 1, 2, 3, 4, 5, 6, 7}
backward geocoding interpolation mode (see GAMMA command geocode_back)
- 0: nearest-neighbor
- 1: bicubic spline (default)
- 2: bicubic-spline, interpolate log(data)
- 3: bicubic-spline, interpolate sqrt(data)
- 4: B-spline interpolation (default B-spline degree: 5)
- 5: B-spline interpolation sqrt(x) (default B-spline degree: 5)
- 6: Lanczos interpolation (default Lanczos function order: 5)
- 7: Lanczos interpolation sqrt(x) (default Lanczos function order: 5)
NOTE: log and sqrt interpolation modes should only be used with non-negative data!
NOTE: the Gamma recommendation for MLI data: "The interpolation should be performed on
the square root of the data. A mid-order (3 to 5) B-spline interpolation is recommended."
func_interp: {0, 1, 2, 3}
output lookup table values in regions of layover, shadow, or DEM gaps (see GAMMA command gc_map)
- 0: set to (0., 0.)
- 1: linear interpolation across these regions
- 2: actual value
- 3: nn-thinned
nodata: tuple
the nodata values for the output files; defined as a tuple with two values, the first for linear,
the second for logarithmic scaling
sarSimCC: bool
perform geocoding with SAR simulation cross correlation?
If False, geocoding is performed with the Range-Doppler approach using orbit state vectors
osvdir: str
a directory for Orbit State Vector files;
this is currently only used for Sentinel-1, where two subdirectories POEORB and RESORB are created;
if set to None, a subdirectory OSV is created in the directory of the unpacked scene.
allow_RES_OSV: bool
also allow the less accurate RES orbit files to be used?
Otherwise the function will raise an error if no POE file exists
cleanup: bool
should all files written to the temporary directory during function execution be deleted after processing?
normalization_method: {1, 2}
the topographic normalization approach to be used
- 1: first geocoding, then terrain flattening
- 2: first terrain flattening, then geocoding; see `Small 2011 <https://doi.org/10.1109/Tgrs.2011.2120616>`_
export_extra: list or None
a list of image file IDs to be exported to outdir
- format is GeoTiff if the file is geocoded and ENVI otherwise. Non-geocoded images can be converted via Gamma
command data2tiff, yet the output was found to be unreadable by GIS software
- scaling of SAR image products is applied as defined by parameter `scaling`
- see Notes for ID options
basename_extensions: list of str
names of additional parameters to append to the basename, e.g. ['orbitNumber_rel']
removeS1BorderNoise: bool, optional
enables removal of S1 GRD border noise (default: True)
removeS1BorderNoiseMethod: str
the border noise removal method to be applied; see :func:`pyroSAR.S1.removeGRDBorderNoise` for details; one of the following:
- 'ESA': the pure implementation as described by ESA
- 'pyroSAR': the ESA method plus the custom pyroSAR refinement
Returns
-------
Note
----
| intermediate output files
| DEM products are named <scene identifier>_<ID>, e.g. `S1A__IW___A_20141012T162337_inc_geo`
| SAR products will additionally contain the polarization, e.g. `S1A__IW___A_20141012T162337_VV_grd_mli`
| IDs in brackets are only written if selected by `export_extra`
- images in range-Doppler geometry
* **grd**: the ground range detected SAR intensity image
* **grd_mli**: the multi-looked grd image with approached target resolution
* specific to normalization method 2:
+ **pix_ellip_sigma0**: ellipsoid-based pixel area
+ **pix_area_sigma0**: actual illuminated area as obtained from integrating DEM-facets (command pixel_area)
+ **pix_fine**: refined pixel area normalization factor (pix_ellip_sigma0 / pix_area_sigma0)
+ **grd_mli_pan**: the pixel area normalized MLI (grd_mli * pix_fine)
- images in map geometry
* **dem_seg_geo**: dem subsetted to the extent of the intersect between input DEM and SAR image
* (**u_geo**): zenith angle of surface normal vector n (angle between z and n)
* (**v_geo**): orientation angle of n (between x and projection of n in xy plane)
* **inc_geo**: local incidence angle (between surface normal and look vector)
* (**psi_geo**): projection angle (between surface normal and image plane normal)
* **pix_geo**: pixel area normalization factor (command gc_map)
* **ls_map_geo**: layover and shadow map (in map projection)
* (**sim_sar_geo**): simulated SAR backscatter image
- additional files
* **lut_init**: initial geocoding lookup table
- files specific to SAR simulation cross-correlation geocoding
* **lut_fine**: refined geocoding lookup table
* **diffpar**: ISP offset/interferogram parameter file
* **offs**: offset estimates (fcomplex)
* **coffs**: culled range and azimuth offset estimates (fcomplex)
* **coffsets**: culled offset estimates and cross correlation values (text format)
* **ccp**: cross-correlation of each patch (0.0->1.0) (float)
Examples
--------
geocode a Sentinel-1 scene and export the local incidence angle map with it
>>> from pyroSAR.gamma import geocode
>>> filename = 'S1A_IW_GRDH_1SDV_20180829T170656_20180829T170721_023464_028DE0_F7BD.zip'
>>> geocode(scene=filename, dem='demfile', tempdir='tempdir', outdir='outdir', targetres=20,
... scaling='db', export_extra=['dem_seg_geo', 'inc_geo', 'ls_map_geo'])
.. figure:: figures/gamma_geocode.png
:scale: 25%
:align: center
Workflow diagram for function geocode using normalization method 2 for processing a Sentinel-1 Ground Range
Detected (GRD) scene to radiometrically terrain corrected (RTC) backscatter.
"""
if normalization_method == 2 and func_interp != 2:
raise RuntimeError('parameter func_interp must be set to 2 if normalization_method is set to 2; '
'see documentation of Gamma command pixel_area')
if isinstance(scene, ID):
scene = identify(scene.scene)
elif isinstance(scene, str):
scene = identify(scene)
else:
raise RuntimeError("'scene' must be of type str or pyroSAR.ID")
if scene.sensor not in ['S1A', 'S1B']:
raise IOError('this method is currently only available for Sentinel-1. Please stay tuned...')
if sarSimCC:
raise IOError('geocoding with cross correlation offset refinement is still in the making. Please stay tuned...')
if export_extra is not None and not isinstance(export_extra, list):
raise TypeError("parameter 'export_extra' must either be None or a list")
for dir in [tempdir, outdir]:
if not os.path.isdir(dir):
os.makedirs(dir)
if scene.is_processed(outdir):
print('scene {} already processed'.format(scene.outname_base(extensions=basename_extensions)))
return
scaling = [scaling] if isinstance(scaling, str) else scaling if isinstance(scaling, list) else []
scaling = union(scaling, ['db', 'linear'])
if len(scaling) == 0:
raise IOError('wrong input type for parameter scaling')
if scene.compression is not None:
print('unpacking scene..')
try:
scene.unpack(tempdir)
except RuntimeError:
print('scene appears to have been processed before; exiting')
return
else:
scene.scene = os.path.join(tempdir, os.path.basename(scene.file))
os.makedirs(scene.scene)
shellscript = os.path.join(scene.scene, scene.outname_base(extensions=basename_extensions) + '_commands.sh')
path_log = os.path.join(scene.scene, 'logfiles')
if not os.path.isdir(path_log):
os.makedirs(path_log)
if scene.sensor in ['S1A', 'S1B'] and removeS1BorderNoise:
print('removing border noise..')
scene.removeGRDBorderNoise(method=removeS1BorderNoiseMethod)
print('converting scene to GAMMA format..')
convert2gamma(scene, scene.scene, logpath=path_log, outdir=scene.scene,
basename_extensions=basename_extensions, shellscript=shellscript)
if scene.sensor in ['S1A', 'S1B']:
print('updating orbit state vectors..')
if allow_RES_OSV:
osvtype = ['POE', 'RES']
else:
osvtype = 'POE'
try:
correctOSV(id=scene, osvdir=osvdir, osvType=osvtype,
logpath=path_log, outdir=scene.scene, shellscript=shellscript)
except RuntimeError:
print('orbit state vector correction failed for scene {}'.format(scene.scene))
return
calibrate(scene, scene.scene, logpath=path_log, outdir=scene.scene, shellscript=shellscript)
images = [x for x in scene.getGammaImages(scene.scene) if x.endswith('_grd') or x.endswith('_slc_cal')]
products = list(images)
print('multilooking..')
for image in images:
multilook(infile=image, outfile=image + '_mli', targetres=targetres,
logpath=path_log, outdir=scene.scene, shellscript=shellscript)
images = [x + '_mli' for x in images]
products.extend(images)
master = images[0]
# create output names for files to be written
# appreciated files will be written
# depreciated files will be set to '-' in the GAMMA function call and are thus not written
n = Namespace(scene.scene, scene.outname_base(extensions=basename_extensions))
n.appreciate(['dem_seg_geo', 'lut_init', 'pix_geo', 'inc_geo', 'ls_map_geo'])
n.depreciate(['sim_sar_geo', 'u_geo', 'v_geo', 'psi_geo'])
# if sarSimCC:
# n.appreciate(['ccp', 'lut_fine'])
if export_extra is not None:
n.appreciate(export_extra)
ovs_lat, ovs_lon = ovs(dem + '.par', targetres)
master_par = ISPPar(master + '.par')
gc_map_args = {'DEM_par': dem + '.par',
'DEM': dem,
'DEM_seg_par': n.dem_seg_geo + '.par',
'DEM_seg': n.dem_seg_geo,
'lookup_table': n.lut_init,
'lat_ovr': ovs_lat,
'lon_ovr': ovs_lon,
'sim_sar': n.sim_sar_geo,
'u': n.u_geo,
'v': n.v_geo,
'inc': n.inc_geo,
'psi': n.psi_geo,
'pix': n.pix_geo,
'ls_map': n.ls_map_geo,
'frame': 8,
'ls_mode': func_interp,
'logpath': path_log,
'shellscript': shellscript,
'outdir': scene.scene}
print('creating DEM products..')
if master_par.image_geometry == 'GROUND_RANGE':
gc_map_args.update({'GRD_par': master + '.par'})
diff.gc_map_grd(**gc_map_args)
else:
gc_map_args.update({'MLI_par': master + '.par',
'OFF_par': '-'})
diff.gc_map(**gc_map_args)
for item in ['dem_seg_geo', 'sim_sar_geo', 'u_geo', 'v_geo', 'psi_geo', 'pix_geo', 'inc_geo', 'ls_map_geo']:
if n.isappreciated(item):
mods = {'data_type': 1} if item == 'ls_map_geo' else None
par2hdr(n.dem_seg_geo + '.par', n.get(item) + '.hdr', mods)
sim_width = ISPPar(n.dem_seg_geo + '.par').width
if sarSimCC:
raise IOError('geocoding with cross correlation offset refinement is still in the making. Please stay tuned...')
else:
lut_final = n.lut_init
######################################################################
# normalization and backward geocoding approach 1 ####################
######################################################################
print('geocoding and normalization..')
if normalization_method == 1:
method_suffix = 'geo_norm'
for image in images:
diff.geocode_back(data_in=image,
width_in=master_par.range_samples,
lookup_table=lut_final,
data_out=image + '_geo',
width_out=sim_width,
interp_mode=func_geoback,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(n.dem_seg_geo + '.par', image + '_geo.hdr')
lat.product(data_1=image + '_geo',
data_2=n.pix_geo,
product=image + '_geo_pan',
width=sim_width,
bx=1,
by=1,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(n.dem_seg_geo + '.par', image + '_geo_pan.hdr')
lat.sigma2gamma(pwr1=image + '_geo_pan',
inc=n.inc_geo,
gamma=image + '_{}'.format(method_suffix),
width=sim_width,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(n.dem_seg_geo + '.par', image + '_{}.hdr'.format(method_suffix))
products.extend([image + '_geo', image + '_geo_pan'])
######################################################################
# normalization and backward geocoding approach 2 ####################
######################################################################
elif normalization_method == 2:
method_suffix = 'norm_geo'
# newer versions of Gamma enable creating the ratio of ellipsoid based
# pixel area and DEM-facet pixel area directly with command pixel_area
if hasarg(diff.pixel_area, 'sigma0_ratio'):
n.appreciate(['pix_fine'])
n.depreciate(['pix_area_sigma0'])
diff.pixel_area(MLI_par=master + '.par',
DEM_par=n.dem_seg_geo + '.par',
DEM=n.dem_seg_geo,
lookup_table=lut_final,
ls_map=n.ls_map_geo,
inc_map=n.inc_geo,
pix_sigma0=n.pix_area_sigma0,
sigma0_ratio=n.pix_fine,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(master + '.par', n.pix_fine + '.hdr')
else:
n.appreciate(['pix_area_sigma0', 'pix_ellip_sigma0', 'pix_fine'])
# actual illuminated area as obtained from integrating DEM-facets (pix_area_sigma0 | pix_area_gamma0)
diff.pixel_area(MLI_par=master + '.par',
DEM_par=n.dem_seg_geo + '.par',
DEM=n.dem_seg_geo,
lookup_table=lut_final,
ls_map=n.ls_map_geo,
inc_map=n.inc_geo,
pix_sigma0=n.pix_area_sigma0,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(master + '.par', n.pix_area_sigma0 + '.hdr')
# ellipsoid-based pixel area (ellip_pix_sigma0)
isp.radcal_MLI(MLI=master,
MLI_par=master + '.par',
OFF_par='-',
CMLI=master + '_cal',
refarea_flag=1, # calculate sigma0, scale area by sin(inc_ang)/sin(ref_inc_ang)
pix_area=n.pix_ellip_sigma0,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(master + '.par', n.pix_ellip_sigma0 + '.hdr')
par2hdr(master + '.par', master + '_cal.hdr')
# ratio of ellipsoid based pixel area and DEM-facet pixel area
lat.ratio(d1=n.pix_ellip_sigma0,
d2=n.pix_area_sigma0,
ratio=n.pix_fine,
width=master_par.range_samples,
bx=1,
by=1,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(master + '.par', n.pix_fine + '.hdr')
for image in images:
# sigma0 = MLI * ellip_pix_sigma0 / pix_area_sigma0
# gamma0 = MLI * ellip_pix_sigma0 / pix_area_gamma0
lat.product(data_1=image,
data_2=n.pix_fine,
product=image + '_pan',
width=master_par.range_samples,
bx=1,
by=1,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(master + '.par', image + '_pan.hdr')
diff.geocode_back(data_in=image + '_pan',
width_in=master_par.range_samples,
lookup_table=lut_final,
data_out=image + '_pan_geo',
width_out=sim_width,
interp_mode=func_geoback,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(n.dem_seg_geo + '.par', image + '_pan_geo.hdr')
lat.sigma2gamma(pwr1=image + '_pan_geo',
inc=n.inc_geo,
gamma=image + '_{}'.format(method_suffix),
width=sim_width,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(n.dem_seg_geo + '.par', image + '_{}.hdr'.format(method_suffix))
products.extend([image + '_pan', image + '_pan_geo'])
else:
raise RuntimeError('unknown option for normalization_method')
######################################################################
print('conversion to (dB and) geotiff..')
def exporter(data_in, outdir, nodata, scale='linear', dtype=2):
if scale == 'db':
if re.search('_geo', os.path.basename(data_in)):
width = sim_width
refpar = n.dem_seg_geo + '.par'
else:
width = master_par.range_samples
refpar = master + '.par'
lat.linear_to_dB(data_in=data_in,
data_out=data_in + '_db',
width=width,
inverse_flag=0,
null_value=nodata,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
par2hdr(refpar, data_in + '_db.hdr')
data_in += '_db'
if re.search('_geo', os.path.basename(data_in)):
outfile = os.path.join(outdir, os.path.basename(data_in) + '.tif')
disp.data2geotiff(DEM_par=n.dem_seg_geo + '.par',
data=data_in,
type=dtype,
GeoTIFF=outfile,
nodata=nodata,
logpath=path_log,
outdir=scene.scene,
shellscript=shellscript)
else:
outfile = os.path.join(outdir, os.path.basename(data_in))
shutil.copyfile(data_in, outfile)
shutil.copyfile(data_in + '.hdr', outfile + '.hdr')
for image in images:
for scale in scaling:
exporter(data_in=image + '_{}'.format(method_suffix), scale=scale, dtype=2,
nodata=dict(zip(('linear', 'db'), nodata))[scale], outdir=outdir)
if scene.sensor in ['S1A', 'S1B']:
outname_base = scene.outname_base(extensions=basename_extensions)
shutil.copyfile(os.path.join(scene.scene, 'manifest.safe'),
os.path.join(outdir, outname_base + '_manifest.safe'))
if export_extra is not None:
print('exporting extra products..')
for key in export_extra:
# SAR image products
product_match = [x for x in products if x.endswith(key)]
if len(product_match) > 0:
for product in product_match:
for scale in scaling:
exporter(data_in=product, outdir=outdir, scale=scale, dtype=2,
nodata=dict(zip(('linear', 'db'), nodata))[scale])
# ancillary (DEM) products
elif n.isfile(key) and key not in ['lut_init']:
filename = n[key]
dtype = 5 if key == 'ls_map_geo' else 2
nodata = 0
exporter(filename, outdir, dtype=dtype, nodata=nodata)
else:
print('cannot export file {}'.format(key))
shutil.copyfile(shellscript, os.path.join(outdir, os.path.basename(shellscript)))
if cleanup:
print('cleaning up temporary files..')
shutil.rmtree(scene.scene)
def ovs(parfile, targetres):
"""
compute DEM oversampling factors for a target resolution in meters
Parameters
----------
parfile: str
a GAMMA DEM parameter file
targetres: int or float
the target resolution in meters
Returns
-------
tuple of float
the oversampling factors for latitude and longitude
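Examples
--------
a minimal usage sketch; 'dem_seg.par' is a hypothetical DEM parameter file.
For a DEM with roughly 30 m posting and a target resolution of 20 m the
factors come out at about 1.5
>>> ovs_lat, ovs_lon = ovs(parfile='dem_seg.par', targetres=20)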
"""
# read DEM parameter file
dempar = ISPPar(parfile)
# extract coordinates and pixel posting of the DEM
if hasattr(dempar, 'post_north'):
post_north, post_east = [abs(float(x)) for x in
[dempar.post_north, dempar.post_east]]
else:
res_lat, res_lon = [abs(float(x)) for x in [dempar.post_lat, dempar.post_lon]]
# compute center coordinate
lat = float(dempar.corner_lat) - (res_lat * (dempar.nlines // 2))
lon = float(dempar.corner_lon) + (res_lon * (dempar.width // 2))
# convert DEM resolution to meters
post_north = haversine(lat, lon, lat + res_lat, lon)
post_east = haversine(lat, lon, lat, lon + res_lon)
# compute resampling factors for the DEM
ovs_lat = post_north / targetres
ovs_lon = post_east / targetres
return ovs_lat, ovs_lon
def multilook(infile, outfile, targetres, logpath=None, outdir=None, shellscript=None):
"""
multilooking of SLC and MLI images
If the image is in slant range geometry, the ground range resolution is computed by dividing the range pixel
spacing by the sine of the incidence angle.
The looks in range and azimuth are chosen to approximate the target resolution by rounding the ratio between
target resolution and ground range/azimuth pixel spacing to the nearest integer.
An ENVI HDR parameter file is automatically written for better handling in other software.
Parameters
----------
infile: str
a SAR image in GAMMA format with a parameter file of name <infile>.par
outfile: str
the name of the output GAMMA file
targetres: int
the target resolution in ground range
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
"""
# read the input parameter file
par = ISPPar(infile + '.par')
rlks, azlks = multilook_factors(sp_rg=par.range_pixel_spacing,
sp_az=par.azimuth_pixel_spacing,
tr_rg=targetres,
tr_az=targetres,
geometry=par.image_geometry,
incidence=par.incidence_angle)
pars = {'rlks': rlks,
'azlks': azlks,
'logpath': logpath,
'shellscript': shellscript,
'outdir': outdir}
if par.image_format in ['SCOMPLEX', 'FCOMPLEX']:
# multilooking for SLC images
pars['SLC'] = infile
pars['SLC_par'] = infile + '.par'
pars['MLI'] = outfile
pars['MLI_par'] = outfile + '.par'
isp.multi_look(**pars)
else:
# multilooking for MLI images
pars['MLI_in'] = infile
pars['MLI_in_par'] = infile + '.par'
pars['MLI_out'] = outfile
pars['MLI_out_par'] = outfile + '.par'
isp.multi_look_MLI(**pars)
par2hdr(outfile + '.par', outfile + '.hdr')
def S1_deburst(burst1, burst2, burst3, name_out, rlks=5, azlks=1,
replace=False, logpath=None, outdir=None, shellscript=None):
"""
Debursting of Sentinel-1 SLC imagery in GAMMA
The procedure consists of two steps. First antenna pattern deramping and
then mosaicing of the single deramped bursts.
For mosaicing, the burst boundaries are calculated from the number of looks in range (`rlks`)
and azimuth (`azlks`), in this case 5 range looks and 1 azimuth looks.
Alternately 10 range looks and 2 azimuth looks could be used.
Parameters
----------
burst1: str
burst image 1
burst2: str
burst image 2
burst3: str
burst image 3
name_out: str
the name of the output file
rlks: int
the number of looks in range
azlks: int
the number of looks in azimuth
replace: bool
replace the burst images by the new file? If True, the three burst images will be deleted.
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
Returns
-------
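Examples
--------
a minimal usage sketch; the burst files 'burst1', 'burst2', 'burst3' and their
.par/.tops_par companions are assumed to exist
>>> S1_deburst('burst1', 'burst2', 'burst3', name_out='mosaic', rlks=5, azlks=1)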
"""
for burst in [burst1, burst2, burst3]:
if not os.path.isfile(burst) or not os.path.isfile(burst + '.par') or not os.path.isfile(burst + '.tops_par'):
raise IOError('input files missing; parameter files must be named e.g. <burst1>.par and <burst1>.tops_par')
outpath = os.path.dirname(name_out)
if not os.path.isdir(outpath):
os.makedirs(outpath)
tab_in = os.path.join(outpath, 'tab_deramp1')
tab_out = os.path.join(outpath, 'tab_deramp2')
with open(tab_in, 'w') as out1:
with open(tab_out, 'w') as out2:
for item in [burst1, burst2, burst3]:
out1.write(item + '\t' + item + '.par\t' + item + '.tops_par\n')
out2.write(item + '_drp\t' + item + '_drp.par\t' + item + '_drp.tops_par\n')
isp.SLC_deramp_S1_TOPS(SLC1_tab=tab_in,
SLC2_tab=tab_out,
mode=0,
phflg=0,
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
isp.SLC_mosaic_S1_TOPS(SLC_tab=tab_out,
SLC=name_out,
SLC_par=name_out + '.par',
rlks=rlks,
azlks=azlks,
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
if replace:
for item in [burst1, burst2, burst3]:
for subitem in [item + x for x in ['', '.par', '.tops_par']]:
os.remove(subitem)
for item in [burst1, burst2, burst3]:
for subitem in [item + x for x in ['_drp', '_drp.par', '_drp.tops_par']]:
os.remove(subitem)
os.remove(tab_in)
os.remove(tab_out)
|
<filename>networkapiclient/EventLog.py<gh_stars>10-100
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapiclient.GenericClient import GenericClient
from networkapiclient.exception import InvalidParameterError
from networkapiclient.utils import is_valid_int_param, get_list_map
from networkapiclient.Pagination import Pagination
import urllib
class EventLog(GenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(
EventLog,
self).__init__(
networkapi_url,
user,
password,
user_ldap)
def find_logs(
self,
user_name,
first_date,
start_time,
last_date,
end_time,
action,
functionality,
parameter,
pagination):
"""
Search all logs, filtering by the given parameters.
:param user_name: Filter by user_name
:param first_date: Sets initial date for begin of the filter
:param start_time: Sets initial time
:param last_date: Sets final date
:param end_time: Sets final time and ends the filter; together with the dates this defines the search window
:param action: Filter by action (Create, Update or Delete)
:param functionality: Filter by class
:param parameter: Filter by parameter
:param pagination: Class with all data needed to paginate
:return: Following dictionary:
::
{'eventlog': {'id_usuario' : < id_user >,
'hora_evento': < hora_evento >,
'acao': < acao >,
'funcionalidade': < funcionalidade >,
'parametro_anterior': < parametro_anterior >,
'parametro_atual': < parametro_atual > }
'total' : {< total_registros >} }
:raise InvalidParameterError: Some parameter was invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
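Example (an illustrative sketch; the date formats shown are assumptions and
`pagination` is a previously created Pagination instance):
::
>>> from networkapiclient.EventLog import EventLog
>>> client = EventLog('http://networkapi.example.com/', 'user', 'password')
>>> result = client.find_logs('admin', '01/01/2020', '00:00', '31/01/2020',
'23:59', 'Create', '', '', pagination)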
"""
if not isinstance(pagination, Pagination):
raise InvalidParameterError(
u"Invalid parameter: pagination must be a class of type 'Pagination'.")
eventlog_map = dict()
eventlog_map["start_record"] = pagination.start_record
eventlog_map["end_record"] = pagination.end_record
eventlog_map["asorting_cols"] = pagination.asorting_cols
eventlog_map["searchable_columns"] = pagination.searchable_columns
eventlog_map["custom_search"] = pagination.custom_search
eventlog_map["usuario"] = user_name
eventlog_map["data_inicial"] = first_date
eventlog_map["hora_inicial"] = start_time
eventlog_map["data_final"] = last_date
eventlog_map["hora_final"] = end_time
eventlog_map["acao"] = action
eventlog_map["funcionalidade"] = functionality
eventlog_map["parametro"] = parameter
url = "eventlog/find/"
code, xml = self.submit({'eventlog': eventlog_map}, 'POST', url)
key = "eventlog"
return get_list_map(self.response(code, xml, key), key)
def get_choices(self):
"""
Returns a dictionary with the values used to construct the select box of actions,
functionalities and users.
:return: the following dictionary:
::
{'choices_map': {'usuario' : [{ 'usuario' : < user_id >
'usuario__nome' : < nome >
'usuario__user' : < user > }]
'acao' : ['action1', 'action2', 'action3' .. 'actionN']
'funcionalidade' : ['functionality1', 'functionality2', .. 'functionalityN'] }}
"""
url = "eventlog/choices/"
code, xml = self.submit(None, 'POST', url)
key = "choices"
return get_list_map(self.response(code, xml, key), key)
def get_version(self):
"""
Returns the API's version
:return:
::
{'version_api': <version_api> }
"""
url = "eventlog/version/"
code, xml = self.submit(None, 'GET', url)
key = "version"
return get_list_map(self.response(code, xml, key), key)
|
<filename>serialFootPedalControl.py<gh_stars>1-10
import serial #from library PySerial
import keyboard
import json
# load the pedal configuration; the context manager closes the file handle
with open("pedalConfig.json") as configFile:
configData = json.load(configFile)
numPedals = configData["numPedals"]
#press actions (order in array is index of pedal)
onKey = configData["onKey"]
onKeySHIFT = configData["onKeySHIFT"]
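# example pedalConfig.json (illustrative values; the keys match the reads above):
# {
#     "port": "COM3",
#     "numPedals": 3,
#     "onKey": ["PTT", "SHIFT", "space"],
#     "onKeySHIFT": ["PTT TOGGLE", "SHIFT", "ctrl"]
# }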
def PTTHook(a):
pass
def setPTTHook(input_fn):
global PTTHook
PTTHook = input_fn
shiftState = False
PTTLockedState = False
ser = serial.Serial(configData["port"], 9600)
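# serial protocol, as inferred from the handlers below: the controller sends
# one integer per line; 0..numPedals-1 are press events and
# numPedals..2*numPedals-1 the matching releases, while the host writes
# b'o' / b'f' to switch the PTT LED on / off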
while True:
code = int(ser.readline())
#key presses
#~~~~~~~~~~~~~~~~~~~ #when shift is OFF# ~~~~~~~~~~~~~~~~~~~~~~#
if not shiftState:
######### press events ##########
if code < numPedals:
if onKey[code] == "PTT":
if PTTLockedState:
PTTLockedState = False
print("Code: " + str(code) +" PTT UNLOCKED!")
else: # if unlocked
print("Code: " + str(code) +" PTT ON!")
PTTHook(True) # send PTT on
ser.write(bytes(b'o')) #turn LED on
elif onKey[code] == "SHIFT":
shiftState = True
print("Code: " + str(code) +" SHIFT ON!")
else:
keyboard.press(onKey[code])
print("Code: " + str(code) + " " + onKey[code])
######### release events ##########
else: #( when code >= numPedals)
if onKey[code-numPedals] == "PTT":
if PTTLockedState:
pass
#do nothing
else: #if unlocked
print("Code: " + str(code) +" PTT OFF!")
PTTHook(False) # send PTT off
ser.write(bytes(b'f'))
elif onKey[code-numPedals] == "SHIFT":
shiftState = False
print("Code: " + str(code) +" SHIFT OFF (already was)!")
else:
keyboard.release(onKey[code-numPedals])
keyboard.release(onKeySHIFT[code-numPedals]) #to make sure shifted keys are removed too
print("Code: " + str(code) + " " + onKey[code-numPedals] +" OFF")
print("(and) Code: shift of " + str(code) + " " + onKeySHIFT[code-numPedals] +" OFF")
#~~~~~~~~~~~~~~~~~~~ #when shift is ON# ~~~~~~~~~~~~~~~~~~~~~~#
else:
######### press events ##########
if code < numPedals:
if onKeySHIFT[code] == "PTT TOGGLE":
if PTTLockedState:
PTTLockedState = False
PTTHook(False) #send PTT off
print("Code: " + str(code) +" PTT UNLOCKED AND OFF!")
ser.write(bytes(b'f'))
else: #if unlocked
PTTLockedState = True
PTTHook(True) #send PTT ON
print("Code: " + str(code) +" PTT LOCKED ON!")
ser.write(bytes(b'o'))
elif onKeySHIFT[code] == "SHIFT":
shiftState = True
print("Code: " + str(code) +" SHIFT ON (already was)!")
else:
keyboard.press(onKeySHIFT[code])
print("Code: " + str(code) + " " + onKeySHIFT[code])
######### release events ##########
else: #(when code >= numPedals)
if onKeySHIFT[code - numPedals] == "PTT TOGGLE":
pass
#Nothing
elif onKeySHIFT[code - numPedals] == "SHIFT":
shiftState = False
print("Code: " + str(code) + " SHIFT OFF!")
else:
keyboard.release(onKey[code - numPedals])
keyboard.release(onKeySHIFT[code - numPedals]) #to make sure shifted keys are removed too
print("Code: " + str(code) + " " + onKeySHIFT[code - numPedals] +" OFF")
print("(and) Code: normal of" + " " + str(code) + onKey[code - numPedals] +" OFF")
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a modified version of the original SEED network
"""SEED agent using Keras."""
from seed_rl.football import observation
import tensorflow as tf
from seed_rl.common import utils
import numpy as np
from .base_vtrace_network import AgentOutput, BaseVTraceNetwork
class _Stack(tf.Module):
"""Stack of pooling and convolutional blocks with residual connections."""
def __init__(self, num_ch, num_blocks):
super(_Stack, self).__init__(name='stack')
self._conv = tf.keras.layers.Conv2D(num_ch, 3, strides=1, padding='same',
kernel_initializer='lecun_normal')
self._max_pool = tf.keras.layers.MaxPool2D(
pool_size=3, padding='same', strides=2)
self._res_convs0 = [
tf.keras.layers.Conv2D(
num_ch, 3, strides=1, padding='same', name='res_%d/conv2d_0' % i,
kernel_initializer='lecun_normal')
for i in range(num_blocks)
]
self._res_convs1 = [
tf.keras.layers.Conv2D(
num_ch, 3, strides=1, padding='same', name='res_%d/conv2d_1' % i,
kernel_initializer='lecun_normal')
for i in range(num_blocks)
]
def __call__(self, conv_out):
# Downscale.
conv_out = self._conv(conv_out)
conv_out = self._max_pool(conv_out)
# Residual block(s).
for (res_conv0, res_conv1) in zip(self._res_convs0, self._res_convs1):
block_input = conv_out
conv_out = tf.nn.relu(conv_out)
conv_out = res_conv0(conv_out)
conv_out = tf.nn.relu(conv_out)
conv_out = res_conv1(conv_out)
conv_out += block_input
return conv_out
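# note: each _Stack is one IMPALA-style stage, i.e. a 3x3 convolution, a
# strided 3x3 max-pool for downscaling, and num_blocks residual blocks of
# two 3x3 convolutions each, matching the layers built in __init__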
def make_logits(layer_fn, action_specs):
return [layer_fn(n, 'policy_logits') for n in action_specs]
def apply_net(action_specs, policy_logits, core_output):
n_actions = len(action_specs)
arr = [policy_logits[i](core_output) for i in range(n_actions)]
arr = tf.stack(arr)
arr = tf.transpose(arr, perm=[1, 0, 2])
return arr
def post_process_logits(action_specs, policy_logits):
all_logits = np.sum(action_specs)
new_shape = policy_logits.shape[:-2] + [all_logits]
return tf.reshape(policy_logits, new_shape)
def choose_action(action_specs, policy_logits, sample=True):
n_actions = len(action_specs)
policy_logits = tf.transpose(policy_logits, perm=[1, 0, 2])
if not sample:
new_action = tf.stack([
tf.math.argmax(
policy_logits[i], -1, output_type=tf.int64) for i in range(n_actions)])
else:
new_action = tf.stack([tf.squeeze(
tf.random.categorical(
policy_logits[i], 1, dtype=tf.int64), 1) for i in range(n_actions)])
new_action = tf.transpose(new_action, perm=[1, 0])
return new_action
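# shape sketch for the three helpers above (illustrative): with batch size B
# and action_specs = [n, n, ...] (tf.stack requires all heads to emit the
# same number of units n), apply_net yields logits of shape [B, heads, n],
# choose_action returns one action index per head as [B, heads], and
# post_process_logits flattens the logits to [B, sum(action_specs)]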
class GFootball(BaseVTraceNetwork):
"""Agent with ResNet, but without LSTM and additional inputs.
Four blocks instead of three in ImpalaAtariDeep.
"""
def __init__(self, action_specs):
super(GFootball, self).__init__(name='gfootball')
self._config = {'sample_actions': True}
# Parameters and layers for unroll.
self._action_specs = action_specs
# Parameters and layers for _torso.
self._stacks = [
_Stack(num_ch, num_blocks)
for num_ch, num_blocks in [(16, 2), (32, 2), (32, 2), (32, 2)]
]
self._conv_to_linear = tf.keras.layers.Dense(
256, kernel_initializer='lecun_normal')
# Layers for _head.
self._policy_logits = make_logits(
lambda num_units, name: tf.keras.layers.Dense(
num_units,
name=name,
kernel_initializer='lecun_normal'),
self._action_specs)
self._baseline = tf.keras.layers.Dense(
1, name='baseline', kernel_initializer='lecun_normal')
self._process_role = tf.keras.Sequential([
# distinct layer names; the original reused the 'baseline' label, which
# collides with the baseline head and within the Sequential itself
tf.keras.layers.Dense(16, activation='relu', name='role_dense_0', kernel_initializer='lecun_normal'),
tf.keras.layers.Dense(8, activation='relu', name='role_dense_1', kernel_initializer='lecun_normal')])
@tf.function
def initial_state(self, batch_size):
return ()
def change_config(self, new_config):
self._config = new_config
def _torso(self, unused_prev_action, env_output):
_, _, frame = env_output
frame = observation.unpackbits(frame)
frame /= 255
# Beware that this needs an appropriate wrapper in gfootball
# and only supports 4 info layers stacked 4 times plus one role layer
#print(frame.shape)
one_hot_role = frame[:, 0, 0:10, 16] # there are 10 roles https://github.com/google-research/football/blob/master/gfootball/doc/observation.md
frame = frame[:, :, :, 0:16]
conv_out = frame
for stack in self._stacks:
conv_out = stack(conv_out)
conv_out = tf.nn.relu(conv_out)
conv_out = tf.keras.layers.Flatten()(conv_out)
conv_out = self._conv_to_linear(conv_out)
conv_out = tf.nn.relu(conv_out)
roles_out = self._process_role(one_hot_role)
output = tf.concat((conv_out, roles_out), axis=-1)
return output
def _head(self, core_output):
policy_logits = apply_net(
self._action_specs,
self._policy_logits,
core_output)
baseline = tf.squeeze(self._baseline(core_output), axis=-1)
# Sample an action from the policy.
new_action = choose_action(self._action_specs, policy_logits, self._config['sample_actions'])
return AgentOutput(new_action, post_process_logits(self._action_specs, policy_logits), baseline)
@tf.function
def get_action(self, *args, **kwargs):
return self.__call__(*args, **kwargs)
def __call__(self, prev_actions, env_outputs, core_state, unroll,
is_training, postprocess_action):
outputs, core_state = self._unroll(prev_actions, env_outputs, core_state)
return outputs, core_state
def _unroll(self, prev_actions, env_outputs, core_state):
torso_outputs = utils.batch_apply(self._torso, (prev_actions, env_outputs))
return utils.batch_apply(self._head, (torso_outputs,)), core_state
def create_network(network_config):
net = GFootball(network_config['action_space'].nvec)
net.change_config(network_config)
return net
|
<reponame>dvekeman/gamification-engine
# -*- coding: utf-8 -*-
"""models including business logic"""
import datetime
import logging
from collections import defaultdict
from datetime import timedelta
import hashlib
import pytz
import sqlalchemy.types as ty
from dateutil import relativedelta
from sqlalchemy.dialects.postgresql import JSON
import sys
from pyramid.settings import asbool
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql.ddl import DDL
from sqlalchemy.sql.schema import UniqueConstraint, Index
from sqlalchemy.sql.sqltypes import Integer, String
from gengine.app.permissions import perm_global_increase_value
from gengine.base.model import ABase, exists_by_expr, calc_distance, coords, update_connection
from gengine.app.cache import cache_general, cache_achievements_subjects_levels, \
cache_achievements_by_subject_for_today, cache_translations
from sqlalchemy import (
Table,
ForeignKey,
func,
select,
and_,
or_,
text,
Column
, event)
from sqlalchemy.dialects.postgresql import TIMESTAMP
from sqlalchemy.orm import (
mapper,
relationship as sa_relationship,
backref as sa_backref
)
from sqlalchemy.sql import bindparam
from gengine.base.settings import get_settings
from gengine.base.util import dt_now, dt_ago, dt_in, normalize_key, rowproxy2dict, seconds_until_end_of_day
from gengine.metadata import Base, DBSession
from gengine.app.formular import evaluate_condition, evaluate_value_expression, evaluate_string
log = logging.getLogger(__name__)
# Subjects are the actors and the organization of actors
# Which type of subjects do we have? (e.g. User, Team, City, Country,...)
t_subjecttypes = Table("subjecttypes", Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column("name", ty.String(100), unique=True, nullable=False),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# Defines the allowed hierarchy of subjects (users in teams and cities; cities in countries)
t_subjecttypes_subjecttypes = Table("subjecttypes_subjecttypes", Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('subjecttype_id', ty.Integer, ForeignKey("subjecttypes.id", ondelete="CASCADE"), index=True, nullable=False),
Column('part_of_id', ty.Integer, ForeignKey("subjecttypes.id", ondelete="CASCADE"), index=True, nullable=False),
UniqueConstraint("subjecttype_id", "part_of_id")
)
# Check for Cycle!
t_subjecttypes_subjecttypes_ddl = DDL("""
CREATE OR REPLACE FUNCTION check_subjecttypes_subjecttypes_cycle() RETURNS trigger AS $$
DECLARE
cycles INTEGER;
BEGIN
LOCK TABLE subjecttypes_subjecttypes IN ACCESS EXCLUSIVE MODE;
WITH RECURSIVE search_graph(part_of_id, subjecttype_id, depth, path, cycle) AS (
SELECT tt.part_of_id, t1.id, 1, ARRAY[t1.id], false FROM subjecttypes t1
LEFT JOIN subjecttypes_subjecttypes AS tt ON tt.subjecttype_id=t1.id
UNION ALL
SELECT g.part_of_id, g.subjecttype_id, sg.depth + 1, path || g.subjecttype_id, g.subjecttype_id = ANY(path)
FROM subjecttypes_subjecttypes g, search_graph sg
WHERE g.part_of_id = sg.subjecttype_id AND NOT cycle
)
SELECT INTO cycles COUNT(*) FROM search_graph WHERE cycle=true;
RAISE NOTICE 'cycles: %%', cycles;
IF cycles > 0 THEN
RAISE EXCEPTION 'cycle';
END IF;
RETURN NEW;
END
$$ LANGUAGE plpgsql;
CREATE TRIGGER check_subjecttypes_subjecttypes_cycle AFTER INSERT OR UPDATE ON subjecttypes_subjecttypes
FOR EACH ROW EXECUTE PROCEDURE check_subjecttypes_subjecttypes_cycle();
""")
event.listen(t_subjecttypes_subjecttypes, 'after_create', t_subjecttypes_subjecttypes_ddl.execute_if(dialect='postgresql'))
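# illustrative example of what the trigger above rejects:
#   INSERT INTO subjecttypes_subjecttypes (subjecttype_id, part_of_id) VALUES (1, 2);
#   INSERT INTO subjecttypes_subjecttypes (subjecttype_id, part_of_id) VALUES (2, 1);
# the second statement closes the cycle 1 -> 2 -> 1; the recursive scan finds
# cycle=true and the transaction fails with the 'cycle' exception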
# Subjects are the actors and the organization of actors.
# These are the instances (e.g. users, cities, teams)
t_subjects = Table("subjects", Base.metadata,
Column('id', ty.BigInteger, primary_key = True),
Column('subjecttype_id', ty.Integer, ForeignKey("subjecttypes.id", ondelete="CASCADE"), nullable=False, index=True),
Column("name", ty.String, index=True, nullable=True),
Column("lat", ty.Float(precision=64), nullable=True),
Column("lng", ty.Float(precision=64), nullable=True),
Column("language_id", ty.Integer, ForeignKey("languages.id"), nullable=True),
Column("timezone", ty.String(), nullable=False, default="UTC"),
Column("additional_public_data", JSON(), nullable=True, default=None),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# The relations of subjects (directed acyclic graph)
t_subjects_subjects = Table("subjects_subjects", Base.metadata,
Column('id', ty.BigInteger, primary_key=True),
Column('subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), index=True, nullable=False),
Column('part_of_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), index=True, nullable=False),
Column('joined_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
Column('left_at', TIMESTAMP(timezone=True), nullable=True, default=None, index=True),
UniqueConstraint("subject_id", "part_of_id", "joined_at")
)
# Check for Cycle!
t_subjects_subjects_ddl = DDL("""
CREATE OR REPLACE FUNCTION check_subjects_subjects_cycle() RETURNS trigger AS $$
DECLARE
cycles INTEGER;
BEGIN
LOCK TABLE subjects_subjects IN ACCESS EXCLUSIVE MODE;
WITH RECURSIVE search_graph(part_of_id, subject_id, depth, path, cycle) AS (
SELECT tt.part_of_id, t1.id, 1, ARRAY[t1.id], false FROM subjects t1
LEFT JOIN subjects_subjects AS tt ON tt.subject_id=t1.id
WHERE tt.left_at IS NULL
UNION ALL
SELECT g.part_of_id, g.subject_id, sg.depth + 1, path || g.subject_id, g.subject_id = ANY(path)
FROM subjects_subjects g, search_graph sg
WHERE g.part_of_id = sg.subject_id AND g.left_at IS NULL AND NOT cycle
)
SELECT INTO cycles COUNT(*) FROM search_graph WHERE cycle=true;
RAISE NOTICE 'cycles: %%', cycles;
IF cycles > 0 THEN
RAISE EXCEPTION 'cycle';
END IF;
RETURN NEW;
END
$$ LANGUAGE plpgsql;
CREATE TRIGGER check_subjects_subjects_cycle AFTER INSERT OR UPDATE ON subjects_subjects
FOR EACH ROW EXECUTE PROCEDURE check_subjects_subjects_cycle();
""")
event.listen(t_subjects_subjects, 'after_create', t_subjects_subjects_ddl.execute_if(dialect='postgresql'))
#TODO: Add constraints that check whether an ancestor is actually allowed by the ancestor hierarchy. (on update/insert of subject OR subjecttype)
# Authentication Stuff (user, role, permission system); Token based authentication
t_auth_users = Table("auth_users", Base.metadata,
Column('id', ty.BigInteger, primary_key = True),
Column('subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="RESTRICT"), index=True, nullable=False),
Column("email", ty.String, unique=True),
Column("password_hash", ty.String, nullable=False),
Column("password_salt", ty.Unicode, nullable=False),
Column("force_password_change", ty.Boolean, nullable=False, server_default='0'),
Column("active", ty.Boolean, nullable=False, index=True, server_default='1'),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
def get_default_token_valid_time():
return dt_ago(days=-30) # in 30 days
t_auth_tokens = Table("auth_tokens", Base.metadata,
Column("id", ty.BigInteger, primary_key=True),
Column("auth_user_id", ty.BigInteger, ForeignKey("auth_users.id", ondelete="CASCADE"), nullable=False),
Column("token", ty.String, nullable=False),
Column('valid_until', TIMESTAMP(timezone=True), nullable=False, default=get_default_token_valid_time),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
t_auth_roles = Table("auth_roles", Base.metadata,
Column("id", ty.Integer, primary_key=True),
Column("name", ty.String(100), unique=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
t_auth_users_roles = Table("auth_users_roles", Base.metadata,
Column("id", ty.BigInteger, primary_key=True),
Column("auth_user_id", ty.BigInteger, ForeignKey("auth_users.id", ondelete="CASCADE"), nullable=False, index=True),
Column("auth_role_id", ty.Integer, ForeignKey("auth_roles.id", ondelete="CASCADE"), nullable=False, index=True),
)
t_auth_roles_permissions = Table("auth_roles_permissions", Base.metadata,
Column("id", ty.Integer, primary_key=True),
Column("auth_role_id", ty.Integer, ForeignKey("auth_roles.id", use_alter=True, ondelete="CASCADE"), nullable=False, index=True),
Column("name", ty.String(255), nullable=False), # taken from gengine.app.permissions
UniqueConstraint("auth_role_id", "name")
)
# Directed relations (like friendships)
t_subjectrelations = Table("subjectrelations", Base.metadata,
Column("id", ty.BigInteger, primary_key=True),
Column('from_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=False, index=True),
Column('to_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=False, index=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
UniqueConstraint("from_id", "to_id")
)
# Achievements can be categorized (for better organization in the client)
t_achievementcategories = Table('achievementcategories', Base.metadata,
Column('id', ty.Integer, primary_key=True),
# The name is used to filter the achievements in the client and api requests
Column('name', ty.String(255), nullable=False, unique=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# Achievements! (The core of this application)
t_achievements = Table('achievements', Base.metadata,
Column('id', ty.Integer, primary_key=True),
# Internal Use, external should be added by using a property
Column('name', ty.String(255), nullable=False, unique=True),
# For ordering in the UI
Column('priority', ty.Integer, index=True, default=0),
# We assign the achievement to a category
Column("achievementcategory_id", ty.Integer, ForeignKey("achievementcategories.id", ondelete="SET NULL"), index=True, nullable=True),
# An achievement can have multiple levels. What is the maximum level? (For leaderboards this is typically always 1)
Column('maxlevel', ty.Integer, nullable=False, default=1),
# May the user see this achievement and the progress before it is reached?
Column('hidden', ty.Boolean, nullable=False, default=False),
# The achievement can be valid for only a specific time
Column('valid_start', ty.Date, nullable=True),
Column('valid_end', ty.Date, nullable=True),
# The achievement can be constrained to geo-position (radius)
Column("lat", ty.Float(precision=64), nullable=True),
Column("lng", ty.Float(precision=64), nullable=True),
Column("max_distance", ty.Integer, nullable=True),
# Some achievements occur periodically. This field defines when and how often they are evaluated.
# "immediately" means, it is evaluated each time a value changes
# "end" means, it is evaluated after "valid_end" is reached
Column('evaluation', ty.Enum("immediately", "daily", "weekly", "monthly", "yearly", "end", name="evaluation_types"), default="immediately", nullable=False),
# For time-related achievements, the timezone should be fixed if multiple subjects are involved (leaderboard), as otherwise the comparison is not in sync
# For single-user achievements (no leaderboard), we can use the timezone of each subject
Column('evaluation_timezone', ty.String(), default=None, nullable=True),
# Weeks don't start on the same day everywhere and in every use-case. The same goes for years, days and months.
# We can shift them by a fixed amount of seconds!
Column('evaluation_shift', ty.Integer(), nullable=True, default=None),
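# e.g. an evaluation_shift of 86400 seconds would move the period boundary by
# one day (illustrative; the sign convention depends on the evaluation code)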
# If this is just a normal achievement, we don't want to compare the value to other subjects
# For leaderboards, we need to define who is compared to whom:
# - global: the subject is compared to all other subjects of the same subjecttype
# - context_subject: the subject is compared inside the defined context_subjecttype.
# As the player can be part of multiple subjects of this type, the achievement can be evaluated and achieved for each of these subjects!
# - relations: The player is compared to all his relations (they are directed)
# - none: no leaderboard, just single achievement
Column('comparison_type', ty.Enum("global", "context_subject", "relations", "none", name="comparison_types"), default="none"),
# This one is the actual player, that will "win" the achievement
Column('player_subjecttype_id', ty.Integer(), ForeignKey("subjecttypes.id", ondelete="CASCADE"), nullable=False, index=True),
# If this is a leaderboard: In which group of subjects do we compare the current subject?
# It may also make sense to compare the groups in between:
# - User is the Player
# - Country is the Context
# - We may also see how the team performs in comparison to other teams in the country (though the team cannot "achieve" anything)
# These "compared subject types" are defined in the table achievement_compared_subjects
Column('context_subjecttype_id', ty.Integer(), ForeignKey("subjecttypes.id", ondelete="RESTRICT"), nullable=True, index=True),
# Do only those members count that have been part of the context subject for the whole time?
# For achievements with a lower bound (geq) this will mostly be true, as a user who joined later gets no advantage
# For upper-bound achievements ("do at most x times event e in this month") a user who joined later would have an advantage, so this should be set to true
Column('lb_subject_part_whole_time', ty.Boolean, nullable=False, default=False, server_default='0'),
# Who may see this Achievement?
Column('view_permission', ty.Enum("everyone", "own", name="achievement_view_permission"), default="everyone"),
# filter condition for the event values that are aggregated for the achievement value
Column('condition', ty.String(255), nullable=True),
# How old may the values be, that are considered in this achievement?
Column('timespan', ty.Integer, nullable=True),
# We can group the values by a key or date (eg. achieve sth. on a sunday)
Column('group_by_key', ty.Boolean(), default=False),
Column('group_by_dateformat', ty.String(255), nullable=True),
# The value that has to be reached to achieve this / is NULL for pure leaderboards
Column('goal', ty.String(255), nullable=True),
# Is the goal value a lower or upper bound?
Column('operator', ty.Enum("geq","leq", name="goal_operators"), nullable=True),
# When we group by key or dateformat: Should we select the max or min value of the groups?
Column('maxmin', ty.Enum("max","min", name="goal_maxmin"), nullable=True, default="max"),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now),
)
# Between the achievement player and the context, there can be other levels of comparison (e.g. compare the team instead of the user)
# These cannot achieve the achievement, but can only be looked at!
t_achievement_compared_subjecttypes = Table('achievement_compared_subjects', Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('achievement_id', ty.Integer, ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False, index=True),
Column('subjecttype_id', ty.Integer, ForeignKey("subjecttypes.id", ondelete="CASCADE"), nullable=False, index=True),
UniqueConstraint("achievement_id", "subjecttype_id")
)
# The achievements can be restricted to be valid inside a specific subject set (e.g. only in Germany)
# This restriction applies to the players, compared subjects and the context subjects
t_achievement_domain_subjects = Table('achievement_domain_subjects', Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('achievement_id', ty.Integer, ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False, index=True),
Column('subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=False, index=True),
UniqueConstraint("achievement_id", "subject_id")
)
# This contains the current achievement state (level, achieved) for a subject in the context of a date / context_subject
t_evaluations = Table("evaluations", Base.metadata,
Column('id', ty.Integer, primary_key=True),
# For whom?
Column("subject_id", ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=False, index=True),
# Which achievement?
Column("achievement_id", ty.Integer, ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False, index=True),
# For which period?
Column('achievement_date', TIMESTAMP(timezone=True), nullable=True, index=True), # To identify the goals for monthly, weekly, ... achievements;
# For which context? (The achievement defines the context type, this is the actual context!)
Column('context_subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=True, index=True),
# Is this achieved?
Column("achieved", ty.Boolean, nullable=False, server_default='0', default=False, index=True),
Column('achieved_at', TIMESTAMP(timezone=True), nullable=True, index=True),
# Which level is this
Column('level', ty.Integer, default=0, nullable=False, index=True),
)
Index("idx_evaluations_date_not_null_unique",
t_evaluations.c.subject_id,
t_evaluations.c.achievement_id,
t_evaluations.c.achievement_date,
t_evaluations.c.level,
unique=True,
postgresql_where=t_evaluations.c.achievement_date != None
)
Index("idx_evaluations_date_null_unique",
t_evaluations.c.subject_id,
t_evaluations.c.achievement_id,
t_evaluations.c.level,
unique=True,
postgresql_where=t_evaluations.c.achievement_date == None
)
# We store the evaluated values in a table to generate the leaderboard and return the achievement's state efficiently
t_progress = Table("progress", Base.metadata,
Column('id', ty.Integer, primary_key=True),
# For whom?
Column("subject_id", ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=False, index=True),
# Which achievement?
Column("achievement_id", ty.Integer, ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False, index=True),
# For which period?
Column('achievement_date', TIMESTAMP(timezone=True), nullable=True, index=True), # To identify the goals for monthly, weekly, ... achievements;
# The current value
Column("value", ty.Float, index=True, nullable=False),
)
Index("idx_progress_date_not_null_unique",
t_progress.c.subject_id,
t_progress.c.achievement_id,
t_progress.c.achievement_date,
unique=True,
postgresql_where=t_progress.c.achievement_date != None
)
Index("idx_progress_date_null_unique",
t_progress.c.subject_id,
t_progress.c.achievement_id,
unique=True,
postgresql_where=t_progress.c.achievement_date == None
)
# The event types that can happen. The types of values we use to construct achievements.
t_variables = Table('variables', Base.metadata,
Column('id', ty.Integer, primary_key = True),
# The name; used by the API to increase the values
Column('name', ty.String(255), nullable = False, index=True, unique=True),
# Who may increase this? (API permissions)
Column('increase_permission',ty.Enum("own", "admin", name="variable_increase_permission"), default="admin"),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# These are the actual values
t_values = Table('values', Base.metadata,
Column('id', ty.Integer, primary_key=True),
# For whom we count the value
Column('subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), index=True, nullable=False),
# Who triggered the increase?
# (nullable, since ondelete="SET NULL" must be able to clear it)
Column('agent_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="SET NULL"), index=True, nullable=True),
# When did it happen
Column('datetime', TIMESTAMP(timezone=True), nullable=False, index=True, default=dt_now),
# Which type of event happened?
Column('variable_id', ty.Integer, ForeignKey("variables.id", ondelete="CASCADE"), index=True, nullable=False),
# The value
Column('value', ty.Float, nullable = False),
# In which context did this happen? (e.g. a product_id; something unique to the application)
Column('key', ty.String(100), nullable=False, index=True, default=''),
)
# Achievements can trigger things (like messages)
t_achievement_triggers = Table('achievement_triggers', Base.metadata,
Column('id', ty.Integer, primary_key = True),
# internal use only
Column("name", ty.String(100), nullable=False),
Column('achievement_id', ty.Integer, ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False, index=True),
# Should this also be executed when the achievement is completed? (e.g. a message like "10 events to go" should not be sent if you do 20 events at once)
Column('execute_when_complete', ty.Boolean, nullable=False, server_default='0', default=False),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# The triggers can be divided into multiple steps (e.g. 10 to go; 5 to go; 3 to go)
t_achievement_trigger_steps = Table('achievement_trigger_steps', Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('achievement_trigger_id', ty.Integer, ForeignKey("achievement_triggers.id", ondelete="CASCADE"), nullable=False, index=True),
# the number of the step (order in which they are executed)
Column('step', ty.Integer, nullable=False, default=0),
# The condition type. Currently we only support a percentage of the goal value
Column('condition_type', ty.Enum("percentage", name="achievement_trigger_condition_types"), default="percentage"),
Column('condition_percentage', ty.Float, nullable=True),
# type of action to execute (currently only creation of a message)
Column('action_type', ty.Enum("subject_message", "increase_value", name="achievement_trigger_action_types"), default="subject_message"),
Column('action_translation_id', ty.Integer, ForeignKey("translationvariables.id", ondelete="RESTRICT"), nullable=True),
# for "increase_value" we might want to increase in the name of a certain subject type
# -> we will search for that subject type (ancestor, descendant) and then give the points to those subjects
Column('action_subjecttype_id', ty.Integer, ForeignKey("subjecttypes.id", ondelete="CASCADE"), nullable=True),
Column('action_value', ty.String, nullable=True),
Column('action_variable_id', ty.Integer, ForeignKey("variables.id", ondelete="CASCADE"), nullable=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
UniqueConstraint("achievement_trigger_id", "step")
)
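# Illustrative example (hypothetical rows): an achievement with goal 10 and a trigger whose
# messages should fire at 50% and 80% progress would be modeled as two steps here:
#   step=0, condition_type="percentage", condition_percentage=0.5, action_type="subject_message"
#   step=1, condition_type="percentage", condition_percentage=0.8, action_type="subject_message"
# Each step is executed at most once per (subject, level, achievement_date); see the
# achievement_trigger_executions table below.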
# Which steps have already been executed. This is used to prevent duplicate executions.
t_achievement_trigger_step_executions = Table('achievement_trigger_executions', Base.metadata,
Column('id', ty.BigInteger, primary_key=True),
# Which step?
Column('trigger_step_id', ty.Integer, ForeignKey("achievement_trigger_steps.id", ondelete="CASCADE"), nullable=False),
# For whom?
Column('subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=False),
# For which period?
Column('achievement_date', TIMESTAMP(timezone=True), nullable=True, index=True),
# For which level?
Column('execution_level', ty.Integer, nullable = False, default=0),
# In which context? (this is the actual context, the type is defined in the achievement)
Column('context_subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), nullable=True),
# When did the execution happen (autofilled)
Column('execution_date', TIMESTAMP(timezone=True), nullable=False, default=datetime.datetime.utcnow, index=True),
Index("ix_achievement_trigger_executions_combined", "trigger_step_id", "subject_id", "execution_level", "achievement_date")
)
# We can add properties to achievements that are used to describe them
# E.g. names, texts, urls to graphics etc.
# This table describes the types of properties that can be created
t_achievementproperties = Table('achievementproperties', Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('name', ty.String(255), nullable=False, unique=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# These are the instances.
t_achievements_achievementproperties = Table('achievements_achievementproperties', Base.metadata,
Column('id', ty.Integer, primary_key = True),
Column('achievement_id', ty.Integer, ForeignKey("achievements.id", ondelete="CASCADE"), index=True, nullable=False),
Column('property_id', ty.Integer, ForeignKey("achievementproperties.id", ondelete="CASCADE"), index=True, nullable=False),
# Can be a formula...
Column('value', ty.String(255), nullable = True),
# ...or a text with translation
Column('value_translation_id', ty.Integer, ForeignKey("translationvariables.id", ondelete="RESTRICT"), nullable=True),
# Valid from which level (higher level overrides entries for lower levels)
Column('from_level', ty.Integer, nullable=False, default=0, index=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now),
UniqueConstraint("achievement_id", "property_id", "from_level")
)
# There are two types of rewards for achievements:
# - (Rewards) Rewards that are collected and can be achieved only once (like Badges, Backgrounds, etc.)
# - (Rewardpoints) Points (like EXP) that are earned with every achievement / level
# This table describes the available reward types...
t_rewards = Table('rewards', Base.metadata,
Column('id', ty.Integer, primary_key = True),
# For internal use and identification in frontend
Column('name', ty.String(255), nullable = False, unique=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
# Which subjecttype can collect this type of reward (user? team?)
# If this is added to an achievement and the achievement player does not equal this subjecttype,
# the path to this subjecttype is constructed and all relevant subjects are rewarded!
Column('rewarded_subjecttype_id', ty.Integer(), ForeignKey("subjecttypes.id", ondelete="CASCADE"), nullable=False, index=True),
)
# Who inherits the rewarded items? When a team wins something, its members might inherit the items...
# ..Or just the team as a whole gets it, and users who leave don't have it (depends on the application model)
t_reward_inheritors = Table('reward_inheritors', Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('reward_id', ty.Integer, ForeignKey("rewards.id", ondelete="CASCADE"), nullable=False, index=True),
Column('inheritor_subjecttype_id', ty.Integer(), ForeignKey("subjecttypes.id", ondelete="CASCADE"), nullable=False, index=True),
UniqueConstraint("reward_id", "inheritor_subjecttype_id")
)
# What is rewarded by the achievements?
t_achievements_rewards = Table('achievements_rewards', Base.metadata,
Column('id', ty.Integer, primary_key = True),
Column('achievement_id', ty.Integer, ForeignKey("achievements.id", ondelete="CASCADE"), index = True, nullable=False),
Column('reward_id', ty.Integer, ForeignKey("rewards.id", ondelete="CASCADE"), index = True, nullable=False),
# Can be a computed value or can be a translation
Column('value', ty.String(255), nullable = True),
Column('value_translation_id', ty.Integer, ForeignKey("translationvariables.id"), nullable = True),
# Valid from which level (higher level overrides entries for lower levels)
Column('from_level', ty.Integer, nullable = False, default=1, index = True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
UniqueConstraint("achievement_id", "reward_id", "from_level")
)
# The languages for which we want to provide translations.
t_languages = Table('languages', Base.metadata,
Column('id', ty.Integer, primary_key = True),
Column('name', ty.String(255), nullable = False, index=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# Translation variables
t_translationvariables = Table('translationvariables', Base.metadata,
Column('id', ty.Integer, primary_key = True),
Column('name', ty.String(255), nullable = False, index=True),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
# The translation values
t_translations = Table('translations', Base.metadata,
Column('id', ty.Integer, primary_key = True),
Column('translationvariable_id', ty.Integer, ForeignKey("translationvariables.id", ondelete="CASCADE"), nullable = False),
Column('language_id', ty.Integer, ForeignKey("languages.id", ondelete="CASCADE"), nullable = False),
# The translation
Column('text', ty.Text(), nullable=False),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
UniqueConstraint("translationvariable_id", "language_id")
)
# This probably only makes sense for user-subjects, but let's keep it general
t_subject_device = Table('subject_devices', Base.metadata,
Column('device_id', ty.String(255), primary_key = True),
Column('subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), primary_key = True, nullable=False),
Column('device_os', ty.String, nullable=False),
Column('push_id', ty.String(255), nullable=False),
Column('app_version', ty.String(255), nullable=False),
Column('registered_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now),
)
# This probably only makes sense for user-subjects, but let's keep it general
t_subject_messages = Table('subject_messages', Base.metadata,
Column('id', ty.BigInteger, primary_key=True),
Column('subject_id', ty.BigInteger, ForeignKey("subjects.id", ondelete="CASCADE"), index=True, nullable=False),
Column('translation_id', ty.Integer, ForeignKey("translationvariables.id", ondelete="RESTRICT"), nullable=True),
Column('params', JSON(), nullable=True, default={}),
Column('is_read', ty.Boolean, index=True, default=False, nullable=False),
Column('has_been_pushed', ty.Boolean, index=True, default=False, server_default='0', nullable=False),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
t_tasks = Table('tasks', Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('entry_name', ty.String(100), index=True),
Column('task_name', ty.String(100), index=True, nullable=False),
Column('config', ty.JSON()),
Column('cron', ty.String(100)),
Column('is_removed', ty.Boolean, index=True, nullable=False, default=False),
Column('is_auto_created', ty.Boolean, index=True, nullable=False, default=False),
Column('is_manually_modified', ty.Boolean, index=True, nullable=False, default=False),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
t_taskexecutions = Table('taskexecutions', Base.metadata,
Column('id', ty.Integer, primary_key=True),
Column('task_id', ty.Integer, ForeignKey("tasks.id", ondelete="CASCADE"), index=True, nullable=False),
Column('planned_at', TIMESTAMP(timezone=True), nullable=False, default=None, index=True),
Column('locked_at', TIMESTAMP(timezone=True), nullable=True, default=None, index=True),
Column('finished_at', TIMESTAMP(timezone=True), nullable=True, default=None, index=True),
Column('canceled_at', TIMESTAMP(timezone=True), nullable=True, default=None, index=True),
Column('log', ty.String),
Column('success', ty.Boolean, index=True, nullable=True, default=None),
Column('created_at', TIMESTAMP(timezone=True), nullable=False, default=dt_now, index=True),
)
#class EvaluationContext:
# def __init__(self, context_subject_id, achievment_date):
# self.context_subject_id = context_subject_id
# self.achievement_date = achievment_date
class AuthUser(ABase):
@hybrid_property
def password(self):
return self.password_hash
@password.setter
def password(self,new_pw):
if new_pw!=self.password_hash:
import argon2
import crypt
import base64
self.password_salt = crypt.mksalt()+crypt.mksalt()+crypt.mksalt()+crypt.mksalt()+crypt.mksalt()
hash = argon2.argon2_hash(new_pw, self.password_salt)
self.password_hash = base64.b64encode(hash).decode("UTF-8")
def verify_password(self, pw):
import argon2
import base64
check = base64.b64encode(argon2.argon2_hash(pw, self.password_salt)).decode("UTF-8")
orig = self.password_hash
is_valid = check == orig
return is_valid
@classmethod
def check_password_strength(cls, password):
length_error = len(password) < 8
return not length_error
def get_or_create_token(self):
tokenObj = DBSession.query(AuthToken).filter(and_(
AuthToken.valid_until >= dt_now(),
AuthToken.auth_user_id == self.id,
)).first()
if not tokenObj:
token = AuthToken.generate_token()
tokenObj = AuthToken(
auth_user_id=self.id,
token=token
)
DBSession.add(tokenObj)
return tokenObj
@classmethod
def may_increase(cls, variable_row, request, subject_id):
if not asbool(get_settings().get("enable_user_authentication", False)):
#Authentication deactivated
return True
if request.has_perm(perm_global_increase_value):
# I'm the global admin
return True
if variable_row["increase_permission"] == "own" and request.subject and str(request.subject.id) == str(subject_id):
#The variable may be updated for myself
return True
return False
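# Illustrative sketch of the hash-and-verify round trip (never called by the engine;
# assumes an AuthUser instance and the "argon2" package providing argon2_hash, as used
# in the property setter above):
def _example_password_roundtrip(user):
    user.password = "correct horse battery staple"  # hashed via the property setter
    assert user.verify_password("correct horse battery staple")
    assert not user.verify_password("wrong password")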
class AuthToken(ABase):
@staticmethod
def generate_token():
import crypt
return str(crypt.mksalt()+crypt.mksalt())
def extend(self):
self.valid_until = dt_in(days=30)
DBSession.add(self)
def __unicode__(self, *args, **kwargs):
return "Token %s" % (self.id,)
class AuthRole(ABase):
def __unicode__(self, *args, **kwargs):
return "Role %s" % (self.name,)
class AuthRolePermission(ABase):
def __unicode__(self, *args, **kwargs):
return "%s" % (self.name,)
class SubjectDevice(ABase):
def __unicode__(self, *args, **kwargs):
return "Device: %s" % (self.device_id,)
@classmethod
def add_or_update_device(cls, subject_id, device_id, push_id, device_os, app_version):
update_connection().execute(t_subject_device.delete().where(and_(
t_subject_device.c.push_id == push_id,
t_subject_device.c.device_os == device_os
)))
device = DBSession.execute(t_subject_device.select().where(and_(
t_subject_device.c.device_id == device_id,
t_subject_device.c.subject_id == subject_id
))).fetchone()
if device and (device["push_id"] != push_id
or device["device_os"] != device_os
or device["app_version"] != app_version
):
uSession = update_connection()
q = t_subject_device.update().values({
"push_id": push_id,
"device_os": device_os,
"app_version": app_version
}).where(and_(
t_subject_device.c.device_id == device_id,
t_subject_device.c.subject_id == subject_id
))
uSession.execute(q)
elif not device: # insert
uSession = update_connection()
q = t_subject_device.insert().values({
"push_id": push_id,
"device_os": device_os,
"app_version": app_version,
"device_id": device_id,
"subject_id": subject_id
})
uSession.execute(q)
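# Illustrative usage (all IDs and version strings are made up). Registering the same
# push_id/device_os pair again first removes the old row, so a push token can never be
# attached to two subjects at once:
def _example_register_device():
    SubjectDevice.add_or_update_device(
        subject_id=42,
        device_id="device-abc",
        push_id="apns-token-xyz",
        device_os="ios",
        app_version="1.2.3",
    )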
class Subject(ABase):
"""A subject participates in the gamification, i.e. can get achievements, rewards, participate in leaderbaord etc."""
def __unicode__(self, *args, **kwargs):
return "Subject %s" % (self.id,)
def __init__(self, *args, **kw):
""" create a subject object
Each subject has a timezone and a location to support time- and geo-aware gamification.
There is also a subject-relation for leaderboards and a hierarchical subject-subject structure.
"""
ABase.__init__(self, *args, **kw)
@classmethod
def get_subject(cls,subject_id):
return DBSession.execute(t_subjects.select().where(t_subjects.c.id == subject_id)).fetchone()
@classmethod
def get_subjects(cls, subject_ids):
return {
x["id"] : x for x in
DBSession.execute(t_subjects.select().where(t_subjects.c.id.in_(subject_ids))).fetchall()
}
@classmethod
def set_relations(cls, subject_id, relation_ids):
new_friends_set = set(relation_ids)
existing_subjects_set = {x["id"] for x in DBSession.execute(select([t_subjects.c.id]).where(t_subjects.c.id.in_([subject_id, ] + relation_ids))).fetchall()}
existing_friends = {x["to_id"] for x in DBSession.execute(select([t_subjectrelations.c.to_id]).where(t_subjectrelations.c.from_id==subject_id)).fetchall()}
not_existing_friends = (new_friends_set-existing_subjects_set-{subject_id,})
friends_to_append = ((new_friends_set - existing_friends) - not_existing_friends)
friends_to_delete = ((existing_friends - new_friends_set) - not_existing_friends)
#delete old friends
if len(friends_to_delete)>0:
update_connection().execute(t_subjectrelations.delete().where(and_(t_subjectrelations.c.from_id==subject_id,
t_subjectrelations.c.to_id.in_(friends_to_delete))))
#insert missing friends
if len(friends_to_append)>0:
update_connection().execute(t_subjectrelations.insert(),[{"from_id":subject_id,"to_id":f} for f in friends_to_append])
@classmethod
def set_parent_subjects(cls, subject_id, parent_subject_ids):
pass
@classmethod
def set_infos(cls, subject_id, lat, lng, timezone, language_id, additional_public_data):
"""set the subject's metadata like friends,location and timezone"""
# add or select subject
subject = DBSession.query(Subject).filter_by(id=subject_id).first()
subject.lat = lat
subject.lng = lng
subject.timezone = timezone
subject.additional_public_data = additional_public_data
subject.language_id = language_id
DBSession.add(subject)
DBSession.flush()
@classmethod
def delete_subject(cls,subject_id):
"""delete a subject including all dependencies."""
update_connection().execute(t_progress.delete().where(t_progress.c.subject_id == subject_id))
update_connection().execute(t_evaluations.delete().where(t_evaluations.c.subject_id == subject_id))
update_connection().execute(t_evaluations.delete().where(t_evaluations.c.context_subject_id == subject_id))
update_connection().execute(t_subjectrelations.delete().where(t_subjectrelations.c.to_id==subject_id))
update_connection().execute(t_subjectrelations.delete().where(t_subjectrelations.c.from_id==subject_id))
update_connection().execute(t_subjects_subjects.delete().where(t_subjects_subjects.c.subject_id==subject_id))
update_connection().execute(t_subjects_subjects.delete().where(t_subjects_subjects.c.part_of_id==subject_id))
update_connection().execute(t_values.delete().where(t_values.c.subject_id==subject_id))
update_connection().execute(t_subjects.delete().where(t_subjects.c.id == subject_id))
@classmethod
def basic_output(cls, subject):
return {
"id": subject["id"],
"name": subject["name"],
"additional_public_data": subject["additional_public_data"]
}
@classmethod
def full_output(cls, subject_id):
subject = DBSession.execute(t_subjects.select().where(t_subjects.c.id == subject_id)).fetchone()
j = t_subjects.join(t_subjectrelations, t_subjectrelations.c.to_id == t_subjects.c.id)
friends = DBSession.execute(t_subjects.select(from_obj=j).where(t_subjectrelations.c.from_id == subject_id)).fetchall()
j = t_subjects.join(t_subjects_subjects, t_subjects_subjects.c.part_of_id == t_subjects.c.id)
part_of_subjects = DBSession.execute(t_subjects.select(from_obj=j).where(t_subjects_subjects.c.subject_id == subject_id)).fetchall()
language = get_settings().get("fallback_language","en")
j = t_subjects.join(t_languages)
subject_language = DBSession.execute(select([t_languages.c.name], from_obj=j).where(t_subjects.c.id == subject_id)).fetchone()
if subject_language:
language = subject_language["name"]
ret = {
"id": subject["id"],
"lat": subject["lat"],
"lng": subject["lng"],
"timezone": subject["timezone"],
"language": language,
"created_at": subject["created_at"],
"additional_public_data": subject["additional_public_data"],
"relations": [Subject.basic_output(f) for f in friends],
"part_of": [Subject.basic_output(g) for g in part_of_subjects],
}
if get_settings().get("enable_user_authentication"):
auth_user = DBSession.execute(t_auth_users.select().where(t_auth_users.c.subject_id == subject_id)).fetchone()
if auth_user:
ret.update({
"email" : auth_user["email"]
})
return ret
@classmethod
def get_ancestor_subjects(cls, subject_id, of_type_id, from_date, to_date, whole_time_required):
#print("Getting ancestors of %s of type %s" % (subject_id, of_type_id))
#print("From date %s, To date %s, whole_time_required: %s" % (from_date, to_date, whole_time_required))
if whole_time_required:
datestr = "(%(ss)s.joined_at<=:from_date AND (%(ss)s.left_at IS NULL OR %(ss)s.left_at >= :to_date))"
else:
datestr = "((%(ss)s.joined_at<=:from_date AND (%(ss)s.left_at IS NULL OR %(ss)s.left_at >= :from_date))" \
"OR (%(ss)s.joined_at >= :from_date AND %(ss)s.joined_at <= :to_date)" \
"OR (%(ss)s.left_at >= :from_date AND %(ss)s.left_at <= :to_date))"
sq = text("""
WITH RECURSIVE nodes_cte(subject_id, name, part_of_id, depth, path) AS (
SELECT g1.id, g1.name, g1.id::bigint as part_of_id, 1::INT as depth, g1.id::TEXT as path
FROM subjects_subjects ss
LEFT JOIN subjects as g1 ON ss.part_of_id=g1.id
WHERE ss.subject_id = :subject_id AND """+(datestr % {'ss': 'ss'})+"""
UNION ALL
SELECT g2.id, g2.name, ss2.part_of_id, p.depth + 1 AS depth,
(p.path || '->' || g2.id ::TEXT)
FROM nodes_cte AS p
LEFT JOIN subjects_subjects AS ss2 ON ss2.subject_id=p.subject_id
LEFT JOIN subjects AS g2 ON ss2.part_of_id = g2.id
WHERE """+(datestr % {'ss': 'ss2'})+"""
) SELECT * FROM nodes_cte
""").bindparams(subject_id=subject_id, from_date=from_date, to_date=to_date).columns(subject_id=Integer, name=String, part_of_id=Integer, depth=Integer, path=String).alias()
j = t_subjects.join(sq, sq.c.subject_id == t_subjects.c.id)
q = select([
sq.c.path.label("subject_path"),
sq.c.subject_id.label("subject_id"),
sq.c.part_of_id.label("part_of_id"),
sq.c.name.label("subject_name"),
t_subjects.c.subjecttype_id.label("subjecttype_id")
], from_obj=j)
if of_type_id is not None:
q = q.where(t_subjects.c.subjecttype_id == of_type_id)
rows = DBSession.execute(q).fetchall()
groups = {r["part_of_id"]: r for r in rows if r["part_of_id"]}
return groups
@classmethod
def get_descendent_subjects(cls, subject_id, of_type_id, from_date, to_date, whole_time_required):
if whole_time_required:
datestr = "(%(ss)s.joined_at<=:from_date AND (%(ss)s.left_at IS NULL OR %(ss)s.left_at >= :to_date))"
else:
datestr = "((%(ss)s.joined_at<=:from_date AND (%(ss)s.left_at IS NULL OR %(ss)s.left_at >= :from_date))" \
"OR (%(ss)s.joined_at >= :from_date AND %(ss)s.joined_at <= :to_date)" \
"OR (%(ss)s.left_at >= :from_date AND %(ss)s.left_at <= :to_date))"
sq = text("""
WITH RECURSIVE nodes_cte(subject_id, name, part_of_id, depth, path) AS (
SELECT g1.id, g1.name, NULL::bigint as part_of_id, 1::INT as depth, g1.id::TEXT as path
FROM subjects as g1
LEFT JOIN subjects_subjects ss ON ss.subject_id=g1.id
WHERE ss.part_of_id = :subject_id AND """+(datestr % {'ss': 'ss'})+"""
UNION ALL
SELECT c.subject_id, g2.name, c.part_of_id, p.depth + 1 AS depth,
(p.path || '->' || g2.id ::TEXT)
FROM nodes_cte AS p, subjects_subjects AS c
JOIN subjects AS g2 ON g2.id=c.subject_id
WHERE c.part_of_id = p.subject_id AND """+(datestr % {'ss': 'c'})+"""
) SELECT * FROM nodes_cte
""").bindparams(subject_id=subject_id, from_date=from_date, to_date=to_date).columns(subject_id=Integer, name=String, part_of_id=Integer, depth=Integer, path=String).alias()
j = t_subjects.join(sq, sq.c.subject_id == t_subjects.c.id)
q = select([
sq.c.path.label("subject_path"),
sq.c.subject_id.label("subject_id"),
sq.c.name.label("subject_name"),
t_subjects.c.subjecttype_id.label("subjecttype_id")
], from_obj=j)
if of_type_id is not None:
q = q.where(t_subjects.c.subjecttype_id == of_type_id)
rows = DBSession.execute(q).fetchall()
subjects = {r["subject_id"]: r for r in rows if r["subject_id"]}
return subjects
@classmethod
def join_subject(cls,
subject_id,
part_of_id,
join_date):
q = t_subjects_subjects.insert().values({
'subject_id': subject_id,
'part_of_id': part_of_id,
'joined_at': join_date
})
update_connection().execute(q)
@classmethod
def leave_subject(cls,
subject_id,
part_of_id,
leave_date):
q = t_subjects_subjects.update().values({
'left_at': leave_date
}).where(and_(
t_subjects_subjects.c.subject_id == subject_id,
t_subjects_subjects.c.part_of_id == part_of_id,
))
update_connection().execute(q)
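# Illustrative membership lifecycle (IDs made up; never called by the engine).
# Ancestor/descendant lookups are date-aware via joined_at/left_at, so joining and
# leaving at different times yields different hierarchies per evaluation period:
def _example_membership():
    Subject.join_subject(subject_id=42, part_of_id=7, join_date=dt_now())
    Subject.leave_subject(subject_id=42, part_of_id=7, leave_date=dt_now())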
class SubjectType(ABase):
def __unicode__(self, *args, **kwargs):
return "(ID: %s; Name: %s)" % (self.id, self.name)
@classmethod
def basic_output(cls, subjecttype):
return {
"id": subjecttype["id"],
"name": subjecttype["name"],
}
class Variable(ABase):
"""A Variable is anything that should be meassured in your application and be used in :class:`.Goal`.
To save database rows, variables may be grouped by time:
group needs to be set to "year","month","day","timeslot" or "none" (default)
"""
def __unicode__(self, *args, **kwargs):
return self.name + " (ID: %s)" % (self.id,)
@classmethod
@cache_general.cache_on_arguments()
def get_variable_by_name(cls,name):
return DBSession.execute(t_variables.select(t_variables.c.name==name)).fetchone()
@classmethod
@cache_general.cache_on_arguments()
def map_variables_to_rules(cls):
"""return a map from variable_ids to [achievement1,..] lists.
Used to know which achievements need to be reevaluated after a value for the variable changes."""
q = select([t_achievements.c.id.label("achievement_id"), t_variables.c.id.label("variable_id")])\
.where(or_(t_achievements.c.condition.ilike(func.concat('%"',t_variables.c.name,'"%')),
t_achievements.c.condition.ilike(func.concat("%'",t_variables.c.name,"'%"))))
rows = DBSession.execute(q).fetchall()
achievements = { aid : Achievement.get_achievement(aid) for aid in { r["achievement_id"] for r in rows } }
m={}
for row in rows:
if not row["variable_id"] in m:
m[row["variable_id"]] = []
m[row["variable_id"]].append(achievements[row["achievement_id"]])
return m
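# The returned map has the shape {variable_id: [achievement, ...]}: any achievement whose
# condition string quotes the variable's name is linked to it. Illustrative lookup sketch
# (the variable name is hypothetical):
def _example_affected_achievements(variable_name="invite_users"):
    var = Variable.get_variable_by_name(variable_name)
    return Variable.map_variables_to_rules().get(var["id"], [])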
class Value(ABase):
"""A Value describes the relation of the subject to a variable.
(e.g. it counts the occurrences of the "events" which the variable represents) """
@classmethod
def increase_value(cls, variable_name, subject_id, value, key, at_datetime, populate_to_ancestors=True, override_agent_id=None):
"""increase the value of the variable for the subject.
In addition to the variable_name there may be an application-specific key which can be used in your :class:`.Achievement` definitions
The parameter at_datetime specifies a timezone-aware datetime to define when the event happened
"""
#subject_id = subject["id"]
agent_id = override_agent_id if override_agent_id is not None else subject_id
variable = Variable.get_variable_by_name(variable_name)
key = normalize_key(key)
if populate_to_ancestors:
part_of_ids = list(Subject.get_ancestor_subjects(subject_id, None, at_datetime, at_datetime, False).keys())
else:
part_of_ids = []
sid_set = set([subject_id, ] + part_of_ids)
# Populate the value for all relevant subjects:
for sid in sid_set:
condition = and_(t_values.c.datetime == at_datetime,
t_values.c.variable_id == variable["id"],
t_values.c.subject_id == sid,
t_values.c.agent_id == agent_id,
t_values.c.key == key)
current_value = DBSession.execute(select([t_values.c.value,]).where(condition)).scalar()
if current_value is not None:
update_connection().execute(t_values.update(condition, values={"value":current_value+value}))
else:
update_connection().execute(t_values.insert({"datetime": at_datetime,
"variable_id": variable["id"],
"subject_id": sid,
"agent_id": agent_id,
"key": key,
"value": value}))
# Clear the relevant achievement caches!
achievements = Variable.map_variables_to_rules().get(variable["id"], [])
achievement_id_to_achievement_date = {
entry["id"]: AchievementDate.compute(
evaluation_timezone=entry["evaluation_timezone"],
evaluation_type=entry["evaluation"],
context_datetime=at_datetime,
evaluation_shift=entry["evaluation_shift"]
) for entry in achievements
}
# get the subjects...
subjects = {}
for sid in sid_set:
subjects[sid] = Subject.get_ancestor_subjects(
subject_id=sid,
of_type_id=None,
from_date=at_datetime,
to_date=at_datetime,
whole_time_required=False
)
for sid in sid_set:
for achievement in achievements:
achievement_date = achievement_id_to_achievement_date[achievement["id"]]
compared_subjects = [s for s in subjects[sid].values() if s["subjecttype_id"] in achievement["compared_subjecttypes"]]
csids = set([x["subject_id"] for x in compared_subjects] + [subject_id,])
q = t_progress.delete().where(and_(
t_progress.c.subject_id.in_(csids),
t_progress.c.achievement_id == achievement["id"],
t_progress.c.achievement_date == AchievementDate.db_format(achievement_date),
))
update_connection().execute(q)
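# Illustrative usage sketch (never called by the engine; assumes a configured DBSession
# and an existing variable named "invite_users"):
def _example_increase_value():
    # Count one "invite_users" event for subject 42 happening right now; the value is
    # also populated to the subject's current ancestors, and all affected progress rows
    # are invalidated.
    Value.increase_value(
        variable_name="invite_users",
        subject_id=42,
        value=1.0,
        key="",
        at_datetime=dt_now(),
    )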
class AchievementCategory(ABase):
"""A category for grouping achievement types"""
@classmethod
@cache_general.cache_on_arguments()
def get_achievementcategory(cls, achievementcategory_id):
return DBSession.execute(t_achievementcategories.select().where(t_achievementcategories.c.id==achievementcategory_id)).fetchone()
def __unicode__(self, *args, **kwargs):
return self.name + " (ID: %s)" % (self.id,)
class AchievementDate:
def __init__(self, from_date, to_date):
self.from_date = from_date
self.to_date = to_date
def __repr__(self):
return "AchievementDate(%s, %s)" % (str(self.from_date), str(self.to_date))
def __str__(self):
return self.from_date.isoformat()
def __json__(self, *args, **kw):
return self.from_date.isoformat()
def __lt__(self, other):
return AchievementDate.db_format(self) < AchievementDate.db_format(other)
@classmethod
def db_format(cls, instance):
return instance.from_date if instance else None
@classmethod
def compute(cls, evaluation_timezone, evaluation_type, evaluation_shift, context_datetime):
"""
This computes the datetime to identify the time of the achievement.
Only relevant for repeating achievements (monthly, yearly, weekly, daily)
Returns None for all other achievement types
"""
if evaluation_type and not evaluation_timezone:
evaluation_timezone = "UTC"
tzobj = pytz.timezone(evaluation_timezone)
if not context_datetime:
dt = datetime.datetime.now(tzobj)
else:
dt = context_datetime.astimezone(tzobj)
from_date = dt
to_date = dt
if evaluation_type == "yearly":
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) - datetime.timedelta(seconds=evaluation_shift)))
from_date = from_date.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) + datetime.timedelta(seconds=evaluation_shift)))
to_date = from_date + relativedelta.relativedelta(years=1)
elif evaluation_type == "monthly":
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) - datetime.timedelta(seconds=evaluation_shift)))
from_date = from_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) + datetime.timedelta(seconds=evaluation_shift)))
to_date = from_date + relativedelta.relativedelta(months=1)
elif evaluation_type == "weekly":
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) - datetime.timedelta(seconds=evaluation_shift)))
from_date = from_date - datetime.timedelta(days=from_date.weekday())
from_date = from_date.replace(hour=0, minute=0, second=0, microsecond=0)
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) + datetime.timedelta(seconds=evaluation_shift)))
to_date = from_date + relativedelta.relativedelta(weeks=1)
elif evaluation_type == "daily":
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) - datetime.timedelta(seconds=evaluation_shift)))
from_date = from_date.replace(hour=0, minute=0, second=0, microsecond=0)
if evaluation_shift:
from_date = tzobj.localize((from_date.replace(tzinfo=None) + datetime.timedelta(seconds=evaluation_shift)))
to_date = from_date + relativedelta.relativedelta(days=1)
elif evaluation_type == "immediately":
return None
elif evaluation_type == "end":
return None
return AchievementDate(
from_date = from_date.astimezone(tzobj),
to_date = to_date.astimezone(tzobj)
)
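# Illustrative sketch (never called by the engine): compute the current weekly evaluation
# window in Europe/Berlin, shifted so weeks start Monday 06:00 instead of 00:00
# (evaluation_shift is given in seconds):
def _example_weekly_window():
    window = AchievementDate.compute(
        evaluation_timezone="Europe/Berlin",
        evaluation_type="weekly",
        evaluation_shift=6 * 3600,
        context_datetime=None,  # None means "now"
    )
    return window.from_date, window.to_date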
class Achievement(ABase):
"""A collection of goals which has multiple :class:`AchievementProperty` and :class:`Reward`."""
def __unicode__(self, *args, **kwargs):
return self.name + " (ID: %s)" % (self.id,)
@classmethod
def is_leaderboard(cls, achievement):
return achievement["goal"] is None
@classmethod
@cache_general.cache_on_arguments()
def get_achievement(cls,achievement_id):
achievement = rowproxy2dict(DBSession.execute(t_achievements.select().where(t_achievements.c.id == achievement_id)).fetchone())
compared_subjecttypes = [x["id"] for x in DBSession.execute(t_achievement_compared_subjecttypes.select().where(t_achievement_compared_subjecttypes.c.achievement_id == achievement_id)).fetchall()]
domain_subjects = [x["id"] for x in DBSession.execute(t_achievement_compared_subjecttypes.select().where(t_achievement_compared_subjecttypes.c.achievement_id == achievement_id)).fetchall()]
achievement['compared_subjecttypes'] = compared_subjecttypes
achievement['domain_subjects'] = domain_subjects
return achievement
@classmethod
def get_achievements_by_subject_for_today(cls,subject):
"""Returns all achievements that are relevant for the subject today.
This is needed as achievements may be limited to a specific time (e.g. only during holidays)
"""
def generate_achievements_by_subject_for_today():
today = datetime.date.today()
by_loc = cls.get_achievements_by_location(coords(subject))
by_date = cls.get_achievements_by_date(today)
return {x["id"]: x for x in by_loc+by_date}.values()
key = str(subject["id"])
expiration_time = seconds_until_end_of_day(subject["timezone"])
return cache_achievements_by_subject_for_today.get_or_create(key, generate_achievements_by_subject_for_today, expiration_time=expiration_time)
#We need to fetch all achievement data in one of these methods -> by_date is just queried once per date
@classmethod
@cache_general.cache_on_arguments()
def get_achievements_by_location(cls, latlng):
"""return achievements which are valid in that location."""
distance = calc_distance(latlng, (t_achievements.c.lat, t_achievements.c.lng)).label("distance")
q = select([t_achievements, distance])\
.where(or_(and_(t_achievements.c.lat==None, t_achievements.c.lng==None),
distance < t_achievements.c.max_distance))
return [rowproxy2dict(x) for x in DBSession.execute(q).fetchall()]
@classmethod
@cache_general.cache_on_arguments()
def get_achievements_by_date(cls, date):
"""return achievements which are valid at that date"""
q = t_achievements.select().where(and_(or_(t_achievements.c.valid_start == None,
t_achievements.c.valid_start <= date),
or_(t_achievements.c.valid_end == None,
t_achievements.c.valid_end >= date)
))
return [rowproxy2dict(x) for x in DBSession.execute(q).fetchall()]
@classmethod
def get_relevant_contexts(cls, subject_id, achievement, from_date, to_date, whole_time_required):
if achievement["comparison_type"]=="context_subject":
return Subject.get_ancestor_subjects(
subject_id=subject_id,
of_type_id=achievement["context_subjecttype_id"],
from_date=from_date if from_date else dt_now(),
to_date=to_date if to_date else dt_now(),
whole_time_required=whole_time_required
)
return [None,]
@classmethod
def get_relevant_subjects_by_achievement_and_subject(cls, achievement, subject, context_subject_id, from_date, to_date):
"""
return all relevant other subjects for the leaderboard. This method is used to collect all subjects for the output; the reverse method is used to clear the caches properly.
Depends on the "comparison_type" attribute of the achievement: "relations", "global" or "context_subject".
"""
# this is needed to compute the leaderboards
#subjects=[subject_id,]
from gengine.app.leaderboard import RelationsLeaderBoardSubjectSet, GlobalLeaderBoardSubjectSet, \
ContextSubjectLeaderBoardSubjectSet
subjects=[]
if achievement["comparison_type"] == "relations":
subjects = RelationsLeaderBoardSubjectSet.forward(
subject_id=subject["id"],
from_date=from_date,
to_date=to_date,
whole_time_required=achievement["lb_subject_part_whole_time"]
)
subjects = set(subjects) | {subject["id"]}
elif achievement["comparison_type"] == "global":
subjects = GlobalLeaderBoardSubjectSet.forward(
subjecttype_id=subject["subjecttype_id"],
from_date=from_date,
to_date=to_date,
whole_time_required=achievement["lb_subject_part_whole_time"]
)
elif achievement["comparison_type"] == "context_subject":
subjects = ContextSubjectLeaderBoardSubjectSet.forward(
subjecttype_id=subject["subjecttype_id"],
context_subject_id=context_subject_id,
from_date=from_date,
to_date=to_date,
whole_time_required=achievement["lb_subject_part_whole_time"]
)
return subjects
@classmethod
def get_level(cls, subject_id, achievement_id, achievement_date, context_subject_id):
"""get the current level of the subject for this achievement."""
def generate():
q = select([t_evaluations.c.level,
t_evaluations.c.achieved_at,],
and_(t_evaluations.c.subject_id == subject_id,
t_evaluations.c.achievement_date == AchievementDate.db_format(achievement_date),
t_evaluations.c.context_subject_id == context_subject_id,
t_evaluations.c.achievement_id == achievement_id,
t_evaluations.c.achieved == True)).order_by(t_evaluations.c.level.desc())
return [x for x in DBSession.execute(q).fetchall()]
return cache_achievements_subjects_levels.get_or_create("%s_%s_%s_%s" % (str(subject_id), str(achievement_id), str(achievement_date), str(context_subject_id)), generate)
@classmethod
def get_level_int(cls, subject_id, achievement_id, achievement_date, context_subject_id):
"""get the current level of the subject for this achievement as int (0 if the user does not have this achievement)"""
lvls = Achievement.get_level(subject_id, achievement_id, achievement_date, context_subject_id)
if not lvls:
return 0
else:
return lvls[0]["level"]
@classmethod
def basic_output(cls, achievement, include_levels=True, max_level_included=None):
"""construct the basic basic_output structure for the achievement."""
achievementcategory = None
if achievement["achievementcategory_id"]!=None:
achievementcategory = AchievementCategory.get_achievementcategory(achievement["achievementcategory_id"])
out = {
"id" : achievement["id"],
"view_permission" : achievement["view_permission"],
"internal_name" : achievement["name"],
"maxlevel" : achievement["maxlevel"],
"priority" : achievement["priority"],
"hidden" : achievement["hidden"],
"achievementcategory" : achievementcategory["name"] if achievementcategory!=None else ""
}
if include_levels:
levellimit = achievement["maxlevel"]
if max_level_included:
max_level_included = min(max_level_included, levellimit)
out["levels"] = {
str(i): {
"level": i,
"goal": evaluate_value_expression(achievement["goal"], {"level": i}),
"rewards": {str(r["id"]): {
"id": r["id"],
"reward_id": r["reward_id"],
"name": r["name"],
"value": evaluate_string(r["value"], {"level": i}),
"value_translated": Translation.trs(r["value_translation_id"], {"level": i}),
} for r in Achievement.get_rewards(achievement["id"], i)},
"properties": {str(r["property_id"]): {
"property_id": r["property_id"],
"name": r["name"],
"value": evaluate_string(r["value"], {"level": i}),
"value_translated": Translation.trs(r["value_translation_id"], {"level": i}),
} for r in Achievement.get_achievement_properties(achievement["id"], i)}
} for i in range(1, max_level_included+1)}
return out
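# Example shape (illustrative): with maxlevel=3 and goal="5*level",
# out["levels"]["2"] would be {"level": 2, "goal": 10.0, "rewards": {...},
# "properties": {...}}; the goal formula is evaluated per level.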
@classmethod
def evaluate(cls, compared_subject, achievement_id, achievement_date, context_subject_id, execute_triggers=True, generate_output=True, skip_trigger_action=False):
"""evaluate the achievement including all its subgoals for the subject.
return the basic_output for the achievement plus information about the newly achieved levels
"""
def generate():
achievement = Achievement.get_achievement(achievement_id)
#print("Generating for %s, generate_output=%s, comparison_type=%s" %(achievement["name"], generate_output, achievement["comparison_type"]))
#print("Context Subject ID: %s" % (context_subject_id, ))
goal = None
subject_id = compared_subject["id"]
is_player = (compared_subject["subjecttype_id"] == achievement["player_subjecttype_id"])
# get current level
subject_has_level = Achievement.get_level_int(
subject_id=subject_id,
achievement_id=achievement["id"],
achievement_date=achievement_date,
context_subject_id=context_subject_id
)
# if there is at least one more level, use that. otherwise the current level is also the next level
subject_wants_level = min((subject_has_level or 0)+1, achievement["maxlevel"])
achieved = achieved_before = (subject_has_level == subject_wants_level)
# Check if the current achievement is already evaluated (these rows are deleted when the value is increased)
# This value is independent of the context, as it does not contain the comparison!
current_progress = current_progress_before = DBSession.execute(select([t_progress.c.value]).where(and_(
t_progress.c.subject_id == subject_id,
t_progress.c.achievement_id == achievement_id,
t_progress.c.achievement_date == AchievementDate.db_format(achievement_date),
))).scalar()
if current_progress is None:
# No valid value found. Compute it!
current_progress = next(
(x["value"] for x in Achievement.compute_progress(achievement, compared_subject, achievement_date) if x["subject_id"]==subject_id),
0.0
)
if is_player:
goal = evaluate_value_expression(achievement["goal"], {
"level": subject_wants_level
})
if goal is not None and achievement["operator"] == "geq" and current_progress >= goal:
achieved = True
#current_progress = min(current_progress, goal)
elif goal is not None and achievement["operator"] == "leq" and current_progress <= goal:
achieved = True
#current_progress = max(current_progress, goal)
if current_progress != current_progress_before:
if current_progress_before is not None:
update_connection().execute(t_progress.update().values({
"value": current_progress
}).where(and_(
t_progress.c.subject_id == subject_id,
t_progress.c.achievement_id == achievement_id,
t_progress.c.achievement_date == AchievementDate.db_format(achievement_date)
)))
else:
update_connection().execute(t_progress.insert({
"value": current_progress,
"subject_id": subject_id,
"achievement_id": achievement_id,
"achievement_date": AchievementDate.db_format(achievement_date),
}))
# Evaluate triggers
if is_player and execute_triggers:
Achievement.select_and_execute_triggers(
achievement=achievement,
achievement_date=achievement_date,
subject=compared_subject,
level=subject_wants_level,
current_goal=goal,
previous_goal=evaluate_value_expression(achievement["goal"], {
"level": subject_has_level
}),
value=current_progress,
context_subject_id=context_subject_id,
skip_trigger_action=skip_trigger_action
)
# Now we have the value for the current level
leaderboard = None
leaderboard_position = None
if generate_output is True and achievement["comparison_type"] in ("relations", "global", "context_subject"):
# This is a leaderboard! Compare to others
# Find all other subjects that we want to compare to
subject_ids = Achievement.get_relevant_subjects_by_achievement_and_subject(
achievement=achievement,
subject=compared_subject,
context_subject_id=context_subject_id,
from_date=achievement_date.from_date if achievement_date else None,
to_date=achievement_date.to_date if achievement_date else None
)
#print("relevant subjects:"+",".join(str(s) for s in subject_ids))
leaderboard = Achievement.get_leaderboard(
achievement=achievement,
achievement_date=achievement_date,
subject_ids=subject_ids,
context_subject_id=context_subject_id #this is needed to trigger the missing evaluations
)
own_filter = list(filter(lambda x: x["subject"]["id"] == subject_id, leaderboard))
if len(own_filter)>0:
leaderboard_position = own_filter[0]["position"]
else:
leaderboard_position = None
output = ""
new_level_output = None
last_recursion_step = True # will be false, if the full basic_output is generated in a recursion step
if achieved and subject_has_level < achievement["maxlevel"]:
#NEW LEVEL YEAH!
if generate_output:
new_level_output = {
"rewards": {
str(r["id"]): {
"id": r["id"],
"reward_id": r["reward_id"],
"name": r["name"],
"value": evaluate_string(r["value"], {"level": subject_wants_level}),
"value_translated": Translation.trs(r["value_translation_id"], {"level": subject_wants_level}),
} for r in Achievement.get_rewards(achievement["id"], subject_wants_level)
},
"properties": {
str(r["property_id"]): {
"property_id": r["property_id"],
"name": r["name"],
"value": evaluate_string(r["value"], {"level": subject_wants_level}),
"value_translated": Translation.trs(r["value_translation_id"], {"level": subject_wants_level})
} for r in Achievement.get_achievement_properties(achievement["id"], subject_wants_level)
},
"level": subject_wants_level
}
evaluation = update_connection().execute(select([t_evaluations.c.id]).where(and_(
t_evaluations.c.subject_id == subject_id,
t_evaluations.c.achievement_id == achievement["id"],
t_evaluations.c.achievement_date == AchievementDate.db_format(achievement_date),
t_evaluations.c.context_subject_id == context_subject_id,
t_evaluations.c.level == subject_wants_level,
))).fetchone()
if evaluation:
update_connection().execute(t_evaluations.update().values({
"achieved": True
}).where(
t_evaluations.c.id == evaluation["id"]
))
else:
update_connection().execute(t_evaluations.insert().values({
"subject_id": subject_id,
"achievement_id": achievement["id"],
"achievement_date": AchievementDate.db_format(achievement_date),
"context_subject_id": context_subject_id,
"level": subject_wants_level,
"achieved": True,
"achieved_at": dt_now()
}))
#invalidate current level cache
cache_achievements_subjects_levels.delete("%s_%s_%s_%s" % (str(subject_id), str(achievement_id), str(achievement_date), str(context_subject_id)))
subject_has_level = subject_wants_level
subject_wants_level = subject_wants_level+1
#Goal.clear_subject_goal_caches(subject_id, [(g["goal_id"], achievement_date) for g in goal_evals.values()])
#the level has been updated, we need to do recursion now...
#but only if there are more levels...
if subject_has_level < achievement["maxlevel"]:
output = generate()
last_recursion_step = False
if generate_output and last_recursion_step: #is executed, if this is the last recursion step
output = Achievement.basic_output(achievement, True, max_level_included=subject_has_level+1)
context_subject_output = None
if context_subject_id:
context_subject_output = Subject.basic_output(Subject.get_subject(context_subject_id))
output.update({
"level": subject_has_level,
"levels_achieved": {
str(x["level"]): x["achieved_at"] for x in Achievement.get_level(subject_id, achievement["id"], achievement_date, context_subject_id)
},
"maxlevel": achievement["maxlevel"],
"new_levels": {},
"progress": current_progress,
"goal": goal,
"leaderboard": leaderboard,
"leaderboard_position": leaderboard_position,
"achievement_date": achievement_date,
"context_subject": context_subject_output,
"evaluation": achievement["evaluation"],
"evaluation_timezone": achievement["evaluation_timezone"]
})
if generate_output and new_level_output is not None: #if we reached a new level in this recursion step, add that level's rewards and properties
output["new_levels"][str(subject_has_level)] = new_level_output
return output
#TODO: Caching may only be implemented for generate_output=True
return generate()
#return cache_achievement_eval.get_or_create("%s_%s_%s_%s" % (subject["id"], achievement_id, achievement_date, context_subject_id), generate)
#@classmethod
#def invalidate_evaluate_cache(cls, subject_id, achievement, achievement_date):
# """
# This method is called to invalidate the achievement evaluation output when a value is increased.
# For leaderboards this means, that we need to reset the achievement evaluation output for all other subjects in that leaderboard!
# """
#
# #We need to invalidate for all relevant users because of the leaderboards
# for user_id, user_meta in Achievement.get_relevant_users_by_achievement_and_user_reverse(achievement, user_id).items():
# group_ids = user_meta.get("groups", {None,})
# for gid in group_ids:
# cache_achievement_eval.delete("%s_%s_%s_%s" % (user_id, achievement["id"], achievement_date, gid))
@classmethod
@cache_general.cache_on_arguments()
def get_rewards(cls, achievement_id, level):
"""return the new rewards which are given for the achievement level."""
this_level = DBSession.execute(select([t_rewards.c.id.label("reward_id"),
t_achievements_rewards.c.id,
t_rewards.c.name,
t_achievements_rewards.c.from_level,
t_achievements_rewards.c.value,
t_achievements_rewards.c.value_translation_id],
from_obj=t_rewards.join(t_achievements_rewards))\
.where(and_(or_(t_achievements_rewards.c.from_level <= level,
t_achievements_rewards.c.from_level == None),
t_achievements_rewards.c.achievement_id == achievement_id))\
.order_by(t_achievements_rewards.c.from_level))\
.fetchall()
prev_level = DBSession.execute(select([t_rewards.c.id.label("reward_id"),
t_achievements_rewards.c.id,
t_achievements_rewards.c.value,
t_achievements_rewards.c.value_translation_id],
from_obj=t_rewards.join(t_achievements_rewards))\
.where(and_(or_(t_achievements_rewards.c.from_level <= level-1,
t_achievements_rewards.c.from_level == None),
t_achievements_rewards.c.achievement_id == achievement_id))\
.order_by(t_achievements_rewards.c.from_level))\
.fetchall()
#now compute the diff :-/
build_hash = lambda x, l: hashlib.md5((str(x["id"])+str(evaluate_string(x["value"], {"level": l}))+str(Translation.trs(x["value_translation_id"], {"level": l}))).encode("UTF-8")).hexdigest()
prev_hashes = {build_hash(x, level-1) for x in prev_level}
#this_hashes = {build_hash(x,level) for x in this_level}
retlist = [x for x in this_level if not build_hash(x, level) in prev_hashes]
return retlist
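# Example (illustrative): if levels 1-3 all reward "badge" with the constant value "gold",
# the hash of (id, evaluated value, translation) is identical for level-1 and level-2, so
# get_rewards(achievement_id, 2) omits it; only rewards whose evaluated value actually
# changes at `level` are reported as new.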
@classmethod
@cache_general.cache_on_arguments()
def get_achievement_properties(cls, achievement_id, level):
"""return all properties which are associated to the achievement level."""
return DBSession.execute(select([t_achievementproperties.c.id.label("property_id"),
t_achievementproperties.c.name,
t_achievements_achievementproperties.c.from_level,
t_achievements_achievementproperties.c.value,
t_achievements_achievementproperties.c.value_translation_id],
from_obj=t_achievementproperties.join(t_achievements_achievementproperties))\
.where(and_(or_(t_achievements_achievementproperties.c.from_level <= level,
t_achievements_achievementproperties.c.from_level == None),
t_achievements_achievementproperties.c.achievement_id == achievement_id))\
.order_by(t_achievements_achievementproperties.c.from_level))\
.fetchall()
@classmethod
def compute_progress(cls, achievement, subject, achievement_date):
"""computes the progress of the goal for the given user_id
goal attributes:
- goal: the value that is used for comparison
- operator: "geq" or "leq"; used for comparison
- condition: the rule as python code
- group_by_dateformat: passed as a parameter to to_char ( http://www.postgresql.org/docs/9.3/static/functions-formatting.html )
e.g. you can select and group by the weekday by using "ID" for ISO 8601 day of the week (1-7) which can afterwards be used in the condition
- group_by_key: group by the key of the values table
- timespan: number of days which are considered (uses UTC, i.e. days*24 hours)
- maxmin: "max" or "min" - select min or max value after grouping
- evaluation: "daily", "weekly", "monthly", "yearly" evaluation (subject's timezone)
"""
subject_id = subject["id"]
def generate_statement_cache():
# Transform the condition DSL to sqlalchemy WHERE-format
condition = evaluate_condition(
achievement["condition"],
column_variable=t_variables.c.name.label("variable_name"),
column_key=t_values.c.key
)
# We can group the values by a key or date (e.g. achieve something on a Sunday)
group_by_dateformat = achievement["group_by_dateformat"]
group_by_key = achievement["group_by_key"]
# How old may the values be, that are considered in this achievement?
timespan = achievement["timespan"]
# When we group by key or dateformat: Should we select the max or min value of the groups?
maxmin = achievement["maxmin"]
# Some achievements occur periodically. This field defines when and how often they are evaluated.
evaluation_type = achievement["evaluation"]
# Weeks don't start on the same day everywhere and in every use-case. Same for years, days and months.
# We can shift them by a fixed amount of seconds!
evaluation_shift = achievement["evaluation_shift"]
# For time-related achievements, the timezone should be fixed if multiple subjects are involved (leaderboard), as otherwise the comparison is not in sync
# For single-user achievements (no leaderboard), we can use the timezone of each subject
timezone = achievement["evaluation_timezone"]
#prepare
select_cols = [func.sum(t_values.c.value).label("value"),
t_values.c.subject_id]
j = t_values.join(t_variables)
# We need to access the subject's timezone later
j = j.join(t_subjects, t_subjects.c.id == t_values.c.subject_id)
datetime_col = None
if group_by_dateformat:
# here we need to convert to the subject's time zone, as we might need to group by e.g. the subject's weekday
if timezone:
datetime_col = func.to_char(text("values.datetime AT TIME ZONE '%s'" % (timezone,)), group_by_dateformat).label("datetime")
else:
datetime_col = func.to_char(text("values.datetime AT TIME ZONE subjects.timezone"), group_by_dateformat).label("datetime")
select_cols.append(datetime_col)
if group_by_key:
select_cols.append(t_values.c.key)
#build query
q = select(select_cols,
from_obj=j)\
.where(t_values.c.subject_id == bindparam("subject_id"))\
.group_by(t_values.c.subject_id)
if condition is not None:
q = q.where(condition)
if timespan:
#here we can use the utc time
q = q.where(t_values.c.datetime >= datetime.datetime.utcnow()-datetime.timedelta(days=timespan))
if evaluation_type != "immediately":
if evaluation_type in ('daily', 'weekly', 'monthly', 'yearly'):
q = q.where(and_(
t_values.c.datetime >= achievement_date.from_date,
t_values.c.datetime < achievement_date.to_date
))
elif evaluation_type == "end":
pass
# TODO: implement for evaluation_type == "end"
if datetime_col is not None or group_by_key:
if datetime_col is not None:
q = q.group_by(datetime_col)
if group_by_key:
q = q.group_by(t_values.c.key)
query_with_groups = q.alias()
select_cols2 = [query_with_groups.c.subject_id]
if maxmin == "min":
select_cols2.append(func.min(query_with_groups.c.value).label("value"))
else:
select_cols2.append(func.max(query_with_groups.c.value).label("value"))
combined_user_query = select(select_cols2, from_obj=query_with_groups)\
.group_by(query_with_groups.c.subject_id)
return combined_user_query
else:
return q
#q = cache_goal_statements.get_or_create(str(goal["id"]),generate_statement_cache)
# TODO: Cache the statement / Make it serializable for caching in redis
q = generate_statement_cache()
return DBSession.execute(q, {'subject_id': subject_id})
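# Roughly, the generated statement looks like this (illustrative sketch, ignoring the
# optional timespan/date filters):
#   SELECT sum(values.value) AS value, values.subject_id
#   FROM values JOIN variables ... JOIN subjects ...
#   WHERE values.subject_id = :subject_id AND <condition>
#   GROUP BY values.subject_id [, <to_char(...)>] [, values.key]
# If grouping columns are present, an outer query then picks the max/min of the grouped
# sums per subject.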
@classmethod
def select_and_execute_triggers(cls, achievement, achievement_date, subject, level, current_goal, value, previous_goal, context_subject_id, skip_trigger_action=False):
subject_id = subject["id"]
if previous_goal == current_goal:
previous_goal = 0.0
j = t_achievement_trigger_step_executions.join(t_achievement_trigger_steps).join(t_achievement_triggers)
executions = {r["achievement_trigger_id"]: r["step"] for r in
DBSession.execute(
select([t_achievement_trigger_steps.c.id.label("step_id"),
t_achievement_trigger_steps.c.achievement_trigger_id,
t_achievement_trigger_steps.c.step], from_obj=j).\
where(and_(t_achievement_triggers.c.achievement_id == achievement["id"],
t_achievement_trigger_step_executions.c.achievement_date == AchievementDate.db_format(achievement_date),
t_achievement_trigger_step_executions.c.subject_id == subject_id,
t_achievement_trigger_step_executions.c.execution_level == level))).fetchall()
}
j = t_achievement_trigger_steps.join(t_achievement_triggers)
trigger_steps = DBSession.execute(select([
t_achievement_trigger_steps.c.id,
t_achievement_trigger_steps.c.achievement_trigger_id,
t_achievement_trigger_steps.c.step,
t_achievement_trigger_steps.c.condition_type,
t_achievement_trigger_steps.c.condition_percentage,
t_achievement_trigger_steps.c.action_type,
t_achievement_trigger_steps.c.action_translation_id,
t_achievement_trigger_steps.c.action_subjecttype_id,
t_achievement_trigger_steps.c.action_value,
t_achievement_trigger_steps.c.action_variable_id,
t_achievement_triggers.c.execute_when_complete,
], from_obj=j).where(t_achievement_triggers.c.achievement_id == achievement["id"], )).fetchall()
trigger_steps = [s for s in trigger_steps if s["step"] > executions.get(s["achievement_trigger_id"], -sys.maxsize)]
exec_queue = {}
# When editing things here, check the insert_trigger_step_executions_after_step_upsert event listener too!
if len(trigger_steps) > 0:
operator = achievement["operator"]
properties = Achievement.get_achievement_properties(achievement["id"], level)
for step in trigger_steps:
if step["condition_type"] == "percentage" and step["condition_percentage"]:
current_percentage = float(value - previous_goal) / float(current_goal - previous_goal)
required_percentage = step["condition_percentage"]
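# Worked example (numbers illustrative): previous_goal=0, current_goal=50,
# value=45 -> current_percentage = (45 - 0) / (50 - 0) = 0.9, so a step with
# condition_percentage=0.9 fires when the operator is "geq".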
if current_percentage >= 1.0 and required_percentage != 1.0 and not step["execute_when_complete"]:
# When the user reaches the full goal, and there is a trigger at e.g. 90%, we don't want it to be executed anymore.
continue
if (operator == "geq" and current_percentage >= required_percentage) \
or (operator == "leq" and current_percentage <= required_percentage):
if exec_queue.get(step["achievement_trigger_id"], {"step": -sys.maxsize})["step"] < step["step"]:
exec_queue[step["achievement_trigger_id"]] = step
for step in exec_queue.values():
current_percentage = float(value - previous_goal) / float(current_goal - previous_goal)
AchievementTriggerStep.execute(
trigger_step=step,
subject=subject,
current_percentage=current_percentage,
value=value,
achievement_goal=current_goal,
level=level,
properties=properties,
achievement_date=achievement_date,
context_subject_id=context_subject_id,
suppress_actions=skip_trigger_action
)
@classmethod
def get_leaderboard(cls, achievement, achievement_date, subject_ids, context_subject_id):
"""get the leaderboard for the goal and userids"""
q = select([t_progress.c.subject_id,
t_progress.c.value])\
.where(and_(t_progress.c.subject_id.in_(subject_ids),
t_progress.c.achievement_id == achievement["id"],
t_progress.c.achievement_date == AchievementDate.db_format(achievement_date),
))\
.order_by(t_progress.c.value.desc(),
t_progress.c.subject_id.desc())
items = DBSession.execute(q).fetchall()
subjects = Subject.get_subjects(subject_ids)
requested_subject_ids = set(int(s) for s in subject_ids)
values_found_for_subject_ids = set([int(x["subject_id"]) for x in items])
missing_subject_ids = requested_subject_ids - values_found_for_subject_ids
missing_subjects = Subject.get_subjects(missing_subject_ids).values()
if len(missing_subjects)>0:
#the goal has not been evaluated for some subjects...
#achievement = Achievement.get_achievement(goal["achievement_id"])
for subject in missing_subjects:
subject_has_level = Achievement.get_level_int(
subject["id"],
achievement["id"],
achievement_date,
context_subject_id
)
subject_wants_level = min((subject_has_level or 0)+1, achievement["maxlevel"])
Achievement.evaluate(
compared_subject=subject,
achievement_id=achievement["id"],
achievement_date=achievement_date,
context_subject_id=context_subject_id,
execute_triggers=True,
generate_output=False
)
#rerun the query
items = DBSession.execute(q).fetchall()
positions = [{"subject": Subject.basic_output(subjects[items[i]["subject_id"]]),
"value": items[i]["value"],
"position": i} for i in range(0, len(items))]
return positions
class AchievementProperty(ABase):
"""A AchievementProperty describes the :class:`Achievement`s of our system.
Examples: name, image, description, xp
Additionally Properties can be used as variables.
This is useful to model goals like "reach 1000xp"
"""
def __unicode__(self, *args, **kwargs):
return self.name + " (ID: %s)" % (self.id,)
class AchievementAchievementProperty(ABase):
"""A poperty value for an :class:`Achievement`"""
pass
class Reward(ABase):
"""Rewards are given when reaching :class:`Achievement`s.
Examples: badge, item
"""
def __unicode__(self, *args, **kwargs):
return self.name + " (ID: %s)" % (self.id,)
class AchievementReward(ABase):
"""A Reward value for an :class:`Achievement` """
@classmethod
def get_achievement_reward(cls, achievement_reward_id):
return DBSession.execute(t_achievements_rewards.select(t_achievements_rewards.c.id == achievement_reward_id)).fetchone()
class Evaluation(ABase):
pass
class Progress(ABase):
pass
class Language(ABase):
def __unicode__(self, *args, **kwargs):
return "%s" % (self.name,)
class TranslationVariable(ABase):
def __unicode__(self, *args, **kwargs):
return "%s" % (self.name,)
class Translation(ABase):
def __unicode__(self, *args, **kwargs):
return "%s" % (self.text,)
@classmethod
@cache_translations.cache_on_arguments()
def trs(cls, translation_id, params={}):
"""returns a map of translations for the translation_id for ALL languages"""
if translation_id is None:
return None
try:
# TODO support params which are results of this function itself (dicts of lang -> value)
# maybe even better: add possibility to refer to other translationvariables directly (so they can be modified later on)
ret = {str(x["name"]): evaluate_string(x["text"], params) for x in cls.get_translation_variable(translation_id)}
except Exception as e:
ret = {str(x["name"]): x["text"] for x in cls.get_translation_variable(translation_id)}
log.exception("Evaluation of string-forumlar failed: %s" % (ret.get(get_settings().get("fallback_language", "en"), translation_id),))
if not get_settings().get("fallback_language", "en") in ret:
ret[get_settings().get("fallback_language", "en")] = "[not_translated]_"+str(translation_id)
for lang in cls.get_languages():
if not str(lang["name"]) in ret:
ret[str(lang["name"])] = ret[get_settings().get("fallback_language", "en")]
return ret
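# Usage sketch for trs() (the translation_id and params here are hypothetical):
#   Translation.trs(42, {"level": 3})
#   -> {"en": "You reached level 3", "de": "Du hast Level 3 erreicht", ...}
# Languages without a translation fall back to the configured fallback_language.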
@classmethod
@cache_translations.cache_on_arguments()
def get_translation_variable(cls, translation_id):
return DBSession.execute(select([t_translations.c.text,
t_languages.c.name],
from_obj=t_translationvariables.join(t_translations).join(t_languages))\
.where(t_translationvariables.c.id == translation_id)).fetchall()
@classmethod
@cache_translations.cache_on_arguments()
def get_languages(cls):
return DBSession.execute(t_languages.select()).fetchall()
class SubjectMessage(ABase):
def __unicode__(self, *args, **kwargs):
return "Message: %s" % (Translation.trs(self.translation_id,self.params).get(get_settings().get("fallback_language","en")),)
@classmethod
def get_text(cls, row):
return Translation.trs(row["translation_id"],row["params"])
@property
def text(self):
return Translation.trs(self.translation_id, self.params)
@classmethod
def deliver(cls, message):
from gengine.app.push import send_push_message
text = SubjectMessage.get_text(message)
language = get_settings().get("fallback_language", "en")
j = t_subjects.join(t_languages)
subject_language = DBSession.execute(select([t_languages.c.name], from_obj=j).where(t_subjects.c.id == message["subject_id"])).fetchone()
if subject_language:
language = subject_language["name"]
translated_text = text[language]
if not message["has_been_pushed"]:
try:
send_push_message(
user_id=message["subject_id"],
text=translated_text,
custom_payload={},
title=get_settings().get("push_title", "Gamification-Engine")
)
except Exception as e:
log.error(e, exc_info=True)
else:
DBSession.execute(t_subject_messages.update().values({"has_been_pushed": True}).where(t_subject_messages.c.id == message["id"]))
class AchievementTrigger(ABase):
def __unicode__(self, *args, **kwargs):
return "GoalTrigger: %s" % (self.id,)
class AchievementTriggerStep(ABase):
def __unicode__(self, *args, **kwargs):
return "GoalTriggerStep: %s" % (self.id,)
@classmethod
def execute(cls, trigger_step, subject, current_percentage, value, achievement_goal, level, properties, achievement_date, context_subject_id, suppress_actions=False):
subject_id = subject["id"]
uS = update_connection()
uS.execute(t_achievement_trigger_step_executions.insert().values({
'subject_id': subject_id,
'trigger_step_id': trigger_step["id"],
'execution_level': level,
'achievement_date': AchievementDate.db_format(achievement_date),
'context_subject_id': context_subject_id
}))
properties = {
r["name"] : Translation.trs(r["value_translation_id"], {"level": level})
for r in properties}
if not suppress_actions:
if trigger_step["action_type"] == "subject_message":
m = SubjectMessage(
subject_id=subject_id,
translation_id=trigger_step["action_translation_id"],
params=dict({
'value': value,
'goal': achievement_goal,
'percentage': current_percentage,
},**properties),
is_read=False,
has_been_pushed=False
)
uS.add(m)
elif trigger_step["action_type"] == "increase_value":
action_value = evaluate_value_expression(trigger_step["action_value"], {
'level': level
})
action_subject_type_id = trigger_step["action_subjecttype_id"]
action_variable_id = trigger_step["action_variable_id"]
action_variable = DBSession.execute(select([t_variables.c.name], from_obj=t_variables).where(t_variables.c.id==action_variable_id)).fetchone()
action_variable_name = action_variable["name"]
at_dt = min(achievement_date.to_date, dt_now())
subjects = []
if action_subject_type_id == subject["subjecttype_id"]:
subjects.append(subject_id)
else:
ancestors = Subject.get_ancestor_subjects(
subject_id=subject_id,
of_type_id=action_subject_type_id,
from_date=at_dt,
to_date=at_dt,
whole_time_required=False
)
subjects += list(ancestors.keys())
descendents = Subject.get_descendent_subjects(
subject_id=subject_id,
of_type_id=action_subject_type_id,
from_date=at_dt,
to_date=at_dt,
whole_time_required=False
)
subjects += list(descendents.keys())
for subj in subjects:
Value.increase_value(
variable_name=action_variable_name,
subject_id=subj,
value=action_value,
key=None,
at_datetime=at_dt
)
class Task(ABase):
def __unicode__(self, *args, **kwargs):
return "Task: %s" % (self.id,)
class TaskExecution(ABase):
def __unicode__(self, *args, **kwargs):
return "TaskExecution: %s" % (self.id,)
@event.listens_for(AchievementTriggerStep, "after_insert")
@event.listens_for(AchievementTriggerStep, 'after_update')
def insert_trigger_step_executions_after_step_upsert(mapper,connection,target):
"""When we create a new Trigger-Step, we must ensure, that is will not be executed for the users who already met the conditions before."""
subject_ids = [x["id"] for x in DBSession.execute(select([t_subjects.c.id, ], from_obj=t_subjects)).fetchall()]
subjects = Subject.get_subjects(subject_ids).values()
achievement = target.trigger.achievement
for subject in subjects:
d = max(achievement["created_at"], subject["created_at"]).replace(tzinfo=pytz.utc)
now = dt_now()
while d <= now:
achievement_date = AchievementDate.compute(
evaluation_timezone=achievement["evaluation_timezone"],
evaluation_type=achievement["evaluation"],
evaluation_shift=achievement["evaluation_shift"],
context_datetime=d
)
context_subject_ids = []
if achievement["comparison_type"] == "context_subject":
context_subject_ids = Subject.get_ancestor_subjects(
subject_id=subject["id"],
of_type_id=achievement["context_subjecttype_id"],
from_date=achievement_date.from_date,
to_date=achievement_date.to_date,
whole_time_required=achievement["lb_subject_part_whole_time"]
)
else:
context_subject_ids.append(None)
for context_subject_id in context_subject_ids:
#print("eval "+str(achievement["id"])+" - "+str(achievement_date.from_date if achievement_date else "None")+" - "+str(context_subject_id))
goal_eval = Achievement.evaluate(
compared_subject=subject,
achievement_id=achievement["id"],
achievement_date=achievement_date,
context_subject_id=context_subject_id,
execute_triggers=True,
generate_output=False,
skip_trigger_action=True
)
if achievement["evaluation"] == "yearly":
d += relativedelta.relativedelta(years=1)
elif achievement["evaluation"] == "monthly":
d += relativedelta.relativedelta(months=1)
elif achievement["evaluation"] == "weekly":
d += relativedelta.relativedelta(weeks=1)
elif achievement["evaluation"] == "daily":
d += relativedelta.relativedelta(days=1)
else:
break
def backref(*args,**kw):
if not "passive_deletes" in kw:
kw["passive_deletes"] = True
return sa_backref(*args,**kw)
def relationship(*args,**kw):
if not "passive_deletes" in kw:
kw["passive_deletes"] = True
if "backref" in kw:
if type(kw["backref"]=="str"):
kw["backref"] = backref(kw["backref"])
return sa_relationship(*args,**kw)
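# Example: relationship(Subject, backref="devices") expands to
# sa_relationship(Subject, backref=sa_backref("devices", passive_deletes=True),
#                 passive_deletes=True),
# leaving child-row cleanup to the database's ON DELETE rules instead of the ORM.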
mapper(AuthUser, t_auth_users, properties={
'roles': relationship(AuthRole, secondary=t_auth_users_roles, backref="users"),
'subject': relationship(Subject, backref="auth_users")
})
mapper(AuthToken, t_auth_tokens, properties={
'user': relationship(AuthUser, backref="tokens")
})
mapper(AuthRole, t_auth_roles, properties={
})
mapper(AuthRolePermission, t_auth_roles_permissions, properties={
'role': relationship(AuthRole, backref="permissions"),
})
mapper(Subject, t_subjects, properties={
'friends': relationship(Subject, secondary=t_subjectrelations,
primaryjoin=t_subjects.c.id == t_subjectrelations.c.from_id,
secondaryjoin=t_subjects.c.id == t_subjectrelations.c.to_id),
'language': relationship(Language, backref="subjects"),
'type': relationship(SubjectType, backref="subjects"),
'subsubjects': relationship(Subject, secondary=t_subjects_subjects,
primaryjoin=t_subjects.c.id==t_subjects_subjects.c.part_of_id,
secondaryjoin=t_subjects.c.id==t_subjects_subjects.c.subject_id,
backref="part_of_subjects"),
})
mapper(SubjectType, t_subjecttypes, properties={
'subtypes': relationship(SubjectType, secondary=t_subjecttypes_subjecttypes,
primaryjoin=t_subjecttypes.c.id == t_subjecttypes_subjecttypes.c.part_of_id,
secondaryjoin=t_subjecttypes.c.id == t_subjecttypes_subjecttypes.c.subjecttype_id,
backref="part_of_types"),
})
mapper(SubjectDevice, t_subject_device, properties={
'subject': relationship(Subject, backref="devices"),
})
mapper(Variable, t_variables, properties={
'values': relationship(Value),
})
mapper(Value, t_values, properties={
'subject': relationship(Subject, primaryjoin=t_values.c.subject_id == t_subjects.c.id),
'agent': relationship(Subject, primaryjoin=t_values.c.agent_id == t_subjects.c.id),
'variable': relationship(Variable)
})
mapper(AchievementCategory, t_achievementcategories)
mapper(Achievement, t_achievements, properties={
#'requirements': relationship(Achievement, secondary=t_requirements,
# primaryjoin=t_achievements.c.id==t_requirements.c.from_id,
# secondaryjoin=t_achievements.c.id==t_requirements.c.to_id,
# ),
#'denials': relationship(Achievement, secondary=t_denials,
# primaryjoin=t_achievements.c.id==t_denials.c.from_id,
# secondaryjoin=t_achievements.c.id==t_denials.c.to_id,
# ),
#'subjects': relationship(AchievementSubject, backref='achievement'),
'properties': relationship(AchievementAchievementProperty, backref='achievement'),
'rewards': relationship(AchievementReward, backref='achievement'),
'achievementcategory': relationship(AchievementCategory, backref='achievements'),
'player_subjecttype': relationship(SubjectType, primaryjoin=t_achievements.c.player_subjecttype_id == t_subjecttypes.c.id),
'context_subjecttype': relationship(SubjectType, primaryjoin=t_achievements.c.context_subjecttype_id == t_subjecttypes.c.id),
'compared_subjecttypes': relationship(SubjectType,
secondary=t_achievement_compared_subjecttypes,
primaryjoin=t_achievements.c.id == t_achievement_compared_subjecttypes.c.achievement_id,
secondaryjoin=t_subjecttypes.c.id == t_achievement_compared_subjecttypes.c.subjecttype_id
),
'domain_subjects': relationship(Subject,
secondary=t_achievement_domain_subjects,
primaryjoin=t_achievements.c.id == t_achievement_domain_subjects.c.achievement_id,
secondaryjoin=t_subjects.c.id == t_achievement_domain_subjects.c.subject_id
),
})
mapper(AchievementProperty, t_achievementproperties)
mapper(AchievementAchievementProperty, t_achievements_achievementproperties, properties={
'property' : relationship(AchievementProperty, backref='achievements'),
'value_translation' : relationship(TranslationVariable)
})
mapper(Reward, t_rewards, properties={
'rewarded_subjecttype': relationship(SubjectType)
})
mapper(AchievementReward, t_achievements_rewards, properties={
'reward' : relationship(Reward, backref='achievements'),
'value_translation' : relationship(TranslationVariable)
})
mapper(Evaluation, t_evaluations, properties={
'subject': relationship(Subject, primaryjoin=t_evaluations.c.subject_id == t_subjects.c.id),
'context_subject': relationship(Subject, primaryjoin=t_evaluations.c.context_subject_id == t_subjects.c.id),
'achievement': relationship(Achievement)
})
mapper(Progress, t_progress, properties={
'subject': relationship(Subject),
'achievement': relationship(Achievement)
})
mapper(AchievementTrigger, t_achievement_triggers, properties={
'achievement': relationship(Achievement, backref="triggers"),
})
mapper(AchievementTriggerStep, t_achievement_trigger_steps, properties={
'trigger': relationship(AchievementTrigger, backref="steps"),
'action_translation': relationship(TranslationVariable),
'action_subjecttype': relationship(SubjectType),
'action_variable': relationship(Variable),
})
mapper(Language, t_languages)
mapper(TranslationVariable, t_translationvariables)
mapper(Translation, t_translations, properties={
'language': relationship(Language),
'translationvariable': relationship(TranslationVariable, backref="translations"),
})
mapper(SubjectMessage, t_subject_messages, properties = {
'subject': relationship(Subject, backref="subject_messages"),
'translationvariable': relationship(TranslationVariable),
})
mapper(Task, t_tasks, properties={
})
mapper(TaskExecution, t_taskexecutions, properties={
'task': relationship(Task, backref="executions"),
})
#@event.listens_for(AchievementProperty, "after_insert")
#@event.listens_for(AchievementProperty, 'after_update')
#def insert_variable_for_property(mapper,connection,target):
# """when setting is_variable on a :class:`AchievementProperty` a variable is automatically created"""
# if target.is_variable and not exists_by_expr(t_variables, t_variables.c.name==target.name):
# variable = Variable()
# variable.name = target.name
# variable.group = "day"
# DBSession.add(variable)
|
import asyncio
import os
import aiohttp
from prometheus_client import (
CollectorRegistry,
generate_latest,
)
from .app_outgoing_elasticsearch import (
ESMetricsUnavailable,
es_bulk_ingest,
es_feed_activities_total,
es_searchable_total,
es_nonsearchable_total,
create_activities_index,
create_objects_index,
get_new_index_names,
get_old_index_names,
split_index_names,
indexes_matching_feeds,
indexes_matching_no_feeds,
add_remove_aliases_atomically,
delete_indexes,
refresh_index,
)
from .dns import (
AioHttpDnsResolver,
)
from .elasticsearch import (
es_min_verification_age,
)
from .feeds import (
parse_feed_config,
)
from .logger import (
get_root_logger,
logged,
)
from .metrics import (
metric_counter,
metric_inprogress,
metric_timer,
get_metrics,
)
from .raven import (
get_raven_client,
)
from .redis import (
redis_get_client,
)
from .app_outgoing_redis import (
acquire_and_keep_lock,
set_feed_updates_seed_url_init,
set_feed_updates_seed_url,
set_feed_updates_url,
get_feed_updates_url,
redis_set_metrics,
set_feed_status,
)
from .app_outgoing_utils import (
repeat_until_cancelled,
)
from .utils import (
Context,
cancel_non_current_tasks,
get_child_context,
get_common_config,
main,
normalise_environment,
sleep,
)
from . import settings
EXCEPTION_INTERVALS = [1, 2, 4, 8, 16, 32, 64]
METRICS_INTERVAL = 1
UPDATES_INTERVAL = 1
async def run_outgoing_application():
"""Indefinitely poll paginated feeds and ingest them into Elasticsearch
As part of startup:
- Read environment variables that specify what feeds to poll, with any
authentication credentials
- Create HTTPS and Redis connection pools
- Create a raven client for reporting errors to Sentry
- Create a logging "context" that allows child contexts to be created
from, which allow the same function to log output slightly differently
when run from different tasks
- Create the metrics registry in which functions throught the application
store metrics. This registry which is then periodically exported from by
the metrics application, also started here, into Redis.
Exceptions are raised if any of the above fails in order to fail
blue/green deployments. Other error cases are swallowed and retried after
intervals in EXCEPTION_INTERVALS.
A lock is acquired before any connections to feeds or Elasticsearch to
prevent conflicts with the existing version of the outgoing application
during blue/green deployment.
Once the above is done, a task that performs the polling and ingest is
created.
A cleanup function is returned that is expected to be called just before
the application is shut down, to give every chance for operations to
shut down cleanly.
"""
logger = get_root_logger('outgoing')
with logged(logger.debug, logger.error, 'Examining environment', []):
env = normalise_environment(os.environ)
es_uri, es_version, es_aws_access_key_id, es_aws_secret_access_key, es_aws_region, \
redis_uri, sentry = get_common_config(env)
feeds = [parse_feed_config(feed) for feed in env['FEEDS']]
settings.ES_URI = es_uri
settings.ES_VERSION = es_version
settings.ES_AWS_ACCESS_KEY_ID = es_aws_access_key_id
settings.ES_AWS_SECRET_ACCESS_KEY = es_aws_secret_access_key
settings.ES_AWS_REGION = es_aws_region
metrics_registry = CollectorRegistry()
metrics = get_metrics(metrics_registry)
conn = aiohttp.TCPConnector(limit_per_host=10, use_dns_cache=False,
resolver=AioHttpDnsResolver(metrics))
session = aiohttp.ClientSession(
connector=conn,
headers={'Accept-Encoding': 'identity;q=1.0, *;q=0'},
timeout=aiohttp.ClientTimeout(
total=60.0,
),
)
redis_client = await redis_get_client(redis_uri)
raven_client = get_raven_client(sentry, session, metrics)
context = Context(
logger=logger, metrics=metrics,
raven_client=raven_client, redis_client=redis_client, session=session,
es_semaphore=asyncio.Semaphore(value=1),
)
await acquire_and_keep_lock(context, EXCEPTION_INTERVALS, 'lock')
await create_outgoing_application(context, feeds)
await create_metrics_application(
context, metrics_registry, feeds,
)
async def cleanup():
await cancel_non_current_tasks()
redis_client.close()
await redis_client.wait_closed()
await session.close()
# https://github.com/aio-libs/aiohttp/issues/1925
await asyncio.sleep(0.250)
return cleanup
async def create_outgoing_application(context, feeds):
"""Create a task that polls feeds and ingests them into Elasticsearch
This task is repeated in case there is some issue at the beginning of
`ingest_feeds` that causes an exception to be raised. There is an argument
that it is better to bubble such exceptions in order to fail deployments,
but this is not yet decided.
"""
asyncio.get_event_loop().create_task(
repeat_until_cancelled(
context, EXCEPTION_INTERVALS,
to_repeat=ingest_feeds, to_repeat_args=(context, feeds),
)
)
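# Minimal sketch of the retry behaviour this module assumes from
# app_outgoing_utils.repeat_until_cancelled (the body below is illustrative,
# not the real implementation; min_duration handling is elided):
#
#     async def repeat_until_cancelled(context, exception_intervals, to_repeat,
#                                      to_repeat_args=(), min_duration=0):
#         num_exceptions = 0
#         while True:
#             try:
#                 await to_repeat(*to_repeat_args)
#                 num_exceptions = 0
#             except asyncio.CancelledError:
#                 raise
#             except BaseException:
#                 index = min(num_exceptions, len(exception_intervals) - 1)
#                 num_exceptions += 1
#                 await asyncio.sleep(exception_intervals[index])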
async def ingest_feeds(context, feeds):
"""Create tasks that poll feeds and ingest them into Elasticsearch
This deletes any unused indexes, for example for feeds that used to be
configured. It then repeats the "full" and "updates" ingest cycles for
all feeds until cancellation, which is expected to only be just before the
application closes down.
Two tasks are created for each feed, a "full" task for the full ingest
and an "updates" task for the updates ingest.
"""
all_feed_ids = [feed.unique_id for feed in feeds]
indexes_without_alias, indexes_with_alias = await get_old_index_names(context)
indexes_to_delete = indexes_matching_no_feeds(
indexes_without_alias + indexes_with_alias, all_feed_ids)
await delete_indexes(
get_child_context(context, 'initial-delete'), indexes_to_delete,
)
# Some of the full ingests run in seconds, and there is concern about creating/deleting
# indexes so frequently: ES reports memory leaks in some versions.
# Using a minimum duration rather than a sleep after each full ingest keeps the ingests
# as homogeneous as possible with respect to time
await asyncio.gather(*[
repeat_until_cancelled(
context, feed.exception_intervals,
to_repeat=ingest_func, to_repeat_args=(
context, feed), min_duration=min_duration
)
for feed in feeds
for (ingest_func, min_duration) in (
(ingest_full, feed.full_ingest_interval), (ingest_updates, 0)
)
])
async def ingest_full(parent_context, feed):
"""Perform a single "full" ingest cycle of a paginated source feed
Starting at feed.seed, iteratively request all pages from the feed, and
ingest each into Elasticsearch. Indexes are created specifically for this
ingest cycle, with unused indexes deleted.
At the end of the cycle the `activities` and `objects` index aliases
are flipped so that they now alias the indexes created and ingested into
in this cycle, i.e. made visible to clients of the incoming app that only
query the `activities` and `objects` aliases.
This is a "partial" flip of the aliases, since they continue to also alias
indexes from _other_ feeds. This design allows the presentation of single
`activities` and `objects` indexes to clients, but also allows the ingest
cycle of feeds to fail without affecting the ingest of other feeds.
Unused indexes are deleted at the beginning of a cycle rather than the
end, to always clean up after any unexpected shutdown _before_ ingesting
more data.
The primary purpose of always ingesting into new indexes and then flipping
aliases is to allow for hard-deletion without any explicit "deletion" code
path. It also allows for data format changes/corrections.
"""
context = get_child_context(parent_context, f'{feed.unique_id},full')
metrics = context.metrics
with \
logged(context.logger.info, context.logger.warning, 'Full ingest', []), \
metric_timer(metrics['ingest_feed_duration_seconds'], [feed.unique_id, 'full']), \
metric_inprogress(metrics['ingest_inprogress_ingests_total']):
await set_feed_updates_seed_url_init(context, feed.unique_id)
indexes_without_alias, _ = await get_old_index_names(context)
indexes_to_delete = indexes_matching_feeds(
indexes_without_alias, [feed.unique_id])
await delete_indexes(context, indexes_to_delete)
activities_index_name, objects_index_name = get_new_index_names(feed.unique_id)
await create_activities_index(context, activities_index_name)
await create_objects_index(context, objects_index_name)
updates_href = feed.seed
async for page_of_activities, href in feed.pages(context, feed, feed.seed, 'full'):
updates_href = href
await ingest_page(
context, page_of_activities, 'full', feed, [
activities_index_name], [objects_index_name]
)
await sleep(context, feed.full_ingest_page_interval)
await refresh_index(context, activities_index_name, feed.unique_id, 'full')
await refresh_index(context, objects_index_name, feed.unique_id, 'full')
await add_remove_aliases_atomically(context, activities_index_name,
objects_index_name, feed.unique_id)
await set_feed_updates_seed_url(context, feed.unique_id, updates_href)
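# Illustrative alias state after a full ingest of feed "feed-a" (the index
# names here are assumptions, not the real naming scheme):
#   activities -> {activities__feed-a__<new>, activities__feed-b__<existing>}
#   objects    -> {objects__feed-a__<new>,    objects__feed-b__<existing>}
# Only this feed's indexes are swapped into the aliases; indexes of other
# feeds remain aliased untouched.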
async def ingest_updates(parent_context, feed):
"""Perform a single "updates" ingest cycle of a paginated source feed
Poll the last page from the last completed "full" ingest and ingest it
into Elasticsearch.
This past page is paginated: if it has a next page, it too is fetched and
ingested from. This is repeated until a page _without_ a next page is
reached, which is then made the target of the polling.
Data is ingested into two sets of indexes. 1) The indexes that are aliased
to `activities` and `objects`, so they are immediately visible to clients
of the incoming app. 2) The target of the current "full" ingest. This is
to avoid the race condition:
- An activity has been ingested and visible in the incoming app
- The last page of data has been fetched during the "full" ingest, but
the alias flip has not yet occurred.
- A change is made to the activity, and the "updates" ingest picks it up,
so the changed version is visible in the incoming app
- The alias flip from the full ingest is performed, and the pre-change
version of the activity is visible in the incoming app.
This would "correct" on the next full ingest, but it has been deemed
strange and unexpected enough to ensure it doesn't happen.
"""
context = get_child_context(parent_context, f'{feed.unique_id},updates')
metrics = context.metrics
with \
logged(context.logger.debug, context.logger.warning, 'Updates ingest', []), \
metric_timer(metrics['ingest_feed_duration_seconds'], [feed.unique_id, 'updates']):
href = await get_feed_updates_url(context, feed.unique_id)
updates_href = None
if not feed.disable_updates:
indexes_without_alias, indexes_with_alias = await get_old_index_names(context)
# We deliberately ingest into both the live and ingesting indexes
indexes_to_ingest_into = indexes_matching_feeds(
indexes_without_alias + indexes_with_alias, [feed.unique_id])
activities_index_names, objects_index_names = split_index_names(indexes_to_ingest_into)
async for page_of_activities, href in feed.pages(context, feed, href, 'updates'):
updates_href = href
await ingest_page(
context, page_of_activities, 'updates', feed,
activities_index_names, objects_index_names,
)
if updates_href is not None:
for index_name in indexes_matching_feeds(indexes_with_alias, [feed.unique_id]):
await refresh_index(context, index_name, feed.unique_id, 'updates')
await set_feed_updates_url(context, feed.unique_id, updates_href)
await sleep(context, feed.updates_page_interval)
async def create_metrics_application(parent_context, metrics_registry, feeds):
"""Creates a task that supplies metrics to the incoming application
Every METRICS_INTERVAL seconds the metrics are exported to Redis, so that
they are available to the incoming app, at an endpoint which is queried
by Prometheus and then used in Grafana. This is slightly awkward, but no
better way has been thought of.
"""
context = get_child_context(parent_context, 'metrics')
metrics = context.metrics
async def poll_metrics():
with logged(context.logger.debug, context.logger.warning, 'Polling', []):
searchable = await es_searchable_total(context)
metrics['elasticsearch_activities_total'].labels(
'searchable').set(searchable)
await set_metric_if_can(
metrics['elasticsearch_activities_total'],
['nonsearchable'],
es_nonsearchable_total(context),
)
await set_metric_if_can(
metrics['elasticsearch_activities_age_minimum_seconds'],
['verification'],
es_min_verification_age(context),
)
feed_ids = [feed.unique_id for feed in feeds]
for feed_id in feed_ids:
try:
searchable, nonsearchable = await es_feed_activities_total(
context, feed_id)
metrics['elasticsearch_feed_activities_total'].labels(
feed_id, 'searchable').set(searchable)
metrics['elasticsearch_feed_activities_total'].labels(
feed_id, 'nonsearchable').set(nonsearchable)
except ESMetricsUnavailable:
pass
await redis_set_metrics(context, generate_latest(metrics_registry))
await sleep(context, METRICS_INTERVAL)
asyncio.get_event_loop().create_task(
repeat_until_cancelled(
context, EXCEPTION_INTERVALS, to_repeat=poll_metrics)
)
async def ingest_page(context, activities, ingest_type, feed, activity_index_names,
objects_index_names):
"""
Ingest a page of activities into Elasticsearch by calling es_bulk_ingest
"""
with \
logged(context.logger.debug, context.logger.warning, 'Polling/pushing page', []), \
metric_timer(context.metrics['ingest_page_duration_seconds'],
[feed.unique_id, ingest_type, 'total']):
num_es_documents = len(
activities) * (len(activity_index_names) + len(objects_index_names))
with \
metric_timer(context.metrics['ingest_page_duration_seconds'],
[feed.unique_id, ingest_type, 'push']), \
metric_counter(context.metrics['ingest_activities_nonunique_total'],
[feed.unique_id, ingest_type], num_es_documents):
await es_bulk_ingest(context, activities, activity_index_names, objects_index_names)
asyncio.ensure_future(set_feed_status(
context, feed.unique_id, feed.down_grace_period, b'GREEN'))
async def set_metric_if_can(metric, labels, get_value_coroutine):
try:
metric.labels(*labels).set(await get_value_coroutine)
except ESMetricsUnavailable:
pass
if __name__ == '__main__':
main(run_outgoing_application)
|
<reponame>Reclusive-Trader/upbit-client
# coding: utf-8
"""
Upbit Open API
## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [<EMAIL>] # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DepositApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def deposit_coin_address(self, currency, **kwargs): # noqa: E501
"""개별 입금 주소 조회 # noqa: E501
## 개별 입금 주소 조회 **NOTE**: 입금 주소 조회 요청 API 유의사항 입금 주소 생성 요청 이후 아직 발급되지 않은 상태일 경우 deposit_address가 null일 수 있습니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_coin_address(currency, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency symbol (required)
:return: DepositCompleteResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deposit_coin_address_with_http_info(currency, **kwargs) # noqa: E501
else:
(data) = self.deposit_coin_address_with_http_info(currency, **kwargs) # noqa: E501
return data
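# Usage sketch (credentials and client configuration are assumed to be set up
# elsewhere on the ApiClient):
#   api = DepositApi()
#   addr = api.deposit_coin_address('BTC')                   # synchronous
#   thread = api.deposit_coin_address('BTC', async_req=True)
#   addr = thread.get()                                      # asynchronous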
def deposit_coin_address_with_http_info(self, currency, **kwargs): # noqa: E501
"""개별 입금 주소 조회 # noqa: E501
## 개별 입금 주소 조회 **NOTE**: 입금 주소 조회 요청 API 유의사항 입금 주소 생성 요청 이후 아직 발급되지 않은 상태일 경우 deposit_address가 null일 수 있습니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_coin_address_with_http_info(currency, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency symbol (required)
:return: DepositCompleteResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['currency'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deposit_coin_address" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'currency' is set
if ('currency' not in params or
params['currency'] is None):
raise ValueError("Missing the required parameter `currency` when calling `deposit_coin_address`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/deposits/coin_address', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DepositCompleteResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deposit_coin_addresses(self, **kwargs): # noqa: E501
"""전체 입금 주소 조회 # noqa: E501
## 내가 보유한 자산 리스트를 보여줍니다. **NOTE**: 입금 주소 조회 요청 API 유의사항 입금 주소 생성 요청 이후 아직 발급되지 않은 상태일 경우 deposit_address가 null일 수 있습니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_coin_addresses(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deposit_coin_addresses_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.deposit_coin_addresses_with_http_info(**kwargs) # noqa: E501
return data
def deposit_coin_addresses_with_http_info(self, **kwargs): # noqa: E501
"""전체 입금 주소 조회 # noqa: E501
## 내가 보유한 자산 리스트를 보여줍니다. **NOTE**: 입금 주소 조회 요청 API 유의사항 입금 주소 생성 요청 이후 아직 발급되지 않은 상태일 경우 deposit_address가 null일 수 있습니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_coin_addresses_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deposit_coin_addresses" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/deposits/coin_addresses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deposit_generate_coin_address(self, currency, **kwargs): # noqa: E501
"""입금 주소 생성 요청 # noqa: E501
입금 주소 생성을 요청한다. **NOTE**: 입금 주소 생성 요청 API 유의사항 입금 주소의 생성은 서버에서 비동기적으로 이뤄집니다. 비동기적 생성 특성상 요청과 동시에 입금 주소가 발급되지 않을 수 있습니다. 주소 발급 요청 시 결과로 Response1이 반환되며 주소 발급 완료 이전까지 계속 Response1이 반환됩니다. 주소가 발급된 이후부터는 새로운 주소가 발급되는 것이 아닌 이전에 발급된 주소가 Response2 형태로 반환됩니다. 정상적으로 주소가 생성되지 않는다면 일정 시간 이후 해당 API를 다시 호출해주시길 부탁드립니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_generate_coin_address(currency, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency code (required)
:return: DepositCompleteResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deposit_generate_coin_address_with_http_info(currency, **kwargs) # noqa: E501
else:
(data) = self.deposit_generate_coin_address_with_http_info(currency, **kwargs) # noqa: E501
return data
def deposit_generate_coin_address_with_http_info(self, currency, **kwargs): # noqa: E501
"""입금 주소 생성 요청 # noqa: E501
입금 주소 생성을 요청한다. **NOTE**: 입금 주소 생성 요청 API 유의사항 입금 주소의 생성은 서버에서 비동기적으로 이뤄집니다. 비동기적 생성 특성상 요청과 동시에 입금 주소가 발급되지 않을 수 있습니다. 주소 발급 요청 시 결과로 Response1이 반환되며 주소 발급 완료 이전까지 계속 Response1이 반환됩니다. 주소가 발급된 이후부터는 새로운 주소가 발급되는 것이 아닌 이전에 발급된 주소가 Response2 형태로 반환됩니다. 정상적으로 주소가 생성되지 않는다면 일정 시간 이후 해당 API를 다시 호출해주시길 부탁드립니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_generate_coin_address_with_http_info(currency, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency code (required)
:return: DepositCompleteResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['currency'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deposit_generate_coin_address" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'currency' is set
if ('currency' not in params or
params['currency'] is None):
raise ValueError("Missing the required parameter `currency` when calling `deposit_generate_coin_address`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'currency' in params:
form_params.append(('currency', params['currency'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/deposits/generate_coin_address', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DepositCompleteResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deposit_info(self, **kwargs): # noqa: E501
"""개별 입금 조회 # noqa: E501
## 개별 입금 조회 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str uuid: Deposit UUID
:param str txid: Deposit TXID
:param str currency: Currency code
:return: Deposit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deposit_info_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.deposit_info_with_http_info(**kwargs) # noqa: E501
return data
def deposit_info_with_http_info(self, **kwargs): # noqa: E501
"""개별 입금 조회 # noqa: E501
## 개별 입금 조회 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_info_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str uuid: Deposit UUID
:param str txid: Deposit TXID
:param str currency: Currency code
:return: Deposit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['uuid', 'txid', 'currency'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deposit_info" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'uuid' in params:
query_params.append(('uuid', params['uuid'])) # noqa: E501
if 'txid' in params:
query_params.append(('txid', params['txid'])) # noqa: E501
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/deposit', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Deposit', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deposit_info_all(self, **kwargs): # noqa: E501
"""입금 리스트 조회 # noqa: E501
## 입금 리스트 조회 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_info_all(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency code
:param str state: Deposit state - submitting : processing - submitted : processed - almost_accepted : awaiting deposit - rejected : rejected - accepted : accepted - processing : processing
:param list[str] uuids: List of deposit UUIDs
:param list[str] txids: List of deposit TXIDs
:param float limit: Limit on the number of results (default: 100, max: 100)
:param float page: Page number, default: 1
:param str order_by: Sort order - asc : ascending - desc : descending (default)
:return: list[Deposit]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deposit_info_all_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.deposit_info_all_with_http_info(**kwargs) # noqa: E501
return data
def deposit_info_all_with_http_info(self, **kwargs): # noqa: E501
"""입금 리스트 조회 # noqa: E501
## 입금 리스트 조회 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deposit_info_all_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency code
:param str state: Deposit state - submitting : processing - submitted : processed - almost_accepted : awaiting deposit - rejected : rejected - accepted : accepted - processing : processing
:param list[str] uuids: List of deposit UUIDs
:param list[str] txids: List of deposit TXIDs
:param float limit: Limit on the number of results (default: 100, max: 100)
:param float page: Page number, default: 1
:param str order_by: Sort order - asc : ascending - desc : descending (default)
:return: list[Deposit]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['currency', 'state', 'uuids', 'txids', 'limit', 'page', 'order_by'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deposit_info_all" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
if 'state' in params:
query_params.append(('state', params['state'])) # noqa: E501
if 'uuids' in params:
query_params.append(('uuids', params['uuids'])) # noqa: E501
collection_formats['uuids'] = 'multi' # noqa: E501
if 'txids' in params:
query_params.append(('txids', params['txids'])) # noqa: E501
collection_formats['txids'] = 'multi' # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/deposits', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Deposit]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ops_facts
version_added: "2.1"
author: "<NAME> (@privateip)"
short_description: Collect device specific facts from OpenSwitch
description:
- Collects facts from devices running the OpenSwitch operating
system. Fact collection is supported over both Cli and Rest
transports. This module prepends all of the base network fact keys
with C(ansible_net_<fact>). The facts module will always collect a
base set of facts from the device and can enable or disable
collection of additional facts.
- The facts collected from pre Ansible 2.2 are still available and
are collected for backwards compatibility; however, these facts
should be considered deprecated and will be removed in a future
release.
extends_documentation_fragment: openswitch
options:
config:
description:
- When enabled, this argument will collect the current
running configuration from the remote device. If the
C(transport=rest) then the collected configuration will
be the full system configuration.
required: false
choices:
- true
- false
default: false
endpoints:
description:
- Accepts a list of endpoints to retrieve from the remote
device using the REST API. The endpoints should be valid
endpoints available on the device. This argument is only
valid when the C(transport=rest).
required: false
default: null
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, legacy, and interfaces. Can specify a
list of values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: netop
password: <PASSWORD>
transport: cli
rest:
host: "{{ inventory_hostname }}"
username: netop
password: <PASSWORD>
transport: rest
---
- ops_facts:
gather_subset: all
provider: "{{ rest }}"
# Collect only the config and default facts
- ops_facts:
gather_subset: config
provider: "{{ cli }}"
# Do not collect config facts
- ops_facts:
gather_subset:
- "!config"
provider: "{{ cli }}"
- name: collect device facts
ops_facts:
provider: "{{ cli }}"
- name: include the config
ops_facts:
config: yes
provider: "{{ rest }}"
- name: include a set of rest endpoints
ops_facts:
endpoints:
- /system/interfaces/1
- /system/interfaces/2
provider: "{{ rest }}"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: when transport is cli
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: when transport is cli
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: when transport is cli
type: string
# config
ansible_net_config:
description: The current active config from the device
returned: when config is enabled
type: str
# legacy (pre Ansible 2.2)
config:
description: The current system configuration
returned: when enabled
type: string
sample: '....'
hostname:
description: returns the configured hostname
returned: always
type: string
sample: ops01
version:
description: The current version of OpenSwitch
returned: always
type: string
sample: '0.3.0'
endpoints:
description: The JSON response from the URL endpoint
returned: when endpoints argument is defined and transport is rest
type: list
sample: [{....}, {....}]
"""
import re
import ansible.module_utils.openswitch
from ansible.module_utils.netcli import CommandRunner, AddCommandError
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.six import iteritems
def add_command(runner, command):
try:
runner.add_command(command)
except AddCommandError:
# AddCommandError is raised for any issue adding a command to
# the runner. Silently ignore the exception in this case
pass
class FactsBase(object):
def __init__(self, module, runner):
self.module = module
self.transport = module.params['transport']
self.runner = runner
self.facts = dict()
if self.transport == 'cli':
self.commands()
def commands(self):
raise NotImplementedError
def populate(self):
getattr(self, self.transport)()
def cli(self):
pass
def rest(self):
pass
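# Subclasses register CLI commands in commands() and implement cli()/rest();
# populate() dispatches to the method named after the active transport,
# e.g. transport='rest' calls self.rest().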
class Default(FactsBase):
def commands(self):
add_command(self.runner, 'show system')
add_command(self.runner, 'show hostname')
def rest(self):
self.facts.update(self.get_system())
def cli(self):
data = self.runner.get_command('show system')
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['image'] = self.parse_image(data)
self.facts['hostname'] = self.runner.get_command('show hostname')
def parse_version(self, data):
match = re.search(r'OpenSwitch Version\s+: (\S+)', data)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'Platform\s+:\s(\S+)', data, re.M)
if match:
return match.group(1)
def parse_image(self, data):
match = re.search(r'\(Build: (\S+)\)', data, re.M)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'Serial Number\s+: (\S+)', data)
if match:
return match.group(1)
def get_system(self):
response = self.module.connection.get('/system')
return dict(
hostname=response.json['configuration']['hostname'],
version=response.json['status']['switch_version']
)
class Config(FactsBase):
def commands(self):
add_command(self.runner, 'show running-config')
def cli(self):
self.facts['config'] = self.runner.get_command('show running-config')
class Legacy(FactsBase):
# facts from ops_facts 2.1
def commands(self):
add_command(self.runner, 'show system')
add_command(self.runner, 'show hostname')
if self.module.params['config']:
add_command(self.runner, 'show running-config')
def rest(self):
self.facts['_endpoints'] = self.get_endpoints()
self.facts.update(self.get_system())
if self.module.params['config']:
self.facts['_config'] = self.get_config()
def cli(self):
self.facts['_hostname'] = self.runner.get_command('show hostname')
data = self.runner.get_command('show system')
self.facts['_version'] = self.parse_version(data)
if self.module.params['config']:
self.facts['_config'] = self.runner.get_command('show running-config')
def parse_version(self, data):
match = re.search(r'OpenSwitch Version\s+: (\S+)', data)
if match:
return match.group(1)
def get_endpoints(self):
responses = list()
urls = self.module.params['endpoints'] or list()
for ep in urls:
response = self.module.connection.get(ep)
if response.headers['status'] != 200:
self.module.fail_json(msg=response.headers['msg'])
responses.append(response.json)
return responses
def get_system(self):
response = self.module.connection.get('/system')
return dict(
_hostname=response.json['configuration']['hostname'],
_version=response.json['status']['switch_version']
)
def get_config(self):
response = self.module.connection.get('/system/full-configuration')
return response.json
def check_args(module, warnings):
if module.params['transport'] != 'rest' and module.params['endpoints']:
warnings.append('Endpoints can only be collected when transport is '
'set to "rest". Endpoints will not be collected')
FACT_SUBSETS = dict(
default=Default,
config=Config,
legacy=Legacy
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list'),
# the next two arguments are legacy from pre 2.2 ops_facts
# these will be deprecated and ultimately removed
config=dict(default=False, type='bool'),
endpoints=dict(type='list'),
transport=dict(default='cli', choices=['cli', 'rest'])
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
warnings = list()
check_args(module, warnings)
runable_subsets = set()
exclude_subsets = set()
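# resolve the requested subsets: an entry may name a subset ('config'),
# negate one ('!config'), or select 'all'; the default ['!config']
# gathers everything except the running configuration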
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
runable_subsets.add('legacy')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
runner = CommandRunner(module)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module, runner))
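# constructing each subset class queued its CLI commands with the runner;
# run them in a single batch before asking each instance to populate facts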
if module.params['transport'] == 'cli':
runner.run()
try:
for inst in instances:
inst.populate()
facts.update(inst.facts)
except Exception:
module.exit_json(out=module.from_json(runner.items))
ansible_facts = dict()
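# legacy facts carry a leading underscore and keep their bare name for
# backwards compatibility; all other facts get the ansible_net_ prefix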
for key, value in iteritems(facts):
# this is to maintain compatibility with ops_facts 2.1
if key.startswith('_'):
ansible_facts[key[1:]] = value
else:
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
<reponame>mshonichev/example_pkg<gh_stars>10-100
#!/usr/bin/env python3
#
# Copyright 2017-2020 GridGain Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ignitenodesmixin import IgniteNodesMixin
from ....util import print_green, print_red
from time import time
class IgniteControlThreadMixin(IgniteNodesMixin):
"""
Provides callbacks for GeneralGridTestcase.run_console_thread
"""
def __init__(self, *args, **kwargs):
# print('IgniteControlThreadMixin.__init__')
super().__init__(*args, **kwargs)
def make_cluster_thread(self):
alive_nodes = self.get_alive_additional_nodes() + self.get_alive_default_nodes()
print_green('making jstack on alive nodes (%s)' % alive_nodes)
for node_idx in alive_nodes:
if 'PID' in self.nodes[node_idx]:
path_to_jstack = self.nodes[node_idx]['log'].replace('.log', '-`date +%d.%m.%Y-%H.%M.%S`.jstack')
try:
self.ssh.exec_on_host(
self.nodes[node_idx]['host'], [
'jstack -l %s > %s &' % (self.nodes[node_idx]['PID'], path_to_jstack)
]
)
except Exception as e:
print_red('Error making jstack on node %s: %s' % (node_idx, str(e)))
def make_cluster_jfr(self, duration, settings=None):
alive_nodes = self.get_alive_additional_nodes() + self.get_alive_default_nodes()
print_green('making jfr on alive nodes (%s), duration = %s' % (alive_nodes, duration))
if not settings:
if 'jfr_cfg' in self.config['artifacts'].keys():
settings = self.config['remote']['suite_var_dir'] + '/jfr_cfg/gridgain.jfc'
else:
settings = 'profile'
for node_idx in alive_nodes:
if 'PID' in self.nodes[node_idx]:
path_to_jfr = self.nodes[node_idx]['log'].replace('.log', '-$date_jfr.jfr')
export = 'export date_jfr=`date +%d.%m.%Y-%H.%M.%S`'
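# export the timestamp once so the echoed value and the .jfr filename
# passed to jcmd refer to the same moment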
try:
self.ssh.exec_on_host(
self.nodes[node_idx]['host'], [
'%s;echo $date_jfr; jcmd %s JFR.start duration=%ss filename=%s settings=%s &'
% (export, self.nodes[node_idx]['PID'], duration, path_to_jfr, settings)
]
)
except Exception as e:
print_red('Error making jfr on node %s: %s' % (node_idx, str(e)))
def make_cluster_heapdump(self, nodes=None, tag='test'):
if nodes is None:
nodes = self.get_all_alive_nodes()
print_green('making heapdump on nodes (%s)' % nodes)
for node_idx in nodes:
if 'PID' in self.nodes[node_idx]:
path_to_heapdump = self.nodes[node_idx]['log'].replace(
'.log',
'-heapdump-{time}-{pid}-{tag}.hprof'.format(
pid=self.nodes[node_idx]['PID'],
time=time(),
tag=tag,
)
)
try:
self.ssh.exec_on_host(self.nodes[node_idx]['host'], [
'jmap -dump:format=b,file={path_to_heapdump} {pid}'.format(
path_to_heapdump=path_to_heapdump,
pid=self.nodes[node_idx]['PID'],
)
])
except Exception as e:
print_red('Error making heapdump on node %s: %s' % (node_idx, str(e)))
|
import os
import re
import time
import tempfile
import traceback
import smtplib
try:
from email.mime.text import MIMEText
except ImportError:
from email.MIMEText import MIMEText
import Atlassian
import utility
import grapeGit as git
import grapeMenu
import grapeConfig
import resumable
import stashy.stashy.errors as stashyErrors
class PublishStepFailed(Exception):
def __init__(self, stepName):
assert isinstance(stepName, str)
self.stepName = stepName
class Publish(resumable.Resumable):
"""
grape publish
Merges / squash-merges / rebases the current topic branch <type>/<username>/<descr> into the public branch <public>,
where <public> is read from one of the <type>:<public> pairs found in .grapeconfig.flow.topicPrefixMappings,
.grapeconfig.flow.topicDestinationMappings, and/or .grapeconfig.workspace.submoduleTopicPrefixMappings. The
branch-dependent publish policy (merge vs. squash merge vs. rebase, etc.) is decided using
grapeconfig.flow.publishPolicy for the top-level repo and the publish policy for
submodules is decided using grapeconfig.workspace.submodulePublishPolicy.
Usage: grape-publish [--squash [--cascade=<branch>... ] | --merge | --rebase]
[-m <msg>]
[--recurse | --noRecurse]
[--public=<public> [--submodulePublic=<submodulePublic>]]
[--topic=<branch>]
[--noverify]
[--nopush]
[--pushSubtrees | --noPushSubtrees]
[--forcePushSubtree=<subtreeName>]...
[--startAt=<startStep>] [--stopAt=<stopStep>]
[--buildCmds=<buildStr>] [--buildDir=<path>]
[--testCmds=<testStr>] [--testDir=<path>]
[--prepublishCmds=<cmds>] [--prepublishDir=<path>]
[--postpublishCmds=<cmds>] [--postpublishDir=<path>]
[--noUpdateLog | [--updateLog=<file> --skipFirstLines=<int> --entryHeader=<string>]]
[--tickVersion=<bool> [-T <arg>]...]
[--tickOnCascade=<slot> ]
[--user=<BitbucketUserName>]
[--bitbucketURL=<httpsURL>]
[--verifySSL=<bool>]
[--project=<BitbucketProjectKey>]
[--repo=<BitbucketRepoName>]
[-R <arg>]...
[--noReview]
[--useBitbucket=<bool>]
[--deleteTopic=<bool>]
[--emailNotification=<bool> [--emailHeader=<str> --emailFooter=<str>
--emailSubject=<str> --emailSendTo=<addr> --emailServer=<smtpserver> --emailMaxFiles=<int>]]
[<CommitMessageFile>]
[--remoteMerge]
grape-publish --continue
grape-publish --abort
grape-publish --printSteps
grape-publish --quick -m <msg> [--user=<BitbucketUserName>] [--public=<public>] [--noReview] [--remoteMerge]
Options:
--squash Squash merges the topic into the public, then performs a commit if the merge goes clean.
--cascade=<branch> For squash merges, can choose to cascade back to <branch> after the merge is
completed. Define multiple times to setup a chain of cascades. Overrides outer repo and
nestedSubproject cascades defined in .grapeconfig publish policies. Does not override
submodule publish policies.
--merge Perform a normal merge.
-m <msg> The commit message to use for a successful merge / squash merge. Ignored if used with
--rebase.
--rebase Rebases the topic branch to the public, then fast forwards the public to the tip of the
topic.
--recurse Perform the publish action in submodules.
Defaults to True if .grapeconfig.workspace.manageSubmodules is True.
--noRecurse Do not perform the publish action in submodules.
Defaults to True if .grapeconfig.workspace.manageSubmodules is False.
--topic=<branch> The branch to publish. Defaults to the current branch.
--noverify Set to skip interactive verification of publish commands.
--nopush Set to skip the push of commits generated during the publish procedure.
--pushSubtrees Push subtrees to their respective remotes (.grapeconfig.subtree-<name>.remote) appropriate
public branches (.grapeconfig.subtree-<name>.topicPrefixMappings)
Set by default if .grapeconfig.subtrees.pushOnPublish is True.
--noPushSubtrees Don't perform a git subtree push.
--startAt=<startStep> The publish step to start at. One of "testForCleanWorkspace1", "md1",
"ensureModifiedSubmodulesAreActive", "verifyPublishActions", "ensureReview",
"verifyCompletedReview", "markInProgress", "md2", "tickVersion", "updateLog",
"build", "test", "testForCleanWorkspace2", "prePublish", "publish", "postPublish",
"tagVersion", "performCascades", "markAsDone", "notify", or "deleteTopic".
--stopAt=<stopStep> The publish step to stop at. Valid values are the same as for --startAt. Publish will
perform all steps from <startStep> (inclusive) to <stopStep> (exclusive).
--continue Resume a previous call to grape publish that encountered a failure at one of the publish
steps.
--abort Abort a previously failed call to grape publish.
--buildCmds=<buildStr> The comma-delimited list of build commands to execute.
[default: .grapeconfig.publish.buildCmds]
--buildDir=<path> The directory (relative to the workspace root directory) to execute the build steps in.
[default: .grapeconfig.publish.buildDir]
--testCmds=<testStr> The comma-delimited list of test commands to execute.
[default: .grapeconfig.publish.testCmds]
--testDir=<path> The directory (relative to the workspace root directory) to execute the test steps in.
[default: .grapeconfig.publish.testDir]
--prepublishCmds=<str> The comma-delimited list of commands to execute just before the publish step.
[default: .grapeconfig.publish.prepublishCmds]
--prepublishDir=<str> The directory (relative to the workspace root directory) to execute the pre-publish cmds in.
[default: .grapeconfig.publish.prepublishDir]
--postpublishCmds=<str> The comma-delimited list of commands to execute just after the publish step.
[default: .grapeconfig.publish.postpublishCmds]
--postpublishDir=<str> The directory (relative to the workspace root directory) to execute the post-publish
cmds in.
[default: .grapeconfig.publish.postpublishDir]
--deleteTopic=<bool> Offer to delete the topic branch when done. [default: .grapeconfig.publish.deleteTopic]
--noUpdateLog Set to skip the updateLog step.
--updateLog=<file> The log file to update with the commit message for this branch.
[default: .grapeconfig.publish.updateLog]
--skipFirstLines=<int> The number of lines to skip in the updateLog file before inserting the commit message.
[default: .grapeconfig.publish.logSkipFirstLines]
--entryHeader=<string> The format for the commit message header. The string literals <date>, <user>, <version>,
and <reviewers> will be replaced by the date, the result of git config --get user.name, the result
of git describe --abbrev=0 after the tickversion step, and the list of approving reviewers,
respectively.
[default: .grapeconfig.publish.logEntryHeader]
--tickVersion=<bool> Tick a version number as a part of this publish action.
[default: .grapeconfig.publish.tickVersion]
--tickOnCascade=<slot> Tick the <slot> version number when performing a cascade.
Default behavior governed by the flow.topicCascadeTick mapping.
-T <arg> An argument to pass to grape-version tick. Type grape version --help for available options
and defaults. -T can be used multiple times to pass multiple arguments.
--user=<user> Your Bitbucket username.
--bitbucketURL=<url> Your Bitbucket URL, e.g. https://rzlc.llnl.gov/bitbucket .
[default: .grapeconfig.project.stashURL]
--verifySSL=<bool> Set to False to ignore SSL certificate verification issues.
[default: .grapeconfig.project.verifySSL]
--project=<project> Your Bitbucket Project. See grape-review for more details.
[default: .grapeconfig.project.name]
--repo=<repo> Your Bitbucket repo. See grape-review for more details.
[default: .grapeconfig.repo.name]
-R <arg> Argument(s) to pass to grape-review, in addition to --title="**IN PROGRESS**:" --prepend.
Type grape review --help for valid options.
--noReview Don't perform any actions that interact with pull requests. Overrides --useBitbucket.
--useBitbucket=<bool> Whether or not to use pull requests. [default: .grapeconfig.publish.useStash]
--public=<public> The branch to publish to. Defaults to the mapping for the current topic branch as described
by .grapeconfig.flow.topicDestinationMappings. .grapeconfig.flow.topicPrefixMappings is used
if no option for .grapeconfig.flow.topicDestinationMappings exists.
--submodulePublic=<b> The branch to publish to in submodules. Defaults to the mapping for the current topic branch
as described by .grapeconfig.workspace.submoduleTopicPrefixMappings.
--emailNotification=<b> Set to true to send a notification email after you've published. The email will consist of
a header <header> and a message, generally the contents of <CommitMessageFile> and/or
the Pull Request description, followed by a footer <footer>. The email is sent to <addr>,
and will be CC'd to the user.
For the email subject, header and footer, the string literals
'<user>', '<date>', '<version>', and '<public>' will be replaced with the following:
<user>: the result of git config --get user.name
<date>: the current timestamp.
<version>: The version of the project, so long as grape is managing your versioning.
<public>: The branch to publish to.
[default: .grapeconfig.publish.emailNotification]
--emailHeader=<header> The email header. See above.
[default: .grapeconfig.publish.emailHeader]
--emailFooter=<footer> The email footer. See above.
[default: .grapeconfig.publish.emailFooter]
--emailSubject=<sbj> The email subject. See above.
[default: .grapeconfig.publish.emailSubject]
--emailSendTo=<addr> The comma-delimited list of receivers of the email.
[default: .grapeconfig.publish.emailSendTo]
--emailServer=<server> The smtp email server address.
[default: .grapeconfig.publish.emailServer]
--emailMaxFiles=<int> Maximum number of modified files (per subproject) to show in email.
[default: .grapeconfig.publish.emailMaxFiles]
--quick Perform the following steps only: md1, ensureModifiedSubmodulesAreActive, ensureReview,
verifyPublishActions, markInProgress, md2, publish, markAsDone, deleteTopic, done
--remoteMerge Perform the merge using the Bitbucket REST API.
Optional Arguments:
<CommitMessageFile> A file with an update message for this publish command. The pull request associated with
this branch will be updated to contain this message. If you don't specify a filename, grape
will give you the opportunity to use the contents of the pull request description as the update
message. Both the commit message for the merge and an update log will contain this message.
Additionally, if email notification is configured, the contents of the email will have
this message.
"""
def setDefaultConfig(self, config):
config.ensureSection("workspace")
config.ensureSection("flow")
config.ensureSection("subtrees")
config.ensureSection("publish")
# workspace defaults
config.set('workspace', 'manageSubmodules', 'True')
config.set('workspace', 'submoduleTopicPrefixMappings', '?:develop')
config.set('workspace', 'submodulePublishPolicy', '?:merge')
# publish policy defaults
config.set('flow', 'publishPolicy', '?:merge')
# subtree publish actions
config.set('subtrees', 'names', '')
config.set('subtrees', 'pushOnPublish', "False")
# build steps
config.set('publish', 'buildCmds', '')
config.set('publish', 'buildDir', '.')
# test steps
config.set('publish', 'testCmds', '')
config.set('publish', 'testDir', '.')
# prepublish steps
config.set('publish', 'prepublishCmds', '')
config.set('publish', 'prepublishDir', '.')
# postpublish steps
config.set('publish', 'postpublishCmds', '')
config.set('publish', 'postpublishDir', '.')
# tick the version?
config.set('publish', 'tickVersion', 'False')
# use Bitbucket for checking Pull Request status?
config.set('publish', 'useStash', 'True')
# delete when done
config.set('publish', 'deleteTopic', 'False')
# log file
config.set('publish', 'updateLog', '.grapepublishlog')
config.set('publish', 'logSkipFirstLines', '0')
config.set('publish', 'logEntryHeader', "<date> <user>\\n<version>\\n")
# email config
config.set('publish', 'emailNotification', 'False')
config.set('publish', 'emailHeader', '<public> updated to <version>')
config.set('publish', 'emailFooter', '')
config.set('publish', 'emailServer', 'smtp.email.server')
config.set('publish', 'emailSendTo', '<EMAIL>')
config.set('publish', 'emailSubject', '<public> updated to <version>')
config.set('publish', 'emailMaxFiles', '100')
# tick on cascade behavior
config.set("flow","topicCascadeTick","?:0")
def __init__(self):
super(Publish, self).__init__()
self._key = "publish"
self._section = "Gitflow Tasks"
self.branchPrefix = None
self.modifiedSubtrees = set()
self.st_prefixes = {}
self.st_remotes = {}
self.st_branches = {}
self.cascadeDict = {}
self.doDelete = {}
def description(self):
try:
current = git.currentBranch()
public = grapeConfig.grapeConfig().getPublicBranchFor(git.currentBranch())
except git.GrapeGitError:
public = "Unknown"
current = "Unknown"
except KeyError:
public = "Unknown"
current = "Unknown"
return "Publish the current %s branch to %s" % (git.branchPrefix(current), public)
def _resume(self, args):
super(Publish, self)._resume(args)
self.execute(args)
def _saveProgress(self, args):
pass
def parseArgs(self, args):
# resolve default topic branch, ensure we are on the topic branch
topic = args["--topic"]
if not topic:
topic = git.currentBranch()
if topic != git.currentBranch() and self.after(self.order, "publish", args["--startAt"]):
git.checkout(topic)
args["--topic"] = topic
# resolve default public branch using .grapeconfig.flow.topicPrefixMappings
config = grapeConfig.grapeConfig()
prefix = git.branchPrefix(topic)
public = args["--public"]
if not public:
public = config.getPublicBranchFor(topic)
args["--public"] = public
self.branchPrefix = prefix
# whether or not to use Bitbucket
if args["--useBitbucket"].lower() == "false" and not args["--noReview"]:
args["--noReview"] = True
if not args["--noReview"] and type(args["--verifySSL"]) != bool:
verify = True if args["--verifySSL"].lower() == "true" else False
args["--verifySSL"] = verify
# get the Bitbucket Username
user = args["--user"]
if not user and not args["--noReview"] and not args["--printSteps"]:
args["--user"] = utility.getUserName(service="Bitbucket")
if args["--tickVersion"] is not False and args["--tickVersion"] is not True:
if args["--tickVersion"].lower() == "false":
args["--tickVersion"] = False
else:
args["--tickVersion"] = True
if args["--tickOnCascade"] is None:
args["--tickOnCascade"] = int(config.getMapping("flow","topicCascadeTick")[args["--topic"]])
def abort(self, args):
#undo any commits done since we first started
super(Publish, self)._resume(args)
branch = git.currentBranch()
if self.progress["startingSHA"] != git.SHA(branch):
utility.printMsg("Reverting all commits from %s from %s to %s" % (branch, self.progress["startingSHA"],
git.SHA(branch)))
revert = utility.userInput("This will apply to %s. continue? [y,n]" % git.currentBranch(), "y")
if revert:
git.revert("--no-edit %s..%s" % (self.progress["startingSHA"], "HEAD"))
# release IN PROGRESS LOCK
utility.printMsg("Releasing In Progress Lock")
self.releaseInProgressLock(args)
@staticmethod
def after(array, item1, item2):
# True when item1 occurs after item2 in array; False if either is missing
try:
pos1 = array.index(item1)
pos2 = array.index(item2)
except ValueError:
return False
return pos1 > pos2
def execute(self, args):
if args["--abort"]:
self.abort(args)
return True
if "startingSHA" not in self.progress:
self.progress["startingSHA"] = git.SHA("HEAD")
self.order = ["testForCleanWorkspace1", "md1", "ensureModifiedSubmodulesAreActive",
"verifyPublishActions",
"ensureReview", "verifyCompletedReview",
"markInProgress", "md2", "tickVersion", "updateLog",
"build", "test", "testForCleanWorkspace2", "prePublish", "publish", "postPublish",
"tagVersion", "performCascades", "markAsDone", "notify", "deleteTopic", "done"]
if args["--quick"]:
self.order = ["md1","ensureModifiedSubmodulesAreActive","ensureReview", "verifyPublishActions", "markInProgress", "md2", "publish",
"markAsDone", "deleteTopic", "done"]
self.parseArgs(args)
startPoint = args["--startAt"]
if args["--printSteps"]:
print self.order
return True
if startPoint:
if startPoint not in self.order:
utility.printMsg("%s not a valid publish step. Choose 1 of :\n %s" % (startPoint, self.order))
return False
else:
startPoint = self.order[0]
stopPoint = args["--stopAt"]
if stopPoint:
if stopPoint not in self.order:
utility.printMsg("%s not a valid publish step. Choose 1 of :\n %s" % (stopPoint, self.order))
return False
steps = {"build": self.performCustomBuildStep,
"test": self.performCustomTestStep,
"prePublish": self.performCustomPrePublishSteps,
"tickVersion": self.tickVersion,
"tagVersion": self.tagVersion,
"performCascades": self.performCascades,
"publish": self.publishAllProjects,
"postPublish": self.performCustomPostPublishSteps,
"deleteTopic": self.deleteTopicBranch,
"verifyCompletedReview": self.verifyCompletedReview,
"testForCleanWorkspace1": self.testForCleanWorkspace,
"testForCleanWorkspace2": self.testForCleanWorkspace,
"markInProgress": self.acquireInProgressLock,
"markAsDone": self.releaseInProgressLock,
"updateLog": self.updateLog,
"notify": self.sendNotificationEmail,
"ensureReview": self.ensureReview,
"ensureModifiedSubmodulesAreActive": self.ensureModifiedSubmodulesAreActive,
"md1": self.mergePublic,
"md2": self.mergePublic,
"verifyPublishActions": self.verifyPublishTargetsWithUser}
currentStep = startPoint
os.chdir(utility.workspaceDir())
for step in self.order:
if step == "done":
break
if step == stopPoint:
utility.printMsg("Stopping at %s step as requested." % stopPoint)
args["--startAt"] = step
self.dumpProgress(args)
return True
if step != currentStep:
continue
try:
ret = steps[step](args)
except BaseException as e:
self.bailOut(step, args)
print(traceback.format_exc())
return False
if ret:
currentStep = self.order[self.order.index(currentStep) + 1]
else:
self.bailOut(step, args)
return False
return True
def bailOut(self, step, args):
utility.printMsg("Publish step %s failed. Please resolve the issue and then continue using\n"
"grape publish --continue" % step.upper())
args["--startAt"] = step
self.dumpProgress(args)
return
def ensureModifiedSubmodulesAreActive(self, args):
missing = utility.getModifiedInactiveSubmodules(args["--public"], args["--topic"])
if missing:
utility.printMsg("The following submodules that you've modified are not currently present in your workspace.\n"
"You should activate them using grape uv and then call publish --continue")
utility.printMsg(','.join(missing))
return False
return True
def mergePublic(self, args):
menu = grapeMenu.menu()
return menu.applyMenuChoice("md", ["--am", "--public=%s" % args["--public"]])
@staticmethod
def markReview(args, newArgs, skipStr, updateOnly=True):
if args["--noReview"]:
utility.printMsg(skipStr)
return True
reviewArgs = args["-R"]
finalArgs = []
if updateOnly:
finalArgs = ["--update"]
finalArgs += ["--source=%s" % args["--topic"], "--target=%s" % args["--public"],
"--user=%s" % args["--user"]]
if len(newArgs) > 0:
finalArgs += newArgs
for arg in reviewArgs:
finalArgs.append(arg.strip())
return grapeMenu.menu().applyMenuChoice("review", finalArgs)
def markReviewAsInProgress(self, args):
utility.printMsg("Prepending pull request title with **IN PROGRESS**...")
return self.markReview(args, ["--title=**IN PROGRESS** ", "--prepend"], "Skipping marking pull request "
"as IN PROGRESS...")
def markReviewWithVersionNumber(self, args):
version = self.progress["version"]
utility.printMsg("Prepending pull request title with %s" % version)
return self.markReview(args, ["--title=%s :" % version, "--prepend"], "Skipping marking pull request with "
"version number")
def ensureReview(self, args):
return self.markReview(args, [], "Skipping ensuring review exists.", updateOnly=False)
@staticmethod
def checkInProgressLock(args):
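# an **IN PROGRESS** pull request title acts as a publish lock: at most one
# open pull request targeting the public branch may hold it at a time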
if args["--noReview"]:
utility.printMsg("Skipping In Progress Lock Check..")
return True
atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
repo = atlassian.project(args["--project"]).repo(args["--repo"])
pullRequests = repo.pullRequests()
inProgressRequests = []
for request in pullRequests:
inProgress = "IN PROGRESS" in request.title()
if inProgress:
doesConflict = request.toRef() == args["--public"]
if doesConflict:
inProgressRequests.append(request)
if len(inProgressRequests) == 0:
utility.printMsg("No other pull requests are IN PROGRESS...")
return True
elif len(inProgressRequests) == 1:
thisRequest = repo.getOpenPullRequest(args["--topic"], args["--public"])
if thisRequest == inProgressRequests[0]:
utility.printMsg("The pull request for this branch is already in progress. Continuing...")
return 2
else:
utility.printMsg("The following pull request is already in progress:")
print(inProgressRequests[0])
return False
else:
utility.printMsg("ERROR: There are multiple pull requests in progress!")
for request in inProgressRequests:
print request
return False
def acquireInProgressLock(self, args):
if args["--noReview"]:
utility.printMsg("Skipping In Progress Lock Check..")
return True
retcode = self.checkInProgressLock(args)
if retcode:
# the 2 means we are already marked as in progress
return ((retcode == 2) or self.markReviewAsInProgress(args)) and self.checkInProgressLock(args)
else:
return False
def releaseInProgressLock(self, args):
if args["--noReview"]:
utility.printMsg("Skipping In Progress Lock Release...")
return True
atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
repo = atlassian.project(args["--project"]).repo(args["--repo"])
request = repo.getOpenPullRequest(args["--topic"], args["--public"])
state = "open"
if not request:
matchingRequests = repo.getMergedPullRequests(args["--topic"], args["--public"])
state = "merged"
for r in matchingRequests:
if "**IN PROGRESS**" in r.title():
request = r
break
if request:
title = re.sub("^.*\*\*IN PROGRESS\*\* *", "", request.title())
return self.markReview(args, ["--title=%s" % title, "--state=%s" % state], "")
else:
utility.printMsg("WARNING: No Open or Merged IN PROGRESS pull request found. Continuing...")
return True
def verifyCompletedReview(self, args):
if args["--noReview"]:
utility.printMsg("Skipping verification of code review...")
self.progress["reviewers"] = "No reviewers"
return True
atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
repo = atlassian.project(args["--project"]).repo(args["--repo"])
pullRequest = repo.getOpenPullRequest(args["--topic"], args["--public"])
verified = False
if pullRequest:
verified = pullRequest.approved()
reviewers = pullRequest.reviewers()
if not verified:
if not reviewers:
utility.printMsg("There are no reviewers for your pull request for %s targeting %s." %
(args["--topic"], args["--public"]))
self.progress["reviewers"] = "No reviewers"
else:
utility.printMsg("The following reviewers have not approved your request:\n")
approvedReviewerNames = []
for reviewer in reviewers:
if reviewer[1] is False:
print "%s (%s)" % (reviewer[0], reviewer[2])
else:
approvedReviewerNames.append(reviewer[2])
if len(approvedReviewerNames) > 0:
self.progress["reviewers"] = ", ".join(approvedReviewerNames)
else:
self.progress["reviewers"] = "No reviewers"
else:
utility.printMsg("All reviewers have approved your request.")
if args["--user"] != pullRequest.author():
reviewers.append((pullRequest.author(), True, pullRequest.authorName()))
self.progress["reviewers"] = ", ".join(x[2] for x in reviewers)
else:
utility.printMsg("There is no pull request for your current branch. \nStart one using grape review or by "
"visiting %s" % ('/'.join([atlassian.url, "projects", args["--project"], "repos",
args["--repo"], "pull-requests"])))
self.progress["reviewers"] = "No reviewers"
return verified
@staticmethod
def testForCleanWorkspace(args):
utility.printMsg("Checking to make sure workspace has a clean status.")
with utility.cd(utility.workspaceDir()):
ret = utility.isWorkspaceClean(printOutput=True)
ret = grapeMenu.menu().applyMenuChoice("status", ["--failIfInconsistent"]) and ret
if ret:
cb = git.currentBranch()
topic = args["--topic"]
ret = ret and cb == topic
if not ret:
utility.printMsg("Current branch %s is not topic branch %s. Please checkout %s before publishing. " % (cb, topic, topic))
return ret
def performCustomStep(self, prefix, args):
if not args["--%sCmds" % prefix]:
return True
cwd = os.getcwd()
if args["--%sDir" % prefix]:
os.chdir(os.path.join(utility.workspaceDir(), args["--%sDir" % prefix]))
cmds = args["--%sCmds" % prefix].split(',')
ret = True
utility.printMsg("GRAPE PUBLISH - PERFORMING CUSTOM %s STEP" % prefix.upper())
for cmd in cmds:
if ret:
if "<version>" in cmd:
self.loadVersion(args)
verStr = self.progress["version"]
cmd = cmd.replace("<version>", verStr)
returnCode = utility.executeSubProcess(cmd.strip(), workingDirectory=os.getcwd(),
stream=True).returncode
print(returnCode)
ret = ret and (returnCode == 0)
if not ret:
break
os.chdir(cwd)
return ret
def performCustomBuildStep(self, args):
return self.performCustomStep("build", args) and self.checkInProgressLock(args)
def performCustomTestStep(self, args):
return self.performCustomStep("test", args) and self.checkInProgressLock(args)
def performCustomPrePublishSteps(self, args):
ret = self.performCustomStep("prepublish", args)
if not ret:
return ret
self.loadModifiedFiles(args)
# Commit any files that may have been added to the main repo.
# The custom prepublish step is responsible for performing the git add for any
# modified files.
# Submodules and nested subprojects are not handled here. If any files there are
# modified during the prepublish step, the git add *and* the git commit must be
# handled in the custom step.
try:
git.commit(" -m \"%s\"" % args["-m"])
except git.GrapeGitError:
pass
return self.checkInProgressLock(args)
def performCustomPostPublishSteps(self, args):
return self.performCustomStep("postpublish", args)
@staticmethod
def getModifiedFileList(public, topic, args):
# Limit the number of updated files displayed per subproject
emailMaxFiles = args["--emailMaxFiles"]
updatelist = git.diff("--name-only %s %s" % (public, topic)).split('\n')
if len(updatelist) > emailMaxFiles:
updatelist.append("[ Additional files not shown ]")
return updatelist
def loadModifiedFiles(self, args):
if "modifiedFiles" in self.progress:
return True
wsdir = utility.workspaceDir()
os.chdir(wsdir)
public = args["--public"]
topic = args["--topic"]
if git.SHA(public) == git.SHA(topic):
public = utility.userInput("Please enter the branch name or SHA of the commit to diff against %s for the "
"modified file list." % topic)
self.progress["modifiedFiles"] = []
# Get list of modified files in main repo
self.progress["modifiedFiles"] += self.getModifiedFileList(public, topic, args)
# Get list of modified files in submodules
if args["--recurse"]:
submodulePublic = args["--submodulePublic"]
submodules = git.getModifiedSubmodules(public, topic)
for sub in submodules:
os.chdir(os.path.join(wsdir, sub))
self.progress["modifiedFiles"] += [sub + "/" + s for s in self.getModifiedFileList(submodulePublic, topic, args)]
os.chdir(wsdir)
# Get list of modified files in nested subprojects
for nested in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes():
os.chdir(os.path.join(wsdir, nested))
modified = self.getModifiedFileList(public, topic, args)
if len(modified) > 0:
self.progress["modifiedFiles"] += [nested + "/" + s for s in modified]
os.chdir(wsdir)
return True
def loadVersion(self, args):
if "version" in self.progress:
return True
else:
menu = grapeMenu.menu()
menu.applyMenuChoice("version", ["read"])
guess = menu.getOption("version").ver
self.progress["version"] = utility.userInput("Please enter version string for this commit", guess)
return True
def loadCommitMessage(self, args):
if "reviewers" not in self.progress:
# fill in the reviewers entry in progress, but don't check the review status.
self.verifyCompletedReview(args)
if "commitMsg" in self.progress:
if not args["-m"]:
args["-m"] = self.progress["commitMsg"]
return True
if args["--noUpdateLog"]:
self.progress["commitMsg"] = "no details entered"
return True
if not args["<CommitMessageFile>"] and not args["-m"]:
proceed = utility.userInput("No commit message entered. Would you like to use the Pull Request's "
"description as your commit message? [y/n] \n(Enter 'n' to enter a file name with your commit message instead)", 'y')
if not proceed:
args["<CommitMessageFile>"] = utility.userInput("Enter the name of the file containing your commit "
"message: ")
if args["<CommitMessageFile>"] and not args["-m"]:
# commit message should come from the file
commitMsgFile = args["<CommitMessageFile>"]
try:
with open(commitMsgFile, 'r') as f:
commitMsg = f.readlines()+["\n"]
except IOError as e:
print(e.message)
utility.printMsg("Could not read contents of %s" % commitMsgFile)
args["<CommitMessageFile>"] = False
return False
if not args["--noReview"]:
utility.printMsg("Updating Pull Request with commit msg...")
self.markReview(args, ["--descr", commitMsgFile], "")
else:
utility.printMsg("Skipping update of pull request description from commit message")
elif args["-m"]:
commitMsg = [args["-m"]+"\n"]
else:
if args["--noReview"]:
utility.printMsg("Skipping retrieval of commit message from Pull Request description..")
if not args["-m"]:
print("File with commit message is required argument when publishing with --noReview and no -m "
"<msg> defined.")
return False
utility.printMsg("Retrieving pull request description for use as commit message...")
atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
repo = atlassian.project(args["--project"]).repo(args["--repo"])
pullRequest = repo.getOpenPullRequest(args["--topic"], args["--public"])
if pullRequest:
commitMsg = pullRequest.description().splitlines(True)+['\n']
else:
commitMsg = ""
# this will be used for the actual merge commit message.
escapedCommitMsg = ''.join(commitMsg).replace("\"", "\\\"")
escapedCommitMsg = escapedCommitMsg.replace("`", "'")
if escapedCommitMsg:
args["-m"] = escapedCommitMsg
else:
utility.printMsg("WARNING: Commit message is empty. ")
utility.printMsg("The following commit message will be used for email notification, merge commits, etc.\n"
"======================================================================")
print ''.join(commitMsg[:10])
print "======================================================================"
proceed = utility.userInput("Is the above message what you want for email notifications and merge commits? "
"['y','n']", 'y')
if not proceed:
utility.printMsg("Stopping. Either edit the message in your pull request, or pass in the name of a file "
"containing your message as an argument to grape publish.")
e = Exception()
e.message = "Invalid commit message."
args["<CommitMessageFile>"] = False
args["-m"] = False
raise e
else:
self.progress["commitMsg"] = escapedCommitMsg
args["-m"] = escapedCommitMsg
return True
def updateLog(self, args):
if not (self.loadCommitMessage(args) and self.loadVersion(args)):
return False
commitMsg = self.progress["commitMsg"].split('\n')
if args["--noUpdateLog"]:
return True
logFile = args["--updateLog"]
cwd = os.getcwd()
os.chdir(utility.workspaceDir())
if logFile:
header = args["--entryHeader"]
header = header.replace("<date>", time.asctime())
header = header.replace("<user>", git.config("--get user.name"))
header = header.replace("<version>", self.progress["version"])
header = header.replace("<reviewers>", self.progress["reviewers"])
header = ["\n"]+header.split("\\n")
commitMsg = header + commitMsg
numLinesToSkip = int(args["--skipFirstLines"])
with open(logFile, 'r') as f:
loglines = f.readlines()
loglines.insert(numLinesToSkip, '\n'.join(commitMsg))
with open(logFile, 'w') as f:
f.writelines(loglines)
git.commit("%s -m \"GRAPE publish: updated log file %s\"" % (logFile, logFile))
os.chdir(cwd)
return self.checkInProgressLock(args)
def tickVersion(self, args):
if not args["--tickVersion"]:
return True
menu = grapeMenu.menu()
if not args["--noReview"]:
atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
repo = atlassian.project(args["--project"]).repo(args["--repo"])
thisRequest = repo.getOpenPullRequest(args["--topic"], args["--public"])
requestTitle = thisRequest.title()
versionArgs = ["read"]
menu.applyMenuChoice("version", versionArgs)
currentVer = grapeMenu.menu().getOption("version").ver
if currentVer in requestTitle:
utility.printMsg("Current Version string already in pull request title. Assuming this is from "
"a previous call to grape publish. Not ticking version again.")
return True
ret = True
if args["--tickVersion"]:
versionArgs = ["tick", "--notag", "--public=%s" % args["--public"]]
for arg in args["-T"]:
versionArgs += [arg.strip()]
ret = grapeMenu.menu().applyMenuChoice("version", versionArgs)
self.progress["version"] = grapeMenu.menu().getOption("version").ver
ret = ret and self.markReviewWithVersionNumber(args)
return ret and self.checkInProgressLock(args)
@staticmethod
def tagVersion(args):
ret = True
if args["--tickVersion"]:
versionArgs = ["tick", "--tag", "--notick", "--nocommit", "--tagNested"]
for arg in args["-T"]:
versionArgs += [arg.strip()]
cwd = os.getcwd()
wsdir = utility.workspaceDir()
os.chdir(wsdir)
ret = grapeMenu.menu().applyMenuChoice("version", versionArgs)
for nested in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes():
os.chdir(os.path.join(wsdir, nested))
git.push("--tags origin")
os.chdir(wsdir)
git.push("--tags origin")
os.chdir(cwd)
return ret
def sendNotificationEmail(self, args):
if not (self.loadCommitMessage(args) and self.loadVersion(args) and self.loadModifiedFiles(args)):
return False
# Write the contents of the mail file out to a temporary file
mailfile = tempfile.mktemp()
with open(mailfile, 'w') as mf:
date = time.asctime()
emailHeader = args["--emailHeader"]
emailHeader = emailHeader.replace("<user>", git.config("--get user.name"))
emailHeader = emailHeader.replace("<date>", date)
emailHeader = emailHeader.replace("<version>", self.progress["version"])
emailHeader = emailHeader.replace("<reviewers>", self.progress["reviewers"])
emailHeader = emailHeader.replace("<public>", args["--public"])
emailHeader = emailHeader.split("\\n")
mf.write('\n'.join(emailHeader))
comments = self.progress["commitMsg"]
mf.write('\n')
mf.write(comments)
updatelist = self.progress["modifiedFiles"]
if len(updatelist) > 0:
mf.write("\nFILES UPDATED:\n")
mf.write("\n".join(updatelist))
mf.write('\n')
emailFooter = args["--emailFooter"]
emailFooter = emailFooter.replace("<user>", git.config("--get user.name"))
emailFooter = emailFooter.replace("<date>", date)
emailFooter = emailFooter.replace("<version>", self.progress["version"])
emailFooter = emailFooter.replace("<reviewers>", self.progress["reviewers"])
emailFooter = emailFooter.replace("<public>", args["--public"])
emailFooter = emailFooter.split("\\n")
mf.write('\n'.join(emailFooter))
if not args["--emailNotification"].lower() == "true":
utility.printMsg("Skipping E-mail notification..")
with open(mailfile, 'r') as mf:
utility.printMsg("-- Begin update message --")
utility.printMsg(mf.read())
utility.printMsg("-- End update message --")
return True
# Open the file back up and attach it to a MIME message
t = open(mailfile, 'rb')
message = t.read()
t.close()
msg = MIMEText(message)
# Use their email address from their git user profile.
myemail = git.config("--get user.email")
mailsubj = args["--emailSubject"]
mailsubj = mailsubj.replace("<user>", git.config("--get user.name"))
mailsubj = mailsubj.replace("<public>", args["--public"])
mailsubj = mailsubj.replace("<version>", self.progress["version"])
mailsubj = mailsubj.replace("<date>", date)
sendto = args["--emailSendTo"]
msg['Subject'] = mailsubj
msg['From'] = myemail
msg['To'] = sendto
msg['CC'] = myemail
# Send the message via the configured SMTP server (don't know if this
# is necessary - localhost might work just as well)
import socket
try:
s = smtplib.SMTP(args["--emailServer"], timeout=10)
except socket.error as e:
utility.printMsg("Failed to email: %s" % str(e))
return False
# Don't need to connect if we specified the
# host in the SMTP constructor above...
#s.connect()
tolist = msg['To'].split(',')
tolist.append(myemail)
s.sendmail(msg['From'], tolist, msg.as_string())
s.quit()
# Remove the tempfile
os.remove(mailfile)
return True
def askWhetherToDelete(self, args):
if "<<doDelete>>" in self.progress:
self.doDelete = self.progress["<<doDelete>>"]
if not self.doDelete:
if args["--deleteTopic"].lower() == "true":
self.doDelete[args["--topic"]] = utility.userInput("Once the publish is done, would you like to delete the branch %s ? \n[y/n]" % (args["--topic"]), default='y')
else:
self.doDelete[args["--topic"]] = False
self.progress["<<doDelete>>"] = self.doDelete
def deleteTopicBranch(self, args):
self.askWhetherToDelete(args)
if self.doDelete[args["--topic"]]:
utility.printMsg("Deleting %s" % args["--topic"])
grapeMenu.menu().applyMenuChoice("db", [args["--topic"]])
# If the branch was not deleted, offer to return to that branch
try:
# SHA will raise an exception if the branch has been deleted
if git.SHA(args["--topic"]):
checkout = utility.userInput("You are currently on %s. Would you like to checkout %s? [y,n]" % (git.currentBranch(), args["--topic"]), "n")
if checkout:
grapeMenu.menu().applyMenuChoice("checkout", [args["--topic"]])
except:
pass
return True
@staticmethod
def validateInput(policy, args):
policy = policy.strip().lower()
valid = False
if policy == "merge" or policy == "squash":
valid = bool(args["-m"])
print args["-m"]
if not valid:
print("Commit message required for merge or squash merge publish policies.")
if policy == "rebase":
valid = True
if not valid:
print("Type grape publish -h for more details")
return valid
@staticmethod
def remoteMerge(public, topic, repo, args, isSubmodule, isNested):
atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
remoteRepo = atlassian.repoFromWorkspaceRepoPath(repo,
isSubmodule=isSubmodule,
isNested=isNested)
pr = remoteRepo.getOpenPullRequest(topic, public)
if pr is not None:
utility.printMsg("remotely merging %s into %s" % (topic, public))
if pr.merge():
git.checkout(public)
git.pull("")
print("%s merged successfully to %s" % (topic, public))
print("You are currently on %s" % public)
return True
else:
utility.printMsg("Failed to do a remote merge.")
else:
utility.printMsg("Could not find open Pull Request for %s in %s" % (topic, repo))
return False
@staticmethod
def merge(public, topic, repo, args):
with utility.cd(repo):
print("merging %s into %s" % (topic, public))
git.checkout(public)
git.merge("%s -m \"%s\" " % (topic, args["-m"]))
print("%s merged successfully to %s" % (topic, public))
print("You are currently on %s" % public)
@staticmethod
def squashMerge(public, topic, repo, args):
with utility.cd(repo):
print("squash merging %s into %s" % (topic, public))
git.checkout(public)
git.merge("--squash %s" % topic)
git.commit("-m \"%s\"" % args["-m"])
print("%s squash-merged successfully to %s" % (topic, public))
print("You are currently on %s" % public)
@staticmethod
def rebase(public, topic, repo):
with utility.cd(repo):
print("rebasing %s onto %s" % (topic, public))
git.rebase(public)
print("%s successfully rebased onto %s" % (topic, public))
git.checkout(public)
git.merge(topic)
print("You are currently on %s" % public)
def parseConfigPublishPolicy(self, args, policy, defaultCascadeDestination, repoType="outer"):
# if the policy starts with cascade, we allow a cascade->Branch->branch2->... syntax in the config file
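# e.g. "cascade->develop->master" publishes with a squash merge, then merges
# the public branch back into develop and master in turn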
policyToks = policy.strip().lower().split('->')
if policyToks[0] == "cascade":
policy = "squash"
# restore cascade info from an abort if necessary
if "<<cascadeDict>>" in args and args["<<cascadeDict>>"] is not None:
self.cascadeDict = args["<<cascadeDict>>"]
args["<<cascadeDict>>"] = None
if len(policyToks) > 1:
self.cascadeDict[repoType] = policyToks[1:]
else:
self.cascadeDict[repoType] = [defaultCascadeDestination]
args["<<cascadeDict>>"] = self.cascadeDict
return policy
def parseCascadeArgs(self, args):
if args["--cascade"]:
self.cascadeDict["outer"] = args["--cascade"]
args["<<cascadeDict>>"] = self.cascadeDict
def performCascade(self, status, args, mergeID, repo, branch, public):
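# resumable per-merge state machine: READY -> SWITCHED -> MERGING -> MERGED ->
# PUSHED -> DONE; returning False mid-state lets grape publish --continue
# resume an interrupted cascade where it left off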
if not mergeID in status:
status[mergeID] = "READY"
if status[mergeID] == "DONE":
return True
if status[mergeID] == "READY":
git.checkout(branch)
status[mergeID] = "SWITCHED"
if status[mergeID] == "SWITCHED":
status[mergeID] = "MERGING"
try:
git.merge("%s -m \"GRAPE PUBLISH: cascade merge of %s to %s after publish.\"" % (public, public, branch))
status[mergeID] = "MERGED"
except git.GrapeGitError as e:
if "conflict" in e.gitOutput.lower():
utility.printMsg("Conflicts generated in cascade merge from %s to %s in %s.\n"
"Please use git mergetool to resolve, and then git commit to commit your changes.\n"
"Once done, please run grape publish --continue ."% (public, branch, repo))
return False
if status[mergeID] == "MERGING":
clean = self.testForCleanWorkspace(args)
if clean:
utility.printMsg("Resuming with cascades...")
status[mergeID] = "MERGED"
if not clean:
utility.printMsg("Workspace not clean after resuming from a cascade.\n"
"Please commit your merge resolution or otherwise clean up your workspace.")
return False
if status[mergeID] == "MERGED":
public = branch
git.push("origin %s" % branch)
status[mergeID] = "PUSHED"
if status[mergeID] == "PUSHED":
if "outer" in mergeID and args["--tickOnCascade"] > 0:
grapeMenu.menu().applyMenuChoice("version",["tick", "--tag","--slot=%i"%args["--tickOnCascade"]])
git.push("--tags origin")
status[mergeID] = "DONE"
return True
def performCascades(self, args):
self.loadPublishTargets(args)
if "<<cascadeDict>>" in args and args["<<cascadeDict>>"]:
self.cascadeDict = args["<<cascadeDict>>"]
if "<<cascadeMergeStatus>>" not in args:
args["<<cascadeMergeStatus>>"] = {}
status = args["<<cascadeMergeStatus>>"]
wsdir = utility.workspaceDir()
if self.cascadeDict:
# do outer level and nested project cascades
cascade = self.cascadeDict["outer"]
repos= [""]+grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()
repos = [os.path.join(wsdir,r) for r in repos]
for repo in repos:
public = args["--public"]
with utility.cd(repo):
for branch in cascade:
mergeID = "%s_%s_%s" % ("outer", repo, branch)
if not self.performCascade(status, args, mergeID, repo, branch, public):
return False
if "submodules" in self.cascadeDict and "<<publishedSubmodules>>" in args:
cascade = self.cascadeDict["submodules"]
repos = [os.path.join(wsdir,r) for r in args["<<publishedSubmodules>>"]]
for repo in repos:
with utility.cd(repo):
public = args["--submodulePublic"]
for branch in cascade:
mergeID = "%s_%s_%s" % ("submodules", repo, branch)
if not self.performCascade(status, args, mergeID, repo, branch, public):
return False
return True
def publish(self, policy, public, topic, repo, args, isSubmodule=False, isNested=False):
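# dispatch on the publish policy; a requested remote (Bitbucket REST API)
# merge falls back to a local merge if it fails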
# don't bother publishing if public and topic are the same commit
if git.shortSHA(public).strip() == git.shortSHA(topic).strip():
git.checkout(public)
return
policy = policy.strip().lower()
if policy == "merge":
if args["--remoteMerge"]:
try:
if self.remoteMerge(public, topic, repo, args, isSubmodule, isNested):
return
else:
utility.printMsg("Bitbucket seems to think %s in %s is not mergeable... aborting" % (topic, repo))
raise Exception
except stashyErrors.GenericException as e:
utility.printMsg("WARNING: Remote merge failed. Attempting local merge instead.")
self.merge(public, topic, repo, args)
else:
self.merge(public, topic, repo, args)
elif policy == "squash":
self.squashMerge(public, topic, repo, args)
elif policy == "rebase":
self.rebase(public, topic, repo)
if not args["--nopush"]:
try:
with utility.cd(repo):
git.push("-u origin HEAD", throwOnFail=True)
except git.GrapeGitError as e:
if e.commError:
utility.printMsg("Unable to push result of publish to origin due to connectivity issue.")
raise e
def loadPublishTargets(self, args):
config = grapeConfig.grapeConfig()
public = args["--public"]
topic = args["--topic"]
# decide whether to recurse into submodules
recurse = config.get('workspace', 'manageSubmodules')
if args["--recurse"]:
recurse = True
if args["--noRecurse"]:
recurse = False
args["--recurse"] = recurse
if args["--recurse"]:
if not args["--submodulePublic"]:
submapping = config.getMapping('workspace', 'submoduleTopicPrefixMappings')
submodulePublic = submapping[self.branchPrefix]
args["--submodulePublic"] = submodulePublic
# deal with subtrees
push_subtrees = config.getboolean("subtrees", 'pushOnPublish') or args["--pushSubtrees"]
push_subtrees = push_subtrees and not args["--noPushSubtrees"]
args["--pushSubtrees"] = push_subtrees
if push_subtrees:
allsubtrees = config.get('subtrees', 'names').strip().split()
self.modifiedSubtrees = self.modifiedSubtrees.union(set(args["--forcePushSubtree"]))
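# a subtree counts as modified when the public..topic diff touches any path
# under its configured prefix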
for st in allsubtrees:
prefix = config.get('subtree-%s' % st, 'prefix')
if git.diff("--name-only %s %s -- %s" % (public, topic, os.path.join(utility.workspaceDir(),prefix))):
self.modifiedSubtrees.add(st)
for st in self.modifiedSubtrees:
self.st_prefixes[st] = config.get('subtree-%s' % st, 'prefix')
self.st_remotes[st] = utility.parseSubprojectRemoteURL(config.get('subtree-%s' % st, 'remote'))
self.st_branches[st] = config.getMapping('subtree-%s' % st, 'topicPrefixMappings')[topic]
# deal with nested subprojects
self.modifiedNestedProjects = grapeConfig.GrapeConfigParser.getAllModifiedNestedSubprojectPrefixes(public,topic)
self.modifiedOuter = True if git.log("--oneline %s..%s" % (public, topic)) else False
return True
def verifyPublishTargetsWithUser(self, args):
if args["--noverify"]:
return True
if "targetsVerified" in self.progress and self.progress["targetsVerified"]:
return True
if not self.loadPublishTargets(args):
return False
recurse = args["--recurse"]
public = args["--public"]
topic = args["--topic"]
submodules = git.getModifiedSubmodules(public, topic)
userMsg = "GRAPE: When ready, grape will publish %s to:\n" % topic
useAnd = False
if recurse:
if (submodules):
userMsg += "%s for the following submodules:\n\t\t%s\n" % (args["--submodulePublic"], "\n\t\t".join(submodules))
useAnd = True
if self.modifiedNestedProjects:
prefixes = self.modifiedNestedProjects
userMsg += "%s for the following nested subprojects:\n\t\t%s\n" % (public, "\n\t\t".join(prefixes))
useAnd = True
if self.modifiedOuter:
userMsg += "%s%s for the outer level repo. \n" % ("and " if useAnd else "", public)
push_subtrees = args["--pushSubtrees"]
if push_subtrees:
if self.modifiedSubtrees:
userMsg += "Additionally, grape will publish the following subtrees to the following destinations:\n"
for st in self.modifiedSubtrees:
userMsg += "subtree: %s\trepo: %s\tbranch:%s\n" % (self.st_prefixes[st], self.st_remotes[st],
self.st_branches[st])
proceed = utility.userInput(userMsg + "\nProceed? [y/n]", 'y')
if not proceed:
return False
self.progress["targetsVerified"] = True
# get the commit message here as well.
if not self.loadCommitMessage(args):
return False
self.askWhetherToDelete(args)
return True
def publishAllProjects(self, args):
# make sure we have a commit message
if not (self.loadCommitMessage(args) and self.loadPublishTargets(args)):
return False
public = args["--public"]
topic = args["--topic"]
recurse = args["--recurse"]
config = grapeConfig.grapeConfig()
# make sure public branch is up to date.
grapeMenu.menu().applyMenuChoice('up', ['up', '--public=%s' % public])
# set any CL defined publish policy
policy = None
if args["--merge"]:
policy = "merge"
if args["--squash"]:
policy = "squash"
if args["--rebase"]:
policy = "rebase"
# remember this since Command Line defined policies override the submodule policies as well.
CLPolicy = policy
# update policy from config if not set on CL
if not policy:
policy = self.parseConfigPublishPolicy(args, config.getMapping('flow', 'publishPolicy')[public], topic)
self.parseCascadeArgs(args)
wsdir = utility.workspaceDir()
os.chdir(wsdir)
if recurse:
submodulePublic = args["--submodulePublic"]
activeSubmodules = git.getActiveSubmodules()
modifiedSubmodules = git.getModifiedSubmodules(public, topic)
unmodifiedSubmodules = list(set(activeSubmodules) - set(modifiedSubmodules))
# submodule policy is Command Line requested policy, otherwise is based on
# .grapeconfig.workspace.submodulePublishPolicy
submodulePolicy = CLPolicy
# store current value for args["--cascade"]
outerCascadeOption = args["--cascade"]
if not submodulePolicy:
submodulePolicy = config.getMapping('workspace', 'submodulePublishPolicy')[submodulePublic]
submodulePolicy = self.parseConfigPublishPolicy(args, submodulePolicy, topic, repoType="submodule")
valid = self.validateInput(submodulePolicy, args)
if valid and self.verifyPublishTargetsWithUser(args):
for sub in modifiedSubmodules:
subpath = os.path.join(wsdir,sub)
with utility.cd(subpath):
grapeMenu.menu().applyMenuChoice('up', ['up', '--noRecurse', '--wd=%s' % subpath, '--public=%s' % submodulePublic])
self.publish(submodulePolicy, submodulePublic, topic, subpath, args, isSubmodule=True)
#add and commit any new merge commits in submodules as a result of the publish
git.add(sub)
try:
# we are cool with this not working - only will have something to commit if the
# submodules were published without fast forward merges
git.commit("-m \"%s - submodules published\"" % args["-m"])
except git.GrapeGitError:
pass
# ensure submodules that aren't modified end up on the public branch
for sub in unmodifiedSubmodules:
with utility.cd(os.path.join(wsdir, sub)):
git.checkout(submodulePublic)
# restore value for args["--cascade"]
args["<<publishedSubmodules>>"] = modifiedSubmodules
args["--cascade"] = outerCascadeOption
os.chdir(wsdir)
# push subtrees to their respective remote branches
push_subtrees = args["--pushSubtrees"]
if push_subtrees:
modifiedSubtrees = self.modifiedSubtrees
if modifiedSubtrees:
proceed = self.verifyPublishTargetsWithUser(args)
if proceed:
squash = "--squash" if config.get("subtrees", "mergepolicy").lower() == "squash" else ""
for st in modifiedSubtrees:
utility.printMsg("pushing subtree %s to %s (branch %s)..." % (self.st_prefixes[st],
self.st_remotes[st], self.st_branches[st]))
try:
git.subtree("push --prefix=%s %s %s " % (self.st_prefixes[st],
self.st_remotes[st], self.st_branches[st]))
except git.GrapeGitError:
# the push can fail if there has never been a subtree add / pull in this repo.
utility.printMsg("First attempt failed. Attempting a subtree pull then push...")
git.subtree("pull %s --prefix=%s %s %s " % (squash, self.st_prefixes[st],
self.st_remotes[st], self.st_branches[st]))
git.subtree("push --prefix=%s %s %s " % ( self.st_prefixes[st],
self.st_remotes[st], self.st_branches[st]))
utility.printMsg("Succeeded!")
valid = self.validateInput(policy, args)
if valid and self.verifyPublishTargetsWithUser(args):
for nested in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes():
self.publish(policy, public, topic, os.path.join(wsdir, nested), args, isNested=True)
if self.modifiedOuter:
self.publish(policy, public, topic, wsdir, args)
else:
git.checkout(public)
return True
else:
return False
|
<filename>pylearn2/utils/logger.py
"""Local facilities to configure the logger to our needs."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["<NAME>"]
__license__ = "3-clause BSD"
__email__ = "<EMAIL>"
__maintainer__ = "<NAME>"
# Portions cribbed from the standard library logging module,
# Copyright 2001-2010 by <NAME>. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging
import sys
from logging import Handler, Formatter
import six
from six.moves import xrange
class CustomFormatter(Formatter):
"""
Conditionally displays log level names and source loggers, only if
the log level is WARNING or greater.
Parameters
----------
    prefix : str, optional
        String prepended to every formatted message.
    only_from : str, optional
        If given, only records from loggers whose names start with this
        prefix use the terse INFO-level format; all other records use the
        full "LEVEL (logger): message" format.
"""
def __init__(self, prefix='', only_from=None):
Formatter.__init__(self)
self._info_fmt = prefix + "%(message)s"
self._fmt = prefix + "%(levelname)s (%(name)s): %(message)s"
self._only_from = only_from
def format(self, record):
"""
Format the specified record as text.
Parameters
----------
record : object
A LogRecord object with the appropriate attributes.
Returns
-------
s : str
A string containing the formatted log message.
Notes
-----
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory
steps are carried out. The message attribute of the record is
computed using LogRecord.getMessage(). If the formatting
string uses the time (as determined by a call to usesTime(),
formatTime() is called to format the event time. If there is
exception information, it is formatted using formatException()
and appended to the message.
"""
record.message = record.getMessage()
        # Python 2.6 doesn't have the usesTime() method,
        # so we skip that information for it.
if hasattr(self, 'usesTime') and self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
emit_special = (self._only_from is None or
record.name.startswith(self._only_from))
if record.levelno == logging.INFO and emit_special:
s = self._info_fmt % record.__dict__
else:
s = self._fmt % record.__dict__
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924
s = s + record.exc_text.decode(sys.getfilesystemencoding())
return s
class CustomStreamHandler(Handler):
"""
A handler class which writes logging records, appropriately
formatted, to one of two streams. DEBUG and INFO messages
get written to the provided `stdout`, all other messages to
`stderr`.
If stream is not specified, sys.stderr is used.
Parameters
----------
stdout : file-like object, optional
Stream to which DEBUG and INFO messages should be written.
If `None`, `sys.stdout` will be used.
stderr : file-like object, optional
Stream to which WARNING, ERROR, CRITICAL messages will be
written. If `None`, `sys.stderr` will be used.
formatter : `logging.Formatter` object, optional
Assigned to `self.formatter`, used to format outgoing log messages.
Notes
-----
N.B. it is **not** recommended to pass `sys.stdout` or `sys.stderr` as
constructor arguments explicitly, as certain things (like nosetests) can
reassign these during code execution! Instead, simply pass `None`.
"""
def __init__(self, stdout=None, stderr=None, formatter=None):
Handler.__init__(self)
self._stdout = stdout
self._stderr = stderr
self.formatter = formatter
@property
def stdout(self):
"""
.. todo::
WRITEME
"""
return sys.stdout if self._stdout is None else self._stdout
@property
def stderr(self):
"""
.. todo::
WRITEME
"""
return sys.stderr if self._stderr is None else self._stderr
def flush(self):
"""Flushes the stream."""
for stream in (self.stdout, self.stderr):
stream.flush()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
Parameters
----------
        record : logging.LogRecord
            The record to be emitted.
"""
try:
msg = self.format(record)
if record.levelno > logging.INFO:
stream = self.stderr
else:
stream = self.stdout
fs = u"%s\n"
            # If there is no unicode support...
            # Python 2.6 doesn't have logging._unicode, so use the
            # non-unicode path, as stream.encoding doesn't exist there either.
if not getattr(logging, '_unicode', True):
stream.write(fs % msg)
else:
try:
if (isinstance(msg, six.text_type) and
getattr(stream, 'encoding', None)):
try:
stream.write(fs % msg)
except UnicodeEncodeError:
# Printing to terminals sometimes fails. For
# example, with an encoding of 'cp1251', the above
# write will work if written to a stream opened or
# wrapped by the codecs module, but fail when
# writing to a terminal even when the codepage is
# set to cp1251. An extra encoding step seems to
# be needed.
stream.write((fs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except (UnicodeError, TypeError):
stream.write((fs % msg).encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def configure_custom(debug=False, stdout=None, stderr=None):
"""
Configure the logging module to output logging messages to the
console via `stdout` and `stderr`.
Parameters
----------
debug : bool
If `True`, display DEBUG messages on `stdout` along with
INFO-level messages.
stdout : file-like object, optional
Stream to which DEBUG and INFO messages should be written.
If `None`, `sys.stdout` will be used.
stderr : file-like object, optional
Stream to which WARNING, ERROR, CRITICAL messages will be
written. If `None`, `sys.stderr` will be used.
Notes
-----
This uses `CustomStreamHandler` defined in this module to
set up a console logger. By default, messages are formatted
as "LEVEL: message", where "LEVEL:" is omitted if the
level is INFO.
    WARNING, ERROR and CRITICAL level messages are logged to
    `stderr` (or the provided substitute).
N.B. it is **not** recommended to pass `sys.stdout` or
`sys.stderr` as constructor arguments explicitly, as certain
things (like nosetests) can reassign these during code
execution! Instead, simply pass `None`.
"""
top_level_logger = logging.getLogger(__name__.split('.')[0])
# Do not propagate messages to the root logger.
top_level_logger.propagate = False
# Set the log level of our logger, either to DEBUG or INFO.
top_level_logger.setLevel(logging.DEBUG if debug else logging.INFO)
# Get rid of any extant logging handlers that are installed.
# This means we can call configure_custom() more than once
# and have it be idempotent.
while top_level_logger.handlers:
top_level_logger.handlers.pop()
# Install our custom-configured handler and formatter.
fmt = CustomFormatter()
handler = CustomStreamHandler(stdout=stdout, stderr=stderr, formatter=fmt)
top_level_logger.addHandler(handler)
def restore_defaults():
"""
Use this if you are embedding our library in a larger application
and wish to handle logging yourself at the level of the root
logger.
Undoes the effects of `configure_custom()`. By default, this
shuts us up on the console except for WARNING, ERROR, and
CRITICAL. See the documentation for the `logging` standard library
module for details.
"""
top_level_logger = logging.getLogger(__name__.split('.')[0])
# Propagate log messages upwards.
top_level_logger.propagate = True
# Restore the log level to its default value, i.e. logging.NOTSET.
top_level_logger.setLevel(logging.NOTSET)
# Delete any handlers that might be installed on our logger.
while top_level_logger.handlers:
top_level_logger.handlers.pop()
def newline(logger, nb_blank_lines=1):
"""
    Write one or more real blank lines through the given logger.
Only works with the INFO level at the moment.
Parameters
----------
logger : Logger object
The logger where the blank line will be added.
nb_blank_lines : int, optional
Number of blank lines in a row.
"""
formatter = logging.Formatter(fmt='')
handler = CustomStreamHandler(formatter=formatter)
logger.addHandler(handler)
for i in xrange(nb_blank_lines):
logger.info('')
logger.removeHandler(handler)
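# A minimal usage sketch (not part of the original module): configure the
# package-level logger, emit messages, insert blank lines, then undo the
# configuration. The messages below are illustrative.
#
#     import logging
#     configure_custom(debug=True)
#     log = logging.getLogger(__name__.split('.')[0])
#     log.info("training started")    # rendered as just "training started"
#     log.warning("check your data")  # rendered as "WARNING (<pkg>): check your data"
#     newline(log, nb_blank_lines=2)
#     restore_defaults()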
|
from functools import partial
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily # pylint: disable=no-name-in-module
from typing import List, Literal, Optional, Tuple, Union, cast
from wsgiref.handlers import format_date_time
import h11
from anyio import BrokenResourceError, EndOfStream, create_tcp_listener, move_on_after
from anyio.abc import SocketStream, TaskGroup
from xmlrpcproto.server import build_xml, format_error, format_success, parse_reqeuest
AnyIPAddressFamily = Literal[
AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
TIMEOUT = 10
MAX_RECV = 2 ** 16
class ServerHandle:
pass
class ClientWrapper:
def __init__(self, stream: SocketStream) -> None:
self.stream = stream
self.conn = h11.Connection(h11.SERVER)
async def send(self, event) -> None:
assert not isinstance(event, h11.ConnectionClosed)
data = self.conn.send(event)
await self.stream.send(data)
async def _read_from_peer(self) -> None:
if self.conn.they_are_waiting_for_100_continue:
go_ahead = h11.InformationalResponse(
status_code=100, headers=self.basic_headers()
)
await self.send(go_ahead)
try:
data = await self.stream.receive(MAX_RECV)
except (ConnectionError, EndOfStream):
data = b""
self.conn.receive_data(data)
async def next_event(self):
while True:
event = self.conn.next_event()
if event is h11.NEED_DATA:
await self._read_from_peer()
continue
return event
    async def shutdown(self) -> None:
        # Attempt a graceful half-close: send EOF first, then drain whatever
        # the peer still has in flight before closing the stream for good.
        try:
            await self.stream.send_eof()
        except (BrokenResourceError, EndOfStream):
            return
        # move_on_after returns a synchronous cancel scope in current anyio,
        # so it is entered with a plain `with`; it bounds the drain time.
        with move_on_after(TIMEOUT):
            try:
                while True:
                    got = await self.stream.receive(MAX_RECV)
                    if not got:
                        break
            except (BrokenResourceError, EndOfStream):
                pass
            finally:
                await self.stream.aclose()
def basic_headers(self) -> List[Tuple[str, bytes]]:
return [
("Date", format_date_time(None).encode("ascii")),
(
"Server",
f"anyio_xmlrpc/{h11.__version__} {h11.PRODUCT_ID}".encode("ascii"),
),
("content-type", b"text/xml"),
]
async def handle(server_handle: ServerHandle, stream: SocketStream) -> None:
wrapper = ClientWrapper(stream)
while True:
event = await wrapper.next_event()
if isinstance(event, h11.Request):
await handle_request(server_handle, wrapper, event)
if wrapper.conn.our_state is h11.MUST_CLOSE:
await wrapper.shutdown()
return
try:
wrapper.conn.start_next_cycle()
except h11.ProtocolError:
await wrapper.shutdown()
return
async def handle_request(
server_handle: ServerHandle,
wrapper: ClientWrapper,
request: h11.Request,
) -> None:
body = b""
while True:
event = await wrapper.next_event()
if isinstance(event, h11.EndOfMessage):
break
body += cast(h11.Data, event).data
method_name, args = parse_reqeuest(body, dict(request.headers))
try:
method = getattr(server_handle, method_name)
result = await method(*args)
root = format_success(result)
status_code = 200
except Exception as exc:
root = format_error(exc)
status_code = 500
response = h11.Response(status_code=status_code, headers=wrapper.basic_headers())
await wrapper.send(response)
await wrapper.send(h11.Data(data=build_xml(root)))
await wrapper.send(h11.EndOfMessage())
async def start_server(
server_handle: ServerHandle,
*,
    local_host: Optional[Union[str, IPv4Address, IPv6Address]] = None,
local_port: int = 0,
family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
backlog: int = 65536,
reuse_port: bool = False,
    task_group: Optional[TaskGroup] = None,
) -> None:
listener = await create_tcp_listener(
local_host=local_host,
local_port=local_port,
family=family,
backlog=backlog,
reuse_port=reuse_port,
)
await listener.serve(partial(handle, server_handle), task_group=task_group)
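# A minimal usage sketch (illustrative, not part of this module): subclass
# ServerHandle with the XML-RPC methods to expose, then run the server under
# anyio. The handler class, method name and port below are made up.
#
#     import anyio
#
#     class EchoHandle(ServerHandle):
#         async def echo(self, value):
#             return value
#
#     anyio.run(lambda: start_server(EchoHandle(), local_host="127.0.0.1",
#                                    local_port=8080))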
|
<reponame>bcgov/wps-api<filename>app/models/process_grib.py
""" Read a grib file, and store values relevant to weather stations in database.
"""
import math
import struct
import logging
import logging.config
from typing import List, Tuple
from sqlalchemy.dialects.postgresql import array
import sqlalchemy.exc
import gdal
import app.db.database
from app.wildfire_one import _get_stations_local
from app.db.models import (
PredictionModel, PredictionModelRunTimestamp, ModelRunGridSubsetPrediction)
from app.db.crud import get_prediction_model, get_or_create_prediction_run, get_or_create_grid_subset
logger = logging.getLogger(__name__)
class PredictionModelNotFound(Exception):
""" Exception raised when specified model cannot be found in database. """
class DatabaseException(Exception):
""" Exception raised to to database related issue. """
class ModelRunInfo():
""" Information relation to a particular model run
"""
def __init__(self):
self.model_abbreviation = None
self.projection = None
self.model_run_timestamp = None
self.prediction_timestamp = None
self.variable_name = None
def get_surrounding_grid(
        band: gdal.Dataset, x_index: int, y_index: int) -> Tuple[List[List[int]], List[float]]:
""" Get the grid and values surrounding a given station
NOTE: Order of the points is super important! Vertices are ordered clockwise, values are also
ordered clockwise.
"""
# Read scanlines of the raster, build up the four points and corresponding values:
scanline_one = band.ReadRaster(xoff=x_index, yoff=y_index, xsize=2, ysize=1,
buf_xsize=2, buf_ysize=1, buf_type=gdal.GDT_Float32)
row_one = struct.unpack('f' * 2, scanline_one)
values = []
values.extend(row_one)
scanline_two = band.ReadRaster(xoff=x_index, yoff=y_index+1, xsize=2, ysize=1,
buf_xsize=2, buf_ysize=1, buf_type=gdal.GDT_Float32)
row_two = struct.unpack('f' * 2, scanline_two)
values.append(row_two[1])
values.append(row_two[0])
points = [[x_index, y_index], [x_index+1, y_index],
[x_index+1, y_index+1], [x_index, y_index+1]]
return points, values
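# Point/value ordering sketch for get_surrounding_grid (clockwise from the
# top-left cell, matching the order in which the values are appended):
#
#     points[0] = (x,   y)      points[1] = (x+1, y)
#     points[3] = (x,   y+1)    points[2] = (x+1, y+1)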
def calculate_raster_coordinate(longitude: float, latitude: float, origin: List[int], pixel: List[int]):
""" From a given longitude and latitude, calculate the raster coordinate corresponding to the
top left point of the grid surrounding the given geographic coordinate.
"""
delta_x = longitude - origin[0]
delta_y = latitude - origin[1]
x_coordinate = delta_x / pixel[0]
y_coordinate = delta_y / pixel[1]
return math.floor(x_coordinate), math.floor(y_coordinate)
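# A worked example with made-up geotransform values: with origin=(-180.0, 90.0)
# and pixel=(0.25, -0.25), the point longitude=-120.4, latitude=50.8 gives
# delta_x = 59.6 -> x = floor(59.6 / 0.25) = 238, and
# delta_y = -39.2 -> y = floor(-39.2 / -0.25) = 156, i.e. raster cell (238, 156).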
def calculate_geographic_coordinate(point: List[int], origin: List[float], pixel: List[float]) -> Tuple[float, float]:
    """ Calculate the geographic coordinates for a given point """
x_coordinate = origin[0] + point[0] * pixel[0]
y_coordinate = origin[1] + point[1] * pixel[1]
return (x_coordinate, y_coordinate)
def open_grib(filename: str) -> gdal.Dataset:
""" Open grib file """
return gdal.Open(filename, gdal.GA_ReadOnly)
def get_dataset_geometry(dataset: gdal.Dataset) -> Tuple[List[float], List[float]]:
""" Get the geometry info (origin and pixel size) of the dataset.
"""
geotransform = dataset.GetGeoTransform()
# Upper left corner:
origin = (geotransform[0], geotransform[3])
# Pixel width and height:
pixel = (geotransform[1], geotransform[5])
return origin, pixel
class GribFileProcessor():
""" Instances of this object can be used to process and ingest a grib file.
"""
def __init__(self):
# Get list of stations we're interested in, and store it so that we only call it once.
self.stations = _get_stations_local()
self.session = app.db.database.get_session()
self.origin = None
self.pixel = None
self.prediction_model = None
def get_prediction_model(self, grib_info: ModelRunInfo) -> PredictionModel:
""" Get the prediction model, raising an exception if not found """
prediction_model = get_prediction_model(
self.session, grib_info.model_abbreviation, grib_info.projection)
if not prediction_model:
raise PredictionModelNotFound(
'Could not find this prediction model in the database',
grib_info.model_abbreviation, grib_info.projection)
return prediction_model
def yield_data_for_stations(self, raster_band):
""" Given a list of stations, and a gdal dataset, yield relevant data
"""
for station in self.stations:
longitude = float(station['long'])
latitude = float(station['lat'])
x_coordinate, y_coordinate = calculate_raster_coordinate(
longitude, latitude, self.origin, self.pixel)
points, values = get_surrounding_grid(
raster_band, x_coordinate, y_coordinate)
yield (points, values)
    def store_bounding_values(self, points, values, prediction_model_run: PredictionModelRunTimestamp,
                              grib_info: ModelRunInfo):
""" Store the values around the area of interest.
"""
# Convert points to geographic coordinates:
geographic_points = []
for point in points:
geographic_points.append(
calculate_geographic_coordinate(point, self.origin, self.pixel))
# Get the grid subset, i.e. the relevant bounding area for this particular model.
grid_subset = get_or_create_grid_subset(
self.session, self.prediction_model, geographic_points)
# Load the record if it exists.
# pylint: disable=no-member
prediction = self.session.query(ModelRunGridSubsetPrediction).\
filter(
                ModelRunGridSubsetPrediction.prediction_model_run_timestamp_id == prediction_model_run.id).\
filter(ModelRunGridSubsetPrediction.prediction_timestamp == grib_info.prediction_timestamp).\
filter(ModelRunGridSubsetPrediction.prediction_model_grid_subset_id ==
grid_subset.id).first()
if not prediction:
# Record doesn't exist, so we create it.
prediction = ModelRunGridSubsetPrediction()
            prediction.prediction_model_run_timestamp_id = prediction_model_run.id
prediction.prediction_timestamp = grib_info.prediction_timestamp
prediction.prediction_model_grid_subset_id = grid_subset.id
setattr(prediction, grib_info.variable_name.lower(), array(values))
self.session.add(prediction)
self.session.commit()
def process_grib_file(self, filename, grib_info: ModelRunInfo):
""" Process a grib file, extracting and storing relevant information. """
try:
logger.info('processing %s', filename)
# Open grib file
dataset = open_grib(filename)
self.origin, self.pixel = get_dataset_geometry(dataset)
            # get the model (e.g. GDPS/RDPS latlon24x.24):
self.prediction_model = self.get_prediction_model(grib_info)
# get the model run (e.g. GDPS latlon24x.24 for 2020 07 07 12h00):
prediction_run = get_or_create_prediction_run(
self.session, self.prediction_model, grib_info.model_run_timestamp)
raster_band = dataset.GetRasterBand(1)
# Iterate through stations:
for (points, values) in self.yield_data_for_stations(raster_band):
self.store_bounding_values(
points, values, prediction_run, grib_info)
except sqlalchemy.exc.OperationalError:
# Sometimes this exception is thrown with a "server closed the connection unexpectedly" error.
# This could happen due to the connection being closed.
if self.session.is_disconnect():
logger.error("Database disconnected!")
# Try to re-connect, so that subsequent calls to this function may succeed.
# NOTE: I'm not sure if this will solve the problem!
self.session = app.db.database.get_session()
raise DatabaseException('Database disconnection')
# Re-throw the exception.
raise
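# A minimal usage sketch (illustrative; the file name and model attributes
# below are made up, not real data):
#
#     from datetime import datetime, timezone
#
#     info = ModelRunInfo()
#     info.model_abbreviation = 'GDPS'
#     info.projection = 'latlon24x.24'
#     info.model_run_timestamp = datetime(2020, 7, 7, 12, tzinfo=timezone.utc)
#     info.prediction_timestamp = datetime(2020, 7, 7, 18, tzinfo=timezone.utc)
#     info.variable_name = 'TMP_TGL_2'
#     GribFileProcessor().process_grib_file('/tmp/example.grib2', info)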
|
<filename>test/data/generate_fmm_data.py
# Script to be run with legacy Bempp to generate the comparison data.
import bempp.api
import numpy as np
import os.path
import sys
# run `python generate_fmm_data.py REGENERATE` to regenerate everything
REGENERATE = "REGENERATE" in sys.argv
data = {}
def generate_vector(size, filename):
global data
if filename not in data:
if REGENERATE or not os.path.exists(filename + ".npy"):
rand = np.random.RandomState(0)
vec = rand.rand(size)
np.save(filename, vec)
else:
vec = np.load(filename + ".npy")
data[filename] = vec
return data[filename]
def generate_points(size, filename):
global data
if filename not in data:
if REGENERATE or not os.path.exists(filename + ".npy"):
rand = np.random.RandomState(0)
points = np.vstack(
[2 * np.ones(size, dtype="float64"), rand.randn(size), rand.randn(size)]
)
np.save(filename, points)
else:
points = np.load(filename + ".npy")
data[filename] = points
return data[filename]
def save_matvec_result(operator, space1, space2, space3, vec, filename, *args):
if REGENERATE or not os.path.exists(filename + ".npy"):
print("Generating " + filename)
dense_mat = operator(
space1, space2, space3, *args, assembler="dense"
).weak_form()
np.save(filename, dense_mat @ vec)
else:
print("Skipping " + filename + " (already generated)")
def save_potential_eval_result(operator, space, vec, points, filename, *args):
if REGENERATE or not os.path.exists(filename + ".npy"):
print("Generating " + filename)
grid_fun = bempp.api.GridFunction(space, coefficients=vec)
result = operator(space, points, *args, assembler="dense").evaluate(grid_fun)
np.save(filename, result)
else:
print("Skipping " + filename + " (already generated)")
grid = bempp.api.shapes.ellipsoid(1, 0.5, 0.3, h=0.1)
bempp.api.export("fmm_grid.msh", grid=grid)
space = bempp.api.function_space(grid, "P", 1)
vec = generate_vector(space.global_dof_count, "fmm_p1_vec")
points = generate_points(30, "fmm_potential_points.npy")
# Generate P1 boundary operator results
for filename, operator in [
("fmm_laplace_single", bempp.api.operators.boundary.laplace.single_layer),
("fmm_laplace_double", bempp.api.operators.boundary.laplace.double_layer),
("fmm_laplace_adjoint", bempp.api.operators.boundary.laplace.adjoint_double_layer),
("fmm_laplace_hyper", bempp.api.operators.boundary.laplace.hypersingular),
]:
save_matvec_result(operator, space, space, space, vec, filename)
for filename, operator in [
("fmm_helmholtz_single", bempp.api.operators.boundary.helmholtz.single_layer),
("fmm_helmholtz_double", bempp.api.operators.boundary.helmholtz.double_layer),
(
"fmm_helmholtz_adjoint",
bempp.api.operators.boundary.helmholtz.adjoint_double_layer,
),
("fmm_helmholtz_hyper", bempp.api.operators.boundary.helmholtz.hypersingular),
(
"fmm_modified_helmholtz_single",
bempp.api.operators.boundary.modified_helmholtz.single_layer,
),
(
"fmm_modified_helmholtz_double",
bempp.api.operators.boundary.modified_helmholtz.double_layer,
),
(
"fmm_modified_helmholtz_adjoint",
bempp.api.operators.boundary.modified_helmholtz.adjoint_double_layer,
),
(
"fmm_modified_helmholtz_hyper",
bempp.api.operators.boundary.modified_helmholtz.hypersingular,
),
]:
save_matvec_result(operator, space, space, space, vec, filename, 1.5)
# Generate P1 potential operator results
for filename, operator in [
(
"fmm_laplace_potential_single",
bempp.api.operators.potential.laplace.single_layer,
),
(
"fmm_laplace_potential_double",
bempp.api.operators.potential.laplace.double_layer,
),
]:
save_potential_eval_result(operator, space, vec, points, filename)
for filename, operator in [
(
"fmm_helmholtz_potential_single",
bempp.api.operators.potential.helmholtz.single_layer,
),
(
"fmm_helmholtz_potential_double",
bempp.api.operators.potential.helmholtz.double_layer,
),
(
"fmm_modified_potential_helmholtz_single",
bempp.api.operators.potential.modified_helmholtz.single_layer,
),
(
"fmm_modified_potential_helmholtz_double",
bempp.api.operators.potential.modified_helmholtz.double_layer,
),
]:
save_potential_eval_result(operator, space, vec, points, filename, 1.5)
rwg = bempp.api.function_space(grid, "RWG", 0)
snc = bempp.api.function_space(grid, "SNC", 0)
vec2 = generate_vector(rwg.global_dof_count, "fmm_rwg_vec")
# Generate Maxwell boundary operator results
for filename, operator in [
("fmm_maxwell_electric", bempp.api.operators.boundary.maxwell.electric_field),
("fmm_maxwell_magnetic", bempp.api.operators.boundary.maxwell.magnetic_field),
]:
save_matvec_result(operator, rwg, rwg, snc, vec2, filename, 1.5)
# Generate Maxwell potential operator results
for filename, operator in [
(
"fmm_maxwell_potential_electric",
bempp.api.operators.potential.maxwell.electric_field,
),
(
"fmm_maxwell_potential_magnetic",
bempp.api.operators.potential.maxwell.magnetic_field,
),
]:
save_potential_eval_result(operator, rwg, vec2, points, filename, 1.5)
# Generate two-grid data
grid1 = bempp.api.shapes.ellipsoid(0.5, 0.5, 0.3, h=0.1)
grid2 = bempp.api.shapes.sphere(r=1.5, h=0.1)
bempp.api.export("fmm_grid1.msh", grid=grid1)
bempp.api.export("fmm_grid2.msh", grid=grid2)
p1_space1 = bempp.api.function_space(grid1, "P", 1)
p1_space2 = bempp.api.function_space(grid2, "P", 1)
vec = generate_vector(p1_space1.global_dof_count, "fmm_two_mesh_vec")
for filename, operator in [
("fmm_two_mesh_laplace_single", bempp.api.operators.boundary.laplace.single_layer),
("fmm_two_mesh_laplace_hyper", bempp.api.operators.boundary.laplace.hypersingular),
]:
save_matvec_result(operator, p1_space1, p1_space2, p1_space2, vec, filename)
|
<filename>code/main.py
import numpy as np
import pandas as pd
import time
import logging
import pprint
import work_data
import config
def main():
logger = config.config_logger(__name__, 10)
t0 = time.time()
pdf_path = './data/pdf/'
txt_path = './data/txt/'
dict_path = './data/dict/'
output_path = './output/'
convert_files = False
logger.info('Begin execution')
if convert_files:
        logger.info('Convert files: {0}'.format(convert_files))
logging.getLogger().setLevel(30)
all_docs = work_data.convert_pdf_to_txt(pdf_path, txt_path)
logging.getLogger().setLevel(10)
else:
logger.info('Import testimonials')
all_docs = work_data.open_testimonies(txt_path)
logger.info('Create wordcloud')
wordcloud_words = work_data.generate_wordcloud(all_docs, output_path)
wordcloud_words.to_csv('./output/word_count.csv')
logger.info('Remove protocol paragraphs')
filter_docs = [doc.filter_protocol() for doc in all_docs]
filter_docs = [doc for doc in filter_docs if len(doc) > 5]
logger.info('Load dictionaries')
dict1 = pd.read_csv(dict_path + 'dict_ale.csv', index_col=0, header=0)
dict2 = pd.read_csv(dict_path + 'dict_erika.csv', index_col=0, header=0)
dict3 = pd.read_csv(dict_path + 'dict_macla.csv', index_col=0, header=0)
dict_agents = pd.read_csv(dict_path + 'dict_agentes.csv', index_col=0, header=0)
filter_docs = [work_data.input_sentiment(doc, dict1) for doc in filter_docs]
filter_docs = [work_data.input_sentiment(doc, dict2) for doc in filter_docs]
filter_docs = [work_data.input_sentiment(doc, dict3) for doc in filter_docs]
#clean_docs = [work_data.input_agent(doc, dict_agents) for doc in clean_docs]
tagged = not_tagged = 0
for i in filter_docs:
for par in i:
if par.sentiment != 'none':
tagged += 1
else:
not_tagged += 1
logger.info('Paragraphs tagged {0} - not tagged {1}'.format(tagged, not_tagged))
logger.info('Number of testimonials: {0}'.format(len(all_docs)))
logger.info('Clean testimonials')
clean_docs = [[parag.clean_data() for parag in doc] for doc in filter_docs]
clean_docs, quechua = work_data.extract_quechua(clean_docs)
logger.info('Testimonials in spanish {0} - quechua {1}'.format(len(clean_docs), len(quechua)))
print([doc[0].name for doc in quechua])
tagged = not_tagged = 0
for i in clean_docs:
for par in i:
if par.sentiment != 'none':
tagged += 1
else:
not_tagged += 1
logger.info('Paragraphs tagged {0} - not tagged {1}'.format(tagged, not_tagged))
logger.info('Train model')
parag_trained1 = work_data.input_sentiment_posneg(filter_docs)
parag_trained2 = work_data.train_sentiment(clean_docs)
    logger.info('Save predictions')
parag_trained1.to_csv(output_path + 'reg_database1.csv')
parag_trained2.to_csv(output_path + 'reg_database2.csv')
# LDA implementation
mat_docs, dictionary = work_data.list_to_matrix(clean_docs)
print(mat_docs[0])
pprint.pprint(dictionary.dfs)
lda_model = work_data.lda_model(dictionary, mat_docs, 10)
print(lda_model)
pprint.pprint(lda_model.print_topics(num_topics=10, num_words=10))
config.time_taken_display(t0)
if __name__ == '__main__':
    main()
|
import torch
import torch.nn as nn
from layers import NodeAttentionLayer, SemanticAttentionLayer, GRUSet2Set, AvgReadout, GATLayerImp3
class HEncoder(nn.Module):
def __init__(self, nfeat, nhid, shid, alpha, nheads, mp_num, device):
"""Dense version of GAT and semantic level aggregation(soft attention)"""
super(HEncoder, self).__init__()
self.device = device
        # Use an nn.ModuleList so the per-meta-path GAT layers are registered
        # as submodules (a plain list would hide their parameters from .to()
        # and from the optimizer).
        self.node_level_attentions = nn.ModuleList()
self.P = mp_num # the number of meta-path
self.read = AvgReadout()
for _ in range(mp_num):
self.node_level_attentions.append(GATLayerImp3(nfeat, nhid, nheads).to(device))
self.semantic_level_attention = SemanticAttentionLayer(nhid * nheads, shid)
def forward(self, x, adjs, msk, edge_msk):
# print(x.size())
meta_path_x = []
for i, adj in enumerate(adjs):
adj = torch.squeeze(adj, 0)
m_x = self.node_level_attentions[i]((x, adj, edge_msk[i]))
meta_path_x.append(m_x)
meta_path_x = torch.stack(meta_path_x, dim=0)
meta_path_x = meta_path_x.permute(1, 0, 2, 3)
return meta_path_x
class HAggregate(nn.Module):
def __init__(self, nfeat, nhid, shid, alpha, nheads, mp_num, device):
super(HAggregate, self).__init__()
self.device = device
        self.node_level_attentions = nn.ModuleList()
self.P = mp_num # the number of meta-path
self.add_pool = AvgReadout() # graph pooling
self.read = GRUSet2Set(nheads * nhid, 10, device) # item order reconstruction in fine-tuning
self.hencoder = HEncoder(nfeat, nhid, shid, alpha, nheads, mp_num, device)
self.semantic_level_attention = SemanticAttentionLayer(nhid * nheads, shid)
self.fc = nn.Linear(2 * nhid * nheads, nhid * nheads)
# fully connected layer for graph embedding in fine-tuning, similar to projection head but with different parameters
def forward(self, x, adjs, msk, edge_msk):
"""
The pre-training forward procedure
:param x: input embeddings(node embeddings)
:param adjs: meta-path adjacent matrix
:param msk: mask on node information
:param edge_msk: mask on edge information
:return: aggregated graph embedding
"""
adjs = adjs.permute(1, 0, 2, 3)
msk = msk.permute(1, 0, 2)
edge_msk = edge_msk.permute(1, 0, 2)
x = torch.squeeze(x, 0)
meta_path_x = self.hencoder(x, adjs, msk, edge_msk)
graph_embed = []
meta_path_x = meta_path_x.permute(1, 0, 2, 3)
for i, adj in enumerate(adjs):
m_x = self.add_pool(meta_path_x[i], msk[i])
graph_embed.append(m_x)
graph_embed = torch.stack(graph_embed, dim=0)
graph_embed = graph_embed.permute(1, 0, 2)
# x = torch.cat([m_x for m_x in meta_path_x], dim=0)
# print(x.size())
        # The input to the final semantic-level aggregation has shape
        # [P * input * head, embed_dim].
        x = self.semantic_level_attention(graph_embed, self.P)
# x = torch.unsqueeze(x, 0)
return x
def ft_forward(self, x, adjs, msk, edge_msk):
"""
        The fine-tuning forward procedure. The parameters are the same as for `forward`.
"""
# with torch.no_grad():
adjs = adjs.permute(1, 0, 2, 3)
msk = msk.permute(1, 0, 2)
edge_msk = edge_msk.permute(1, 0, 2)
x = torch.squeeze(x, 0)
meta_path_x = self.hencoder(x, adjs, msk, edge_msk)
graph_embed = []
meta_path_x = meta_path_x.permute(1, 0, 2, 3)
for i, adj in enumerate(adjs):
m_x = self.read(meta_path_x[i], msk[i])
graph_embed.append(m_x)
graph_embed = torch.stack(graph_embed, dim=0)
graph_embed = graph_embed.permute(1, 0, 2)
graph_embed = self.fc(graph_embed)
graph_embed = self.semantic_level_attention(graph_embed, self.P)
return graph_embed
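# A shape sketch for the forward passes above (inferred from the permutes and
# squeezes in this file, not taken from the original training script):
#
#     x:        [1, N, nfeat]  node features (the leading batch dim is squeezed)
#     adjs:     [B, P, N, N]   one N x N adjacency per meta-path (P of them)
#     msk:      [B, P, N]      node masks, one per meta-path
#     edge_msk: [B, P, E]      edge masks consumed by GATLayerImp3
#
# Both forward() and ft_forward() return one graph embedding per batch element
# after the semantic-level attention combines the P meta-path views.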
|
<filename>src/board.py
"""Contains Connect 4 AI Game Implementation, such as the Board and other useful functions
"""
import numpy as np
from typing import Dict
import board_utl
BOARD_ROWS: int = 6
BOARD_COLUMNS: int = 7
class Board:
"""Represents a board for a game of connect 4
"""
def __init__(self):
        self.data: np.ndarray = np.zeros((BOARD_ROWS, BOARD_COLUMNS))  # shape (6, 7)
def __repr__(self):
return str(self.data)
def column_full(self, i: int) -> bool:
"""check if a given column is full
Arguments:
i {int} -- the index of the column
Returns:
bool -- if the column is full or not
"""
return 0 not in self.data[:, i]
def insert_piece(self, player: int, column: int):
"""insert a piece into a given column
Arguments:
player {int} -- the active player token
column {int} -- the column to insert into
"""
column_data = self.data[:, column]
rev_column = column_data[::-1]
# Get the index of the first 0, then account for reverse
index = np.argmin(rev_column)
index = self.data.shape[0] - 1 - index
self.data[index, column] = player
def generate_neighbors(self, player: int) -> Dict[int, np.ndarray]:
"""generate all possible outcomes of the given players turn
Arguments:
player {int} -- the current player
Returns:
            Dict[int, np.ndarray] -- a map of each possible column play and its associated board
"""
neighbors: Dict[int, np.ndarray] = {}
for column in range(self.data.shape[1]):
if not self.column_full(column):
new_board: np.ndarray = self.copy()
new_board.insert_piece(player, column)
neighbors[column] = new_board
return neighbors
def copy(self):
"""get a copy of this board
Returns:
Board -- the copied board
"""
new_board = Board()
new_board.data = np.copy(self.data)
return new_board
def score(self) -> int:
"""generate the score for this board for minimax
Returns:
            int -- the board's associated score
"""
score: int = 0
# Check for winner
winner: int = self.winner()
if winner == 1:
score += 100000
elif winner == 2:
score -= 100000
# Determine whose turn it is
one_count: int = np.sum(self.data == 1)
two_count: int = np.sum(self.data == 2)
turn: int = 1 if one_count == two_count else 2
# Assign score based on lines of two
counts: Dict[int, int] = board_utl.count_lines_of_two(self.data)
score += counts[1] * 2
score -= counts[2] * 2
# Assign score based on lines of three
counts = board_utl.count_lines_of_three(self.data)
if turn == 1:
score += counts[1] * 40
score -= counts[2] * 400
else:
score += counts[1] * 400
score -= counts[2] * 40
# Assign score based on number of tiles in center column
score += np.sum(self.data[:, 3] == 1) * 12
score -= np.sum(self.data[:, 3] == 2) * 12
return score
def set_board(self, board: np.ndarray):
"""set the board state given a numpy array
Arguments:
board {np.ndarray} -- the new board state
"""
self.data = board
def winner(self) -> int:
"""check to see if a winner exists
Returns:
int -- the winner's number (or 0 if no winner exists)
"""
for i in range(BOARD_ROWS):
for j in range(BOARD_COLUMNS):
# Check vertical for winner
result = board_utl.check_vertical(self.data, i, j)
if result != 0:
return result
# Check horizontal for winner
result = board_utl.check_horizontal(self.data, i, j)
if result != 0:
return result
# Check positive diagonal for winner
result = board_utl.check_positive_diagonal(self.data, i, j)
if result != 0:
return result
# Check negative diagonal for winner
result = board_utl.check_negative_diagonal(self.data, i, j)
if result != 0:
return result
return 0
def terminal(self) -> bool:
"""check to see if the game is over
Returns:
bool -- whether or not the game is over
"""
return self.winner() > 0 or np.sum(self.data > 0) == BOARD_COLUMNS * BOARD_ROWS
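# A minimal usage sketch (illustrative only): drop a few pieces, then inspect
# the minimax heuristic and the terminal test.
#
#     board = Board()
#     board.insert_piece(1, 3)  # player 1 takes the center column
#     board.insert_piece(2, 0)
#     print(board.score())      # > 0: the center-column bonus favours player 1
#     print(board.terminal())   # False: no winner and the board is not full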
|
<reponame>hinczhang/OSPyQGIS
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainDlg.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1601, 945)
desktop = QApplication.desktop()
        # PyQt5 requires integer coordinates here; truncate the scaled values.
        Dialog.move(int(desktop.width() * 0.066), int(desktop.height() * 0.040))
self.main_treeView = QtWidgets.QTreeView(Dialog)
self.main_treeView.setGeometry(QtCore.QRect(65, 186, 305, 665))
self.main_treeView.setObjectName("main_treeView")
self.main_wg_canvas = QtWidgets.QWidget(Dialog)
self.main_wg_canvas.setGeometry(QtCore.QRect(420, 190, 1101, 695))
self.main_wg_canvas.setObjectName("main_wg_canvas")
self.main_progressBar = QtWidgets.QProgressBar(Dialog)
self.main_progressBar.setGeometry(QtCore.QRect(80, 875, 211, 23))
self.main_progressBar.setProperty("value", 24)
self.main_progressBar.setTextVisible(False)
self.main_progressBar.setObjectName("main_progressBar")
self.main_pB_dk = QtWidgets.QPushButton(Dialog)
self.main_pB_dk.setGeometry(QtCore.QRect(60, 50, 120, 45))
self.main_pB_dk.setText("")
self.main_pB_dk.setObjectName("main_pB_dk")
self.main_pB_fd = QtWidgets.QPushButton(Dialog)
self.main_pB_fd.setGeometry(QtCore.QRect(435, 50, 120, 45))
self.main_pB_fd.setText("")
self.main_pB_fd.setObjectName("main_pB_fd")
self.main_pB_sx = QtWidgets.QPushButton(Dialog)
self.main_pB_sx.setGeometry(QtCore.QRect(560, 50, 120, 45))
self.main_pB_sx.setText("")
self.main_pB_sx.setObjectName("main_pB_sx")
self.main_pB_td = QtWidgets.QPushButton(Dialog)
self.main_pB_td.setGeometry(QtCore.QRect(685, 50, 120, 45))
self.main_pB_td.setText("")
self.main_pB_td.setObjectName("main_pB_td")
self.main_pB_qt = QtWidgets.QPushButton(Dialog)
self.main_pB_qt.setGeometry(QtCore.QRect(810, 50, 120, 45))
self.main_pB_qt.setText("")
self.main_pB_qt.setObjectName("main_pB_qt")
self.pb_fl = QtWidgets.QPushButton(Dialog)
self.pb_fl.setGeometry(QtCore.QRect(215, 90, 80, 80))
self.pb_fl.setText("")
self.pb_fl.setObjectName("pb_fl")
self.pB_js = QtWidgets.QPushButton(Dialog)
self.pB_js.setGeometry(QtCore.QRect(295, 90, 80, 80))
self.pB_js.setText("")
self.pB_js.setObjectName("pB_js")
self.pB_lb = QtWidgets.QPushButton(Dialog)
self.pB_lb.setGeometry(QtCore.QRect(135, 90, 80, 80))
self.pB_lb.setText("")
self.pB_lb.setObjectName("pB_lb")
self.pB_jd = QtWidgets.QPushButton(Dialog)
self.pB_jd.setGeometry(QtCore.QRect(375, 90, 80, 80))
self.pB_jd.setText("")
self.pB_jd.setObjectName("pB_jd")
self.pB_fg_1 = QtWidgets.QPushButton(Dialog)
self.pB_fg_1.setGeometry(QtCore.QRect(455, 90, 80, 80))
self.pB_fg_1.setText("")
self.pB_fg_1.setObjectName("pB_fg_1")
self.pB_fg_2 = QtWidgets.QPushButton(Dialog)
self.pB_fg_2.setGeometry(QtCore.QRect(535, 90, 80, 80))
self.pB_fg_2.setText("")
self.pB_fg_2.setObjectName("pB_fg_2")
self.pB_fg_3 = QtWidgets.QPushButton(Dialog)
self.pB_fg_3.setGeometry(QtCore.QRect(615, 90, 80, 80))
self.pB_fg_3.setText("")
self.pB_fg_3.setObjectName("pB_fg_3")
self.main_pB_bc = QtWidgets.QPushButton(Dialog)
self.main_pB_bc.setGeometry(QtCore.QRect(185, 50, 120, 45))
self.main_pB_bc.setText("")
self.main_pB_bc.setObjectName("main_pB_bc")
self.main_mini = QtWidgets.QPushButton(Dialog)
self.main_mini.setGeometry(QtCore.QRect(1520, 5, 31, 28))
self.main_mini.setText("")
self.main_mini.setObjectName("main_mini")
self.main_close = QtWidgets.QPushButton(Dialog)
self.main_close.setGeometry(QtCore.QRect(1560, 5, 31, 28))
self.main_close.setText("")
self.main_close.setObjectName("main_close")
self.main_widget1 = QtWidgets.QWidget(Dialog)
self.main_widget1.setGeometry(QtCore.QRect(0, 0, 191, 31))
self.main_widget1.setObjectName("main_widget1")
self.pB_cj = QtWidgets.QPushButton(Dialog)
self.pB_cj.setGeometry(QtCore.QRect(55, 90, 80, 80))
self.pB_cj.setText("")
self.pB_cj.setObjectName("pB_cj")
self.main_pB_sc = QtWidgets.QPushButton(Dialog)
self.main_pB_sc.setGeometry(QtCore.QRect(310, 50, 120, 45))
self.main_pB_sc.setText("")
self.main_pB_sc.setObjectName("main_pB_sc")
self.pB_jdjc = QtWidgets.QPushButton(Dialog)
self.pB_jdjc.setGeometry(QtCore.QRect(695, 90, 80, 80))
self.pB_jdjc.setText("")
self.pB_jdjc.setObjectName("pB_jdjc")
self.pB_fast = QtWidgets.QPushButton(Dialog)
self.pB_fast.setGeometry(QtCore.QRect(775, 90, 80, 80))
self.pB_fast.setText("")
self.pB_fast.setObjectName("pB_fast")
self.pB_sllj = QtWidgets.QPushButton(Dialog)
self.pB_sllj.setGeometry(QtCore.QRect(855, 90, 80, 80))
self.pB_sllj.setText("")
self.pB_sllj.setObjectName("pB_sllj")
self.main_pushButton1 = QtWidgets.QPushButton(Dialog)
self.main_pushButton1.setGeometry(QtCore.QRect(935, 90, 80, 80))
self.main_pushButton1.setText("")
self.main_pushButton1.setObjectName("main_pushButton1")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
|
import unittest
from ctypes import ArgumentError
import os
import numpy as np
from sdl2 import *
from glaze.GL import *
BASEPATH = 'shots_results'
def getSDLError():
sderr = SDL_GetError()
try:
sderr = sderr.decode()
except Exception:
pass
return sderr
class OGL3Tester(unittest.TestCase):
def setUp(self):
self.addCleanup(self.close)
if SDL_Init(SDL_INIT_EVERYTHING) != 0:
self.fail(getSDLError())
# set_attribs_buffer
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8)
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8)
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8)
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8)
SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 24)
# set_attribs_depth
for depth in [24, 16]:
if SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, depth) == 0:
break
else:
if depth == 16:
error = 'Error setting depth size: ' + getSDLError()
self.fail(error)
# set_attribs_restrict_Context
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2)
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1)
# SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_FORWARD_COMPATIBLE_FLAG)
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE)
errStr = getSDLError()
if errStr != '':
self.fail(errStr)
# set_attribs_use_debug
res = SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_DEBUG_FLAG)
if res != 0:
error = 'Error setting SDL debug context flag: ' + getSDLError()
self.fail(error)
# set_attribs_share_context
if SDL_GL_SetAttribute(SDL_GL_SHARE_WITH_CURRENT_CONTEXT, 1) != 0:
error = 'Error setting SDL shared context flag: ' + getSDLError()
self.fail(error)
# set_attribs_set_double_buffer
isDoubleBuffered = not SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1)
if not isDoubleBuffered:
self.fail('Error setting SDL double buffer flag: ' + getSDLError())
# set_attribs_set_multisample
# from warnings import warn
# warn('Multisample is not implemented')
# open_window
self.size = w, h = 400, 400
flags = SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_HIDDEN
try:
self._SDL_Window = SDL_CreateWindow('test window', 200, 200, w, h, flags)
except ArgumentError:
self._SDL_Window = SDL_CreateWindow(b'test window', 200, 200, w, h, flags)
except Exception as err:
self.fail('error creating sdl window: ' + str(err))
if not self._SDL_Window:
sdlerr = SDL_GetError()
msg = 'Error creating window {}'.format(sdlerr)
self.fail(msg)
self._context = newContext = SDL_GL_CreateContext(self._SDL_Window)
if not newContext:
sdlerr = getSDLError()
error = 'Error creating context: ' + sdlerr
self.fail(error)
self.windowID = SDL_GetWindowID(self._SDL_Window)
loadGL()
def _pollEvents(self):
event = SDL_Event()
while SDL_PollEvent(event):
pass
def GLClear(self):
glClearColor(.1, .3, .8, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
def GLPresent(self):
SDL_GL_SwapWindow(self._SDL_Window)
self._pollEvents()
def test_clear_screen(self):
self.GLClear()
self.GLPresent()
self.compareScreenShot('clearScreen')
def test_draw_triangle_color(self):
self.GLClear()
self.drawTriangle(True)
self.GLPresent()
self.compareScreenShot('drawColorTriangle')
def drawTriangle(self, useShader):
# An array of 3 vectors which represents 3 vertices
g_vertex_buffer_data = np.array([-1.0, -1.0, 0.0, 1.0, -1.0, 0.0, 0.0, 1.0, 0.0], np.float32)
# This will identify our vertex buffer
vertexbuffer = np.array([0], np.uint32)
# Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, vertexbuffer)
# The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)
# Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, g_vertex_buffer_data.strides[0] * len(g_vertex_buffer_data),
# todo: replace with sizeof
g_vertex_buffer_data, GL_STATIC_DRAW)
VertexArrayID = np.array([0], np.uint32)
glGenVertexArrays(1, VertexArrayID)
glBindVertexArray(VertexArrayID)
if useShader:
programID = LoadShaders()
glUseProgram(programID)
# Draw triangle...
        # 1st attribute buffer: vertices
glEnableVertexAttribArray(0)
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)
glVertexAttribPointer(0,
3, # size
GL_FLOAT, # type
GL_FALSE, # normalized?
0, # stride
0) # array buffer offset)
# Draw the triangle !
        glDrawArrays(GL_TRIANGLES, 0, 3)  # starting from vertex 0; 3 vertices total -> 1 triangle
glDisableVertexAttribArray(0)
# end the current frame (internally swaps the front and back buffers)
glDeleteBuffers(1, vertexbuffer)
def compareScreenShot(self, testName):
w, h = self.size
dest = np.empty(w * h * 3, np.uint8)
self.getBackBufferContent(w, h, dest)
filePath = os.path.join(BASEPATH, testName + '.png')
from PIL.Image import fromarray, merge, open
from PIL.ImageOps import flip
capture = fromarray(dest.reshape(h, w, 3))
capture = flip(capture)
b, g, r = capture.split()
capture = merge("RGB", (r, g, b))
if not os.path.exists(filePath):
capture.save(filePath)
else:
stored = open(filePath)
isEqual = np.all(np.asarray(capture) == np.asarray(stored))
self.assertTrue(isEqual)
def getBackBufferContent(self, w, h, destBuffer):
glPixelStorei(GL_PACK_ALIGNMENT, 1)
glReadPixels(0, 0, w, h, GL_BGR, GL_UNSIGNED_BYTE, destBuffer)
def close(self):
SDL_GL_DeleteContext(self._context)
SDL_DestroyWindow(self._SDL_Window)
def LoadShaders():
# Create the shaders
VertexShaderID = glCreateShader(GL_VERTEX_SHADER)
FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER)
# Vertex Shader code
VertexShaderCode = '''#version 120
varying vec3 pos;
attribute vec3 position;
void main(){
pos = position;
gl_Position.xyz = position;
gl_Position.w = 1.0;
}
'''
# Fragment Shader code
FragmentShaderCode = '''#version 120
varying vec3 pos;
void main(){
gl_FragColor = vec4(vec3(pos + 0.25), 1);
}
'''
result = np.array([GL_FALSE], np.int32)
InfoLogLength = np.array([0], np.int32)
# Compile Vertex Shader
VertexSourcePointer = stringToArray(VertexShaderCode)
glShaderSource(VertexShaderID, 1, VertexSourcePointer, None)
glCompileShader(VertexShaderID)
# Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, result)
if result[0] == GL_FALSE:
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, InfoLogLength)
VertexShaderErrorMessage = np.empty((InfoLogLength[0],), np.int8)
glGetShaderInfoLog(VertexShaderID, InfoLogLength, None, VertexShaderErrorMessage)
raise RuntimeError('Compiling vertex shader: ' + arrayToString(VertexShaderErrorMessage))
# Compile Fragment Shader
FragmentSourcePointer = stringToArray(FragmentShaderCode)
glShaderSource(FragmentShaderID, 1, FragmentSourcePointer, None)
glCompileShader(FragmentShaderID)
# Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, result)
if result[0] == GL_FALSE:
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, InfoLogLength)
FragmentShaderErrorMessage = np.empty((InfoLogLength[0],), np.int8)
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, None, FragmentShaderErrorMessage)
raise RuntimeError('Compiling fragment shader: ' + arrayToString(FragmentShaderErrorMessage))
# Link the program
ProgramID = glCreateProgram()
glAttachShader(ProgramID, VertexShaderID)
glAttachShader(ProgramID, FragmentShaderID)
glLinkProgram(ProgramID)
# Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, result)
if result[0] == GL_FALSE:
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, InfoLogLength)
ProgramErrorMessage = np.empty((InfoLogLength[0],), np.int8)
glGetProgramInfoLog(ProgramID, InfoLogLength, None, ProgramErrorMessage)
        raise RuntimeError('Linking program: ' + arrayToString(ProgramErrorMessage))
glDetachShader(ProgramID, VertexShaderID)
glDetachShader(ProgramID, FragmentShaderID)
glDeleteShader(VertexShaderID)
glDeleteShader(FragmentShaderID)
return ProgramID
def stringToArray(string):
return [string]
def arrayToString(array):
strList = [0] * len(array)
for i in range(len(array)):
strList[i] = chr(array[i])
return ''.join(strList)
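# A minimal run sketch (the module path is assumed): execute the suite with
#
#     python -m unittest path.to.this_test_module
#
# On the first run compareScreenShot saves a reference PNG under
# shots_results/; later runs compare the current back buffer against that
# stored image pixel-for-pixel and fail on any difference.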
|
<reponame>alex-dudin/Aspose.Words-for-Python-via-.NET
# Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import io
import aspose.words as aw
from api_example_base import ApiExampleBase, MY_DIR
class ExDocumentVisitor(ApiExampleBase):
#ExStart
#ExFor:Document.accept(DocumentVisitor)
#ExFor:Body.accept(DocumentVisitor)
#ExFor:SubDocument.accept(DocumentVisitor)
#ExFor:DocumentVisitor
#ExFor:DocumentVisitor.visit_run(Run)
#ExFor:DocumentVisitor.visit_document_end(Document)
#ExFor:DocumentVisitor.visit_document_start(Document)
#ExFor:DocumentVisitor.visit_section_end(Section)
#ExFor:DocumentVisitor.visit_section_start(Section)
#ExFor:DocumentVisitor.visit_body_start(Body)
#ExFor:DocumentVisitor.visit_body_end(Body)
#ExFor:DocumentVisitor.visit_paragraph_start(Paragraph)
#ExFor:DocumentVisitor.visit_paragraph_end(Paragraph)
#ExFor:DocumentVisitor.visit_sub_document(SubDocument)
#ExSummary:Shows how to use a document visitor to print a document's node structure.
def test_doc_structure_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.DocStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_doc_structure_to_text(visitor) #ExSkip
class DocStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's tree of child nodes.
Creates a map of this tree in the form of a string."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.accepting_node_child_tree = io.StringIO()
self.doc_traversal_depth = 0
def get_text(self):
return self.accepting_node_child_tree.getvalue()
def visit_document_start(self, doc: aw.Document) -> aw.VisitorAction:
"""Called when a Document node is encountered."""
child_node_count = doc.get_child_nodes(aw.NodeType.ANY, True).count
self._indent_and_append_line("[Document start] Child nodes: " + child_node_count)
self.doc_traversal_depth += 1
# Allow the visitor to continue visiting other nodes.
return aw.VisitorAction.CONTINUE
def visit_document_end(self, doc: aw.Document) -> aw.VisitorAction:
"""Called after all the child nodes of a Document node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Document end]")
return aw.VisitorAction.CONTINUE
def visit_section_start(self, section: aw.Section) -> aw.VisitorAction:
"""Called when a Section node is encountered in the document."""
# Get the index of our section within the document.
doc_sections = section.document.get_child_nodes(aw.NodeType.SECTION, False)
section_index = doc_sections.index_of(section)
self._indent_and_append_line("[Section start] Section index: " + section_index)
self.doc_traversal_depth += 1
return aw.VisitorAction.CONTINUE
def visit_section_end(self, section: aw.Section) -> aw.VisitorAction:
"""Called after all the child nodes of a Section node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Section end]")
return aw.VisitorAction.CONTINUE
def visit_body_start(self, body: aw.Body) -> aw.VisitorAction:
"""Called when a Body node is encountered in the document."""
paragraph_count = body.paragraphs.count
self._indent_and_append_line("[Body start] Paragraphs: " + paragraph_count)
self.doc_traversal_depth += 1
return aw.VisitorAction.CONTINUE
def visit_body_end(self, body: aw.Body) -> aw.VisitorAction:
"""Called after all the child nodes of a Body node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Body end]")
return aw.VisitorAction.CONTINUE
def visit_paragraph_start(self, paragraph: aw.Paragraph) -> aw.VisitorAction:
"""Called when a Paragraph node is encountered in the document."""
self._indent_and_append_line("[Paragraph start]")
self.doc_traversal_depth += 1
return aw.VisitorAction.CONTINUE
def visit_paragraph_end(self, paragraph: aw.Paragraph) -> aw.VisitorAction:
"""Called after all the child nodes of a Paragraph node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Paragraph end]")
return aw.VisitorAction.CONTINUE
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_sub_document(self, sub_document: aw.SubDocument) -> aw.VisitorAction:
"""Called when a SubDocument node is encountered in the document."""
self._indent_and_append_line("[SubDocument]")
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the StringBuilder and indent it depending on how deep the visitor is into the document tree."""
for i in range(self.doc_traversal_depth):
self.accepting_node_child_tree.write("| ")
self.accepting_node_child_tree.write(text + "\n")
#ExEnd
def _test_doc_structure_to_text(self, visitor: ExDocumentVisitor.DocStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[Document start]", visitor_text)
self.assertIn("[Document end]", visitor_text)
self.assertIn("[Section start]", visitor_text)
self.assertIn("[Section end]", visitor_text)
self.assertIn("[Body start]", visitor_text)
self.assertIn("[Body end]", visitor_text)
self.assertIn("[Paragraph start]", visitor_text)
self.assertIn("[Paragraph end]", visitor_text)
self.assertIn("[Run]", visitor_text)
self.assertIn("[SubDocument]", visitor_text)
#ExStart
#ExFor:Cell.accept(DocumentVisitor)
#ExFor:Cell.is_first_cell
#ExFor:Cell.is_last_cell
#ExFor:DocumentVisitor.visit_table_end(Table)
#ExFor:DocumentVisitor.visit_table_start(Table)
#ExFor:DocumentVisitor.visit_row_end(Row)
#ExFor:DocumentVisitor.visit_row_start(Row)
#ExFor:DocumentVisitor.visit_cell_start(Cell)
#ExFor:DocumentVisitor.visit_cell_end(Cell)
#ExFor:Row.accept(DocumentVisitor)
#ExFor:Row.first_cell
#ExFor:Row.get_text
#ExFor:Row.is_first_row
#ExFor:Row.last_cell
#ExFor:Row.parent_table
#ExSummary:Shows how to print the node structure of every table in a document.
def test_table_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.TableStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_table_to_text(visitor) #ExSkip
class TableStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered Table nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.visited_tables = io.StringIO()
self.visitor_is_inside_table = False
self.doc_traversal_depth = 0
def get_text(self):
return self.visited_tables.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document.
Runs that are not within tables are not recorded."""
if self.visitor_is_inside_table:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_table_start(self, table: aw.Table) -> aw.VisitorAction:
"""Called when a Table is encountered in the document."""
rows = 0
columns = 0
if table.rows.count > 0:
rows = table.rows.count
columns = table.first_row.count
self._indent_and_append_line("[Table start] Size: " + rows + "x" + columns)
self.doc_traversal_depth += 1
self.visitor_is_inside_table = True
return aw.VisitorAction.CONTINUE
def visit_table_end(self, table: aw.Table) -> aw.VisitorAction:
"""Called after all the child nodes of a Table node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Table end]")
self.visitor_is_inside_table = False
return aw.VisitorAction.CONTINUE
def visit_row_start(self, row: aw.Row) -> aw.VisitorAction:
"""Called when a Row node is encountered in the document."""
row_contents = row.get_text().rstrip("\u0007 ").replace("\u0007", ", ")
row_width = row.index_of(row.last_cell) + 1
row_index = row.parent_table.index_of(row)
if row.is_first_row and row.is_last_row:
row_status_in_table = "only"
elif row.is_first_row:
row_status_in_table = "first"
elif row.is_last_row:
row_status_in_table = "last"
else:
row_status_in_table = ""
if row_status_in_table != "":
row_status_in_table = f", the {row_status_in_table} row in this table,"
row_index += 1
self._indent_and_append_line(f"[Row start] Row #{row_index}{row_status_in_table} width {row_width}, \"{row_contents}\"")
self.doc_traversal_depth += 1
return aw.VisitorAction.CONTINUE
def visit_row_end(self, row: aw.Row) -> aw.VisitorAction:
"""Called after all the child nodes of a Row node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Row end]")
return aw.VisitorAction.CONTINUE
def visit_cell_start(self, cell: aw.Cell) -> aw.VisitorAction:
"""Called when a Cell node is encountered in the document."""
row = cell.parent_row
table = row.parent_table
if cell.is_first_cell and cell.is_last_cell:
cell_status_in_row = "only"
elif cell.is_first_cell:
cell_status_in_row = "first"
elif cell.is_last_cell:
cell_status_in_row = "last"
else:
cell_status_in_row = ""
if cell_status_in_row != "":
cell_status_in_row = f", the {cell_status_in_row} cell in this row"
self._indent_and_append_line(f"[Cell start] Row {table.index_of(row) + 1}, Col {row.index_of(cell) + 1}{cell_status_in_row}")
self.doc_traversal_depth += 1
return aw.VisitorAction.CONTINUE
def visit_cell_end(self, cell: aw.Cell) -> aw.VisitorAction:
"""Called after all the child nodes of a Cell node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Cell end]")
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the output, and indent it depending on how deep the visitor is
into the current table's tree of child nodes."""
for i in range(self.doc_traversal_depth):
self.visited_tables.write("| ")
self.visited_tables.write(text + "\n")
#ExEnd
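# A minimal sketch (not part of the original example) of running the same visitor over a
# table built in code rather than loaded from the sample file. The DocumentBuilder calls
# used here (start_table, insert_cell, write, end_row, end_table) are standard
# Aspose.Words for Python API; the 2x2 contents are invented for illustration.
#
#     doc = aw.Document()
#     builder = aw.DocumentBuilder(doc)
#     builder.start_table()
#     for row_values in (("A1", "B1"), ("A2", "B2")):
#         for value in row_values:
#             builder.insert_cell()
#             builder.write(value)
#         builder.end_row()
#     builder.end_table()
#     visitor = ExDocumentVisitor.TableStructurePrinter()
#     doc.accept(visitor)
#     print(visitor.get_text())  # expect a single "[Table start] Size: 2x2" block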
def _test_table_to_text(self, visitor: ExDocumentVisitor.TableStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[Table start]", visitor_text)
self.assertIn("[Table end]", visitor_text)
self.assertIn("[Row start]", visitor_text)
self.assertIn("[Row end]", visitor_text)
self.assertIn("[Cell start]", visitor_text)
self.assertIn("[Cell end]", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:DocumentVisitor.visit_comment_start(Comment)
#ExFor:DocumentVisitor.visit_comment_end(Comment)
#ExFor:DocumentVisitor.visit_comment_range_end(CommentRangeEnd)
#ExFor:DocumentVisitor.visit_comment_range_start(CommentRangeStart)
#ExSummary:Shows how to print the node structure of every comment and comment range in a document.
def test_comments_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.CommentStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_comments_to_text(visitor) #ExSkip
class CommentStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered Comment/CommentRange nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_comment = False
self.doc_traversal_depth = 0
def get_text(self):
return self.builder.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document.
A Run is only recorded if it is a child of a Comment or CommentRange node."""
if self.visitor_is_inside_comment:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_comment_range_start(self, comment_range_start: aw.CommentRangeStart) -> aw.VisitorAction:
"""Called when a CommentRangeStart node is encountered in the document."""
self._indent_and_append_line("[Comment range start] ID: " + comment_range_start.id)
self.doc_traversal_depth += 1
self.visitor_is_inside_comment = True
return aw.VisitorAction.CONTINUE
def visit_comment_range_end(self, comment_range_end: aw.CommentRangeEnd) -> aw.VisitorAction:
"""Called when a CommentRangeEnd node is encountered in the document."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Comment range end]")
self.visitor_is_inside_comment = False
return aw.VisitorAction.CONTINUE
def visit_comment_start(self, comment: aw.Comment) -> aw.VisitorAction:
"""Called when a Comment node is encountered in the document."""
self._indent_and_append_line(
    f"[Comment start] For comment range ID {comment.id}, By {comment.author} on {comment.date_time}")
self.doc_traversal_depth += 1
self.visitor_is_inside_comment = True
return aw.VisitorAction.CONTINUE
def visit_comment_end(self, comment: aw.Comment) -> aw.VisitorAction:
"""Called after all the child nodes of a Comment node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Comment end]")
self.visitor_is_inside_comment = False
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the output, and indent it depending on how deep the visitor is
into a comment/comment range's tree of child nodes."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_comments_to_text(self, visitor: ExDocumentVisitor.CommentStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[Comment range start]", visitor_text)
self.assertIn("[Comment range end]", visitor_text)
self.assertIn("[Comment start]", visitor_text)
self.assertIn("[Comment end]", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:DocumentVisitor.visit_field_start
#ExFor:DocumentVisitor.visit_field_end
#ExFor:DocumentVisitor.visit_field_separator
#ExSummary:Shows how to print the node structure of every field in a document.
def test_field_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.FieldStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_field_to_text(visitor) #ExSkip
class FieldStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered Field nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_field = False
self.doc_traversal_depth = 0
def get_text(self):
return self.builder.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
if self.visitor_is_inside_field:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_field_start(self, field_start: aw.fields.FieldStart) -> aw.VisitorAction:
"""Called when a FieldStart node is encountered in the document."""
self._indent_and_append_line("[Field start] FieldType: " + field_start.field_type)
self.doc_traversal_depth += 1
self.visitor_is_inside_field = True
return aw.VisitorAction.CONTINUE
def visit_field_end(self, field_end: aw.fields.FieldEnd) -> aw.VisitorAction:
"""Called when a FieldEnd node is encountered in the document."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Field end]")
self.visitor_is_inside_field = False
return aw.VisitorAction.CONTINUE
def visit_field_separator(self, field_separator: aw.fields.FieldSeparator) -> aw.VisitorAction:
"""Called when a FieldSeparator node is encountered in the document."""
self._indent_and_append_line("[FieldSeparator]")
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the output, and indent it depending on how deep the visitor is
into the field's tree of child nodes."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_field_to_text(self, visitor: ExDocumentVisitor.FieldStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[Field start]", visitor_text)
self.assertIn("[Field end]", visitor_text)
self.assertIn("[FieldSeparator]", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:DocumentVisitor.visit_header_footer_start(HeaderFooter)
#ExFor:DocumentVisitor.visit_header_footer_end(HeaderFooter)
#ExFor:HeaderFooter.accept(DocumentVisitor)
#ExFor:HeaderFooterCollection.to_array
#ExFor:Run.accept(DocumentVisitor)
#ExFor:Run.get_text
#ExSummary:Shows how to print the node structure of every header and footer in a document.
def test_header_footer_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.HeaderFooterStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
# An alternative way of accessing a document's header/footers section-by-section is by accessing the collection.
header_footers = doc.first_section.headers_footers.to_array()
self.assertEqual(3, len(header_footers))
self._test_header_footer_to_text(visitor) #ExSkip
class HeaderFooterStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered HeaderFooter nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_header_footer = False
self.doc_traversal_depth = 0
def get_text(self):
return self.builder.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
if self.visitor_is_inside_header_footer:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_header_footer_start(self, header_footer: aw.HeaderFooter) -> aw.VisitorAction:
"""Called when a HeaderFooter node is encountered in the document."""
self._indent_and_append_line("[HeaderFooter start] HeaderFooterType: " + header_footer.header_footer_type)
self.doc_traversal_depth += 1
self.visitor_is_inside_header_footer = True
return aw.VisitorAction.CONTINUE
def visit_header_footer_end(self, header_footer: aw.HeaderFooter) -> aw.VisitorAction:
"""Called after all the child nodes of a HeaderFooter node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[HeaderFooter end]")
self.visitor_is_inside_header_footer = False
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the output, and indent it depending on how deep the visitor is into the document tree."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_header_footer_to_text(self, visitor: ExDocumentVisitor.HeaderFooterStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[HeaderFooter start] HeaderFooterType: HeaderPrimary", visitor_text)
self.assertIn("[HeaderFooter end]", visitor_text)
self.assertIn("[HeaderFooter start] HeaderFooterType: HeaderFirst", visitor_text)
self.assertIn("[HeaderFooter start] HeaderFooterType: HeaderEven", visitor_text)
self.assertIn("[HeaderFooter start] HeaderFooterType: FooterPrimary", visitor_text)
self.assertIn("[HeaderFooter start] HeaderFooterType: FooterFirst", visitor_text)
self.assertIn("[HeaderFooter start] HeaderFooterType: FooterEven", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:DocumentVisitor.visit_editable_range_end(EditableRangeEnd)
#ExFor:DocumentVisitor.visit_editable_range_start(EditableRangeStart)
#ExSummary:Shows how to print the node structure of every editable range in a document.
def test_editable_range_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.EditableRangeStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_editable_range_to_text(visitor) #ExSkip
class EditableRangeStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered EditableRange nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_editable_range = False
self.doc_traversal_depth = 0
def get_text(self) -> str:
"""Gets the plain text of the document that was accumulated by the visitor."""
return self.builder.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
# Print the contents of runs, but only if they are inside an editable range.
if self.visitor_is_inside_editable_range:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_editable_range_start(self, editable_range_start: aw.EditableRangeStart) -> aw.VisitorAction:
"""Called when an EditableRange node is encountered in the document."""
self._indent_and_append_line("[EditableRange start] ID: " + editable_range_start.id + " Owner: " +
editable_range_start.editable_range.single_user)
self.doc_traversal_depth += 1
self.visitor_is_inside_editable_range = True
return aw.VisitorAction.CONTINUE
def visit_editable_range_end(self, editable_range_end: aw.EditableRangeEnd) -> aw.VisitorAction:
"""Called when the visiting of a EditableRange node is ended."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[EditableRange end]")
self.visitor_is_inside_editable_range = False
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the output and indent it depending on how deep the visitor is into the document tree."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_editable_range_to_text(self, visitor: ExDocumentVisitor.EditableRangeStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[EditableRange start]", visitor_text)
self.assertIn("[EditableRange end]", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:DocumentVisitor.visit_footnote_end(Footnote)
#ExFor:DocumentVisitor.visit_footnote_start(Footnote)
#ExFor:Footnote.accept(DocumentVisitor)
#ExSummary:Shows how to print the node structure of every footnote in a document.
def test_footnote_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.FootnoteStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_footnote_to_text(visitor) #ExSkip
class FootnoteStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered Footnote nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_footnote = False
self.doc_traversal_depth = 0
def get_text(self) -> str:
"""Gets the plain text of the document that was accumulated by the visitor."""
return self.builder.getvalue()
def visit_footnote_start(self, footnote: aw.Footnote) -> aw.VisitorAction:
"""Called when a Footnote node is encountered in the document."""
self._indent_and_append_line("[Footnote start] Type: " + footnote.footnote_type)
self.doc_traversal_depth += 1
self.visitor_is_inside_footnote = True
return aw.VisitorAction.CONTINUE
def visit_footnote_end(self, footnote: aw.Footnote) -> aw.VisitorAction:
"""Called after all the child nodes of a Footnote node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[Footnote end]")
self.visitor_is_inside_footnote = False
return aw.VisitorAction.CONTINUE
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
if self.visitor_is_inside_footnote:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the output and indent it depending on how deep the visitor is into the document tree."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_footnote_to_text(self, visitor: ExDocumentVisitor.FootnoteStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[Footnote start] Type: Footnote", visitor_text)
self.assertIn("[Footnote end]", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:DocumentVisitor.visit_office_math_end(OfficeMath)
#ExFor:DocumentVisitor.visit_office_math_start(OfficeMath)
#ExFor:MathObjectType
#ExFor:OfficeMath.accept(DocumentVisitor)
#ExFor:OfficeMath.math_object_type
#ExSummary:Shows how to print the node structure of every office math node in a document.
def test_office_math_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.OfficeMathStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_office_math_to_text(visitor) #ExSkip
class OfficeMathStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered OfficeMath nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_office_math = False
self.doc_traversal_depth = 0
def get_text(self) -> str:
"""Gets the plain text of the document that was accumulated by the visitor."""
return self.builder.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
if self.visitor_is_inside_office_math:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_office_math_start(self, office_math: aw.OfficeMath) -> aw.VisitorAction:
"""Called when an OfficeMath node is encountered in the document."""
self._indent_and_append_line("[OfficeMath start] Math object type: " + office_math.math_object_type)
self.doc_traversal_depth += 1
self.visitor_is_inside_office_math = True
return aw.VisitorAction.CONTINUE
def visit_office_math_end(self, office_math: aw.OfficeMath) -> aw.VisitorAction:
"""Called after all the child nodes of an OfficeMath node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[OfficeMath end]")
self.visitor_is_inside_office_math = False
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the StringBuilder and indent it depending on how deep the visitor is into the document tree."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_office_math_to_text(self, visitor: ExDocumentVisitor.OfficeMathStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[OfficeMath start] Math object type: OMathPara", visitor_text)
self.assertIn("[OfficeMath start] Math object type: OMath", visitor_text)
self.assertIn("[OfficeMath start] Math object type: Argument", visitor_text)
self.assertIn("[OfficeMath start] Math object type: Supercript", visitor_text)
self.assertIn("[OfficeMath start] Math object type: SuperscriptPart", visitor_text)
self.assertIn("[OfficeMath start] Math object type: Fraction", visitor_text)
self.assertIn("[OfficeMath start] Math object type: Numerator", visitor_text)
self.assertIn("[OfficeMath start] Math object type: Denominator", visitor_text)
self.assertIn("[OfficeMath end]", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:DocumentVisitor.visit_smart_tag_end(SmartTag)
#ExFor:DocumentVisitor.visit_smart_tag_start(SmartTag)
#ExSummary:Shows how to print the node structure of every smart tag in a document.
def test_smart_tag_to_text(self):
doc = aw.Document(MY_DIR + "Smart tags.doc")
visitor = ExDocumentVisitor.SmartTagStructurePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_smart_tag_to_text(visitor) #ExSkip
class SmartTagStructurePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered SmartTag nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_smart_tag = False
self.doc_traversal_depth = 0
def get_text(self) -> str:
"""Gets the plain text of the document that was accumulated by the visitor."""
return self.builder.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
if self.visitor_is_inside_smart_tag:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_smart_tag_start(self, smart_tag: aw.SmartTag) -> aw.VisitorAction:
"""Called when a SmartTag node is encountered in the document."""
self._indent_and_append_line("[SmartTag start] Name: " + smart_tag.element)
self.doc_traversal_depth += 1
self.visitor_is_inside_smart_tag = True
return aw.VisitorAction.CONTINUE
def visit_smart_tag_end(self, smart_tag: aw.SmartTag) -> aw.VisitorAction:
"""Called after all the child nodes of a SmartTag node have been visited."""
self.doc_traversal_depth -= 1
self._indent_and_append_line("[SmartTag end]")
self.visitor_is_inside_smart_tag = False
return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the StringBuilder and indent it depending on how deep the visitor is into the document tree."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_smart_tag_to_text(self, visitor: ExDocumentVisitor.SmartTagStructurePrinter):
visitor_text = visitor.get_text()
self.assertIn("[SmartTag start] Name: address", visitor_text)
self.assertIn("[SmartTag start] Name: Street", visitor_text)
self.assertIn("[SmartTag start] Name: PersonName", visitor_text)
self.assertIn("[SmartTag start] Name: title", visitor_text)
self.assertIn("[SmartTag start] Name: GivenName", visitor_text)
self.assertIn("[SmartTag start] Name: Sn", visitor_text)
self.assertIn("[SmartTag start] Name: stockticker", visitor_text)
self.assertIn("[SmartTag start] Name: date", visitor_text)
self.assertIn("[SmartTag end]", visitor_text)
self.assertIn("[Run]", visitor_text)
#ExStart
#ExFor:StructuredDocumentTag.accept(DocumentVisitor)
#ExFor:DocumentVisitor.visit_structured_document_tag_end(StructuredDocumentTag)
#ExFor:DocumentVisitor.visit_structured_document_tag_start(StructuredDocumentTag)
#ExSummary:Shows how to print the node structure of every structured document tag in a document.
def test_structured_document_tag_to_text(self):
doc = aw.Document(MY_DIR + "DocumentVisitor-compatible features.docx")
visitor = ExDocumentVisitor.StructuredDocumentTagNodePrinter()
# When we get a composite node to accept a document visitor, the visitor visits the accepting node,
# and then traverses all the node's children in a depth-first manner.
# The visitor can read and modify each visited node.
doc.accept(visitor)
print(visitor.get_text())
self._test_structured_document_tag_to_text(visitor) #ExSkip
class StructuredDocumentTagNodePrinter(aw.DocumentVisitor):
"""Traverses a node's non-binary tree of child nodes.
Creates a map in the form of a string of all encountered StructuredDocumentTag nodes and their children."""
def __init__(self):
aw.DocumentVisitor.__init__(self)
self.builder = io.StringIO()
self.visitor_is_inside_structured_document_tag = False
self.doc_traversal_depth = 0
def get_text(self) -> str:
"""Gets the plain text of the document that was accumulated by the visitor."""
return self.builder.getvalue()
def visit_run(self, run: aw.Run) -> aw.VisitorAction:
"""Called when a Run node is encountered in the document."""
if self.visitor_is_inside_structured_document_tag:
self._indent_and_append_line("[Run] \"" + run.get_text() + "\"")
return aw.VisitorAction.CONTINUE
def visit_structured_document_tag_start(self, sdt: aw.StructuredDocumentTag) -> aw.VisitorAction:
    """Called when a StructuredDocumentTag node is encountered in the document."""
    self._indent_and_append_line("[StructuredDocumentTag start] Title: " + sdt.title)
    self.doc_traversal_depth += 1
    self.visitor_is_inside_structured_document_tag = True
    return aw.VisitorAction.CONTINUE
def visit_structured_document_tag_end(self, sdt: aw.StructuredDocumentTag) -> aw.VisitorAction:
    """Called after all the child nodes of a StructuredDocumentTag node have been visited."""
    self.doc_traversal_depth -= 1
    self._indent_and_append_line("[StructuredDocumentTag end]")
    self.visitor_is_inside_structured_document_tag = False
    return aw.VisitorAction.CONTINUE
def _indent_and_append_line(self, text: str):
"""Append a line to the output and indent it depending on how deep the visitor is into the document tree."""
for i in range(self.doc_traversal_depth):
self.builder.write("| ")
self.builder.write(text + "\n")
#ExEnd
def _test_structured_document_tag_to_text(self, visitor: ExDocumentVisitor.StructuredDocumentTagNodePrinter):
visitor_text = visitor.get_text()
self.assertIn("[StructuredDocumentTag start]", visitor_text)
self.assertIn("[StructuredDocumentTag end]", visitor_text)
|
import parslepy
import parslepy.base
import parslepy.selectors
import lxml.cssselect
from nose.tools import *
from .tools import *
class TestInvalidParseletInit(object):
init_parselets = (
#{ "title": ".test #"}, # this does not raise SyntaxError in lxml<3
{ "title": "/h1[@]"},
{ "title": "h1", "paragraphs": [".//p[@class,'news']"]},
)
def test_invalid_parselet_init(self):
for parselet in self.init_parselets:
yield self.init_parselet_expect_syntax_error, parselet
@raises(SyntaxError)
def init_parselet_expect_syntax_error(self, parselet):
parslepy.Parselet(parselet)
class TestDefaultValidSelectors(object):
dsh = parslepy.base.DefaultSelectorHandler()
selectors = [
("div.content", lxml.etree.XPath),
(".content #bogus span.first", lxml.etree.XPath),
("div#main", lxml.etree.XPath),
("div[@id='main']", lxml.etree.XPath),
('div[@id="main"]', lxml.etree.XPath),
("div", lxml.etree.XPath),
("//div", lxml.etree.XPath),
("//a/@href", lxml.etree.XPath),
("img @src", lxml.etree.XPath),
("table tr[class='main']", lxml.etree.XPath),
("tr[2]", lxml.etree.XPath),
]
try:
from cssselect.parser import FunctionalPseudoElement
selectors.extend([
("img::attr(src)", lxml.etree.XPath),
])
except ImportError:
    pass
def test_selector_class(self):
for selector_string, target_class in self.selectors:
yield self.compare_selector_class, selector_string, target_class
def compare_selector_class(self, selector_string, target_class):
s = self.dsh.make(selector_string)
assert_is_instance(s, parslepy.selectors.Selector)
assert_is_instance(
s.selector, target_class,
"\n%s compiled to '%s' of type %s \n and is not an instance of %s" % (
selector_string, s.selector, type(s.selector), target_class)
)
class TestDefaultInvalidSelectors(object):
dsh = parslepy.selectors.DefaultSelectorHandler()
invalid_selectors = (
# these do not raise SyntaxError in lxml<3
#'# ',
#'.#',
#'#t-#',
'#t.',
'.///e',
'.//div class',
'.//div[@class="test]',
'div[]',
'.div[id@]',
'div[@]',
'span @',
'span@',
'.//span//',
)
def test_invalid_css_selectors(self):
for s in self.invalid_selectors:
yield self.make_selector_expect_syntax_error, s
@raises(SyntaxError)
def make_selector_expect_syntax_error(self, s):
self.dsh.make(s)
class TestXPathValidSelectors(object):
xsh = parslepy.selectors.XPathSelectorHandler()
selectors = (
"div.content",
"span[@id='main']",
'header[@id="main"]',
"div",
"//div",
"//a/@href",
"img/@src",
"./img/@src",
".//img/@alt",
"table/tr[@class='main']",
'//div[@id="main"]//tr[@class="item"]',
"tr[2]",
)
def test_selector_class(self):
for selector_string in self.selectors:
yield self.compare_selector_class, selector_string
def compare_selector_class(self, selector_string):
s = self.xsh.make(selector_string)
assert_is_instance(s, parslepy.selectors.Selector)
assert_is_instance(
s.selector, lxml.etree.XPath,
"\n%s compiled to '%s' of type %s \n and is not an instance of %s" % (
selector_string, s.selector, type(s.selector), lxml.etree.XPath)
)
class TestXPathInvalidSelectors(object):
xsh = parslepy.selectors.XPathSelectorHandler()
invalid_selectors = (
'.///e',
'.//div class',
'.//div[@class="test]',
'div[]',
'.div[id@]',
'div[@]',
'span//',
'span/@class/',
'.//span//',
)
def test_invalid_xpath_selectors(self):
for s in self.invalid_selectors:
yield self.make_selector_expect_syntax_error, s
@raises(SyntaxError)
def make_selector_expect_syntax_error(self, s):
self.xsh.make(s)
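# A minimal usage sketch (not part of the test suite) showing what a *valid* parselet does at
# extraction time, for contrast with the invalid selectors above. Parselet.extract on an lxml
# tree is parslepy's documented API; the HTML snippet is invented for illustration.
#
#     import lxml.etree
#     import parslepy
#     html = lxml.etree.HTML("<html><body><h1>Hi</h1><p>one</p><p>two</p></body></html>")
#     rules = parslepy.Parselet({"title": "h1", "paragraphs": ["p"]})
#     print(rules.extract(html))  # -> {'title': 'Hi', 'paragraphs': ['one', 'two']}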
|
import unittest
import datetime
import expediaRequester
"""
The data returned will differ from one run to the next,
so we only validate that we get an OK (200) response.
"""
apiKey = ""
class TestCases(unittest.TestCase):
"""
Base class for test cases
"""
def setUp(self):
self.client = expediaRequester.ExpediaRequester(apiKey)
class TestActivities(TestCases):
"""
Unit test activities API
"""
def test_activities(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
nextDay = tomorrow + datetime.timedelta(days=1)
code, json = self.client.activities("London",
datetime.datetime.strftime(tomorrow, "%Y-%m-%d"),
datetime.datetime.strftime(nextDay, "%Y-%m-%d"))
self.assertEqual(code, 200)
class testHotels(TestCases):
"""
Unit test hotels API
"""
def test_hotels(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
nextDay = tomorrow + datetime.timedelta(days=1)
code, json = self.client.hotels("London",
datetime.datetime.strftime(tomorrow, "%Y-%m-%d"),
datetime.datetime.strftime(nextDay, "%Y-%m-%d"), 3)
self.assertEqual(code, 200)
def test_hotel_reviews(self):
code, json = self.client.hotel_reviews( 234, True, "DATEASC", 0, 10, "Everyone")
self.assertEqual(code, 200)
class testCars(TestCases):
"""
Unit test cars API
"""
def test_cars(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
nextDay = tomorrow + datetime.timedelta(days=1)
code, json = self.client.cars_search(datetime.datetime.strftime(tomorrow, "%Y-%m-%d") , datetime.datetime.strftime(nextDay, "%Y-%m-%d"),
"SFO", "LAX", "price", 10, "All-American", "economy")
self.assertEqual(code, 200)
class testFlightsSearch(TestCases):
"""
Unit test flights search API
"""
def test_flights_search(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
nextDay = tomorrow + datetime.timedelta(days=1)
code, json = self.client.flights_search(datetime.datetime.strftime(tomorrow, "%Y-%m-%d") , datetime.datetime.strftime(nextDay, "%Y-%m-%d"), "SFO", "LAX", True, 2, "12", False, False, None, 15)
self.assertEqual(code, 200)
def test_flights_price_range_search(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
code, json = self.client.flights_price_range_search("SFO","LAX",datetime.datetime.strftime(tomorrow, "%Y-%m-%d"))
self.assertEqual(code, 200)
def test_flights_trends(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
code, json = self.client.flights_trends_and_predictions("SFO","LAX",datetime.datetime.strftime(tomorrow, "%Y-%m-%d"))
self.assertEqual(code, 200)
class testPackages(TestCases):
"""
Unit test packages
"""
def test_packages(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
nextDay = tomorrow + datetime.timedelta(days=1)
code, json = self.client.packages("SFO", "ORL",
datetime.datetime.strftime(tomorrow, "%Y-%m-%d"),
datetime.datetime.strftime(nextDay, "%Y-%m-%d"),
178294, None, 2, "5", False, 10, "coach", False)
self.assertEqual(code, 200)
class testHotelSearch(TestCases):
"""
Unit test hotels search
"""
def test_hotels_search(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
nextDay = tomorrow + datetime.timedelta(days=1)
code, json = self.client.hotel_search("SFO", None, None, None, False, "Inn", "0, 5, 1", None, False, 100, "mobileweb", datetime.datetime.strftime(tomorrow, "%Y-%m-%d"),
datetime.datetime.strftime(nextDay, "%Y-%m-%d"), "2", "2")
self.assertEqual(code, 200)
def test_hotels_offers(self):
tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
nextDay = tomorrow + datetime.timedelta(days=1)
code, json = self.client.hotel_offers(15490, "TOTAL_PRICE", "mobileweb",
datetime.datetime.strftime(tomorrow, "%Y-%m-%d"),
datetime.datetime.strftime(nextDay, "%Y-%m-%d"),
"2", "3, 4")
self.assertEqual(code, 200)
def test_hotels_info(self):
code, json = self.client.hotel_info(15490)
self.assertEqual(code, 200)
if __name__ == '__main__':
unittest.main()
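# Note (editorial): these tests call the live Expedia API, so apiKey above must be filled in
# with a valid key before running; with it left blank the requests are unlikely to return 200.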
|
from django.shortcuts import render, redirect
from siruco.db import Database
from django.http import HttpResponse
from datetime import date, datetime
from django.contrib import messages
import json
def reservasi(request):
response = {}
peran = session(request, 'peran')
if peran == "admin_satgas": # read all reservasi
isAdminSatgas = True
reservasilist = get_reservasi()
elif peran == "pengguna_publik": # read only its reservasi
isAdminSatgas = False
username = session(request, 'username')
reservasilist = get_reservasi_by_user(username)
else:
return redirect('/')
response['reservasilist'] = reservasilist
response['isAdminSatgas'] = isAdminSatgas
return render(request, 'reservasi.html', response)
def reservasi_create(request): # validates check-in/check-out dates
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
if (request.method == 'GET'):
response = {}
response['pasienlist'] = get_nik_pasien()
response['rslist'] = get_kode_rs()
return render(request, 'reservasi_create.html', response)
else :
rep = request.POST
if (rep.get('tgl_masuk') >= rep.get('tgl_keluar')):
messages.error(request, "Tanggal Keluar harus lebih lama dari Tanggal Masuk ❗")
response = {}
response['pasienlist'] = get_nik_pasien()
response['rslist'] = get_kode_rs()
return render(request, 'reservasi_create.html', response)
# print(rep.get('tgl_masuk'))
# print(rep.get('tgl_keluar'))
db = Database(schema='siruco')
db.query(f'''
INSERT INTO RESERVASI_RS VALUES
('{rep.get('nik')}',
'{rep.get('tgl_masuk')}',
'{rep.get('tgl_keluar')}',
'{rep.get('kode_rs')}',
'{rep.get('kode_ruangan')}',
'{rep.get('kode_bed')}');
''')
db.close()
return redirect('/faskes/reservasi/')
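# A hedged aside (not from the original code): interpolating request values straight into SQL
# with f-strings, as the queries in this module do, leaves the app open to SQL injection.
# Whether the custom Database wrapper supports bound parameters is an assumption; with a plain
# DB-API cursor the same insert would use placeholders, e.g.:
#
#     def insert_reservasi(cursor, rep):
#         """Hypothetical helper: the driver escapes the values."""
#         cursor.execute(
#             "INSERT INTO RESERVASI_RS VALUES (%s, %s, %s, %s, %s, %s);",
#             (rep.get('nik'), rep.get('tgl_masuk'), rep.get('tgl_keluar'),
#              rep.get('kode_rs'), rep.get('kode_ruangan'), rep.get('kode_bed')),
#         )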
def reservasi_update(request, pk): # validates check-in/check-out dates
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
nik = pk[:-8]
tglmasuk = pk[-4:] + '-' + pk[-6:-4] + '-' + pk[-8:-6]
if (request.method == 'GET'):
response = {}
response['reservasi'] = get_reservasi_by_nik_and_tgl(nik, tglmasuk)
return render(request, 'reservasi_update.html', response)
else :
rep = request.POST
if (datetime.strptime(tglmasuk, "%Y-%m-%d") >= datetime.strptime(rep.get('tgl_keluar'), "%Y-%m-%d")):
messages.error(request, "Tanggal Keluar harus lebih lama dari Tanggal Masuk ❗")
response = {}
response['reservasi'] = get_reservasi_by_nik_and_tgl(nik, tglmasuk)
return render(request, 'reservasi_update.html', response)
# print(rep.get('tgl_keluar'))
db = Database(schema='siruco')
db.query(f'''
UPDATE RESERVASI_RS
SET tglkeluar='{rep.get('tgl_keluar')}'
WHERE kodepasien='{nik}'
and tglmasuk='{tglmasuk}';
''')
db.close()
return redirect('/faskes/reservasi/')
def reservasi_delete(request, pk): # only deletes if the check-in date has not passed yet
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
nik = pk[:-8]
tglmasuk = pk[-4:] + pk[-6:-4] + pk[-8:-6]
if datetime.today() < datetime.strptime(tglmasuk, "%Y%m%d"):
db = Database(schema='siruco')
db.query(f'''
DELETE FROM RESERVASI_RS
WHERE kodepasien='{nik}'
and tglmasuk='{tglmasuk}';
''')
db.close()
return redirect('/faskes/reservasi/')
def faskes(request):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
response = {}
response['faskeslist'] = get_faskes()
return render(request, 'faskes.html', response)
def faskes_detail(request, pk):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
response = {}
response['faskes'] = get_faskes_by_kode(pk)
return render(request, 'faskes_detail.html', response)
def faskes_create(request):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
# generate a new faskes code
faskeslist = get_faskes()
newid = 0
for faskes in faskeslist :
oldid = int(faskes['kode'][2:])
if (oldid > newid):
newid = oldid
newkode = 'FK' + str(newid + 1)
if (request.method == 'GET'):
response = {}
response['newkode'] = newkode
return render(request, 'faskes_create.html', response)
else :
rep = request.POST
db = Database(schema='siruco')
db.query(f'''
INSERT INTO FASKES VALUES
('{newkode}',
'{rep.get('tipe')}',
'{rep.get('nama')}',
'{rep.get('statusmilik')}',
'{rep.get('jalan')}',
'{rep.get('kelurahan')}',
'{rep.get('kecamatan')}',
'{rep.get('kabkot')}',
'{rep.get('prov')}');
''')
db.close()
return redirect('/faskes/')
def faskes_update(request, pk):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
if (request.method == 'GET'):
response = {}
response['faskes'] = get_faskes_by_kode(pk)
return render(request, 'faskes_update.html', response)
else :
rep = request.POST
db = Database(schema='siruco')
db.query(f'''
UPDATE FASKES
SET tipe='{rep.get('tipe')}',
nama='{rep.get('nama')}',
statusmilik='{rep.get('statusmilik')}',
jalan='{rep.get('jalan')}',
kelurahan='{rep.get('kelurahan')}',
kecamatan='{rep.get('kecamatan')}',
kabkot='{rep.get('kabkot')}',
prov='{rep.get('prov')}'
WHERE kode='{pk}';
''')
db.close()
return redirect('/faskes/')
def faskes_delete(request, pk):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
db = Database(schema='siruco')
db.query(f'''
DELETE FROM FASKES
WHERE kode='{pk}';
''')
db.close()
return redirect('/faskes/')
def jadwal(request):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
response = {}
response['jadwallist'] = get_jadwal()
return render(request, 'jadwal.html', response)
def jadwal_create(request):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
if (request.method == 'GET'):
response = {}
response['kodefaskeslist'] = get_kode_faskes()
return render(request, 'jadwal_create.html', response)
else :
rep = request.POST
db = Database(schema='siruco')
db.query(f'''
INSERT INTO JADWAL VALUES
('{rep.get('kode_faskes')}',
'{rep.get('shift')}',
'{rep.get('tanggal')}');
''')
db.close()
return redirect('/faskes/jadwal/')
def rumahsakit(request):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
response = {}
rumahsakit = get_rumahsakit()
response['rslist'] = rumahsakit
return render(request, 'rumahsakit.html', response)
def rumahsakit_create(request):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
if (request.method == 'GET'):
response = {}
response['kodefaskeslist'] = get_kode_faskes()
return render(request, 'rumahsakit_create.html', response)
else :
rep = request.POST
isrujukan = 1 if rep.get('isrujukan') else 0
db = Database(schema='siruco')
db.query(f'''
INSERT INTO RUMAH_SAKIT VALUES
('{rep.get('kode_faskes')}',
'{isrujukan}');
''')
db.close()
return redirect('/faskes/rumahsakit/')
def rumahsakit_update(request, pk):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
if (request.method == 'GET'):
response = {}
response['rumahsakit'] = get_rumahsakit_by_kode(pk)
return render(request, 'rumahsakit_update.html', response)
else :
rep = request.POST
# print(rep.get('kode_faskes'))
isrujukan = 1 if rep.get('isrujukan') else 0
db = Database(schema='siruco')
db.query(f'''
UPDATE RUMAH_SAKIT
SET isrujukan='{isrujukan}'
WHERE kode_faskes='{pk}';
''')
db.close()
return redirect('/faskes/rumahsakit/')
def transaksi(request): # TODO: should the TRS00 prefix be shown? How should date/time/amount be formatted?
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
response = {}
response['transaksilist'] = get_transaksi()
return render(request, 'transaksi.html', response)
def transaksi_update(request, pk):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
if (request.method == 'GET'):
response = {}
response['transaksi'] = get_transaksi_by_id(pk)
return render(request, 'transaksi_update.html', response)
else :
rep = request.POST
db = Database(schema='siruco')
db.query(f'''
UPDATE TRANSAKSI_RS
SET statusbayar='{rep.get('statusbayar')}'
WHERE idtransaksi='{pk}';
''')
db.close()
return redirect('/faskes/transaksi/')
def transaksi_delete(request, pk):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
id = str(pk)
db = Database(schema='siruco')
db.query(f'''
DELETE FROM TRANSAKSI_RS
WHERE idtransaksi=CAST({id} AS VARCHAR);
''')
db.close()
return redirect('/faskes/transaksi/')
# api views
def reservasi_ruangan_api(request, koders):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
ruanganlist = get_koderuangan_by_koders(koders)
data_list = json.dumps(ruanganlist)
return HttpResponse(data_list, content_type="text/json-comment-filtered")
def reservasi_bed_api(request, koders, koderuangan):
peran = session(request, 'peran')
if peran != "admin_satgas":
return redirect('/')
bedlist = get_kodebed_by_koders_koderuangan(koders, koderuangan)
data_list = json.dumps(bedlist)
return HttpResponse(data_list, content_type="text/json-comment-filtered")
# Helper Functions
def get_transaksi():
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM TRANSAKSI_RS;
''')
db.close()
result = [{
"idtransaksi": query[r][0],
"kodepasien": query[r][1],
"tanggalpembayaran": "-" if query[r][2]==None else query[r][2],
"waktupembayaran": "-" if query[r][3]==None else query[r][3],
"tglmasuk": query[r][4],
"totalbiaya": parsebiaya(query[r][5]),
"statusbayar": query[r][6],
} for r in range(len(query))
]
# print(result)
return result
def parsebiaya(biaya):
remain = str(biaya)[-3:]
biaya = biaya//1000
result = remain
while(biaya > 0):
remain = str(biaya)[-3:]
biaya = biaya//1000
result = remain + "." + result
return "Rp" + result
def get_transaksi_by_id(pk):
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM TRANSAKSI_RS
WHERE idtransaksi='{pk}';
''')
db.close()
result = {
"id": query[0][0],
"kode": query[0][1],
"tanggalpembayaran": "-" if query[0][2]==None else query[0][2],
"waktupembayaran": "-" if query[0][3]==None else query[0][3],
"tglmasuk": query[0][4],
"totalbiaya": parsebiaya(query[0][5]),
"statusbayar": query[0][6],
}
# print(result)
return result
def get_rumahsakit():
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM RUMAH_SAKIT;
''')
db.close()
result = [{
"kode_faskes": query[r][0],
"isrujukan": True if query[r][1]=="1" else False,
} for r in range(len(query))
]
# print(result)
return result
def get_rumahsakit_by_kode(kode):
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM RUMAH_SAKIT
WHERE kode_faskes='{kode}';
''')
db.close()
result = {
"kode_faskes": query[0][0],
"isrujukan": True if query[0][1]=="1" else False
}
# print(result)
return result
def get_jadwal():
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM JADWAL;
''')
db.close()
result = [{
"kode_faskes": query[r][0],
"shift": query[r][1],
"tanggal": query[r][2],
} for r in range(len(query))
]
# print(result)
return result
def get_faskes():
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM FASKES;
''')
db.close()
result = [{
"kode": query[r][0],
"tipe": query[r][1],
"nama": query[r][2],
"statusmilik": query[r][3],
"jalan": query[r][4],
"kelurahan": query[r][5],
"kecamatan": query[r][6],
"kabkot": query[r][7],
"prov": query[r][8],
} for r in range(len(query))
]
# print(result)
return result
def get_kode_faskes():
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM FASKES;
''')
db.close()
result = []
for item in query:
result.append(item[0])
# print(result)
return result
def get_faskes_by_kode(kode):
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM FASKES
WHERE kode='{kode}';
''')
db.close()
result = {
"kode": query[0][0],
"tipe": query[0][1],
"nama": query[0][2],
"statusmilik": query[0][3],
"jalan": query[0][4],
"kelurahan": query[0][5],
"kecamatan": query[0][6],
"kabkot": query[0][7],
"prov": query[0][8],
}
# print(result)
return result
def get_reservasi():
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM RESERVASI_RS;
''')
db.close()
result = [{
"nik": query[r][0],
"tglmsk": query[r][1],
"tglklr": query[r][2],
"koders": query[r][3],
"koderuang": query[r][4],
"kodebed": query[r][5],
"idreservasi": (query[r][0] + query[r][1].strftime("%d%m%Y")),
"isDue": False if date.today() < query[r][1] else True
} for r in range(len(query))
]
# print(result)
return result
def get_reservasi_by_nik_and_tgl(nik, tgl_masuk):
db = Database(schema='siruco')
query = db.query(f'''
SELECT * FROM RESERVASI_RS
WHERE kodepasien='{nik}' and tglmasuk='{tgl_masuk}';
''')
db.close()
result = {
"nik": query[0][0],
"tglmsk": query[0][1],
"tglklr": query[0][2],
"koders": query[0][3],
"koderuang": query[0][4],
"kodebed": query[0][5],
"idreservasi": (query[0][0] + query[0][1].strftime("%d%m%Y")),
}
# print(result)
return result
def get_reservasi_by_user(username):
db = Database(schema='siruco')
query = db.query(f'''
SELECT kodepasien, tglmasuk, tglkeluar, koders, koderuangan, kodebed
FROM RESERVASI_RS R
JOIN PASIEN P ON P.nik=R.kodepasien
JOIN PENGGUNA_PUBLIK PP ON P.idpendaftar=PP.username
WHERE PP.username='{username}';
''')
db.close()
result = [{
"nik": query[r][0],
"tglmsk": query[r][1],
"tglklr": query[r][2],
"koders": query[r][3],
"koderuang": query[r][4],
"kodebed": query[r][5],
"idreservasi": (query[r][0] + query[r][1].strftime("%d%m%Y")),
} for r in range(len(query))
]
# print(result)
return result
def get_nik_pasien():
db = Database(schema='siruco')
query = db.query(f'''
SELECT nik FROM PASIEN;
''')
db.close()
result = []
for item in query:
result.append(item[0])
# print(result)
return result
def get_kode_rs():
db = Database(schema='siruco')
query = db.query(f'''
SELECT kode_faskes FROM RUMAH_SAKIT;
''')
db.close()
result = []
for item in query:
result.append(item[0])
# print(result)
return result
def get_koderuangan_by_koders(koders):
db = Database(schema='siruco')
query = db.query(f'''
SELECT koderuangan FROM RUANGAN_RS
WHERE koders='{koders}';
''')
db.close()
result = []
for item in query:
result.append(item[0])
# print(result)
return result
def get_kodebed_by_koders_koderuangan(koders, koderuangan):
db = Database(schema='siruco')
query = db.query(f'''
SELECT kodebed FROM BED_RS
WHERE koders='{koders}'
and koderuangan='{koderuangan}';
''')
db.close()
result = []
for item in query:
result.append(item[0])
# print(result)
return result
def session(http_handler, key, value=None):
if value:
http_handler.session[key] = value
return http_handler
else:
returning = None
try:
returning = http_handler.session[key]
except Exception:
pass
return returning
|
class DerivationStep:
"""Step within an axiomatic derivation.
Parameters
----------
content: logics.classes.propositional.Formula
The formula present in the step
justification: str
The name of the rule or axiom used in obtaining this step. May be 'premise' as well.
on_steps: list of int
Steps to which the derivation rule was applied to (may be empty). These are 0-based (first step of the
derivation is step 0).
Examples
--------
A derivation step will have the following form
>>> from logics.classes.propositional.proof_theories import DerivationStep
>>> from logics.utils.parsers import classical_parser
>>> s = DerivationStep(content=classical_parser.parse('p or ~q'), justification='mp', on_steps=[0, 2])
>>> s # note the format in which it prints to the console
['∨', ['p'], ['~', ['q']]]; mp; [0, 2]
Also note that you can parse an entire derivation, which will be comprised of DerivationStep
>>> classical_parser.parse_derivation("p → (p → p); ax1")
>>> deriv = classical_parser.parse_derivation("p → (p → p); ax1")
>>> deriv[0]
['→', ['p'], ['→', ['p'], ['p']]]; ax1; []
>>> type(deriv[0])
<class 'logics.classes.propositional.proof_theories.derivation.DerivationStep'>
See Also
--------
logics.utils.parsers.classical_parser
"""
def __init__(self, content, justification, on_steps=None):
self.content = content
self.justification = justification
if on_steps is None:
on_steps = []
self.on_steps = on_steps
def unparse(self, parser):
return f"{parser.unparse(self.content)}; {self.justification}; {self.on_steps}"
def __eq__(self, other):
# done with __dict__ because it will enable comparisons of instances of classes that extend this one
return isinstance(other, DerivationStep) and (self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return f"{self.content}; {self.justification}; {self.on_steps}"
class Derivation(list):
"""An axiomatic or natural deduction derivation.
Extends `list`. A derivation is a list of DerivationStep. Each step contains formula, justification, [on_steps]
Examples
--------
>>> from logics.classes.propositional.proof_theories import DerivationStep, Derivation
>>> from logics.classes.propositional import Formula
>>> derivation = Derivation([
... DerivationStep(Formula(['∧', ['p'], ['q']]), 'premise'),
... DerivationStep(Formula(['p']), 'E∧', [0])
... ])
>>> derivation
0. ['∧', ['p'], ['q']]; premise; []
1. ['p']; E∧; [0]
would be a derivation of p from p ∧ q, using the rule E∧ ((A ^ B) / A). Note that you can also obtain a Derivation
using the parser, e.g.
>>> from logics.utils.parsers import classical_parser
>>> classical_parser.parse_derivation('''
... p ∧ q; premise
... p; E∧; [0]
... ''')
0. ['∧', ['p'], ['q']]; premise; []
1. ['p']; E∧; [0]
Slicing a Derivation also returns a Derivation
>>> derivation[1:]
0. ['p']; E∧; [0]
>>> type(derivation[1:])
<class 'logics.classes.propositional.proof_theories.derivation.Derivation'>
"""
@property
def premises(self):
"""Returns a list of the premises of the derivation (the DerivationStep's with a justification of 'premise')"""
return [step.content for step in self if step.justification == 'premise']
@property
def conclusion(self):
"""Returns the formula in the last step of the derivation (or ``None`` if the derivation is empty)"""
if not self:
return None
return self[-1].content
def print_derivation(self, parser):
"""For even prettier printing of a derivation in the console, you can pass a parser and it will unparse the
formula
Examples
--------
>>> from logics.classes.propositional.proof_theories import DerivationStep, Derivation
>>> from logics.utils.parsers import classical_parser
>>> from logics.classes.propositional import Formula
>>> derivation = Derivation([
... DerivationStep(Formula(['∧', ['p'], ['q']]), 'premise'),
... DerivationStep(Formula(['p']), 'E∧', [0])
... ])
>>> derivation.print_derivation(classical_parser)
0. (p ∧ q); premise; []
1. p; E∧; [0]
"""
for step_index in range(len(self)):
if hasattr(self[step_index], 'open_suppositions'):
print("| " * len(self[step_index].open_suppositions) + f"{step_index}. "
f"{parser.unparse(self[step_index].content)}; "
f"{self[step_index].justification}; "
f"{self[step_index].on_steps}")
else:
print(f"{step_index}. {self[step_index].unparse(parser)}")
def __repr__(self):
"""For prettier printing of a derivation in the console"""
string = ''
for step_index in range(len(self)):
if hasattr(self[step_index], 'open_suppositions'):
string += "| " * len(self[step_index].open_suppositions) + \
f"{step_index}. {self[step_index].content}; " \
f"{self[step_index].justification}; " \
f"{self[step_index].on_steps}\n"
else:
string += f"{step_index}. {self[step_index]}\n"
return string
def __getitem__(self, item):
"""If you slice a Derivation (e.g. derivation[:4]) you get a Derivation"""
if isinstance(item, slice):
return Derivation(super().__getitem__(item))
return super().__getitem__(item)
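# A brief illustration (comments only, not from the original module) of the premises and
# conclusion properties, using the derivation built in the Derivation docstring above:
#
#     >>> derivation.premises
#     [['∧', ['p'], ['q']]]
#     >>> derivation.conclusion
#     ['p']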
|
<filename>3dmap-master/data/translator/Translator/kmlSorter.py
### IMPORTS ###
## KML ##
from copy import deepcopy
from math import floor
import sys, argparse
from time import time
from lxml import etree
from pykml import parser as kml_parser
from pykml.factory import KML_ElementMaker as KML
import numpy as np
def locate_centroid(coords):
#points must be in clockwise (or counterclockwise) order on the boundary for this to work
#coords = coords[ind]
##create shifted array so that it starts with the second point
#coords_loop = np.zeros(coords.shape)
#coords_loop[:-1,:] = coords[1:,:]
#coords_loop[-1,:] = coords[1,:]
##compute area
#area = 0.5 * np.sum(coords[:,0]*coords_loop[:,1] - coords[:,1]*coords_loop[:,0])
##compute centroid
#cx = 1./(6*area) * np.sum((coords[:,0]+coords_loop[:,0])*(coords[:,0]*coords_loop[:,1] - coords[:,1]*coords_loop[:,0]))
#cy = 1./(6*area) * np.sum((coords[:,1]+coords_loop[:,1])*(coords[:,0]*coords_loop[:,1] - coords[:,1]*coords_loop[:,0]))
#take mean of points instead as approximation
cx, cy = np.mean(coords[:,0]), np.mean(coords[:,1])
return cx,cy
def get_grid_numbers(coords,n):
max_x, min_x = np.max(coords[:,0]), np.min(coords[:,0])
max_y, min_y = np.max(coords[:,1]), np.min(coords[:,1])
grid_size_x = (max_x - min_x) / float(n)
grid_size_y = (max_y - min_y) / float(n)
grid_numbers = np.zeros(coords.shape[0])
for i,coord in enumerate(coords):
x,y = coord[0], coord[1]
y_ind = floor((y - min_y) / grid_size_y)
x_ind = floor((x - min_x) / grid_size_x)
# make sure coords that sit exactly on the maximum still fall in a valid grid cell
if y_ind == n: y_ind = n - 1
if x_ind == n: x_ind = n - 1
grid_numbers[i] = y_ind * n + x_ind
return grid_numbers
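# Worked example (illustration only): with n=2 the bounding box splits into a 2x2 grid whose
# cells are numbered row-major from the minimum corner, so a centroid in the lower-left
# quadrant gets grid number 0 (y_ind=0, x_ind=0) and one in the upper-right gets
# 1 * 2 + 1 = 3; points exactly on the maximum edge are clamped into the last cell.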
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Given a KML file containing placemarks represented as polygons,\n\
this script sorts the placemarks based on their centroid coordinates\n\
and writes them into a new KML file.')
parser.add_argument('-i','--input', required=True, metavar='<input_file>',
help='Path to the inputfile.')
parser.add_argument('-o','--output', required=True, metavar='<output_file>',
help='Specifies the output file.')
parser.add_argument('-n', type=int, default=10, required=False, metavar='<grid_size>',
help='Specifies the grid size nxn. (default: n=10)')
#parse arguments
args = parser.parse_args()
kml_file = args.input
sys.stdout.write('Reading file %s\n' % kml_file)
sys.stdout.flush()
name = None
with open(kml_file) as f:
#first retrieving any header for use when creating new file
header = f.readline().encode('utf-8')
#parsing the kml document
doc = kml_parser.parse(f).getroot().Document.Folder
name = doc.name
sys.stdout.write('Sorting placemarks based on coordinates.\n')
sys.stdout.flush()
start = time()
building_coordinates = []
#iterate through all children of folder which are a placemark
for pm in doc.Placemark:
#get to the coordinates
coordinates_string = pm.Polygon.outerBoundaryIs.LinearRing.coordinates.text
coordinates = coordinates_string.split()
xy_coords = []
for c in coordinates:
coord = list(map(float,c.split(',')[:2]))
xy_coords.append(coord)
cx,cy = locate_centroid(np.array(xy_coords))
building_coordinates.append([cx,cy])
building_coordinates = np.array(building_coordinates)
#calculate the number of the grid quadrant a polygon lies in
if args.n > 1: grid_numbers = get_grid_numbers(building_coordinates,args.n)
# sort indices primarily by x, then by y
sort_indices = np.lexsort((building_coordinates[:,1],building_coordinates[:,0]))
#create the new KML document structure for adding the placemarks
new_doc = KML.kml(
KML.Document(
KML.Folder(
name
)
)
)
children = doc.Placemark
#iterate through all children of folder which are a placemark
for i in sort_indices:
placemark = children[i]
if args.n > 1:
cur_grid_number = grid_numbers[i]
#create description tag
descrp = KML.description('grid_n=%i' % int(cur_grid_number))
placemark.append(descrp)
pm = deepcopy(placemark)
new_doc.Document.Folder.append(pm)
sys.stdout.write('%i buildings sorted in %.2f sec.\n' % (len(sort_indices),time()-start))
sys.stdout.write('Writing new kml file.\n')
sys.stdout.flush()
new_file = open(args.output,'wb')
new_file.write(header)
new_kml = etree.tostring(new_doc, pretty_print=True)
#print(new_kml)
new_file.write(new_kml)
new_file.close()
if __name__ == "__main__":
main()
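# Example invocation (hypothetical file names):
#
#     python kmlSorter.py -i buildings.kml -o buildings_sorted.kml -n 10
#
# This reads buildings.kml, tags each placemark with a grid_n description, and writes the
# placemarks sorted by centroid x (then y) to buildings_sorted.kml.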
|