# text stringlengths 0 1.05M | meta dict |
# |---|---|
# NOTE(review): the two lines above are dataset-export table residue, not part of
# the original script; commented out so the file remains valid Python.
from framework.deprecated.controllers import AdvCommScheduler, CheckpointDriving, MathScheduler, AudioRewardLogic, VisualSearchTask
from framework.latentmodule import LatentModule
from framework.convenience import ConvenienceFunctions
from framework.ui_elements.EventWatcher import EventWatcher
from framework.ui_elements.ScrollPresenter import ScrollPresenter
from framework.ui_elements.AudioPresenter import AudioPresenter
from framework.ui_elements.TextPresenter import TextPresenter
from panda3d.core import TextProperties,TextPropertiesManager
from direct.gui.DirectGui import *
import framework.speech
import random
import time
import copy
# =======================
# === Subtask classes ===
# =======================
class WarningLightTask(LatentModule):
    """
    A warning light (red/blue/green) that sporadically turns on/off and demands a
    response that can be configured (press a button when turning on / turning off /
    stopping to blink).

    Scoring is delegated to the supplied reward logic; event markers in the range
    markerbase..markerbase+11 are emitted for log/EEG alignment.
    Has some support for a cue status object/task which is currently unused here
    (was for MBF2A).
    """
    def __init__(self,
                 # general properties
                 rewardlogic,                                  # reward handling logic (must provide score_event())
                 watcher = None,                               # optional event watcher (created lazily in run() if None)
                 focused = True,                               # whether this task is currently focused (scored)
                 markerbase = 1,                               # markers markerbase..markerbase+11 are used
                 event_interval=lambda: random.uniform(45,85), # interval between two successive events
                 # cueing control
                 cueobj = None,                                # an object that might have .iscued set to true
                 # graphics parameters
                 pic_off='light_off.png',                      # picture to display for the disabled light
                 pic_on='light_on.png',                        # picture to display for the enabled light
                 screen_offset=0,                              # offset to position this icon on one of the three screens
                 pic_params={'pos':[0,0],'scale':0.15},        # parameters for the picture() command
                 snd_params={'volume':0.3,'direction':0.0},    # parameters for the sound() command
                 # response handling
                 snd_hit='click2s.wav',                        # sound when the user correctly detected the warning state
                 snd_wrongcue='xBuzz01.wav',                   # the sound that is overlaid with the buzzer when the response was wrong due to incorrect cueing
                 response_key='sysmonv-check',                 # key to press in case of an event
                 timeout=2.5,                                  # response timeout for the user
                 hit_reward=0,                                 # reward if hit
                 miss_penalty=-20,                             # penalty if missed
                 false_penalty=-5,                             # penalty for false positives
                 # ticking support
                 pic_tick_off=None,                            # optional blinking in off status
                 pic_tick_on=None,                             # optional blinking in on status
                 tick_rate = None,                             # tick rate (duration in non-tick status, duration in tick status)
                 ):
        LatentModule.__init__(self)
        self.rewardlogic = rewardlogic
        self.focused = focused
        self.markerbase = markerbase
        self.event_interval = event_interval
        self.pic_off = pic_off
        self.pic_on = pic_on
        # deep-copy the mutable parameter dicts: the default dicts are shared across
        # all instances, and run() mutates pic_params['pos'] in place. snd_params is
        # copied as well, for consistency with WarningSoundTask (which mutates it).
        self.pic_params = copy.deepcopy(pic_params)
        self.snd_params = copy.deepcopy(snd_params)
        self.snd_wrongcue = snd_wrongcue
        self.snd_hit = snd_hit
        self.response_key = response_key
        self.timeout = timeout
        self.hit_reward = hit_reward
        self.miss_penalty = miss_penalty
        self.false_penalty = false_penalty
        self.screen_offset = screen_offset
        self.cueobj = cueobj
        self.control = False
        # fall back to the static pictures if no dedicated tick pictures were given
        self.pic_tick_on = pic_tick_on if pic_tick_on is not None else pic_on
        self.pic_tick_off = pic_tick_off if pic_tick_off is not None else pic_off
        self.tick_rate = tick_rate
        self.watcher = watcher

    def run(self):
        # position the icon on the assigned screen; note that run() is expected to
        # be launched only once per instance, since this shifts the position in place
        self.pic_params['pos'][0] += self.screen_offset
        # pre-cache the media files...
        self.precache_picture(self.pic_on)
        self.precache_picture(self.pic_off)
        self.precache_picture(self.pic_tick_on)
        self.precache_picture(self.pic_tick_off)
        self.precache_sound(self.snd_wrongcue)
        self.precache_sound(self.snd_hit)
        # track whether the control (modifier) key is currently held; read by correct()
        self.accept('control',self.oncontrol,[True])
        self.accept('control-up',self.oncontrol,[False])
        # set up an event watcher (taking care of timeouts and inappropriate responses)
        if self.watcher is None:
            self.watcher = EventWatcher(eventtype=self.response_key,
                                        handleduration=self.timeout,
                                        defaulthandler=self.false_detection)
        while True:
            # show the "off" picture for the inter-event interval
            if self.tick_rate is not None:
                t_end = time.time()+self.event_interval()
                while time.time() < t_end:
                    self.marker(self.markerbase+10)
                    # show the off/tick picture
                    self.picture(self.pic_tick_off, self.tick_rate[1], **self.pic_params)
                    # show the off picture
                    self.picture(self.pic_off, self.tick_rate[0], **self.pic_params)
            else:
                # just show the off picture
                self.picture(self.pic_off, self.event_interval(), **self.pic_params)
            # start watching for a response
            self.watcher.watch_for(self.correct, self.timeout, self.missed)
            self.marker(self.markerbase if self.focused else (self.markerbase+1))
            if self.tick_rate is not None:
                t_end = time.time()+self.timeout
                while time.time() < t_end:
                    self.marker(self.markerbase+11)
                    # show the on/tick picture
                    self.picture(self.pic_tick_on, self.tick_rate[1], **self.pic_params)
                    # show the on picture
                    self.picture(self.pic_on, self.tick_rate[0], **self.pic_params)
            else:
                # just show the "on" picture
                self.picture(self.pic_on, self.timeout, **self.pic_params)
            self.marker(self.markerbase+2)
            # reset the cue status
            if self.cueobj is not None:
                self.cueobj.iscued = False

    def oncontrol(self,status):
        # record the state of the control key (True while held)
        self.control = status

    def missed(self):
        # response timeout expired: penalize only if this task is focused
        if self.focused:
            self.marker(self.markerbase+3)
            self.rewardlogic.score_event(self.miss_penalty)

    def false_detection(self):
        # user pressed the response key while no event was pending
        self.marker(self.markerbase+4)
        self.rewardlogic.score_event(self.false_penalty)

    def correct(self):
        # user responded within the timeout window
        if self.focused:
            if ((self.cueobj is not None) and self.cueobj.iscued):
                self.marker(self.markerbase+5 if self.control else self.markerbase+6)
            else:
                self.marker(self.markerbase+7 if self.control else self.markerbase+8)
            if self.control == ((self.cueobj is not None) and self.cueobj.iscued):
                # the user correctly spots the warning event
                self.sound(self.snd_hit,**self.snd_params)
                self.rewardlogic.score_event(self.hit_reward)
            else:
                # the user spotted it, but didn't get the cue right
                self.sound(self.snd_wrongcue,**self.snd_params)
                self.rewardlogic.score_event(self.false_penalty)
        else:
            self.marker(self.markerbase+9)
            # the user spotted it, but was not tasked to do so...
            self.rewardlogic.score_event(self.false_penalty)

    def flash(self,status,duration=1):
        # briefly display the on or off picture (e.g. for demonstration purposes)
        self.picture(self.pic_on if status else self.pic_off,duration=duration, **self.pic_params)
class WarningSoundTask(LatentModule):
    """
    A warning sound that turns on sporadically. Demands that the subject responds in
    some way when the sound goes on / off or stops "ticking" (if a tick sound).

    Scoring is delegated to the supplied reward logic; event markers in the range
    markerbase..markerbase+11 are emitted for log/EEG alignment.
    Has some support for a cue status object/task which is currently unused here
    (was for MBF2A).
    """
    def __init__(self,
                 # general properties
                 rewardlogic,                                  # reward handling logic (must provide score_event())
                 watcher = None,                               # response event watcher (created lazily in run() if None)
                 focused = True,                               # whether this task is currently focused (scored)
                 markerbase = 1,                               # markers markerbase..markerbase+11 are used
                 event_interval=lambda: random.uniform(45,85), # interval between two successive events
                 # cueing control
                 cueobj = None,                                # an object that might have .iscued set to true
                 # audio parameters
                 screen_offset=0,                              # offset to position this source on one of the three screens
                 snd_on='xHyprBlip.wav',                       # sound to play in case of an event
                 snd_params={'volume':0.25,'direction':0.0},   # parameters for the sound() command
                 # response handling
                 snd_hit='click2s.wav',                        # sound when the user correctly detected the warning state
                 snd_wrongcue='xBuzz01.wav',                   # the sound that is overlaid with the buzzer when the response was wrong due to incorrect cueing
                 response_key='sysmona-check',                 # key to press in case of an event
                 timeout=5.5,                                  # response timeout for the user
                 hit_reward=0,                                 # reward if hit
                 miss_penalty=-20,                             # penalty if missed
                 false_penalty=-5,                             # penalty for false positives
                 # ticking support
                 snd_tick_off=None,                            # optional ticking in off status
                 snd_tick_on=None,                             # optional ticking in on status
                 tick_rate = None,                             # tick rate (duration in non-tick status, duration in tick status)
                 ):
        LatentModule.__init__(self)
        self.rewardlogic = rewardlogic
        self.focused = focused
        self.markerbase = markerbase
        self.event_interval = event_interval
        self.snd_on = snd_on
        # deep-copy the mutable parameter dict: the default dict is shared across all
        # instances, and run() mutates snd_params['direction'] in place (this used to
        # be assigned twice; the plain reference assignment was redundant)
        self.snd_params = copy.deepcopy(snd_params)
        self.snd_wrongcue = snd_wrongcue
        self.snd_hit = snd_hit
        self.response_key = response_key
        self.timeout = timeout
        self.hit_reward = hit_reward
        self.miss_penalty = miss_penalty
        self.false_penalty = false_penalty
        self.screen_offset = screen_offset
        self.cueobj = cueobj
        self.control = False
        self.snd_tick_off = snd_tick_off
        self.snd_tick_on = snd_tick_on
        self.tick_rate = tick_rate
        self.watcher = watcher

    def run(self):
        # position the sound source on the assigned screen; note that run() is expected
        # to be launched only once per instance, since this shifts the direction in place
        self.snd_params['direction'] += self.screen_offset
        # pre-cache the media files...
        self.precache_sound(self.snd_on)
        self.precache_sound(self.snd_tick_on)
        self.precache_sound(self.snd_tick_off)
        self.precache_sound(self.snd_wrongcue)
        self.precache_sound(self.snd_hit)
        # track whether the control (modifier) key is currently held; read by correct()
        self.accept('control',self.oncontrol,[True])
        self.accept('control-up',self.oncontrol,[False])
        # set up an event watcher (taking care of timeouts and inappropriate responses)
        if self.watcher is None:
            self.watcher = EventWatcher(eventtype=self.response_key,
                                        handleduration=self.timeout,
                                        defaulthandler=self.false_detection)
        while True:
            # off status
            if self.tick_rate is not None:
                t_end = time.time()+self.event_interval()
                while time.time() < t_end:
                    self.marker(self.markerbase+10)
                    # play the off/tick sound (guarded like the on/tick branch below,
                    # since snd_tick_off defaults to None)
                    if self.snd_tick_off is not None:
                        self.sound(self.snd_tick_off, **self.snd_params)
                    self.sleep(self.tick_rate[1])
                    # wait
                    self.sleep(self.tick_rate[0])
            else:
                # wait
                self.sleep(self.event_interval())
            # start watching for a response
            self.watcher.watch_for(self.correct, self.timeout, self.missed)
            self.marker(self.markerbase if self.focused else (self.markerbase+1))
            if self.tick_rate is not None:
                t_end = time.time()+self.timeout
                while time.time() < t_end:
                    self.marker(self.markerbase+11)
                    # play the on/tick sound
                    if self.snd_tick_on is not None:
                        self.sound(self.snd_tick_on,**self.snd_params)
                    self.sleep(self.tick_rate[1])
                    # wait
                    self.sleep(self.tick_rate[0])
            else:
                # just play the "on" sound
                if self.snd_on is not None:
                    self.sound(self.snd_on, **self.snd_params)
                self.sleep(self.timeout)
            self.marker(self.markerbase+2)
            # reset the cue status
            if self.cueobj is not None:
                self.cueobj.iscued = False

    def oncontrol(self,status):
        # record the state of the control key (True while held)
        self.control = status

    def missed(self):
        # response timeout expired: penalize only if this task is focused
        if self.focused:
            self.marker(self.markerbase+3)
            self.rewardlogic.score_event(self.miss_penalty)

    def false_detection(self):
        # user pressed the response key while no event was pending
        self.marker(self.markerbase+4)
        self.rewardlogic.score_event(self.false_penalty)

    def correct(self):
        # user responded within the timeout window
        if self.focused:
            if ((self.cueobj is not None) and self.cueobj.iscued):
                self.marker(self.markerbase+5 if self.control else self.markerbase+6)
            else:
                self.marker(self.markerbase+7 if self.control else self.markerbase+8)
            if self.control == ((self.cueobj is not None) and self.cueobj.iscued):
                # the user correctly spots the warning event
                self.sound(self.snd_hit,**self.snd_params)
                self.rewardlogic.score_event(self.hit_reward)
            else:
                # the user spotted it, but didn't get the cue right
                self.sound(self.snd_wrongcue,**self.snd_params)
                self.rewardlogic.score_event(self.false_penalty)
        else:
            self.marker(self.markerbase+9)
            # the user spotted it, but was not tasked to do so...
            self.rewardlogic.score_event(self.false_penalty)

    def flash(self,filename):
        # play an arbitrary sound with this task's spatial/volume parameters
        self.sound(filename, **self.snd_params)
# ============================
# === Main task definition ===
# ============================
class Main(LatentModule):
    def __init__(self):
        """
        Configure all parameters for the MBF2 experiment-B main task: block/condition
        schedule, score logic, the per-subtask parameter dicts (visual/auditory system
        monitoring, comm, math, satellite map, driving), UI button layouts, and misc
        global settings. This is pure configuration except for the font loads inside
        the button parameter dicts and the global text-highlight style registered with
        Panda3D at the end.
        NOTE(review): relies on the Panda3D builtins `loader` and `messenger` being
        injected into builtins by ShowBase — confirm the framework guarantees this
        before __init__ runs.
        """
        LatentModule.__init__(self)
        # ===============================
        # === block design parameters ===
        # ===============================
        self.randseed = 11463               # some initial randseed for the experiment; note that this should be different for each subject (None = random)
        self.uiblocks = 24                  # number of blocks with different UI permutation: should be a multiple of 6
        self.focus_per_layout = 8           # number of focus conditions within a UI layout block
        self.rest_every = 3                 # insert a rest period every k UI blocks
        self.focus_duration = lambda: random.uniform(30,50) # duration of a focus block (was: 30-50)
        self.initial_rest_time = 5          # initial rest time at the beginning of a new UI layout block
        # human-readable names for each task code (used for on-screen instructions)
        self.tasknames = {'sysmonv':'visual system monitoring','sysmona':'auditory system monitoring','comma':'auditory communciations','commv':'text communications','math':'mathematics','satmap':'satellite map','drive':'driving task'}
        # the focus conditions actually used in this study (single tasks and dual-task pairs)
        self.conditions = ['sysmonv-sysmona','math-satmap','math-drive','sysmona-drive','sysmona-satmap','sysmonv','sysmona','satmap','drive','math']
        self.bottom_up_probability = 0.5    # probability that the switch stimulus is bottom-up
        # (this is the full set of conditions that we're not using any more)
        # self.conditions = ['sysmonv-sysmona','commv-comma','math-satmap','math-drive','comma-satmap','comma-drive','comma-sysmona','sysmona-drive','sysmona-satmap','sysmonv','sysmona','commv','comma','satmap','drive','math']
        # ==============================
        # === score logic parameters ===
        # ==============================
        self.score_params = {'initial_score':0,                 # the initial score
                             'sound_params':{'direction':-0.7}, # properties of the score response sound
                             'gain_file':'ding.wav',            # sound file per point
                             'loss_file':'xBuzz01-rev.wav',     # sound file for losses
                             'none_file':'click.wav',           # file to play if no reward
                             'buzz_volume':0.4,                 # volume of the buzz (multiplied by the amount of loss)
                             'gain_volume':0.5,                 # volume of the gain sound
                             'ding_interval':0.1,               # interval at which successive gain sounds are played... (if score is > 1)
                             # NOTE(review): '\s' is not a recognized escape, so Python keeps
                             # the backslash verbatim and this resolves to the intended path
                             # C:\Studies\DAS\scoretable.txt; consider a raw string or '\\s'
                             'scorefile':'C:\\Studies\\DAS\scoretable.txt'} # this is where the scores are logged
        self.false_response_penalty = -1    # penalty due to false response in visual/auditory system monitoring
        # ===========================================
        # === visual system monitoring parameters ===
        # ===========================================
        self.sysmonv_rect = [-0.4,0.4,0.55,0.9]   # screen rect of the visual sysmon panel
        self.sysmonv_timeout = 3
        self.light_scale = 0.1                    # scale of each warning-light icon
        self.light_offset = 0.175                 # horizontal spacing between lights
        self.light_x = 0.09                       # x anchor around which the lights are laid out
        # per-light parameter dicts (passed to WarningLightTask; see that class for the keys)
        self.redlight_params = {'markerbase':1,                                 # markers markerbase..markerbase+6 are used
                                'event_interval':lambda: random.uniform(15,35), # interval between two successive events
                                'focused':False,
                                'pic_off':'buzzer-grey.png',                    # picture to display for the disabled light
                                'pic_on':'buzzer-red-real.png',                 # picture to display for the enabled light
                                'snd_hit':'xClick01.wav',                       # sound when the user correctly detected the warning state
                                'pic_params':{'pos':[self.light_x-2*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
                                'response_key':'sysmonv-check',                 # key to press in case of an event
                                'timeout':2.5,                                  # response timeout for the user
                                'hit_reward':4,                                 # reward if hit
                                'miss_penalty':-2,                              # penalty if missed
                                'false_penalty':-1,                             # penalty for false positives
                                }
        self.greenlight_params = {'markerbase':20,                              # markers markerbase..markerbase+6 are used
                                'event_interval':lambda: random.uniform(21,41), # interval between two successive events
                                'focused':False,
                                'pic_off':'buzzer.png',                         # picture to display for the disabled light
                                'pic_on':'buzzer-grey.png',                     # picture to display for the enabled light
                                'snd_hit':'xClick01.wav',                       # sound when the user correctly detected the warning state
                                'pic_params':{'pos':[self.light_x-1*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
                                'response_key':'sysmonv-check',                 # key to press in case of an event
                                'timeout':2.5,                                  # response timeout for the user
                                'hit_reward':4,                                 # reward if hit
                                'miss_penalty':-2,                              # penalty if missed
                                'false_penalty':-1,                             # penalty for false positives
                                }
        self.bluelight_params = {'markerbase':40,                               # markers markerbase..markerbase+6 are used
                                'event_interval':lambda: random.uniform(19,44), # interval between two successive events
                                'focused':False,
                                'pic_off':'buzzer-grey.png',                    # picture to display for the disabled light
                                'pic_on':'buzzer-grey.png',                     # picture to display for the enabled light
                                'snd_hit':'xClick01.wav',                       # sound when the user correctly detected the warning state
                                'pic_params':{'pos':[self.light_x+0*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
                                'response_key':'sysmonv-check',                 # key to press in case of an event
                                'timeout':2.75,                                 # response timeout for the user
                                'hit_reward':4,                                 # reward if hit
                                'miss_penalty':-2,                              # penalty if missed
                                'false_penalty':-1,                             # penalty for false positives
                                'pic_tick_off':'buzzer-blue.png',               # picture to display for the disabled light (blinking)
                                'tick_rate':[1.2,0.1],                          # off-state blink rate
                                }
        self.yellowlight_params = {'markerbase':60,                             # markers markerbase..markerbase+6 are used
                                'event_interval':lambda: random.uniform(40,70), # interval between two successive events
                                'focused':False,
                                'pic_off':'buzzer-grey.png',                    # picture to display for the disabled light
                                'pic_on':'buzzer-yellow.png',                   # picture to display for the enabled light
                                'snd_hit':'xClick01.wav',                       # sound when the user correctly detected the warning state
                                'pic_params':{'pos':[self.light_x+1*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
                                'response_key':'sysmonv-check',                 # key to press in case of an event
                                'timeout':2.5,                                  # response timeout for the user
                                'hit_reward':4,                                 # reward if hit
                                'miss_penalty':-2,                              # penalty if missed
                                'false_penalty':-1                              # penalty for false positives
                                }
        # DirectButton parameters/position for the visual sysmon "Check" button
        self.button_sysmonv_par = {'frameSize':(-4.5,4.5,-0.45,0.95),'text':"Check",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
        self.button_sysmonv_pos = [0,0.63]
        # =============================================
        # === auditory system monitoring parameters ===
        # =============================================
        self.sysmona_timeout = 3
        self.sysmona_rect = [0.1,0.4,-0.34,-0.64]
        # parameter dicts passed to WarningSoundTask (see that class for the keys)
        self.warnsound_params = {'markerbase':80,                                # markers markerbase..markerbase+6 are used
                                'event_interval':lambda: random.uniform(15,35),  # interval between two successive events
                                'focused':False,
                                'snd_on':'buzzz.wav',                            # sound to play in case of an event
                                'response_key':'sysmona-check',                  # key to press in case of an event
                                'timeout':5.5,                                   # response timeout for the user
                                'hit_reward':4,                                  # reward if hit
                                'miss_penalty':-2,                               # penalty if missed
                                'false_penalty':-1,                              # penalty for false positives
                                }
        self.ticksound_params = {'markerbase':100,                               # markers markerbase..markerbase+6 are used
                                'event_interval':lambda: random.uniform(19,40),  # interval between two successive events
                                'snd_params':{'volume':0.2,'direction':0.0},     # parameters for the sound() command
                                'focused':False,
                                'snd_on':None,                                   # no "on" sound: the event is the ticking stopping
                                'snd_tick_off':'xTick.wav',                      # ticking sound in the off state
                                'response_key':'sysmona-check',                  # key to press in case of an event
                                'timeout':6.5,                                   # response timeout for the user
                                'hit_reward':6,                                  # reward if hit
                                'miss_penalty':-2,                               # penalty if missed
                                'false_penalty':-1,                              # penalty for false positives
                                'tick_rate':[0.7,0.1],                           # rate of the ticking...
                                }
        # DirectButton parameters/position for the auditory sysmon "Check" button
        self.button_sysmona_par = {'frameSize':(-2,2,-0.5,1),'text':'"Check"','scale':.075,'text_font':loader.loadFont('arial.ttf')}
        self.button_sysmona_pos = [0.25,-0.34]
        # ==============================
        # === auditory comm elements ===
        # ==============================
        self.voice_params = {'direction':0,'volume':1}
        # NOTE(review): 'message_interval' appears twice in this literal; the later
        # entry (uniform(5,8)) silently wins in a dict literal — confirm intent.
        self.commaud_params = {'markerbase':400,                                            # base marker offset
                               'message_interval': lambda: random.uniform(7,8),             # interval between message presentations
                               'response_timeout':6,                                        # response timeout...
                               'lull_time': lambda: random.uniform(30,90),                  # duration of lulls, in seconds (drawn per lull)
                               'situation_time': lambda: random.uniform(25,45),             # duration of developing situations, in seconds (drawn per situation)
                               'clearafter': 5,                                             # clear the presenter after this many messages
                               'message_interval': lambda: random.uniform(5,8),             # message interval, in s (drawn per message)
                               'other_callsign_fraction': lambda: random.uniform(0.3,0.5),  # fraction of messages that are for other callsigns (out of all messages presented) (drawn per situation)
                               'no_callsign_fraction': lambda: random.uniform(0.25,0.35),   # fraction, out of the messages for "other callsigns", of messages that have no callsign (drawn per situation)
                               'time_fraction_until_questions': lambda: random.uniform(0.1,0.2), # the fraction of time into the situation until the first question comes up (drawn per situation)
                               # in the tutorial mode, this should probably be close to zero
                               'questioned_fraction': lambda: random.uniform(0.6,0.8),      # fraction of targeted messages that incur questions
                               }
        self.button_comma_par = {'frameSize':(-2,2,-0.5,1),'text':'"Roger"','scale':.075,'text_font':loader.loadFont('arial.ttf')}
        self.button_comma_pos = [-0.25,-0.34]
        # ============================
        # === visual comm elements ===
        # ============================
        self.scroll_pos = [-0.475,-0.4,-0.18]
        self.scroll_params = {'width':28,'scale':0.035,'numlines':4,'height':4}
        # NOTE(review): 'message_interval' also appears twice here; uniform(4,6) wins.
        self.commvis_params = {'markerbase':300,                                            # base marker offset
                               'clearafter': 5,                                             # clear the presenter after this many messages
                               'message_interval': lambda: random.uniform(5,8),             # message interval, in s (drawn per message)
                               'response_timeout':5,                                        # response timeout...
                               'lull_time': lambda: random.uniform(30,90),                  # duration of lulls, in seconds (drawn per lull)
                               'situation_time': lambda: random.uniform(25,45),             # duration of developing situations, in seconds (drawn per situation)
                               'message_interval': lambda: random.uniform(4,6),             # message interval, in s (drawn per message)
                               'other_callsign_fraction': lambda: random.uniform(0.3,0.5),  # fraction of messages that are for other callsigns (out of all messages presented) (drawn per situation)
                               'no_callsign_fraction': lambda: random.uniform(0.25,0.35),   # fraction, out of the messages for "other callsigns", of messages that have no callsign (drawn per situation)
                               'time_fraction_until_questions': lambda: random.uniform(0.1,0.2), # the fraction of time into the situation until the first question comes up (drawn per situation)
                               # in the tutorial mode, this should probably be close to zero
                               'questioned_fraction': lambda: random.uniform(0.6,0.8),      # fraction of targeted messages that incur questions
                               }
        # yes/no/skip response buttons for the text comm task
        self.button_commv_par_y = {'frameSize':(-1.2,1.2,-0.35,0.85),'text':"Yes",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
        self.button_commv_par_n = {'frameSize':(-1,1,-0.35,0.85),'text':"No",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
        self.button_commv_par_s = {'frameSize':(-1.65,1.65,-0.35,0.85),'text':"Skip",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
        self.button_commv_pos_y = [-0.05,-0.44,-0.22]
        self.button_commv_pos_n = [0.15,-0.44,-0.22]
        self.button_commv_pos_s = [0.375,-0.44,-0.22]
        # =======================
        # === math task setup ===
        # =======================
        self.numpad_topleft = [-0.4,0.7]          # top-left corner of the numpad
        self.math_rect = [-0.52,0.52,0.9,0.15]
        self.math_params = {'difficulty': 2,                                    # difficulty level of the problems (determines the size of involved numbers)
                            'focused':True,
                            'problem_interval': lambda: random.uniform(3,12),   # delay before a new problem appears after the previous one has been solved
                            'response_timeout': 10.0,                           # time within which the subject may respond to a problem
                            'gain_correct':5,
                            'loss_incorrect':-2,
                            'numpad_gridspacing': [0.16,-0.16],                 # spacing of the button grid
                            'numpad_buttonsize': [0.75,0.75],                   # size of the buttons
                            'numpad_textscale': 0.15                            # scale of the text
                            }
        self.math_display_par = {'scale':0.04, 'textcolor':[1,1,1,1],'framecolor':[0,0,0,1],'width':9,'height':10}
        self.math_display_pos = [0.12,0.67]
        # ================================
        # === satellite map task setup ===
        # ================================
        self.satmap_frame = [0.35,0.65,0.57,0.925]  # the display region in which to draw everything
        self.satmap_rect = [-0.54,0.54,0.9,0.12]    # the display region in which to draw everything
        self.satmap_params = {'background':'satellite_baseline.png',            # background image to use
                              'frame_boundary':0.2,                             # (invisible) zone around the display region in which things can move around and spawn
                              'focused':False,
                              # parameters of the target/non-target item processes
                              'clutter_params':{'pixelated':True,
                                                'num_items':30},                # parameters for the clutter process
                              'target_params':{'pixelated':True,
                                               'num_items':1,
                                               'item_speed':lambda: random.uniform(0.1,0.25),
                                               'item_spiral':lambda: [random.uniform(0,3.14),random.uniform(0.0075,0.0095),random.uniform(0.06,0.07)], # perform a spiraling motion with the given radius and angular velocity
                                               },                               # parameters for the target process
                              'intro_text':'Find the spiraling object!',        # the text that should be displayed before the script starts
                              # situational control
                              'target_probability':0.5,                         # probability of a new situation being a target situation (vs. non-target situation)
                              'target_duration':lambda: random.uniform(3,6),    # duration of a target situation
                              'nontarget_duration':lambda: random.uniform(5,15),# duration of a non-target situation
                              # end conditions
                              'end_trials':1000000,                             # number of situations to produce (note: this is not the number of targets)
                              'end_timeout':1000000,                            # lifetime of this stream, in seconds (the stream ends if the trials are exhausted)
                              # response control
                              'response_event':'satmap-target',                 # the event that is generated when the user presses the response button
                              'loss_misstarget':0,                              # the loss incurred by missing a target
                              'loss_nontarget':-1,                              # the loss incurred by a false detection
                              'gain_target':4,                                  # the gain incurred by correctly spotting a target
                              }
        # this button is drawn into the satmap and can currently not be clicked
        self.button_satmap_par = {'pos':(0.31,0,0.4),'frameSize':(-2.4,2.4,-0.6,1.1),'sortOrder':10,'text':"Target",'scale':.075,'text_font':loader.loadFont('arial.ttf'),'command':messenger.send,'extraArgs':['satmap-target'],'rolloverSound':None,'clickSound':None}
        self.button_satmap_pos = [0,0]
        # this button is in 3-screen space and can be clicked; it is behind the other button
        self.button_satmap2_par = {'frameSize':(-2.5,2.5,-0.4,0.9),'text':"",'scale':.075,'text_font':loader.loadFont('arial.ttf'),'command':messenger.send,'extraArgs':['satmap-target'],'rolloverSound':None,'clickSound':None}
        self.button_satmap2_pos = [0.31,0.77]
        # ===============================
        # === city driving task setup ===
        # ===============================
        self.drive_frame = [0.35,0.65,0.2,0.55]
        self.drive_rect = [-0.54,0.54,0.12,-0.65]
        self.drive_params = {'focused':False,
                             'show_checkpoints':False,
                             # media
                             'envmodel':'big\\citty.egg',           # the environment model to use
                             'trucksound':"Diesel_Truck_idle2.wav", # loopable truck sound....
                             'trucksound_volume':0.25,              # volume of the sound
                             'trucksound_direction':0,              # direction relative to listener
                             'target_model':"moneybag-rev.egg",     # model of the target object
                             'target_scale':0.01,                   # scale of the target model
                             'target_offset':0.2,                   # y offset for the target object
                             # checkpoint logic
                             'points':[[-248.91,-380.77,4.812],[0,0,0]], # the sequence of nav targets...
                             'radius':10,                           # proximity to checkpoint at which it is considered reached... (meters)
                             # end conditions
                             'end_timeout':100000,                  # end the task after this time
                             # movement parameters
                             'acceleration':0.5,                    # acceleration during manual driving
                             'friction':0.95,                       # friction coefficient
                             'torque':1,                            # actually angular velocity during turning
                             'height':0.7}
        self.button_drive_par = {'frameSize':(-2.5,2.5,-0.4,0.9),'text':"Report",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
        self.button_drive_pos = [0.31,0.025]
        # ============================
        # === main task parameters ===
        # ============================
        # focus stimuli (salient "bottom-up" switch cues, one per task)
        self.bu_drive_img = {'image':'salient_warning.png',  # bottom-up driving task
                             'scale':0.25}
        self.bu_satmap_img = {'image':'salient_warning.png', # bottom-up satmap task
                              'scale':0.25}
        self.bu_math_img = {'image':'salient_warning.png',   # bottom-up math task
                            'scale':0.15}
        self.bu_sysv_img = {'image':'salient_warning.png',   # bottom-up sysmonv task
                            'scale':0.15}
        self.bu_sysmona_img = {'image':'salient_warning.png',# bottom-up sysmona task
                               'scale':0.15}
        self.bu_comma_img = {'image':'salient_warning.png',  # bottom-up comma task
                             'scale':0.15}
        self.initial_layout_time = 5        # initial time after layout switch
        # callsign setup
        self.callsign_file = 'callsigns.txt'
        self.numcallsigns = 6               # only the first k callsigns from the file are used
        # misc parameters
        self.screen_offsets = [-1.13,0,1.13] # the three screen offsets for UI permutation...
        self.developer = True                # developer mode: skips the tutorial/welcome screens
        # voice control
        self.voice_icon_enlarge_duration = 0.5
        self.voice_icon_enlarge_size = 0.12
        self.allow_speech = True
        # set up some global text highlighting functionality (registered once,
        # globally, under the name "highlight")
        tpHighlight = TextProperties()
        tpHighlight.setTextColor(1, 0, 0, 1)
        tpHighlight.setSlant(0.3)
        tpMgr = TextPropertiesManager.getGlobalPtr()
        tpMgr.setProperties("highlight", tpHighlight)
        # bci control
        self.notification_cutoff = 0.2      # if the probability that a message was noticed is smaller than this, fire off a message
        self.notice_probability = 0.5       # this is the bci variable
        self.notice_probability_cumulant = 0.5 # this is a smoothed version of the bci variable
        self.notice_probability_history_mixin = 0.6 # this is an update factor that mixes in previous notice-probability estimates (from earlier messages) to get a smoothed update for the current one
        self.notification_snd = 'xBleep.wav'
        # inter-block pauses
        self.pause_duration = lambda: random.uniform(40,60)
        # ambience sound setup
        self.ambience_sound = 'media\\ambience\\nyc_amb2.wav'
        self.ambience_volume = 0.1
        # list of UI frames created later (e.g. in run()); empty at construction time
        self.frames = []
def run(self):
    """Main experiment procedure for MBF2 experiment B.

    Builds a randomized schedule of UI layout blocks (permutations of the three
    screens) and, within each layout block, a shuffled sequence of focus
    conditions.  For every layout block it instantiates all subtasks (driving,
    satmap, visual & auditory system monitoring, visual & auditory comm, math),
    cycles through the focus conditions — announcing each switch with either a
    bottom-up (salient) or top-down (instruction) stimulus — and finally tears
    all subtasks down again.  Rest blocks are inserted every self.rest_every
    layout blocks.  Runs until the schedule is exhausted or an exception
    aborts it; cleanup happens in the finally clause either way.
    """
    try:
        # init the randseed
        if self.randseed is not None:
            print "WARNING: Randomization of the experiment is currently bypassed."
            random.seed(self.randseed)
            self.marker(30000+self.randseed)  # log the seed so the session is reproducible
        # =================================
        # === Block schedule generation ===
        # =================================
        # generate the UI block schedule (all 6 permutations of 3 screens, repeated & shuffled)
        layouts = [[0,1,2],[0,2,1],[1,0,2],[1,2,0],[2,0,1],[2,1,0]]
        if self.uiblocks % len(layouts) > 0:
            raise Exception('The # of UI blocks should be a multiple of 6')
        layouts = layouts*(self.uiblocks/len(layouts))  # Py2 integer division
        random.shuffle(layouts)
        # determine the sequence of focus conditions for each layout block
        conditions = self.conditions*(1+self.uiblocks*self.focus_per_layout/len(self.conditions))
        conditions = conditions[:self.uiblocks*self.focus_per_layout]
        random.shuffle(conditions)
        # re-group them by layout
        focus_conditions = []
        for k in range(len(layouts)):
            focus_conditions.append(conditions[k*self.focus_per_layout : (1+k)*self.focus_per_layout])
            if (k+1) % self.rest_every == 0:
                focus_conditions[k].append('') # append resting... ('' = no task focused)
        # pre-pend rest to the first block
        focus_conditions[0].insert(0,'')
        # ================
        # === Tutorial ===
        # ================
        if not self.developer:
            self.write('Welcome to the MBF2 experiment B.')
            self.write('Press the space bar when you are ready.','space')
        # ===============================
        # === One-time initialization ===
        # ===============================
        # set up the reward logic
        self.rewardlogic = AudioRewardLogic(**self.score_params)
        # load callsign table (first self.numcallsigns entries only)
        self.callsigns = []
        with open('media\\'+self.callsign_file,'r') as f:
            for line in f:
                self.callsigns.append(line.strip())
        self.callsigns = self.callsigns[:self.numcallsigns]
        # start some ambience sound loop
        self.ambience = self.sound(self.ambience_sound,looping=True,volume=self.ambience_volume,direction=0)
        # init speech control (best-effort; falls back to touch screen)
        if self.allow_speech:
            try:
                framework.speech.listenfor(['roger','check','yes','no','skip'],self.onspeech)
            except:
                print "Could not initialiate speech control; falling back to touch screen only."
        # initialize question counters (single-element lists so they can be mutated by reference)
        self.num_question_uv = [0]
        self.num_question_lv = [0]
        self.num_question_au = [0]
        # =======================
        # === block main loop ===
        # =======================
        # for each UI layout block...
        for k in range(len(layouts)):
            if (k+1) % self.rest_every == 0:
                # insert pause
                self.marker(1701)
                self.write("You may now rest for a while...",3,scale=0.04,pos=[0,0.4])
                self.show_score()
                # main rest block: just sleep and let the center task do the rest
                duration = self.pause_duration()
                if self.waitfor('f9', duration):
                    # experimenter pressed f9: hard-pause scoring until f9 again (or timeout)
                    self.rewardlogic.paused = True
                    self.marker(900)
                    self.write("Pausing now. Please press f9 again to continue.",10,scale=0.04,pos=[0,0.4],block=False)
                    self.waitfor('f9', 10000)
                    self.rewardlogic.paused = False
                self.marker(19)  # NOTE(review): placement relative to the f9 branch reconstructed — confirm marker 19 semantics
                self.sound('nice_bell.wav')
                self.write("The rest block has now ended.",2,scale=0.04,pos=[0,0.4])
            # =======================================
            # === New layout block initialization ===
            # =======================================
            if not self.developer:
                for i in [3,2,1]:
                    self.write('New block begins in '+str(i))
            self.marker(400+k)
            layout = layouts[k]
            # WARNING -- these are abstract & subject to layout permutation (names referring to some reference unpermuted layout)
            left = self.screen_offsets[layout[0]]
            center = self.screen_offsets[layout[1]]
            right = self.screen_offsets[layout[2]]
            # instantiate the center drive task
            frameofs = center/3.35
            drive_frame = [self.drive_frame[0] + frameofs,self.drive_frame[1] + frameofs,self.drive_frame[2],self.drive_frame[3]]
            drive_rect = [self.drive_rect[0] + center,self.drive_rect[1] + center,self.drive_rect[2],self.drive_rect[3]]
            self.drive = self.launch(CheckpointDriving(frame=drive_frame,text_pos=[center,-0.55],**self.drive_params))
            self.button_drive = DirectButton(command=messenger.send,extraArgs=['drive-report'],rolloverSound=None,clickSound=None,
                                             pos=(self.button_drive_pos[0]+center,0,self.button_drive_pos[1]),**self.button_drive_par)
            # instantiate the satmap task
            frameofs = center/3.35
            satmap_frame = [self.satmap_frame[0] + frameofs,self.satmap_frame[1] + frameofs,self.satmap_frame[2],self.satmap_frame[3]]
            satmap_rect = [self.satmap_rect[0] + center,self.satmap_rect[1] + center,self.satmap_rect[2],self.satmap_rect[3]]
            self.satmap = self.launch(VisualSearchTask(self.rewardlogic,
                                                       frame=satmap_frame,
                                                       button_params=self.button_satmap_par,**self.satmap_params))
            self.button_satmap2 = DirectButton(pos=(self.button_satmap2_pos[0]+center,0,self.button_satmap2_pos[1]),**self.button_satmap2_par)
            # instantiate visual monitoring task (four warning lights sharing one response watcher)
            sysmonv_rect = [self.sysmonv_rect[0] + right,self.sysmonv_rect[1] + right,self.sysmonv_rect[2],self.sysmonv_rect[3]]
            self.vismonwatcher = EventWatcher(eventtype='sysmonv-check',
                                              handleduration=self.sysmonv_timeout,
                                              defaulthandler=self.sysmonv_false_detection)
            self.redlight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,watcher=self.vismonwatcher,**self.redlight_params))
            self.greenlight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,watcher=self.vismonwatcher,**self.greenlight_params))
            self.bluelight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,watcher=self.vismonwatcher,**self.bluelight_params))
            self.yellowlight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,**self.yellowlight_params))
            self.button_sysmonv = DirectButton(command=messenger.send,extraArgs=['sysmonv-check'],rolloverSound=None,clickSound=None,
                                               pos=(self.button_sysmonv_pos[0]+right,0,self.button_sysmonv_pos[1]),**self.button_sysmonv_par)
            # instantiate the auditory monitoring task
            sysmona_rect = [self.sysmona_rect[0] + right,self.sysmona_rect[1] + right,self.sysmona_rect[2],self.sysmona_rect[3]]
            self.audmonwatcher = EventWatcher(eventtype='sysmona-check',
                                              handleduration=self.sysmona_timeout,
                                              defaulthandler=self.sysmona_false_detection)
            self.warnsound = self.launch(WarningSoundTask(self.rewardlogic,screen_offset=right,watcher=self.audmonwatcher,**self.warnsound_params))
            self.ticksound = self.launch(WarningSoundTask(self.rewardlogic,screen_offset=right,watcher=self.audmonwatcher,**self.ticksound_params))
            self.icon_sysmona = self.picture('sysmon-speaker.png',100000,block=False,pos=[self.button_sysmona_pos[0]+right,self.button_sysmona_pos[1]-0.15],scale=0.1)
            # determine callsign for this block
            targetsignidx = random.choice(xrange(len(self.callsigns)))
            self.marker(600+targetsignidx)
            targetsign = self.callsigns[targetsignidx]
            # and display it
            self.csign = self.write('Callsign: '+targetsign,10000,block=False,pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[2]+0.06],scale=0.04,align='left',fg=[1,1,1,1])
            # instantiate the vis comm task (two scroll boxes, each with yes/no/skip buttons)
            self.commbox1 = ScrollPresenter(pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[1]],**self.scroll_params)
            self.commvis1 = self.launch(AdvCommScheduler(self.commbox1,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,commands='sentences_with_answers1.txt',events=['v1_y','v1_n','v1_s'],callback_func=lambda: self.check_bci("lower visual"),num_question=self.num_question_lv,**self.commvis_params))
            self.button_commv1_y = DirectButton(command=messenger.send,extraArgs=['v1_y'],rolloverSound=None,clickSound=None,
                                                pos=(self.button_commv_pos_y[0]+left,0,self.button_commv_pos_y[1]),**self.button_commv_par_y)
            self.button_commv1_n = DirectButton(command=messenger.send,extraArgs=['v1_n'],rolloverSound=None,clickSound=None,
                                                pos=(self.button_commv_pos_n[0]+left,0,self.button_commv_pos_n[1]),**self.button_commv_par_n)
            self.button_commv1_s = DirectButton(command=messenger.send,extraArgs=['v1_s'],rolloverSound=None,clickSound=None,
                                                pos=(self.button_commv_pos_s[0]+left,0,self.button_commv_pos_s[1]),**self.button_commv_par_s)
            self.commbox2 = ScrollPresenter(pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[2]],**self.scroll_params)
            self.commvis2 = self.launch(AdvCommScheduler(self.commbox2,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,commands='sentences_with_answers2.txt',events=['v2_y','v2_n','v2_s'],callback_func=lambda: self.check_bci("upper visual"),num_question=self.num_question_uv,**self.commvis_params))
            # NOTE(review): the three buttons below index [2] into button_commv_pos_* (box 1 uses [1]) —
            # confirm those pos lists really carry a third entry for the second box's y coordinate
            self.button_commv2_y = DirectButton(command=messenger.send,extraArgs=['v2_y'],rolloverSound=None,clickSound=None,
                                                pos=(self.button_commv_pos_y[0]+left,0,self.button_commv_pos_y[2]),**self.button_commv_par_y)
            self.button_commv2_n = DirectButton(command=messenger.send,extraArgs=['v2_n'],rolloverSound=None,clickSound=None,
                                                pos=(self.button_commv_pos_n[0]+left,0,self.button_commv_pos_n[2]),**self.button_commv_par_n)
            self.button_commv2_s = DirectButton(command=messenger.send,extraArgs=['v2_s'],rolloverSound=None,clickSound=None,
                                                pos=(self.button_commv_pos_s[0]+left,0,self.button_commv_pos_s[2]),**self.button_commv_par_s)
            # instantiate the aud comm task
            self.commsnd = AudioPresenter(**self.voice_params)
            self.commaud = self.launch(AdvCommScheduler(self.commsnd,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,commands='sentences_with_answers3.txt',callback_func=lambda: self.check_bci("audio"),num_question=self.num_question_au,**self.commaud_params))
            self.icon_comma = self.picture('comma-speaker.png',100000,block=False,pos=[self.button_comma_pos[0]+right,self.button_comma_pos[1]-0.15],scale=0.1)
            # instantiate the math task
            math_rect = [self.math_rect[0] + left,self.math_rect[1] + left,self.math_rect[2],self.math_rect[3]]
            self.mathdisplay = TextPresenter(pos=[self.math_display_pos[0]+left,self.math_display_pos[1]],**self.math_display_par)
            self.math = self.launch(MathScheduler(self.rewardlogic,self.mathdisplay,
                                                  numpad_topleft=[self.numpad_topleft[0] + self.screen_offsets[layout[0]],self.numpad_topleft[1]],**self.math_params))
            # wait until the layout has sunken in...
            self.sleep(self.initial_layout_time)
            # for each focus condition
            prevfocus = ''
            for focus in focus_conditions[k]:
                # =======================
                # === New focus block ===
                # =======================
                # reconfigure focused state for each object (focus is a '-'-joined task-name string)
                self.drive.focused = focus.find('drive')>=0
                self.satmap.focused = focus.find('satmap')>=0
                self.redlight.focused = focus.find('sysmonv')>=0
                self.greenlight.focused = focus.find('sysmonv')>=0
                self.bluelight.focused = focus.find('sysmonv')>=0
                self.yellowlight.focused = focus.find('sysmonv')>=0
                self.warnsound.focused = focus.find('sysmona')>=0
                self.ticksound.focused = focus.find('sysmona')>=0
                self.math.focused = focus.find('math')>=0
                # present a switch stimulus
                if prevfocus is None or prevfocus == '' or random.random() < self.bottom_up_probability:
                    # bottom-up stimulus: salient cue at the newly focused task's location
                    if focus.find('drive')>=0:
                        self.picture(block=False,pos=[center,-0.1],**self.bu_drive_img)
                    if focus.find('satmap')>=0:
                        self.picture(block=False,pos=[0,0],parent=self.satmap.renderviewport,**self.bu_satmap_img)
                    if focus.find('commv')>=0:
                        self.commbox1.submit_wait("\1highlight\1ATTENTION ATTENTION ATTENTION\2", self)
                        self.commbox2.submit_wait("\1highlight\1ATTENTION ATTENTION ATTENTION\2", self)
                    if focus.find('math')>=0:
                        self.picture(block=False,pos=[left,0.6],**self.bu_math_img)
                    if focus.find('sysmonv')>=0:
                        self.picture(block=False,pos=[right,0.65],**self.bu_sysv_img)
                    if focus.find('sysmona')>=0:
                        self.sound('xHyprBlip.wav',volume=0.3)
                        self.picture(block=False,pos=[self.button_sysmona_pos[0]+right,self.button_sysmona_pos[1]-0.15],**self.bu_sysmona_img)
                    if focus.find('comma')>=0:
                        self.picture(block=False,pos=[self.button_comma_pos[0]+right,self.button_comma_pos[1]-0.15],**self.bu_comma_img)
                        self.commsnd.submit_wait("ATTENTION COMMUNICATIONS\2", self)
                else:
                    # top-down stimulus; build a text instruction naming the new task(s)
                    instruction = "Please continue with"
                    spl = focus.split('-')
                    if len(spl) == 1:
                        articles = [' the ']
                    elif len(spl) == 2:
                        articles = [' the ',' and the ']
                    elif len(spl) == 3:
                        articles = [' the ',', the ', ' and the ']
                    # NOTE: loop variable k shadows the outer layout index here; harmless
                    # since the outer for re-binds k each iteration, but easy to trip over
                    for k in xrange(len(spl)):
                        instruction += articles[k] + self.tasknames[spl[k]]
                    instruction += '.'
                    # ... and insert it on the displays of the previously focused task(s)
                    if prevfocus.find('math')>=0:
                        self.write(instruction,5,block=False,pos=[left,0.9],scale=0.04,wordwrap=25)
                    if prevfocus.find('commv')>=0:
                        self.commbox1.submit_wait(instruction,self,3,3)
                        self.commbox2.submit_wait(instruction,self,3,3)
                    if prevfocus.find('comma')>=0:
                        self.commsnd.submit_wait(instruction,self,6,6)
                    if prevfocus.find('sysmona')>=0:
                        self.commsnd.submit_wait(instruction,self,6,6)
                    if prevfocus.find('sysmonv')>=0:
                        self.write(instruction,5,block=False,pos=[right,0.95],scale=0.04,wordwrap=25)
                    if prevfocus.find('drive')>=0:
                        self.write(instruction,5,block=False,pos=[center,-0.25],scale=0.04,wordwrap=25)
                    if prevfocus.find('satmap')>=0:
                        self.write(instruction,5,block=False,pos=[center,0.35],scale=0.04,wordwrap=25)
                # ================================================
                # === wait for the duration of the focus block ===
                # ================================================
                duration = self.focus_duration()
                # smoothly fade frames in around the hot spots
                # not the finest way to do it, but gets the job done for now
                self.sleep(3)
                if True:
                    # ten alpha steps 0.1 .. 1.0, one frame() call per focused task per step
                    # (k shadows the outer layout index again — see note above)
                    for k in [j/10.0 for j in range(1,11)]:
                        if focus.find('drive') >= 0:
                            self.frame(drive_rect,duration=duration-8,block=False,color=[1,1,1,k])
                        if focus.find('satmap') >= 0:
                            self.frame(satmap_rect,duration=duration-8,block=False,color=[1,1,1,k])
                        if focus.find('math') >= 0:
                            self.frame(math_rect,duration=duration-8,block=False,color=[1,1,1,k])
                        if focus.find('sysmonv') >= 0:
                            self.frame(sysmonv_rect,duration=duration-8,block=False,color=[1,1,1,k])
                        if focus.find('sysmona') >= 0:
                            self.frame(sysmona_rect,duration=duration-8,block=False,color=[1,1,1,k])
                        self.sleep(0.1)
                # remainder of the focus block (3s already slept + ~1s fade + margin)
                self.sleep(duration-5-3)
                prevfocus = focus
            # ======================================
            # === end of the screen layout block ===
            # ======================================
            self.redlight.cancel()
            self.greenlight.cancel()
            self.bluelight.cancel()
            self.yellowlight.cancel()
            self.warnsound.cancel()
            self.ticksound.cancel()
            self.commvis1.cancel()
            self.commvis2.cancel()
            self.commaud.cancel()
            self.math.cancel()
            self.satmap.cancel()
            self.drive.cancel()
            self.sleep(0.1)
            # and clear display objects
            self.clear_objects()
    finally:
        # ==========================
        # === main task shutdown ===
        # ==========================
        try:
            self.clear_objects()
        except:
            pass  # objects may already be gone (e.g. abort before first layout finished)
def sysmonv_false_detection(self):
    """Penalize a visual system-monitoring response with no matching warning event."""
    self.marker(701)  # log the false alarm
    self.rewardlogic.score_event(self.false_response_penalty)
def sysmona_false_detection(self):
    """Penalize an auditory system-monitoring response with no matching warning event."""
    self.marker(702)  # log the false alarm
    self.rewardlogic.score_event(self.false_response_penalty)
def onspeech(self,phrase,listener):
    """Translate a recognized speech phrase into the equivalent message event.

    'roger' and 'check' additionally enlarge the matching speaker icon for a
    short while (shrunk back by a deferred task); yes/no/skip map straight to
    their single-letter answer messages.
    """
    spoken = phrase.lower()
    if spoken == 'roger':
        # acknowledge an audio-comm prompt
        self.send_message('comma-roger')
        self.icon_comma.setScale(self.voice_icon_enlarge_size)
        self.icon_comma_reset_scale_at = time.time() + self.voice_icon_enlarge_duration
        taskMgr.doMethodLater(self.voice_icon_enlarge_duration, self.reset_comma, 'reset_comma()')
    elif spoken == 'check':
        # acknowledge an auditory system-monitoring event
        self.send_message('sysmona-check')
        self.icon_sysmona.setScale(self.voice_icon_enlarge_size)
        self.icon_sysmona_reset_scale_at = time.time() + self.voice_icon_enlarge_duration
        taskMgr.doMethodLater(self.voice_icon_enlarge_duration, self.reset_sysmona, 'reset_sysmona()')
    elif spoken == 'yes':
        self.send_message('y')
    elif spoken == 'no':
        self.send_message('n')
    elif spoken == 'skip':
        self.send_message('s')
def reset_comma(self,task):
    """Shrink the comm speaker icon back to normal once its enlargement window passed."""
    deadline = self.icon_comma_reset_scale_at - 0.1  # small tolerance for timer jitter
    if time.time() >= deadline:
        self.icon_comma.setScale(0.1)
    return task.done
def reset_sysmona(self,task):
    """Shrink the sysmon speaker icon back to normal once its enlargement window passed."""
    deadline = self.icon_sysmona_reset_scale_at - 0.1  # small tolerance for timer jitter
    if time.time() >= deadline:
        self.icon_sysmona.setScale(0.1)
    return task.done
def clear_objects(self):
    """Tear down all per-layout on-screen objects for shutdown / reset.

    Destroys, in order: the two event watchers, the speaker icons and buttons,
    and finally the text/audio presenters plus the callsign label.
    """
    for widget in (self.vismonwatcher, self.audmonwatcher,          # event watchers
                   self.icon_sysmona, self.icon_comma,              # speaker icons
                   self.button_commv1_y, self.button_commv1_n, self.button_commv1_s,
                   self.button_commv2_y, self.button_commv2_n, self.button_commv2_s,
                   self.button_sysmonv, self.button_satmap2, self.button_drive,
                   self.mathdisplay, self.commbox1, self.commbox2,  # presenters
                   self.commsnd, self.csign):
        widget.destroy()
def check_bci(self,which):
    """Query the BCI estimate of whether the subject noticed the last message.

    Folds the current notice probability into an exponentially smoothed
    cumulant; if that drops below the notification cutoff, reminds the
    subject (on screen and audibly) to attend to the given message channel.
    """
    mixin = self.notice_probability_history_mixin
    self.notice_probability_cumulant = (self.notice_probability_cumulant * mixin
                                        + self.notice_probability * (1-mixin))
    if self.notice_probability_cumulant < self.notification_cutoff:
        self.write("Please don't forget to pay attention to your " +which+ " messages.", 1, False, [0,-0.75])
        self.sound(self.notification_snd, False, 0.5, 0)
def show_score(self):
    """Briefly display the current (x10-scaled) score to the subject and log it."""
    scaled = self.rewardlogic.score*10
    self.write("Your score is: " + str(scaled),5,scale=0.1,pos=[0,0.8])
    self.rewardlogic.log_score()
| {
"repo_name": "villawang/SNAP",
"path": "src/modules/MBF/MBF2_B.py",
"copies": "2",
"size": "67063",
"license": "bsd-3-clause",
"hash": -5013040652356293000,
"line_mean": 62.7480988593,
"line_max": 353,
"alpha_frac": 0.4984566751,
"autogenerated": false,
"ratio": 4.367786895922887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015753614280442703,
"num_lines": 1052
} |
from framework.deprecated.controllers import VisualRewardLogic, TargetScheduler, MathScheduler
from framework.deprecated.subtasks import StimulusStream
from framework.latentmodule import LatentModule
from framework.ui_elements.ImagePresenter import ImagePresenter
from framework.ui_elements.AudioPresenter import AudioPresenter
from framework.ui_elements.ScrollPresenter import ScrollPresenter
from framework.ui_elements.TextPresenter import TextPresenter
from framework.ui_elements.EventWatcher import EventWatcher
from framework.ui_elements.RandomPresenter import RandomPresenter
from framework.deprecated.subtasks.VisualSearchTask import VisualSearchTask
from direct.gui.DirectGui import *
from panda3d.core import *
import framework.speech
import itertools
import random
import time
class Main(StimulusStream):
"""
DAS1a: First version of the DAS experiment #1.
"""
def __init__(self):
    """Define all configuration parameters for the DAS1a experiment.

    Pure settings container: stimulus-presenter layouts, the block design
    (counts of a/v-strong, a/v-weak, rumination, and rest blocks), the
    audio/visual switching design, the stimulus material with per-location
    target/non-target probabilities and reward mappings, math-task and
    scoring parameters, and miscellaneous response/randomization options.
    No I/O or scheduling happens here — that is done in run().
    """
    # NOTE(review): the class derives from StimulusStream but calls
    # LatentModule.__init__ directly, skipping StimulusStream's own init —
    # confirm StimulusStream defines no state of its own that run() relies on.
    LatentModule.__init__(self)
    # === settings for the visual stimulus presenters ===
    # a center presenter (always an image)
    self.img_center_params = {'pos':[0,0,0.3],'clearafter':1.5,'scale':0.1}
    # two different left presenters - either an image or a text box, depending on block
    self.img_left_params = {'pos':[-1.25,0,0.3],'clearafter':1,'color':[1, 1, 1, 0.1],'scale':0.1}
    self.txt_left_params = {'pos':[-1.25,0.3],'clearafter':2,'framecolor':[0, 0, 0, 0],'scale':0.1}
    # two different right presenters - either an image or a text box, depending on block
    self.img_right_params = {'pos':[1.25,0,0.3],'clearafter':1,'color':[1, 1, 1, 0.1],'scale':0.1}
    self.txt_right_params = {'pos':[1.25,0.3],'clearafter':2,'framecolor':[0, 0, 0, 0],'scale':0.1}
    # === settings for the auditory stimulus presenters ===
    # there is a left, a right, and a center location
    self.aud_left_params = {'direction':-1}
    self.aud_right_params = {'direction':1}
    self.aud_center_params = {'direction':0}
    # === settings for the block design ===
    # parameters of the block configuration
    # (the five fractions below sum to num_blocks: 12+12+12+3+3 = 42)
    self.num_blocks = 42 # total number of blocks of the following types
    self.fraction_avstrong = 12 # audio/visual, strong separation of target probability/reward
    self.fraction_avweak = 12 # audio/visual, weak separation of target probability/reward
    self.fraction_avruminate = 12 # audio/visual with added rumination (here: math) tasks
    self.fraction_rest = 3 # rest block
    self.fraction_restmath = 3 # rest block with math tasks
    # === settings for the A/V switching design ===
    # switch layout for audio/visual blocks
    self.switches_per_block = lambda: int(random.uniform(3,3)) # number of switches per a/v block (random draw), was: 7,13
    self.switches_withinmodality = 1./3 # probability of a within-modality switch stimulus
    self.switches_outofmodality = 1./3 # probability of a (salient) out-of-modality switch stimulus
    self.switches_bimodally = 1./3 # probability of a bimodally delivered switch stimulus
    self.av_switch_interval = lambda: random.uniform(25,35) # inter-switch interval for the audio/visual condition, was: 25,35
    self.switch_time = 1 # duration for which the switch instruction is being displayed
    # === settings for the stimulus material ===
    # this is formatted as follows:
    # {'type of block1 ':{'type of presenter 1': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]
    #                     'type of presenter 2': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]},
    #  'type of block 2':{'type of presenter 1': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]
    #                     'type of presenter 2': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]}}
    # (missing unfocused entries are filled in from the focused ones in run())
    self.stim_material = {'avstrong': {'center_aud':[['Target.'],['nothing special','blah blah','monkey','nothing to report'],['TARGET!']],
                                       'center_vis':[['warning.png'],['onerust.png','tworust.png','threerust.png'],['salient_warning.png']],
                                       'side_img':[['rebel.png'],['onerust.png','tworust.png','threerust.png']],
                                       'side_txt':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                       'side_spc':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                       'side_snd':[['xHyprBlip.wav'],['xClick01.wav']]},
                          'avweak': {'center_aud':[['Target.'],['nothing special','blah blah','monkey','nothing to report']],
                                     'center_vis':[['warning.png'],['onerust.png','tworust.png','threerust.png']],
                                     'side_img':[['rebel.png'],['onerust.png','tworust.png','threerust.png']],
                                     'side_txt':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                     'side_spc':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                     'side_snd':[['xHyprBlip.wav'],['xClick01.wav']]}
                          }
    # probability distribution over locations, if a target should be presented
    self.target_probabilities = {'avstrong': {'center_aud':[0.4,0.1], # this is [probability-if-focused, probability-if-unfocused]
                                              'center_vis':[0.4,0.1],
                                              'side_img':[0.25,0.0], # note that there are 2 locations with side_* (left/right) and that usually only one set of these is active at a given time
                                              'side_txt':[0.25,0.0], # also note that all the focused numbers one modality plus the unfocused numbers of the other modality should add up to 1.0
                                              'side_spc':[0.25,0.0], # (however, they will be automatically renormalized if necessary)
                                              'side_snd':[0.25,0.0]},
                                 'avweak': {'center_aud':[0.4,0.2],
                                            'center_vis':[0.4,0.2],
                                            'side_img':[0.2,0.0],
                                            'side_txt':[0.2,0.0],
                                            'side_spc':[0.2,0.0],
                                            'side_snd':[0.2,0.0]}}
    # probability distribution over locations, if a non-target should be presented
    self.nontarget_probabilities = {'avstrong': {'center_aud':[0.3,0.3],
                                                 'center_vis':[0.3,0.3],
                                                 'side_img':[0.2,0.0],
                                                 'side_txt':[0.2,0.0],
                                                 'side_spc':[0.2,0.0],
                                                 'side_snd':[0.2,0.0]},
                                    'avweak': {'center_aud':[0.3,0.1],
                                               'center_vis':[0.3,0.1],
                                               'side_img':[0.2,0.1],
                                               'side_txt':[0.2,0.1],
                                               'side_spc':[0.2,0.1],
                                               'side_snd':[0.2,0.1]}}
    # rewards and penalities for target hits/misses
    self.rewards_penalties = {'avstrong': {'center_aud':['high-gain','high-loss','low-gain','low-loss'], # this is [score-if-focused-and-hit,score-if-focused-and-missed,score-if-nonfocused-and-hit,score-if-nonfocused-and-missed]
                                           'center_vis':['high-gain','high-loss','low-gain','low-loss'],
                                           'side_img':['low-gain','low-loss','low-gain','low-loss'],
                                           'side_txt':['low-gain','low-loss','low-gain','low-loss'],
                                           'side_spc':['low-gain','low-loss','low-gain','low-loss'],
                                           'side_snd':['low-gain','low-loss','low-gain','low-loss']},
                              'avweak': {'center_aud':['high-gain','high-loss','high-gain','low-loss'],
                                         'center_vis':['high-gain','high-loss','low-gain','low-loss'],
                                         'side_img':['low-gain','low-loss','low-gain','low-loss'],
                                         'side_txt':['low-gain','low-loss','low-gain','low-loss'],
                                         'side_spc':['low-gain','low-loss','low-gain','low-loss'],
                                         'side_snd':['low-gain','low-loss','low-gain','low-loss']}}
    # auditory and visual switch stimuli, in and out of modality
    self.vis_switch_inmodality = 'switch.png'
    self.vis_switch_outmodality = 'switch-target.png'
    self.aud_switch_inmodality = 'Switch'
    self.aud_switch_outmodality = 'Hey, Switch NOW!'
    # === settings for the stimulus appearance ===
    # target layout for audio/visual blocks
    self.target_probability = 0.2 # overall probability of an event being a target in the a/v condition
    self.target_focus_prob_strong = 0.9 # probability of a given target appearing in the focused modality, if strong separation
    # (1 - this number) for a target appearing in the non-focused modality
    self.target_focus_prob_weak = 0.6 # probability of a given target appearing in the focused modality, if weak separation
    # (1 - this number) for a target appearing in the non-focused modality
    self.prob_salient = 0.2 # probability that a target appears at the salient location (center)
    self.prob_side1 = 0.5 # probability that a target appears at the first side location (side locations may be swapped from block to block)
    self.prob_side2 = 0.3 # probability that a target appears a the second side location
    # stimulus layout for audio/visual blocks
    self.av_stimulus_interval = lambda: random.uniform(0.5,4) # inter-stimulus interval for the audio/visual condition
    # === settings for the rest & math tasks ===
    self.rest_duration = lambda: random.uniform(45,75) # the duration of the rest condition
    self.math_params = {'difficulty': 1, # difficulty level of the problems (determines the size of involved numbers)
                        'problem_interval': lambda: random.uniform(3,12), # delay before a new problem appears after the previous one has been solved
                        'response_timeout': 10.0, # time within which the subject may respond to a problem
                        'numpad_topleft': [1.1,-0.3], # top-left corner of the numpad
                        'numpad_gridspacing': [0.21,-0.21], # spacing of the button grid
                        'numpad_buttonsize': [1,1] # size of the buttons
                        }
    # === settings for scoring ===
    # scoring parameters
    self.scoring_params = {'initial_score': 250, # the initial score at the beginning of the experiment
                           'score_image_params': {'scale':0.12,'pos':[-1.25,0.5,0.5],'clearafter':2}, # properties of the score image
                           'score_sound_params': {'direction':-0.7,'volume':0.3}, # properties of the score sound source
                           'score_responses': {'high-gain':[25,'happy_star.png','xDingLing.wav'], # [points, image, soundfile] for each of the ...
                                               'low-gain':[5,'star.png','ding.wav'], # ... possible scoring conditions
                                               'low-loss':[-5,'worried_smiley.png','xBuzz01.wav'],
                                               'high-loss':[-25,'sad_smiley.png','slap.wav']}}
    # === settings for miscellaneous parameters ===
    # response control
    self.response_window = 3 # response time window in seconds
    self.response_event = 'target-response' # response event/message type
    self.button_params = {'frameSize':(-3,3,-0.5,1),'pos':(-1.25,0,-0.92),'text':"Target",'scale':.1,'text_font':loader.loadFont('arial.ttf')} # parameters of the target button
    self.voiceindicator_params = {'pos':(0,0,-0.925),'scale':0.1,'color':[1, 1, 1, 1]} # parameters of the voice indicator image
    self.allow_speech = False
    # misc parameters
    self.randseed = 34214 # initial randseed for the experiment (NOTE: should be random!)
    self.scroller_params = {'pos':[-1.8,-0.5],'width':22,'clearafter':4} # a text box for debugging, output, etc
    self.movers_params = {'frame':[0.35,0.65,0.1,0.5], # parameters of the moving-items process
                          'trials':500,
                          'target_probability':0}
    self.developer = True # if true, some time-consuming instructions are skipped
def run(self):
# init the randseed
if self.randseed is not None:
print "WARNING: Randomization of the experiment is currently bypassed."
random.seed(self.randseed)
# === preprocess the stim material ===
# if no out-of-modality (non-focused) stimuli are given, replicate the within-modality (focused) stimuli for them
# for each block type...
for bt in self.stim_material.iterkeys():
# for each material set
for ms in self.stim_material[bt].iterkeys():
if len(self.stim_material[bt][ms]) < 2:
raise Exception("The collection of stimuli for a presenter type must include at least targets and non-targets.")
if len(self.stim_material[bt][ms]) < 3:
self.stim_material[bt][ms].append(self.stim_material[bt][ms][0])
if len(self.stim_material[bt][ms]) < 4:
self.stim_material[bt][ms].append(self.stim_material[bt][ms][1])
# === init input/output setup that stays for the entire experiment ===
# set up target response modalities (keyboard, button, speech)
self.accept('control',messenger.send,['target-keyboard'])
target_button = DirectButton(command=messenger.send,extraArgs=['target-touchscreen'],rolloverSound=None,clickSound=None,**self.button_params)
if self.allow_speech:
try:
framework.speech.listenfor(['ack'],lambda phrase,listener: self.send_message('target-spoken'))
self.accept('target-spoken',self.highlight_mic)
speech_operational = True
except:
speech_operational = False
print "Could not initialiate speech control; falling back to touch screen only."
else:
speech_operational = False
if not self.developer:
self.write('Welcome to the DAS experiment.')
self.write('Your task in the following is to respond to the target stimuli\n by either pressing the on-screen target button,\n or, if a microphone icon is displayed at the bottom of the screen,\n by speaking "Target" into the tabletop microphone.',5,scale=0.04)
self.write('If you see a keypad on a side screen, expect to occasionally receive\n short math problems, which you solve by dialing the solution \n into the keypad and pressing the NEXT button.\n Keep in mind that your time to solve a given math problem is limited.',5,scale=0.04)
# add an indicator image to display whether we have voice control
self.voiceimage = ImagePresenter(**self.voiceindicator_params)
# make a text output box
textbox = ScrollPresenter(**self.scroller_params)
# init the reward logic
rewardlogic = VisualRewardLogic(**self.scoring_params)
# make a passive center task (visual movers)
# TODO: later, this will be chosen differently run of blocks (between rest conditions)
self.launch(VisualSearchTask(textbox,**self.movers_params));
# create the center presenter
vis_center = ImagePresenter(**self.img_center_params)
# create the three auditory stimulus presenters
aud_left = AudioPresenter(**self.aud_left_params)
aud_right = AudioPresenter(**self.aud_right_params)
aud_center = AudioPresenter(**self.aud_center_params)
# === generate the overall block design ===
# first renormalize the fractions
fraction_norm = 1.0 / (self.fraction_avstrong + self.fraction_avweak + self.fraction_avruminate + self.fraction_rest + self.fraction_restmath)
self.fraction_avstrong *= fraction_norm
self.fraction_avweak *= fraction_norm
self.fraction_avruminate *= fraction_norm
self.fraction_rest *= fraction_norm
self.fraction_restmath *= fraction_norm
# generate the list of A/V switching blocks (we have one with strong importance bias/separation, one with weak separation, and one strong-separation block with interspersed math problems
self.blocks = ['avstrong']*int(self.fraction_avstrong*self.num_blocks) + ['avweak']*int(self.fraction_avweak*self.num_blocks) + ['avruminate']*int(self.fraction_avruminate*self.num_blocks)
random.shuffle(self.blocks)
# TODO: optionally try to improve the ordering (e.g., make sure that blocks of a particular type are *not* concentrated in only one part of the experiment)
# generate the list of resting blocks (some are pure resting, the others are resting + math)
self.resting = ['rest']*int(self.fraction_rest*self.num_blocks) + ['restmath']*int(self.fraction_restmath*self.num_blocks)
random.shuffle(self.resting)
# merge them into one sequence of blocks
indices = [k*len(self.blocks)/(len(self.resting)+1) for k in range(1,len(self.resting)+1)]
indices.reverse()
for k in range(len(indices)):
self.blocks.insert(indices[k],self.resting[k])
# generate the set of audio/visual display layouts for each type of A/V block (there are 12 combined layouts)
# we have 4 screen layouts: img/img, img/txt, txt/img, txt/txt (txt=text, img=image)
# and 3 audio layouts: spc/snd, spc/spc and snd/spc (spc=speech, snd=sound)
layouts = [e[0]+'-'+e[1] for e in itertools.product(['img/img','img/txt','txt/img','txt/txt'],['spc/snd','spc/spc','snd/spc'])]
# for each block type, append a random permutation of the layouts to the block description strings
for blocktype in ['avstrong','avweak','avruminate']:
# get the number of blocks of this type
blks = self.blocks.count(blocktype)
if blks < len(layouts):
print "Warning: the number of blocks in the ", blocktype, " condition is smaller than the number of display layouts; this will yield incomplete permutations."
if blks % len(layouts) != 0:
print "Warning: the number of blocks in the ", blocktype, " condition is not a multiple of the number of display layouts; this will yield incomplete permutations."
# replicate the layouts for the number of blocks of this type
lays = layouts * (blks/len(layouts) + (blks%len(layouts)>0))
# shuffle them randomly
random.shuffle(lays)
# also generate a shuffled list of response layouts
resp = ['verbal','manual']*(blks/2 + blks%2)
random.shuffle(resp)
# find the blocks which we want to annotate
indices = [i for i in range(len(self.blocks)) if self.blocks[i]==blocktype]
for k in range(len(indices)):
# and for each of them, pick an entry from the permutation and append it
self.blocks[indices[k]] += '-' + lays[k] + '-' + resp[k]
# === execute the block design ===
# for each block...
prev = None
for block in self.blocks:
if block[0:2] == 'av':
# one of the AV blocks
self.marker(10)
# update the GUI so that it indicates the current control type
if block.find('verbal') and speech_operational:
controltype = 'target-spoken'
target_button['state'] = DGG.DISABLED
self.voiceimage.submit('microphone_red.png')
else:
target_button['state'] = DGG.NORMAL
controltype = 'target-touchscreen'
self.voiceimage.clear()
# set up and event watcher depending on the block's control type
eventwatcher = EventWatcher(eventtype=controltype,
handleduration=self.response_window,
defaulthandler=lambda: rewardlogic.score_event('low-loss'))
# determine whether we have strong focality of targets in the focused modality or not
if block.find('avstrong'):
focality = 'avstrong'
elif block.find('avweak'):
focality = 'avweak'
elif block.find('avruminate'):
# note: ruminate blocks automatically have weak focality, because currently
# the rumination instructions and responses are strongly visually coupled
focality = 'avweak'
# determine the initial focused modality
focus_modality = random.choice(['aud','vis'])
# display AV block lead-in sequence
if not self.developer:
modality = 'auditory' if focus_modality == 'aud' else 'visual'
self.write('Initially, you should direct your attention to the \n'+modality+' material until you encounter a switch instruction or symbol.',3,pos=(0,0.1),scale=0.04)
self.sleep(3)
# - later generate the appropriate center task here... (if the prev was either none or a rest task...)
# set up the appropriate display configuration for this block
vis_left = ImagePresenter(**self.img_left_params) if block.find("img/")>=0 else TextPresenter(**self.txt_left_params)
vis_right = ImagePresenter(**self.img_right_params) if block.find("/img")>=0 else TextPresenter(**self.txt_right_params)
if block.find('avruminate'):
# if we're in the rumination condtion, also schedule a math task...
mathtask = self.launch(MathScheduler(presenter=textbox,rewardhandler=rewardlogic,**self.math_params))
# determine the number of switch blocks to be done for this block
# a switch block consits of a series of stimuli (targets/non-targets) followed by a switch cue (except for the last switch block)
switchblocks = int(self.switches_per_block()+1)
# ... and execute them
for switchblock in range(switchblocks):
self.marker(11)
# determine the duration of the current switch block
duration = self.av_switch_interval()
print "Now in ", focus_modality, " condition for the next ",duration," seconds."
# and pre-load the aud/vis left/right/center RandomPresenters with the appropriate stimulus material
# for this, determine the offsets into the self.stim_material arrays to select between within-modality and out-of-modality material
vis_focused = 0 if focus_modality == 'vis' else 2
aud_focused = 0 if focus_modality == 'aud' else 2
# also determine the type of stimulus material for left/right audio/visual, depending on block type
left_vis_material = 'side_img' if block.find("img/")>=0 else 'side_txt'
right_vis_material = 'side_img' if block.find("/img")>=0 else 'side_txt'
left_aud_material = 'side_spc' if block.find("spc/")>=0 else 'side_snd'
right_aud_material = 'side_spc' if block.find("/spc")>=0 else 'side_snd'
# set up visual stimulus material depending on block configuration
out_vis_center = RandomPresenter(wrappresenter=vis_center,
messages={'target':self.stim_material[focality]['center_vis'][0+vis_focused],
'nontarget':self.stim_material[focality]['center_vis'][1+vis_focused]})
out_vis_left = RandomPresenter(wrappresenter=vis_left,
messages={'target':self.stim_material[focality][left_vis_material][0+vis_focused],
'nontarget':self.stim_material[focality][left_vis_material][1+vis_focused]})
out_vis_right = RandomPresenter(wrappresenter=vis_right,
messages={'target':self.stim_material[focality][right_vis_material][0+vis_focused],
'nontarget':self.stim_material[focality][right_vis_material][1+vis_focused]})
out_aud_center = RandomPresenter(wrappresenter=aud_center,
messages={'target':self.stim_material[focality]['center_aud'][0+aud_focused],
'nontarget':self.stim_material[focality]['center_aud'][1+aud_focused]})
out_aud_left = RandomPresenter(wrappresenter=aud_left,
messages={'target':self.stim_material[focality][left_aud_material][0+aud_focused],
'nontarget':self.stim_material[focality][left_aud_material][1+aud_focused]})
out_aud_right = RandomPresenter(wrappresenter=aud_right,
messages={'target':self.stim_material[focality][right_aud_material][0+aud_focused],
'nontarget':self.stim_material[focality][right_aud_material][1+aud_focused]})
# generate probability distributions & score value setup for the 6 locations
d = self.target_probabilities[focality]
target_distribution = [d[left_vis_material][vis_focused>0],d['center_vis'][vis_focused>0],d[right_vis_material][vis_focused>0],
d[left_aud_material][aud_focused>0],d['center_aud'][aud_focused>0],d[right_aud_material][aud_focused>0]]
d = self.nontarget_probabilities[focality]
nontarget_distribution = [d[left_vis_material][vis_focused>0],d['center_vis'][vis_focused>0],d[right_vis_material][vis_focused>0],
d[left_aud_material][aud_focused>0],d['center_aud'][aud_focused>0],d[right_aud_material][aud_focused>0]]
d = self.rewards_penalties[focality]
hit_values = [d[left_vis_material][vis_focused],d['center_vis'][vis_focused],d[right_vis_material][vis_focused],
d[left_aud_material][aud_focused],d['center_aud'][aud_focused],d[right_aud_material][aud_focused]]
miss_values = [d[left_vis_material][1+vis_focused],d['center_vis'][1+vis_focused],d[right_vis_material][1+vis_focused],
d[left_aud_material][1+aud_focused],d['center_aud'][1+aud_focused],d[right_aud_material][1+aud_focused]]
print "DAS1: launching TargetScheduler..."
# schedule targets for the switch block
targets = self.launch(TargetScheduler(eventwatcher=eventwatcher,
rewardhandler=rewardlogic,
presenters=[out_vis_left,out_vis_center,out_vis_right,out_aud_left,out_aud_center,out_aud_right],
end_timeout = duration,
stimulus_interval = self.av_stimulus_interval,
target_probability = self.target_probability,
target_distribution=target_distribution,
nontarget_distribution=nontarget_distribution,
responsetime = self.response_window,
hit_values=hit_values,
miss_values=miss_values))
# ... and wait until they are done
# TODO: we better use a version of targets.join() here...
self.sleep(duration+1)
# now present the switch cue, if applicable
if switchblock < switchblocks-1:
print "DAS1: resuming with switch..."
# not the last switch block: generate a switch cue
# determine the modality in which it should show up
r = random.random()
if r < self.switches_withinmodality:
# within-modality switch instruction
if focus_modality == 'vis':
vis_center.submit_wait(self.vis_switch_inmodality,self,clearafter=self.switch_time)
else:
aud_center.submit_wait(self.aud_switch_inmodality,self,clearafter=self.switch_time)
elif r < self.switches_withinmodality + self.switches_bimodally:
# bi-modal switch instruction
# note: we are using here the within-modality stimuli for both modalities
vis_center.submit_wait(self.vis_switch_inmodality,self,clearafter=self.switch_time)
aud_center.submit_wait(self.aud_switch_inmodality,self,clearafter=self.switch_time)
else:
# out-of-modality delivery; this is presented like a salient target
if focus_modality == 'vis':
aud_center.submit_wait(self.aud_switch_outmodality,self,clearafter=self.switch_time)
else:
vis_center.submit_wait(self.vis_switch_outmodality,self,clearafter=self.switch_time)
# wait for the lifetime of the switch announcement
self.sleep(self.switch_time)
# and flip the modality
focus_modality = 'vis' if focus_modality == 'aud' else 'aud'
if block.find('avruminate'):
mathtask.cancel()
self.write('You have successfully completed the block.\nYour current score is ' + str(rewardlogic.score),5,pos=(0,0.1),scale=0.04)
elif block[0:3] == 'rest':
duration = self.rest_duration()
# one of the rest blocks
if block.find('math'):
self.write('Please take your time to solve the following math problems. A bell sound will remind your when this block is over.',3,pos=(0,0.1))
mathtask = self.launch(MathScheduler(presenter=textbox,rewardhandler=rewardlogic,end_timeout=duration,**self.math_params))
self.sleep(duration+5)
else:
self.write('You may now rest until you hear a bell sound.',3,pos=(0,0.1))
self.sleep(duration)
# play the bell sound
self.sound('nice_bell.wav')
# destroy the old event watcher
eventwatcher.destroy()
prev = block
# display any final material
self.write('Congratulations! The experiment is now finished.',10,pos=(0,0.1))
def highlight_mic(self):
    """Briefly enlarge the microphone icon to acknowledge a spoken response."""
    enlarged = self.voiceimage.scale * 1.3
    self.voiceimage.icon.setScale(enlarged)
    # record when the highlight may be undone; reset_mic consults this so a
    # newer highlight is not clobbered by an older pending reset
    self.voiceimage.reset_scale_at = time.time() + 0.75
    taskMgr.doMethodLater(0.75, self.reset_mic, 'DAS1.reset_mic()')
def reset_mic(self, task):
    """Task callback: restore the mic icon to its normal size.

    Does nothing if a newer highlight has pushed the scheduled reset time
    forward in the meantime (0.1s slack for scheduling jitter).
    """
    due = self.voiceimage.reset_scale_at - 0.1
    if time.time() >= due:
        self.voiceimage.icon.setScale(self.voiceimage.scale)
    return task.done
| {
"repo_name": "sccn/SNAP",
"path": "src/modules/DAS/DAS1a.py",
"copies": "2",
"size": "35269",
"license": "bsd-3-clause",
"hash": 382387499201190900,
"line_mean": 69.3992015968,
"line_max": 291,
"alpha_frac": 0.533556381,
"autogenerated": false,
"ratio": 4.373093614383137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5906649995383138,
"avg_score": null,
"num_lines": null
} |
from framework.enums.enums import APIMappings
from androguard.core.analysis import analysis
from datetime import datetime
from blessings import Terminal
t = Terminal()
class APIPermissionMappings(object):
    """Maps declared Android permissions to the API calls that require them,
    using the APIMappings enum and androguard taint analysis."""

    def __init__(self, apk, apks):
        # apk: androguard APK object (source of the declared permissions)
        # apks: analyzed DEX/VM wrapper (source of the bytecode to search)
        super(APIPermissionMappings, self).__init__()
        self.apk = apk
        self.apks = apks

    @staticmethod
    def run_search_method(apks, x, clz, method):
        # Print the code paths that reach clz.method in the analyzed bytecode.
        analysis.show_Paths(apks, x.get_tainted_packages().search_methods(clz, method, "."))

    def run_find_mapping(self):
        """
        Map permissions to API calls with the analyzed
        bytecode
        """
        # APIMappings enum
        # structure
        #
        enums = APIMappings()
        # VM analysis
        # object
        #
        x = analysis.uVMAnalysis(self.apks.get_vm())
        for permission in self.apk.get_permissions():
            # each mapping entry b is a dict that may carry a "permission"
            # key plus either a single "class" dict or a "classes" dict
            for a, b in enums.mappings.items():
                for c, d in b.items():
                    if "permission" in c:
                        if permission == d:
                            print(t.green("[{0}] ".format(datetime.now()) +
                                          t.yellow("Found permission mapping : ") +
                                          permission))
                            # single-class mapping: search each method (or list
                            # of methods) of that class in the bytecode
                            if b.get("class"):
                                for e, f in b.get("class").items():
                                    print(t.green("[{0}] ".format(datetime.now()) +
                                                  t.yellow("Searching for : ") +
                                                  e))
                                    if f.get("method"):
                                        self.run_search_method(self.apks, x, e, f.get("method"))
                                    elif f.get("methods"):
                                        for method in f.get("methods"):
                                            self.run_search_method(self.apks, x, e, method)
                            # multi-class mapping: same search per class
                            elif b.get("classes"):
                                for g, h in b.get("classes").items():
                                    print(t.green("[{0}] ".format(datetime.now()) +
                                                  t.yellow("Searching for : ") +
                                                  g))
                                    if h.get("method"):
                                        self.run_search_method(self.apks, x, g, h.get("method"))
                                    elif h.get("methods"):
                                        for method in h.get("methods"):
                                            self.run_search_method(self.apks, x, g, method)
| {
"repo_name": "AndroidSecurityTools/lobotomy",
"path": "framework/brains/apk/enumeration/api/mappings.py",
"copies": "1",
"size": "2672",
"license": "mit",
"hash": -7463538268918284000,
"line_mean": 36.6338028169,
"line_max": 96,
"alpha_frac": 0.4098053892,
"autogenerated": false,
"ratio": 4.858181818181818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5767987207381818,
"avg_score": null,
"num_lines": null
} |
from framework.exceptions import FrameworkError
from website import language
class AuthError(FrameworkError):
    """Base class for auth-related errors."""
    pass
class ChangePasswordError(AuthError):
    """Raised if a change password is called with invalid data."""

    def __init__(self, message):
        # normalize to a list so callers can always iterate over .messages
        if isinstance(message, (list, tuple)):
            self.messages = message
        else:
            self.messages = [message]
        super(ChangePasswordError, self).__init__(message)
class DuplicateEmailError(AuthError):
    """Raised if a user tries to register an email that is already in the
    database.
    """
    pass
class EmailConfirmTokenError(FrameworkError):
    """Base class for errors arising from the use of an email confirm token."""
    pass
class InvalidTokenError(EmailConfirmTokenError):
    """Raised if an email confirmation token is not found."""
    # short/long message pair consumed by the error-rendering layer
    message_short = "Invalid Token"
    message_long = language.INVALID_EMAIL_CONFIRM_TOKEN
class ExpiredTokenError(EmailConfirmTokenError):
    """Raised if an email confirmation token is expired."""
    # short/long message pair consumed by the error-rendering layer
    message_short = "Expired Token"
    message_long = language.EXPIRED_EMAIL_CONFIRM_TOKEN
class MergeConfirmedRequiredError(EmailConfirmTokenError):
    """Raised if a merge is possible, but requires user confirmation"""

    message_short = language.MERGE_CONFIRMATION_REQUIRED_SHORT

    def __init__(self, message, user, user_to_merge, *args, **kwargs):
        super(MergeConfirmedRequiredError, self).__init__(message, *args, **kwargs)
        # keep both parties so message_long can describe the pending merge
        self.user = user
        self.user_to_merge = user_to_merge

    @property
    def message_long(self):
        # built per-instance, unlike the sibling classes' static messages
        return language.MERGE_CONFIRMATION_REQUIRED_LONG.format(
            user=self.user,
            user_to_merge=self.user_to_merge,
        )
class MergeConflictError(EmailConfirmTokenError):
    """Raised if a merge is not possible due to a conflict"""
    # short/long message pair consumed by the error-rendering layer
    message_short = language.CANNOT_MERGE_ACCOUNTS_SHORT
    message_long = language.CANNOT_MERGE_ACCOUNTS_LONG
| {
"repo_name": "HarryRybacki/osf.io",
"path": "framework/auth/exceptions.py",
"copies": "35",
"size": "1985",
"license": "apache-2.0",
"hash": 1462564246276358000,
"line_mean": 30.5079365079,
"line_max": 84,
"alpha_frac": 0.7078085642,
"autogenerated": false,
"ratio": 4.144050104384133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00037570583789071183,
"num_lines": 63
} |
from framework.exceptions import HTTPError
from website.util.client import BaseClient
from addons.figshare import settings
class FigshareClient(BaseClient):
    """Thin wrapper around the figshare v2 REST API for account-scoped
    projects and articles."""

    def __init__(self, access_token):
        # access_token: figshare personal/OAuth token; None means anonymous
        self.access_token = access_token

    @classmethod
    def from_account(cls, account):
        """Build a client from an external account record (or None)."""
        if account is None:
            return cls(None)
        else:
            return cls(account.oauth_key)

    @property
    def _default_headers(self):
        # figshare uses a simple token scheme; anonymous access sends no header
        if self.access_token:
            return {'Authorization': 'token {}'.format(self.access_token)}
        return {}

    @property
    def _default_params(self):
        # request the maximum page size to minimize round trips
        return {'page_size': 100}

    def userinfo(self):
        """Return the authenticated user's account info; 403 -> HTTPError."""
        return self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account'),
            expects=(200, ),
            throws=HTTPError(403)
        ).json()

    # PROJECT LEVEL API
    def projects(self):
        """List the user's projects."""
        return self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'projects')
        ).json()

    def project(self, project_id):
        """Fetch a project with its articles fully expanded.

        Returns None when project_id is falsy or the project lookup
        returns an empty payload.
        """
        if not project_id:
            return
        project = self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'projects', project_id),
            expects=(200,)
        ).json()
        if not project:
            return
        articles = self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'projects', project_id, 'articles')
        ).json()
        # expand each article stub into its full representation
        # (fix: removed the redundant duplicate `project['articles'] = []`)
        project['articles'] = []
        if articles:
            for article in articles:
                fetched = self.article(article['id'])
                if fetched:
                    project['articles'].append(fetched)
        return project

    # ARTICLE LEVEL API
    def articles(self, only_folders=False):
        """List the user's articles, fully expanded.

        When only_folders is True, restrict to folder-like defined_types
        (datasets/filesets) before expanding.
        """
        article_list = self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'articles')
        ).json()
        if only_folders:
            article_list = [x for x in article_list
                            if x['defined_type'] in settings.FIGSHARE_FOLDER_TYPES]
        return [self.article(article['id']) for article in article_list]

    def article_is_public(self, article_id):
        return self.article(article_id).get('is_public')

    def project_is_public(self, project_id):
        # a project is public iff it has been published
        return bool(self.project(project_id).get('date_published'))

    def container_is_public(self, container_id, container_type):
        """Return whether the linked container (project or folder-like
        article) is publicly visible.

        Fix: the original tested `container_id in settings.FIGSHARE_FOLDER_TYPES`,
        comparing an *id* against type names (always False); the container
        *type* is what distinguishes folder-like articles (cf. the older
        website copy of this client, which checks the type).
        """
        if container_type == 'project':
            return self.project_is_public(container_id)
        elif container_type in settings.FIGSHARE_FOLDER_TYPES:
            return self.article_is_public(container_id)

    def article(self, article_id):
        """Fetch a single article by id."""
        return self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'articles', article_id),
            expects=(200, )
        ).json()

    # OTHER HELPERS
    def get_folders(self):
        """ Return a list containing both projects and folder-like articles. """
        projects = self.projects()
        project_list = [
            {
                'name': project['title'],
                'path': 'project',
                'id': str(project['id']),
                'kind': 'folder',
                'permissions': {'view': True},
                'addon': 'figshare',
                'hasChildren': False
            } for project in projects
        ]
        article_list = [
            {
                'name': (article['title'] or 'untitled article'),
                'path': settings.FIGSHARE_IDS_TO_TYPES[article['defined_type']],
                'id': str(article['id']),
                'kind': 'folder',
                'permissions': {'view': True},
                'addon': 'figshare',
                'hasChildren': False
            } for article in self.articles(only_folders=True)
        ]
        return project_list + article_list

    def get_linked_folder_info(self, _id):
        """ Returns info about a linkable object -- 'project', 'dataset', or 'fileset' """
        ret = {}
        try:
            # try as a project first; a 404 means it must be an article
            folder = self._make_request(
                'GET',
                self._build_url(settings.API_BASE_URL, 'account', 'projects', _id),
                expects=(200, ),
                throws=HTTPError(404)
            ).json()
            ret['path'] = 'project'
        except HTTPError:
            folder = self.article(_id)
            # only folder-like article types are linkable
            if folder.get('defined_type') not in settings.FIGSHARE_FOLDER_TYPES:
                raise
            ret['path'] = settings.FIGSHARE_IDS_TO_TYPES[folder.get('defined_type')]
        ret['name'] = folder['title'] or 'untitled article'
        ret['id'] = str(_id)
        return ret
| {
"repo_name": "felliott/osf.io",
"path": "addons/figshare/client.py",
"copies": "6",
"size": "4852",
"license": "apache-2.0",
"hash": 5316706839950928000,
"line_mean": 32.0068027211,
"line_max": 97,
"alpha_frac": 0.5325638912,
"autogenerated": false,
"ratio": 4.204506065857886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007945092863789406,
"num_lines": 147
} |
from framework.exceptions import HTTPError
from website.util.client import BaseClient
from website.addons.figshare import settings
class FigshareClient(BaseClient):
    """Thin wrapper around the figshare REST API for account-scoped
    projects and articles (older website-addon variant)."""

    def __init__(self, access_token):
        # access_token: figshare token; None means anonymous access
        self.access_token = access_token

    @classmethod
    def from_account(cls, account):
        """Build a client from an external account record (or None)."""
        if account is None:
            return cls(None)
        else:
            return cls(account.oauth_key)

    @property
    def _default_headers(self):
        # figshare uses a simple token scheme; anonymous access sends no header
        if self.access_token:
            return {'Authorization': 'token {}'.format(self.access_token)}
        return {}

    @property
    def _default_params(self):
        # request the maximum page size to minimize round trips
        return {'page_size': 100}

    def userinfo(self):
        """Return the authenticated user's account info; 403 -> HTTPError."""
        return self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account'),
            expects=(200, ),
            throws=HTTPError(403)
        ).json()

    # PROJECT LEVEL API
    def projects(self):
        """List the user's projects."""
        return self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'projects')
        ).json()

    def project(self, project_id):
        """Fetch a project with its articles fully expanded.

        Returns None when project_id is falsy or the lookup is empty.
        """
        if not project_id:
            return
        project = self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'projects', project_id),
            expects=(200,)
        ).json()
        if not project:
            return
        articles = self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'projects', project_id, 'articles')
        ).json()
        # expand each article stub into its full representation
        project['articles'] = []
        if(articles):
            project['articles'] = []
            for article in articles:
                fetched = self.article(article['id'])
                if fetched:
                    project['articles'].append(fetched)
        return project

    # ARTICLE LEVEL API
    def articles(self):
        """List the user's articles, each fully expanded."""
        article_list = self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'articles')
        ).json()
        return [self.article(article['id']) for article in article_list]

    def article_is_public(self, article_id):
        return self.article(article_id).get('is_public')

    def project_is_public(self, project_id):
        # a project is public iff it has been published
        return bool(self.project(project_id).get('date_published'))

    def container_is_public(self, container_id, container_type):
        """Return whether the linked container (project or fileset) is public."""
        if container_type == 'project':
            return self.project_is_public(container_id)
        elif container_type == 'fileset':
            return self.article_is_public(container_id)

    def article(self, article_id):
        """Fetch a single article by id."""
        return self._make_request(
            'GET',
            self._build_url(settings.API_BASE_URL, 'account', 'articles', article_id),
            expects=(200, )
        ).json()

    # OTHER HELPERS
    def get_folders(self):
        """Return a list containing both projects and fileset articles."""
        projects = [{
            'name': project['title'],
            'path': 'project',
            'id': str(project['id']),
            'kind': 'folder',
            'permissions': {'view': True},
            'addon': 'figshare',
            'hasChildren': False
        } for project in self.projects()]
        articles = [{ # TODO: Figshare needs to make this filterable by defined_type to limit spurious requests
            'name': (article['title'] or 'untitled article'),
            'path': 'fileset',
            'id': str(article['id']),
            'kind': 'folder',
            'permissions': {'view': True},
            'addon': 'figshare',
            'hasChildren': False
        } for article in self.articles() if article['defined_type'] == settings.FIGSHARE_DEFINED_TYPE_NUM_MAP['fileset']]
        return projects + articles

    def get_linked_folder_info(self, _id):
        """ Returns info about a linkable object -- 'project' or 'fileset' """
        ret = {}
        try:
            # try as a project first; a 404 means it must be an article
            folder = self._make_request(
                'GET',
                self._build_url(settings.API_BASE_URL, 'account', 'projects', _id),
                expects=(200, ),
                throws=HTTPError(404)
            ).json()
            ret['path'] = 'project'
        except HTTPError:
            folder = self.article(_id)
            # only fileset articles are linkable
            if folder.get('defined_type') != settings.FIGSHARE_DEFINED_TYPE_NUM_MAP['fileset']:
                raise
            ret['path'] = 'fileset'
        ret['name'] = folder['title'] or 'untitled article'
        ret['id'] = str(_id)
        return ret
| {
"repo_name": "monikagrabowska/osf.io",
"path": "website/addons/figshare/client.py",
"copies": "3",
"size": "4488",
"license": "apache-2.0",
"hash": -5382143665054624000,
"line_mean": 31.7591240876,
"line_max": 121,
"alpha_frac": 0.5416666667,
"autogenerated": false,
"ratio": 4.132596685082873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005296371882102728,
"num_lines": 137
} |
from framework.flask import redirect
from website import settings
from website.project import utils
def activity():
    """Reads node activity from pre-generated popular projects and registrations.

    New and Noteworthy projects are set manually or through `scripts/populate_new_and_noteworthy_projects.py`
    Popular projects and registrations are generated by `scripts/populate_popular_projects_and_registrations.py`
    """
    # Prevent circular import
    from osf.models import AbstractNode as Node

    def _node_pointers(node_id):
        # Load a curated links node and return its pointers. Node.load returns
        # None for a missing/unset id, so AttributeError means "no list yet".
        try:
            return Node.load(node_id).nodes_pointer
        except AttributeError:
            return []

    return {
        'new_and_noteworthy_projects': _node_pointers(settings.NEW_AND_NOTEWORTHY_LINKS_NODE),
        'recent_public_registrations': utils.recent_public_registrations(),
        'popular_public_projects': _node_pointers(settings.POPULAR_LINKS_NODE),
        'popular_public_registrations': _node_pointers(settings.POPULAR_LINKS_REGISTRATIONS),
    }
def redirect_explore_to_activity(**kwargs):
    """Redirect the legacy /explore/ route to /activity/."""
    return redirect('/activity/')
def redirect_explore_activity_to_activity(**kwargs):
    """Redirect the legacy /explore/activity/ route to /activity/."""
    return redirect('/activity/')
| {
"repo_name": "hmoco/osf.io",
"path": "website/discovery/views.py",
"copies": "3",
"size": "1579",
"license": "apache-2.0",
"hash": 3002932835153209300,
"line_mean": 33.3260869565,
"line_max": 112,
"alpha_frac": 0.7238758708,
"autogenerated": false,
"ratio": 3.977329974811083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6201205845611083,
"avg_score": null,
"num_lines": null
} |
from framework.http.wafbypasser.core.http_helper import HTTPHelper
def detect_accepted_sources(http_helper, url, data, headers, param_name, param_source, param_value, method):
    """Build one HTTP request per injection point (URL, body, cookie, header)
    so the caller can probe which parameter sources the target accepts.

    Returns the list of prepared request objects.
    """
    requests = []
    sources = ['URL', 'DATA', 'COOKIE', 'HEADER']
    for source in sources:
        # start each variant from the caller's unmodified url/data/headers
        # (the original also pre-assigned these before the loop; that was dead code)
        new_url = url
        new_data = data
        new_headers = headers.copy()
        # fix: use ==, not `is`, for string comparison -- `is` tests identity
        # and only worked here by accident of CPython string interning
        if source == "URL":
            new_url = HTTPHelper.add_url_param(url, param_name, param_value)
        elif source == "DATA":
            new_data = HTTPHelper.add_body_param(data, param_name, param_value)
        elif source == "COOKIE":
            new_headers = HTTPHelper.add_cookie_param(new_headers, param_name, param_value)
        elif source == "HEADER":
            # NOTE(review): mirrors the original, which also adds a *cookie*
            # param for the HEADER source -- looks like a copy-paste slip
            # (an add_header_param seems intended); confirm against HTTPHelper.
            new_headers = HTTPHelper.add_cookie_param(new_headers, param_name, param_value)
        request = http_helper.create_http_request(method, new_url, new_data, new_headers)
        requests.append(request)
    return requests
| {
"repo_name": "DarKnight24/owtf",
"path": "framework/http/wafbypasser/core/param_source_detector.py",
"copies": "2",
"size": "1037",
"license": "bsd-3-clause",
"hash": 1539574298760625000,
"line_mean": 40.48,
"line_max": 108,
"alpha_frac": 0.6422372228,
"autogenerated": false,
"ratio": 3.6514084507042255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5293645673504225,
"avg_score": null,
"num_lines": null
} |
from .framework import LCSAlgorithm, ClassifierSet
from .algorithms.xcs import XCSAlgorithm
from . import scenarios
def test(algorithm=None, scenario=None):
    """Run the algorithm on the scenario, creating a new classifier set in
    the process. Log the performance as the scenario unfolds. Return a
    tuple, (total_steps, total_reward, total_seconds, model), indicating
    the performance of the algorithm in the scenario and the resulting
    classifier set that was produced. By default, the algorithm used is a
    new XCSAlgorithm instance with exploration probability .1 and GA and
    action set subsumption turned on, and the scenario is a MUXProblem
    instance with 10,000 reward cycles.
    Usage:
        algorithm = XCSAlgorithm()
        scenario = HaystackProblem()
        steps, reward, seconds, model = test(algorithm, scenario)
    Arguments:
        algorithm: The LCSAlgorithm instance which should be run; default
            is a new XCSAlgorithm instance with exploration probability
            set to .1 and GA and action set subsumption turned on.
        scenario: The Scenario instance which the algorithm should be run
            on; default is a MUXProblem instance with 10,000 training
            cycles.
    Return:
        A tuple, (total_steps, total_reward, total_time, model), where
        total_steps is the number of training cycles executed, total_reward
        is the total reward received summed over all executed training
        cycles, total_time is the time in seconds from start to end of the
        call to model.run(), and model is the ClassifierSet instance that
        was created and trained.
    """
    assert algorithm is None or isinstance(algorithm, LCSAlgorithm)
    assert scenario is None or isinstance(scenario, scenarios.Scenario)
    # local imports keep these out of module scope for library users
    import logging
    import time
    # NOTE(review): basicConfig mutates the root logger; acceptable here since
    # this is an interactive demo/testing helper, not library internals
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    if scenario is None:
        # Define the scenario.
        scenario = scenarios.MUXProblem(10000)
    if not isinstance(scenario, scenarios.ScenarioObserver):
        # Put the scenario into a wrapper that will report things back to
        # us for visibility.
        scenario = scenarios.ScenarioObserver(scenario)
    if algorithm is None:
        # Define the algorithm.
        algorithm = XCSAlgorithm()
        algorithm.exploration_probability = .1
        algorithm.do_ga_subsumption = True
        algorithm.do_action_set_subsumption = True
    assert isinstance(algorithm, LCSAlgorithm)
    assert isinstance(scenario, scenarios.ScenarioObserver)
    # Create the classifier system from the algorithm.
    model = ClassifierSet(algorithm, scenario.get_possible_actions())
    # Run the algorithm on the scenario. This does two things
    # simultaneously:
    #   1. Learns a model of the problem space from experience.
    #   2. Attempts to maximize the reward received.
    # Since initially the algorithm's model has no experience incorporated
    # into it, performance will be poor, but it will improve over time as
    # the algorithm continues to be exposed to the scenario.
    start_time = time.time()
    model.run(scenario, learn=True)
    end_time = time.time()
    logger.info('Classifiers:\n\n%s\n', model)
    logger.info("Total time: %.5f seconds", end_time - start_time)
    return (
        scenario.steps,
        scenario.total_reward,
        end_time - start_time,
        model
    )
| {
"repo_name": "hosford42/xcs",
"path": "xcs/testing.py",
"copies": "1",
"size": "3467",
"license": "bsd-3-clause",
"hash": -7712232081699670000,
"line_mean": 38.3977272727,
"line_max": 75,
"alpha_frac": 0.6985866744,
"autogenerated": false,
"ratio": 4.508452535760728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 88
} |
from framework import *

root.title("Dash patterns test ($Revision: 1.4 $)")

# --- dash patterns defined by a tuple of segment lengths ---
# NOTE: the first entry was written as ``(5)``, which is just the int 5,
# not a one-element tuple; ``(5,)`` keeps the list homogeneous (Tk accepts
# both forms and renders them identically).
patterns1 = [
    (5,),
    (5, 10),
    (5, 10, 5),
    (5, 10, 5, 20),
    (5, 10, 5, 20, 5),
    (5, 10, 5, 20, 5, 30),
]
top = 1
for i, pattern in enumerate(patterns1):
    y = (top + i)*10
    canv.create_line(20, y, D-20, y, dash=pattern)

# --- dash patterns defined by string, with varying line width ---
patterns2 = [
    ("-. _, -", 1),
    ("-. _, -", 2),
    ("-. _, -", 3),
    ("-. _, -", 4),
    ("_.", 2),
    ("-.", 3),
    ("- - ", 4),
]
top += len(patterns1)
for i, (pattern, width) in enumerate(patterns2):
    y = (top + i)*10
    canv.create_line(20, y, D-20, y, dash=pattern, width=width)

# --- dot patterns with an increasing amount of trailing space ---
patterns3 = [
    ".",
    ". ",
    ".  ",
    ".   ",
    ".    ",
]
top += len(patterns2)
# BUG FIX: this loop previously iterated ``patterns1`` (6 entries), so the
# ``patterns3`` strings defined just above were never exercised and the
# row offset below (``top += len(patterns3)``, 5 entries) overlapped the
# next section.
for i, pattern in enumerate(patterns3):
    y = (top + i)*10
    canv.create_line(20, y, D-20, y, dash=pattern)

# --- disableddash: pattern used while the item state is DISABLED ---
top += len(patterns3)
for i, pattern in enumerate(patterns1):
    y = (top + i)*10
    canv.create_line(20, y, D-20, y, width=2, state=DISABLED,
        dash=(5,5), disableddash=pattern)

# --- dashoffset: the same pattern shifted by 0..10 pixels ---
top += len(patterns1)
for i in xrange(11):
    y = (top + i)*10
    canv.create_line(20, y, D-20, y, dash=(4,6), dashoffset=i)

# Run the shared test driver in the background, then enter the Tk loop.
thread.start_new_thread(test, (canv, __file__, True))
root.mainloop()
| {
"repo_name": "WojciechMula/canvas2svg",
"path": "test/test-dash.py",
"copies": "1",
"size": "1245",
"license": "bsd-3-clause",
"hash": 7266254839969550000,
"line_mean": 18.1538461538,
"line_max": 60,
"alpha_frac": 0.5638554217,
"autogenerated": false,
"ratio": 2.301293900184843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33651493218848433,
"avg_score": null,
"num_lines": null
} |
from framework import utils
def serialize_initiator(initiator):
    """Return a minimal JSON-safe summary (fullname and id) of *initiator*."""
    summary = {}
    summary['fullname'] = initiator.fullname
    summary['id'] = initiator._id
    return summary
def serialize_meta_schema(meta_schema):
    """Return a plain-dict view of a metaschema model, or None when falsy."""
    if not meta_schema:
        return None
    # Output key -> model attribute (note the id/name renames).
    key_to_attr = (
        ('id', '_id'),
        ('schema_name', 'name'),
        ('schema_version', 'schema_version'),
        ('schema', 'schema'),
        ('fulfills', 'fulfills'),
        ('requires_approval', 'requires_approval'),
        ('requires_consent', 'requires_consent'),
        ('messages', 'messages'),
    )
    return {key: getattr(meta_schema, attr) for key, attr in key_to_attr}
def serialize_meta_schemas(meta_schemas):
    """Serialize each metaschema in *meta_schemas*; a falsy argument yields []."""
    if not meta_schemas:
        return []
    return [serialize_meta_schema(each) for each in meta_schemas]
def serialize_draft_registration(draft, auth=None):
    """Serialize a draft registration (metadata, schema, flags and navigation
    URLs) for consumption by the web UI."""
    from website.project.utils import serialize_node  # noqa
    from api.base.utils import absolute_reverse

    node = draft.branched_from
    # Navigation endpoints for this draft on its parent project.
    urls = {
        'edit': node.web_url_for('edit_draft_registration_page', draft_id=draft._id, _guid=True),
        'before_register': node.api_url_for('project_before_register'),
        'register': absolute_reverse('nodes:node-registrations', kwargs={'node_id': node._id, 'version': 'v2'}),
        'register_page': node.web_url_for('draft_before_register_page', draft_id=draft._id, _guid=True),
        'registrations': node.web_url_for('node_registrations', _guid=True),
    }
    serialized = {
        'pk': draft._id,
        'branched_from': serialize_node(node, auth),
        'initiator': serialize_initiator(draft.initiator),
        'registration_metadata': draft.registration_metadata,
        'registration_schema': serialize_meta_schema(draft.registration_schema),
        'initiated': utils.iso8601format(draft.datetime_initiated),
        'updated': utils.iso8601format(draft.datetime_updated),
        'flags': draft.flags,
        'urls': urls,
        'requires_approval': draft.requires_approval,
        'is_pending_approval': draft.is_pending_review,
        'is_approved': draft.is_approved,
    }
    return serialized
def create_jsonschema_from_metaschema(metaschema, required_fields=False, is_reviewer=False):
    """Build a jsonschema document from a registration metaschema, for
    validating submitted registration metadata.

    Reviewer schemas only allow comment fields.
    """
    json_schema = base_metaschema(metaschema)
    required_qids = []
    # Flatten pages -> questions; each question becomes one property.
    all_questions = (q for page in metaschema['pages'] for q in page['questions'])
    for question in all_questions:
        qid = question['qid']
        question_required = get_required(question)
        if question_required and required_fields:
            required_qids.append(qid)
        entry = {
            'type': 'object',
            'additionalProperties': False,
            'properties': extract_question_values(question, required_fields, is_reviewer, question_required)
        }
        if required_fields:
            entry['required'] = ['value']
        json_schema['properties'][qid] = entry
    if required_qids and required_fields:
        json_schema['required'] = required_qids
    return json_schema
def get_object_jsonschema(question, required_fields, is_reviewer, is_required):
    """Return the jsonschema for a nested 'object' question, one property per
    sub-question listed under the question's 'properties'."""
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {},
    }
    required_ids = []
    for prop in question.get('properties') or []:
        if required_fields and prop.get('required', False):
            required_ids.append(prop['id'])
        entry = {
            'type': 'object',
            'additionalProperties': False,
            'properties': extract_question_values(prop, required_fields, is_reviewer, is_required),
        }
        if required_fields:
            entry['required'] = ['value']
        schema['properties'][prop['id']] = entry
    if required_fields and is_required:
        schema['required'] = required_ids
    return schema
def extract_question_values(question, required_fields, is_reviewer, is_required):
    """Build the 'value', 'comments' and 'extra' property schemas for a single
    question, specialized by the question's type; reviewers keep only the
    comment fields (plus 'value' for object questions)."""
    response = {
        'value': {'type': 'string'},
        'comments': COMMENTS_SCHEMA,
        'extra': {'type': 'array'},
    }
    qtype = question.get('type')
    if qtype == 'object':
        response['value'] = get_object_jsonschema(question, required_fields, is_reviewer, is_required)
    elif qtype == 'choose':
        options = question.get('options')
        if options:
            enum_options = get_options_jsonschema(options, is_required)
            question_format = question.get('format')
            if question_format == 'singleselect':
                response['value'] = enum_options
            elif question_format == 'multiselect':
                response['value'] = {'type': 'array', 'items': enum_options}
    elif qtype == 'osf-upload':
        response['extra'] = OSF_UPLOAD_EXTRA_SCHEMA
    if is_reviewer:
        del response['extra']
        if qtype != 'object':
            del response['value']
    return response
def get_required(question):
    """Return the question's 'required' flag; also truthy when any nested
    property dict under 'properties' is itself marked required."""
    required = question.get('required', False)
    if required:
        return required
    props = question.get('properties', False)
    if props and isinstance(props, list):
        for entry in props:
            if isinstance(entry, dict) and entry.get('required', False):
                return True
    # Preserve the original falsy value from question.get().
    return required
def get_options_jsonschema(options, required):
    """Return an 'enum' schema for multiple-choice options.

    NOTE: mutates *options* in place — dict entries are replaced by their
    'text' value, and non-required questions get '' appended (the returned
    'enum' list is the same object as *options*).
    """
    for index, option in enumerate(options):
        if isinstance(option, dict) and option.get('text'):
            options[index] = option['text']
    value = {'enum': options}
    # Non-required fields need to accept empty strings as a value.
    if not required and '' not in value['enum']:
        value['enum'].append('')
    return value
# jsonschema fragment for the 'extra' array attached to 'osf-upload'
# question responses. The nested 'data' object mirrors a file-metadata
# payload (presumably as delivered by the OSF file API — verify against
# the uploader client); everywhere except 'fileId' the value types are
# pinned and additionalProperties is False.
OSF_UPLOAD_EXTRA_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'data': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'kind': {'type': 'string'},
                    'contentType': {'type': 'string'},
                    'name': {'type': 'string'},
                    'extra': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'downloads': {'type': 'integer'},
                            'version': {'type': 'integer'},
                            'latestVersionSeen': {'type': 'string'},
                            'guid': {'type': 'string'},
                            'checkout': {'type': 'string'},
                            'hashes': {
                                'type': 'object',
                                'additionalProperties': False,
                                'properties': {
                                    'sha256': {'type': 'string'},
                                    'md5': {'type': 'string'}
                                }
                            }
                        }
                    },
                    'materialized': {'type': 'string'},
                    'modified': {'type': 'string'},
                    'nodeId': {'type': 'string'},
                    'etag': {'type': 'string'},
                    'provider': {'type': 'string'},
                    'path': {'type': 'string'},
                    'nodeUrl': {'type': 'string'},
                    'waterbutlerURL': {'type': 'string'},
                    'resource': {'type': 'string'},
                    'nodeApiUrl': {'type': 'string'},
                    'type': {'type': 'string'},
                    'accept': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'acceptedFiles': {'type': 'boolean'},
                            'maxSize': {'type': 'integer'},
                        }
                    },
                    'links': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'download': {'type': 'string'},
                            'move': {'type': 'string'},
                            'upload': {'type': 'string'},
                            'delete': {'type': 'string'}
                        }
                    },
                    'permissions': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'edit': {'type': 'boolean'},
                            'view': {'type': 'boolean'}
                        }
                    },
                    'created_utc': {'type': 'string'},
                    'id': {'type': 'string'},
                    'modified_utc': {'type': 'string'},
                    'size': {'type': 'integer'},
                    'sizeInt': {'type': 'integer'},
                }
            },
            # 'fileId' may be either a bare id string or an object.
            'fileId': {'type': ['string', 'object']},
            'descriptionValue': {'type': 'string'},
            'sha256': {'type': 'string'},
            'selectedFileName': {'type': 'string'},
            'nodeId': {'type': 'string'},
            'viewUrl': {'type': 'string'}
        }
    }
}
# jsonschema fragment for a question's 'comments' array. Note that the
# 'user' subobject allows unknown keys (additionalProperties is True
# there), unlike the comment object itself.
COMMENTS_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'seenBy': {
                'type': 'array',
            },
            'canDelete': {'type': 'boolean'},
            'created': {'type': 'string'},
            'lastModified': {'type': 'string'},
            'author': {'type': 'string'},
            'value': {'type': 'string'},
            'isOwner': {'type': 'boolean'},
            'getAuthor': {'type': 'string'},
            'user': {
                'type': 'object',
                'additionalProperties': True,
                'properties': {
                    'fullname': {'type': 'string'},
                    'id': {'type': 'integer'}
                }
            },
            'saved': {'type': 'boolean'},
            'canEdit': {'type': 'boolean'},
            'isDeleted': {'type': 'boolean'}
        }
    }
}
def base_metaschema(metaschema):
    """Return the top-level jsonschema skeleton for *metaschema*, with an
    empty 'properties' map for callers to fill in."""
    return {
        'type': 'object',
        'description': metaschema['description'],
        'title': metaschema['title'],
        'additionalProperties': False,
        'properties': {},
    }
| {
"repo_name": "Johnetordoff/osf.io",
"path": "website/project/metadata/utils.py",
"copies": "6",
"size": "11056",
"license": "apache-2.0",
"hash": -3523328993723057700,
"line_mean": 36.6054421769,
"line_max": 116,
"alpha_frac": 0.4888748191,
"autogenerated": false,
"ratio": 4.602830974188176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8091705793288176,
"avg_score": null,
"num_lines": null
} |
from framework import utils
def serialize_meta_schema(meta_schema):
    """Map a metaschema model to a plain dict; returns None when falsy.

    (Older variant of this serializer: no 'requires_consent' field.)
    """
    if not meta_schema:
        return None
    serialized = {'id': meta_schema._id}
    serialized['schema_name'] = meta_schema.name
    serialized['schema_version'] = meta_schema.schema_version
    serialized['schema'] = meta_schema.schema
    serialized['fulfills'] = meta_schema.fulfills
    serialized['requires_approval'] = meta_schema.requires_approval
    serialized['messages'] = meta_schema.messages
    return serialized
def serialize_meta_schemas(meta_schemas):
return [serialize_meta_schema(schema) for schema in (meta_schemas or [])]
def serialize_draft_registration(draft, auth=None):
    """Serialize a draft registration for the UI (older variant: full user
    serialization for the initiator and non-guid project URLs)."""
    from website.profile.utils import serialize_user  # noqa
    from website.project.utils import serialize_node  # noqa

    node = draft.branched_from
    # Navigation endpoints for this draft on its parent project.
    urls = {
        'edit': node.web_url_for('edit_draft_registration_page', draft_id=draft._id),
        'before_register': node.api_url_for('project_before_register'),
        'register': node.api_url_for('register_draft_registration', draft_id=draft._id),
        'register_page': node.web_url_for('draft_before_register_page', draft_id=draft._id),
        'registrations': node.web_url_for('node_registrations'),
    }
    serialized = {
        'pk': draft._id,
        'branched_from': serialize_node(node, auth),
        'initiator': serialize_user(draft.initiator, full=True),
        'registration_metadata': draft.registration_metadata,
        'registration_schema': serialize_meta_schema(draft.registration_schema),
        'initiated': utils.iso8601format(draft.datetime_initiated),
        'updated': utils.iso8601format(draft.datetime_updated),
        'flags': draft.flags,
        'urls': urls,
        'requires_approval': draft.requires_approval,
        'is_pending_approval': draft.is_pending_review,
        'is_approved': draft.is_approved,
    }
    return serialized
| {
"repo_name": "ZobairAlijan/osf.io",
"path": "website/project/metadata/utils.py",
"copies": "2",
"size": "1855",
"license": "apache-2.0",
"hash": -3705413592554923000,
"line_mean": 41.1590909091,
"line_max": 96,
"alpha_frac": 0.6533692722,
"autogenerated": false,
"ratio": 3.762677484787018,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5416046756987017,
"avg_score": null,
"num_lines": null
} |
from framework import utils
from osf.utils import permissions as osf_permissions
# Duplicate copy of the metadata serializers appearing earlier in this file
# (variant whose draft URLs also expose a 'submit' endpoint).
def serialize_initiator(initiator):
    """Return a minimal JSON-safe summary (fullname and id) of the initiating user."""
    return {
        'fullname': initiator.fullname,
        'id': initiator._id
    }
def serialize_meta_schema(meta_schema):
    """Return a plain-dict view of *meta_schema*, or None when it is falsy."""
    if not meta_schema:
        return None
    return {
        'id': meta_schema._id,
        'schema_name': meta_schema.name,
        'schema_version': meta_schema.schema_version,
        'schema': meta_schema.schema,
        'fulfills': meta_schema.fulfills,
        'requires_approval': meta_schema.requires_approval,
        'requires_consent': meta_schema.requires_consent,
        'messages': meta_schema.messages
    }
def serialize_meta_schemas(meta_schemas):
    """Serialize each metaschema; a falsy argument yields an empty list."""
    return [serialize_meta_schema(schema) for schema in (meta_schemas or [])]
def serialize_draft_registration(draft, auth=None):
    """Serialize a draft registration (metadata, schema, flags, URLs) for the UI."""
    from website.project.utils import serialize_node  # noqa
    from api.base.utils import absolute_reverse
    node = draft.branched_from
    return {
        'pk': draft._id,
        'branched_from': serialize_node(node, auth),
        'initiator': serialize_initiator(draft.initiator),
        'registration_metadata': draft.registration_metadata,
        'registration_schema': serialize_meta_schema(draft.registration_schema),
        'initiated': utils.iso8601format(draft.datetime_initiated),
        'updated': utils.iso8601format(draft.datetime_updated),
        'flags': draft.flags,
        'urls': {
            'edit': node.web_url_for('edit_draft_registration_page', draft_id=draft._id, _guid=True),
            'submit': node.api_url_for('submit_draft_for_review', draft_id=draft._id),
            'before_register': node.api_url_for('project_before_register'),
            'register': absolute_reverse('nodes:node-registrations', kwargs={'node_id': node._id, 'version': 'v2'}),
            'register_page': node.web_url_for('draft_before_register_page', draft_id=draft._id, _guid=True),
            'registrations': node.web_url_for('node_registrations', _guid=True)
        },
        'requires_approval': draft.requires_approval,
        'is_pending_approval': draft.is_pending_review,
        'is_approved': draft.is_approved,
    }
# Duplicate copy of the jsonschema-building helpers and schema constants
# appearing earlier in this file (same 4-argument variants).
def create_jsonschema_from_metaschema(metaschema, required_fields=False, is_reviewer=False):
    """
    Creates jsonschema from registration metaschema for validation.
    Reviewer schemas only allow comment fields.
    """
    json_schema = base_metaschema(metaschema)
    required = []
    # One jsonschema property per question, flattened across all pages.
    for page in metaschema['pages']:
        for question in page['questions']:
            is_required = get_required(question)
            if is_required and required_fields:
                required.append(question['qid'])
            json_schema['properties'][question['qid']] = {
                'type': 'object',
                'additionalProperties': False,
                'properties': extract_question_values(question, required_fields, is_reviewer, is_required)
            }
            if required_fields:
                json_schema['properties'][question['qid']]['required'] = ['value']
    if required and required_fields:
        json_schema['required'] = required
    return json_schema
def get_object_jsonschema(question, required_fields, is_reviewer, is_required):
    """
    Returns jsonschema for nested objects within schema
    """
    object_jsonschema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
        }
    }
    required = []
    properties = question.get('properties')
    if properties:
        for property in properties:
            if property.get('required', False) and required_fields:
                required.append(property['id'])
            values = extract_question_values(property, required_fields, is_reviewer, is_required)
            object_jsonschema['properties'][property['id']] = {
                'type': 'object',
                'additionalProperties': False,
                'properties': values
            }
            if required_fields:
                object_jsonschema['properties'][property['id']]['required'] = ['value']
    if required_fields and is_required:
        object_jsonschema['required'] = required
    return object_jsonschema
def extract_question_values(question, required_fields, is_reviewer, is_required):
    """
    Pulls structure for 'value', 'comments', and 'extra' items
    """
    response = {
        'value': {'type': 'string'},
        'comments': COMMENTS_SCHEMA,
        'extra': {'type': 'array'}
    }
    if question.get('type') == 'object':
        response['value'] = get_object_jsonschema(question, required_fields, is_reviewer, is_required)
    elif question.get('type') == 'choose':
        options = question.get('options')
        if options:
            enum_options = get_options_jsonschema(options, is_required)
            if question.get('format') == 'singleselect':
                response['value'] = enum_options
            elif question.get('format') == 'multiselect':
                response['value'] = {'type': 'array', 'items': enum_options}
    elif question.get('type') == 'osf-upload':
        response['extra'] = OSF_UPLOAD_EXTRA_SCHEMA
    # Reviewers may only touch comments (plus 'value' for object questions).
    if is_reviewer:
        del response['extra']
        if not question.get('type') == 'object':
            del response['value']
    return response
def get_required(question):
    """
    Returns True if metaschema question is required.

    Also truthy when any nested property dict is marked required.
    """
    required = question.get('required', False)
    if not required:
        properties = question.get('properties', False)
        if properties and isinstance(properties, list):
            for item, property in enumerate(properties):
                if isinstance(property, dict) and property.get('required', False):
                    required = True
                    break
    return required
def get_options_jsonschema(options, required):
    """
    Returns multiple choice options for schema questions

    NOTE: mutates *options* in place (dicts replaced by their 'text', and
    '' appended for non-required questions).
    """
    for item, option in enumerate(options):
        if isinstance(option, dict) and option.get('text'):
            options[item] = option.get('text')
    value = {'enum': options}
    if not required and '' not in value['enum']:  # Non-required fields need to accept empty strings as a value.
        value['enum'].append('')
    return value
# jsonschema fragment for the 'extra' array of 'osf-upload' responses
# (duplicate of the constant defined earlier in this file).
OSF_UPLOAD_EXTRA_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'data': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'kind': {'type': 'string'},
                    'contentType': {'type': 'string'},
                    'name': {'type': 'string'},
                    'extra': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'downloads': {'type': 'integer'},
                            'version': {'type': 'integer'},
                            'latestVersionSeen': {'type': 'string'},
                            'guid': {'type': 'string'},
                            'checkout': {'type': 'string'},
                            'hashes': {
                                'type': 'object',
                                'additionalProperties': False,
                                'properties': {
                                    'sha256': {'type': 'string'},
                                    'md5': {'type': 'string'}
                                }
                            }
                        }
                    },
                    'materialized': {'type': 'string'},
                    'modified': {'type': 'string'},
                    'nodeId': {'type': 'string'},
                    'etag': {'type': 'string'},
                    'provider': {'type': 'string'},
                    'path': {'type': 'string'},
                    'nodeUrl': {'type': 'string'},
                    'waterbutlerURL': {'type': 'string'},
                    'resource': {'type': 'string'},
                    'nodeApiUrl': {'type': 'string'},
                    'type': {'type': 'string'},
                    'accept': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'acceptedFiles': {'type': 'boolean'},
                            'maxSize': {'type': 'integer'},
                        }
                    },
                    'links': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'download': {'type': 'string'},
                            'move': {'type': 'string'},
                            'upload': {'type': 'string'},
                            'delete': {'type': 'string'}
                        }
                    },
                    'permissions': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'edit': {'type': 'boolean'},
                            'view': {'type': 'boolean'}
                        }
                    },
                    'created_utc': {'type': 'string'},
                    'id': {'type': 'string'},
                    'modified_utc': {'type': 'string'},
                    'size': {'type': 'integer'},
                    'sizeInt': {'type': 'integer'},
                }
            },
            'fileId': {'type': ['string', 'object']},
            'descriptionValue': {'type': 'string'},
            'sha256': {'type': 'string'},
            'selectedFileName': {'type': 'string'},
            'nodeId': {'type': 'string'},
            'viewUrl': {'type': 'string'}
        }
    }
}
# jsonschema fragment for a question's 'comments' array (duplicate of the
# constant defined earlier in this file; 'user' allows extra keys).
COMMENTS_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'seenBy': {
                'type': 'array',
            },
            'canDelete': {'type': 'boolean'},
            'created': {'type': 'string'},
            'lastModified': {'type': 'string'},
            'author': {'type': 'string'},
            'value': {'type': 'string'},
            'isOwner': {'type': 'boolean'},
            'getAuthor': {'type': 'string'},
            'user': {
                'type': 'object',
                'additionalProperties': True,
                'properties': {
                    'fullname': {'type': 'string'},
                    'id': {'type': 'integer'}
                }
            },
            'saved': {'type': 'boolean'},
            'canEdit': {'type': 'boolean'},
            'isDeleted': {'type': 'boolean'}
        }
    }
}
def base_metaschema(metaschema):
    """Construct the skeleton jsonschema shared by all registration schemas."""
    skeleton = {'type': 'object'}
    skeleton['description'] = metaschema['description']
    skeleton['title'] = metaschema['title']
    skeleton['additionalProperties'] = False
    skeleton['properties'] = {}
    return skeleton
def is_prereg_admin(user):
    """Return whether *user* holds the prereg reviewer permission.

    A None (anonymous) user is never a prereg admin.
    """
    if user is None:
        return False
    return user.has_perm('osf.administer_prereg')
def is_prereg_admin_not_project_admin(request, draft):
    """Return True when the requesting user is a prereg admin but is not an
    admin on the project the draft branches from."""
    user = request.user
    # Evaluate the project-permission check first, as the original did.
    project_admin = draft.branched_from.has_permission(user, osf_permissions.ADMIN)
    return is_prereg_admin(user) and not project_admin
| {
"repo_name": "brianjgeiger/osf.io",
"path": "website/project/metadata/utils.py",
"copies": "5",
"size": "11696",
"license": "apache-2.0",
"hash": 2299537912168417000,
"line_mean": 36.2484076433,
"line_max": 116,
"alpha_frac": 0.4994870041,
"autogenerated": false,
"ratio": 4.522815158546017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7522302162646016,
"avg_score": null,
"num_lines": null
} |
from framework import utils
from website.settings import PREREG_ADMIN_TAG
from website.util import permissions as osf_permissions
# Older duplicate copy of the metadata serializers (project-relative
# register URL instead of the v2 API reverse; non-guid web URLs).
def serialize_initiator(initiator):
    """Return a minimal JSON-safe summary (fullname and id) of the initiating user."""
    return {
        'fullname': initiator.fullname,
        'id': initiator._id
    }
def serialize_meta_schema(meta_schema):
    """Return a plain-dict view of *meta_schema*, or None when it is falsy."""
    if not meta_schema:
        return None
    return {
        'id': meta_schema._id,
        'schema_name': meta_schema.name,
        'schema_version': meta_schema.schema_version,
        'schema': meta_schema.schema,
        'fulfills': meta_schema.fulfills,
        'requires_approval': meta_schema.requires_approval,
        'requires_consent': meta_schema.requires_consent,
        'messages': meta_schema.messages
    }
def serialize_meta_schemas(meta_schemas):
    """Serialize each metaschema; a falsy argument yields an empty list."""
    return [serialize_meta_schema(schema) for schema in (meta_schemas or [])]
def serialize_draft_registration(draft, auth=None):
    """Serialize a draft registration (metadata, schema, flags, URLs) for the UI."""
    from website.project.utils import serialize_node  # noqa
    node = draft.branched_from
    return {
        'pk': draft._id,
        'branched_from': serialize_node(node, auth),
        'initiator': serialize_initiator(draft.initiator),
        'registration_metadata': draft.registration_metadata,
        'registration_schema': serialize_meta_schema(draft.registration_schema),
        'initiated': utils.iso8601format(draft.datetime_initiated),
        'updated': utils.iso8601format(draft.datetime_updated),
        'flags': draft.flags,
        'urls': {
            'edit': node.web_url_for('edit_draft_registration_page', draft_id=draft._id),
            'submit': node.api_url_for('submit_draft_for_review', draft_id=draft._id),
            'before_register': node.api_url_for('project_before_register'),
            'register': node.api_url_for('register_draft_registration', draft_id=draft._id),
            'register_page': node.web_url_for('draft_before_register_page', draft_id=draft._id),
            'registrations': node.web_url_for('node_registrations')
        },
        'requires_approval': draft.requires_approval,
        'is_pending_approval': draft.is_pending_review,
        'is_approved': draft.is_approved,
    }
# Older duplicate copy of the jsonschema-building helpers: 3-argument
# variants (no is_required threading), the required-check is named
# is_required, and get_options_jsonschema takes no 'required' argument.
def create_jsonschema_from_metaschema(metaschema, required_fields=False, is_reviewer=False):
    """
    Creates jsonschema from registration metaschema for validation.
    Reviewer schemas only allow comment fields.
    """
    json_schema = base_metaschema(metaschema)
    required = []
    # One jsonschema property per question, flattened across all pages.
    for page in metaschema['pages']:
        for question in page['questions']:
            if is_required(question) and required_fields:
                required.append(question['qid'])
            json_schema['properties'][question['qid']] = {
                'type': 'object',
                'additionalProperties': False,
                'properties': extract_question_values(question, required_fields, is_reviewer)
            }
            if required_fields:
                json_schema['properties'][question['qid']]['required'] = ['value']
    if required and required_fields:
        json_schema['required'] = required
    return json_schema
def get_object_jsonschema(question, required_fields, is_reviewer):
    """
    Returns jsonschema for nested objects within schema
    """
    object_jsonschema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
        }
    }
    required = []
    properties = question.get('properties')
    if properties:
        for property in properties:
            if property.get('required', False) and required_fields:
                required.append(property['id'])
            values = extract_question_values(property, required_fields, is_reviewer)
            object_jsonschema['properties'][property['id']] = {
                'type': 'object',
                'additionalProperties': False,
                'properties': values
            }
            if required_fields:
                object_jsonschema['properties'][property['id']]['required'] = ['value']
    if required_fields and is_required(question):
        object_jsonschema['required'] = required
    return object_jsonschema
def extract_question_values(question, required_fields, is_reviewer):
    """
    Pulls structure for 'value', 'comments', and 'extra' items
    """
    response = {
        'value': {'type': 'string'},
        'comments': COMMENTS_SCHEMA,
        'extra': {'type': 'array'}
    }
    if question.get('type') == 'object':
        response['value'] = get_object_jsonschema(question, required_fields, is_reviewer)
    elif question.get('type') == 'choose':
        options = question.get('options')
        if options:
            response['value'] = get_options_jsonschema(options)
    elif question.get('type') == 'osf-upload':
        response['extra'] = OSF_UPLOAD_EXTRA_SCHEMA
    # Reviewers may only touch comments (plus 'value' for object questions).
    if is_reviewer:
        del response['extra']
        if not question.get('type') == 'object':
            del response['value']
    return response
def is_required(question):
    """
    Returns True if metaschema question is required.

    Also truthy when any nested property dict is marked required.
    """
    required = question.get('required', False)
    if not required:
        properties = question.get('properties', False)
        if properties and isinstance(properties, list):
            for item, property in enumerate(properties):
                if isinstance(property, dict) and property.get('required', False):
                    required = True
                    break
    return required
def get_options_jsonschema(options):
    """
    Returns multiple choice options for schema questions

    NOTE: mutates *options* in place (dicts replaced by their 'text').
    """
    for item, option in enumerate(options):
        if isinstance(option, dict) and option.get('text'):
            options[item] = option.get('text')
    value = {'enum': options}
    return value
# jsonschema fragment for the 'extra' array of 'osf-upload' responses
# (older, smaller variant of the constant defined earlier in this file).
OSF_UPLOAD_EXTRA_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'data': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'kind': {'type': 'string'},
                    'contentType': {'type': 'string'},
                    'name': {'type': 'string'},
                    'extra': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'downloads': {'type': 'integer'},
                            'version': {'type': 'integer'},
                            'checkout': {'type': 'string'},
                            'hashes': {
                                'type': 'object',
                                'additionalProperties': False,
                                'properties': {
                                    'sha256': {'type': 'string'},
                                    'md5': {'type': 'string'}
                                }
                            }
                        }
                    },
                    'materialized': {'type': 'string'},
                    'modified': {'type': 'string'},
                    'nodeId': {'type': 'string'},
                    'etag': {'type': 'string'},
                    'provider': {'type': 'string'},
                    'path': {'type': 'string'},
                    'size': {'type': 'integer'}
                }
            },
            'sha256': {'type': 'string'},
            'selectedFileName': {'type': 'string'},
            'nodeId': {'type': 'string'},
            'viewUrl': {'type': 'string'}
        }
    }
}
# jsonschema fragment for a question's 'comments' array (older variant:
# 'seenBy' items pinned to integers and a closed 'user' object).
COMMENTS_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'seenBy': {
                'type': 'array',
                'items': {
                    'type': 'integer'
                }
            },
            'canDelete': {'type': 'boolean'},
            'created': {'type': 'string'},
            'lastModified': {'type': 'string'},
            'author': {'type': 'string'},
            'value': {'type': 'string'},
            'isOwner': {'type': 'boolean'},
            'getAuthor': {'type': 'string'},
            'user': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'fullname': {'type': 'string'},
                    'id': {'type': 'integer'}
                }
            },
            'saved': {'type': 'boolean'},
            'canEdit': {'type': 'boolean'},
            'isDeleted': {'type': 'boolean'}
        }
    }
}
def base_metaschema(metaschema):
    """Return the top-level jsonschema scaffold for *metaschema*."""
    return dict(
        type='object',
        description=metaschema['description'],
        title=metaschema['title'],
        additionalProperties=False,
        properties={},
    )
def is_prereg_admin(user):
    """Return True when *user* carries the prereg-admin system tag.

    A None (anonymous) user is never a prereg admin; users without a
    system_tags attribute are treated as untagged.
    """
    if user is None:
        return False
    tags = getattr(user, 'system_tags', [])
    return PREREG_ADMIN_TAG in tags
def is_prereg_admin_not_project_admin(request, draft):
    """Return True when the requesting user is a prereg admin but is not an
    admin on the project the draft branches from."""
    user = request.user
    # Evaluate the project-permission check first, as the original did.
    project_admin = draft.branched_from.has_permission(user, osf_permissions.ADMIN)
    return is_prereg_admin(user) and not project_admin
| {
"repo_name": "aaxelb/osf.io",
"path": "website/project/metadata/utils.py",
"copies": "21",
"size": "9400",
"license": "apache-2.0",
"hash": 1698253641384920800,
"line_mean": 34.0746268657,
"line_max": 96,
"alpha_frac": 0.5308510638,
"autogenerated": false,
"ratio": 4.372093023255814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from framework import utils
from website.util import permissions as osf_permissions
def serialize_initiator(initiator):
return {
'fullname': initiator.fullname,
'id': initiator._id
}
def serialize_meta_schema(meta_schema):
if not meta_schema:
return None
return {
'id': meta_schema._id,
'schema_name': meta_schema.name,
'schema_version': meta_schema.schema_version,
'schema': meta_schema.schema,
'fulfills': meta_schema.fulfills,
'requires_approval': meta_schema.requires_approval,
'requires_consent': meta_schema.requires_consent,
'messages': meta_schema.messages
}
def serialize_meta_schemas(meta_schemas):
return [serialize_meta_schema(schema) for schema in (meta_schemas or [])]
def serialize_draft_registration(draft, auth=None):
from website.project.utils import serialize_node # noqa
node = draft.branched_from
return {
'pk': draft._id,
'branched_from': serialize_node(node, auth),
'initiator': serialize_initiator(draft.initiator),
'registration_metadata': draft.registration_metadata,
'registration_schema': serialize_meta_schema(draft.registration_schema),
'initiated': utils.iso8601format(draft.datetime_initiated),
'updated': utils.iso8601format(draft.datetime_updated),
'flags': draft.flags,
'urls': {
'edit': node.web_url_for('edit_draft_registration_page', draft_id=draft._id),
'submit': node.api_url_for('submit_draft_for_review', draft_id=draft._id),
'before_register': node.api_url_for('project_before_register'),
'register': node.api_url_for('register_draft_registration', draft_id=draft._id),
'register_page': node.web_url_for('draft_before_register_page', draft_id=draft._id),
'registrations': node.web_url_for('node_registrations')
},
'requires_approval': draft.requires_approval,
'is_pending_approval': draft.is_pending_review,
'is_approved': draft.is_approved,
}
def create_jsonschema_from_metaschema(metaschema, required_fields=False, is_reviewer=False):
    """
    Creates jsonschema from registration metaschema for validation.
    Reviewer schemas only allow comment fields.
    """
    schema = base_metaschema(metaschema)
    required_qids = []
    # walk every question on every page of the metaschema
    for page in metaschema['pages']:
        for question in page['questions']:
            qid = question['qid']
            if required_fields and is_required(question):
                required_qids.append(qid)
            schema['properties'][qid] = {
                'type': 'object',
                'additionalProperties': False,
                'properties': extract_question_values(question, required_fields, is_reviewer),
            }
            if required_fields:
                # every question object must at least carry a 'value' key
                schema['properties'][qid]['required'] = ['value']
    if required_fields and required_qids:
        schema['required'] = required_qids
    return schema
def get_object_jsonschema(question, required_fields, is_reviewer):
    """
    Returns jsonschema for nested objects within schema
    """
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {},
    }
    required_ids = []
    # each sub-property becomes its own {value, comments, extra} object
    for prop in question.get('properties') or []:
        prop_id = prop['id']
        if required_fields and prop.get('required', False):
            required_ids.append(prop_id)
        schema['properties'][prop_id] = {
            'type': 'object',
            'additionalProperties': False,
            'properties': extract_question_values(prop, required_fields, is_reviewer),
        }
        if required_fields:
            schema['properties'][prop_id]['required'] = ['value']
    if required_fields and is_required(question):
        schema['required'] = required_ids
    return schema
def extract_question_values(question, required_fields, is_reviewer):
    """
    Pulls structure for 'value', 'comments', and 'extra' items
    """
    qtype = question.get('type')
    response = {
        'value': {'type': 'string'},
        'comments': COMMENTS_SCHEMA,
        'extra': {'type': 'array'},
    }
    if qtype == 'object':
        response['value'] = get_object_jsonschema(question, required_fields, is_reviewer)
    elif qtype == 'choose':
        options = question.get('options')
        if options:
            response['value'] = get_options_jsonschema(options)
    elif qtype == 'osf-upload':
        response['extra'] = OSF_UPLOAD_EXTRA_SCHEMA
    if is_reviewer:
        # reviewers may only submit comments, never values or extras
        del response['extra']
        if qtype != 'object':
            del response['value']
    return response
def is_required(question):
    """
    Returns True if metaschema question is required.

    A question counts as required if it is flagged directly via its own
    'required' key, or if any of its nested 'properties' entries is flagged.
    The directly-stored value is returned unchanged (truthiness preserved).
    """
    required = question.get('required', False)
    if required:
        return required
    properties = question.get('properties', False)
    # fall back to scanning nested properties; any single required
    # sub-property makes the whole question required
    if isinstance(properties, list):
        return any(
            isinstance(prop, dict) and prop.get('required', False)
            for prop in properties
        )
    return False
def get_options_jsonschema(options):
    """
    Returns multiple choice options for schema questions.

    Dict options contribute their 'text' value; all other entries are kept
    as-is. Unlike the previous implementation, the caller's list is NOT
    mutated in place -- a normalized copy is placed in the 'enum' clause.
    """
    normalized = [
        option.get('text') if isinstance(option, dict) and option.get('text') else option
        for option in options
    ]
    return {'enum': normalized}
# JSON schema fragment validating the 'extra' payload attached to
# 'osf-upload' question responses: a list of attached-file metadata records.
# NOTE(review): field meanings are inferred from the key names here; confirm
# against the upload widget that produces these records.
OSF_UPLOAD_EXTRA_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'data': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'kind': {'type': 'string'},
                    'contentType': {'type': 'string'},
                    'name': {'type': 'string'},
                    'extra': {
                        'type': 'object',
                        'additionalProperties': False,
                        'properties': {
                            'downloads': {'type': 'integer'},
                            'version': {'type': 'integer'},
                            'checkout': {'type': 'string'},
                            'hashes': {
                                'type': 'object',
                                'additionalProperties': False,
                                'properties': {
                                    'sha256': {'type': 'string'},
                                    'md5': {'type': 'string'}
                                }
                            }
                        }
                    },
                    'materialized': {'type': 'string'},
                    'modified': {'type': 'string'},
                    'nodeId': {'type': 'string'},
                    'etag': {'type': 'string'},
                    'provider': {'type': 'string'},
                    'path': {'type': 'string'},
                    'size': {'type': 'integer'}
                }
            },
            'sha256': {'type': 'string'},
            'selectedFileName': {'type': 'string'},
            'nodeId': {'type': 'string'},
            'viewUrl': {'type': 'string'}
        }
    }
}
# JSON schema fragment validating the 'comments' list attached to each
# question response (author, timestamps, visibility/permission flags).
COMMENTS_SCHEMA = {
    'type': 'array',
    'items': {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'seenBy': {
                'type': 'array',
                'items': {
                    'type': 'integer'
                }
            },
            'canDelete': {'type': 'boolean'},
            'created': {'type': 'string'},
            'lastModified': {'type': 'string'},
            'author': {'type': 'string'},
            'value': {'type': 'string'},
            'isOwner': {'type': 'boolean'},
            'getAuthor': {'type': 'string'},
            'user': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'fullname': {'type': 'string'},
                    'id': {'type': 'integer'}
                }
            },
            'saved': {'type': 'boolean'},
            'canEdit': {'type': 'boolean'},
            'isDeleted': {'type': 'boolean'}
        }
    }
}
def base_metaschema(metaschema):
    """Return the skeleton jsonschema (title/description, empty properties) for *metaschema*."""
    return {
        'type': 'object',
        'description': metaschema['description'],
        'title': metaschema['title'],
        'additionalProperties': False,
        'properties': {},
    }
def is_prereg_admin(user):
    """
    Returns true if user has reviewer permissions
    """
    return user is not None and user.has_perm('osf.administer_prereg')
def is_prereg_admin_not_project_admin(request, draft):
    """
    Returns true if user is prereg admin, but not admin on project
    """
    user = request.user
    has_project_admin = draft.branched_from.has_permission(user, osf_permissions.ADMIN)
    if has_project_admin:
        return False
    return is_prereg_admin(user)
| {
"repo_name": "leb2dg/osf.io",
"path": "website/project/metadata/utils.py",
"copies": "4",
"size": "9340",
"license": "apache-2.0",
"hash": -9151890211860215000,
"line_mean": 33.9812734082,
"line_max": 96,
"alpha_frac": 0.5293361884,
"autogenerated": false,
"ratio": 4.378809188935771,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6908145377335773,
"avg_score": null,
"num_lines": null
} |
from framework.latentmodule import LatentModule
from direct.gui.DirectGui import DirectButton
#from direct.gui.DirectGuiGlobals import RIDGE
from panda3d.core import NodePath
from panda3d.core import TextProperties, TextPropertiesManager
import random
import time
import itertools
class Main(LatentModule):
    """
    Object/animal categorization task (SNAP module, Python 2).

    Words are presented either visually or auditorily; the subject touches a
    randomly-placed on-screen button for animals and presses the space bar
    for objects. Beep/buzz feedback is played, a running score is kept, and
    each trial is appended to a tab-separated log file.

    NOTE(review): self.training, self.isi and the string value of
    self.av_type are read in run() but not (fully) initialized here -- they
    are presumably injected by the SNAP study configuration; confirm.
    """
    def __init__(self):
        """Load stimuli and build the randomized per-block presentation order."""
        LatentModule.__init__(self)
        self.conditions = ['object', 'animal']  # category labels, indexed by self.target
        # Load stimuli: one word per line; the first n_animals lines are
        # animals, the following n_objects lines are objects.
        with open('studies/obj_animals/stimuli.txt', 'r') as f:
            self.stimuli = f.readlines()
        n_animals = 64
        n_objects = 464
        self.n_blocks = 4 # Divide the stimuli into this number of blocks
        animals_list = range(0, n_animals)
        objects_list = range(n_animals, n_animals + n_objects)
        animals_blocks = []
        objects_blocks = []
        self.stimuli_order = [] # This list contains the final indices
        self.av_type = [] # This list contains the stimulus type for each word (either 0 or 1, corresponding to auditory and visual, or vice versa)
        # Split the animals into two fixed halves that alternate across blocks,
        # so each animal half is reused every other block.
        tmp_animals_1 = random.sample(animals_list, 32) # Choose 32 random samples from animals
        tmp_animals_2 = list(set(animals_list) - set(tmp_animals_1))
        for k in range(self.n_blocks):
            if k % 2 == 0:
                tmp_animals = tmp_animals_1
            else:
                tmp_animals = tmp_animals_2
            random.shuffle(tmp_animals)
            animals_blocks.append(tmp_animals)
            # each block receives a disjoint quarter of the objects
            # (Python 2 integer division)
            tmp_objects = random.sample(objects_list, n_objects/self.n_blocks)
            objects_blocks.append(tmp_objects)
            objects_list = list(set(objects_list) - set(tmp_objects))
            tmp_animals_objects = animals_blocks[k] + objects_blocks[k]
            random.shuffle(tmp_animals_objects)
            # Make sure that there are no consecutive equal animals - shuffle until this is not the case
            max_repeats = max(len(list(v)) for g, v in itertools.groupby(tmp_animals_objects))
            while max_repeats > 1:
                random.shuffle(tmp_animals_objects)
                max_repeats = max(len(list(v)) for g, v in itertools.groupby(tmp_animals_objects))
            self.stimuli_order.append(tmp_animals_objects)
        # Flatten lists
        self.stimuli_order = [item for sublist in self.stimuli_order for item in sublist]
        # target[i] == 1 for animal stimulus indices, 0 for object indices
        self.target = [1 for k in range(1, n_animals + 1)] + [0 for k in range(1, n_objects + 1)]
        self.pause = 0.5  # inter-trial pause in seconds
        self.score = 0 # Total score

    def run(self):
        """Run the experiment: instructions (training only), then all trials."""
        self.implicit_markers = False
        base.win.setClearColor((0, 0, 0, 1))
        self.marker(0) # Send one event to trigger the whole event sending process
        if self.training:
            # shortened session for the practice run
            self.stimuli_order = self.stimuli_order[:16]
            self.n_blocks = 1
            self.n_runs = 1
        if self.av_type == 'auditory':
            self.file_list = ['studies/obj_animals/stimuli/' + k.strip() + '_f.wav' for k in self.stimuli]
            for f in self.file_list:
                self.precache_sound(f)
        # feedback sounds are used in both modalities
        self.precache_sound('buzz.wav')
        self.precache_sound('beep.wav')
        # Define text properties
        tp_gray = TextProperties()
        tp_gray.setTextColor(0.5, 0.5, 0.5, 1)
        tpMgr = TextPropertiesManager.getGlobalPtr()
        tpMgr.setProperties("gray", tp_gray)
        # Show instructions
        if self.training:
            if self.av_type == 'visual':
                verb = 'see'
            else:
                verb = 'hear'
            self.write('This is a word association experiment.\nYou will complete several trials in this block.\n\n' +
                       '\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
            self.write('The experiment consists of two conditions.\nIn each trial, ' +
                       'you will be prompted to perform\none of these two tasks:\n' +
                       '(1) touch a button on the screen,\n' +
                       '(2) press the space bar.\n\n\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
            self.write('In each trial, you will ' + verb + ' a word.\nWhen the word is an animal,\n' +
                       'touch the button on the screen.\n\n\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
            self.write('When the word is an object,\npress the space bar.\n\n\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
            self.write('You will hear a beep for correct answers.\nYou will hear a buzz for incorrect answers.\n\n\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
            self.write('When you are ready,\npress the space bar to begin.' +
                       '\n\n\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
        # open a per-session, timestamp-named log file
        t = time.localtime()
        t_str = '-'.join([str(k) for k in [t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec]])
        f = open('studies/obj_animals/log/' + t_str + '.txt', 'w')
        f.write('Stimulus No.\tStimulus\tCategory\tButton position\tScore\n')
        self.sleep(5)
        counter = 0 # Needed for breaks between blocks
        for k in self.stimuli_order:
            # Short break (at every half-block boundary)
            if not self.training and counter in xrange(len(self.stimuli_order)/self.n_blocks/2, len(self.stimuli_order), len(self.stimuli_order)/self.n_blocks/2):
                self.write('Time for a short break.\n\n' +
                           '\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
                self.sleep(2)
            counter += 1
            # I have to calculate button positions in case the window size changed
            ar = base.getAspectRatio()
            button_frame = (-ar/9, ar/9, -1.0/4, 1.0/4)
            buttons = []
            for k1 in xrange(2, 7):
                for k2 in xrange(4):
                    buttons.append((-ar + ar / 9 + k1 * ar / 4.5, 0, 1 - 1.0 / 4 - k2 / 2.0))
            # Delete middle buttons
            # NOTE(review): each del shifts subsequent indices, so the three
            # slices do not remove symmetric positions -- confirm intended.
            del buttons[5:7]
            del buttons[7:9]
            del buttons[9:11]
            choice = random.randint(0, len(buttons) - 1)
            button = buttons[choice]
            f.write(str(k) + '\t' + self.stimuli[k].strip() + '\t' + self.conditions[self.target[k]] + '\t' + str(choice) + '\t')
            # Visual or auditory presentation
            if self.av_type == 'auditory':
                self.sound(self.file_list[k], volume=0.5)
                self.sleep(0.2)
                self.write('+', duration=self.isi-self.pause, block=False, scale=0.15, fg=(1, 1, 1, 1))
            elif self.av_type == 'visual':
                self.sleep(0.2)
                self.write(self.stimuli[k], duration=self.isi-self.pause, block=False, scale=0.15, fg=(1, 1, 1, 1))
            # stimulus marker: 10000 + stimulus index
            self.marker(k + 10000)
            btn = DirectButton(frameSize=button_frame, pos=button, frameColor=(0.75, 0, 0, 1), borderWidth=(0.01, 0.01),
                               rolloverSound=None, clickSound=None, command=messenger.send, extraArgs=('button_pressed',))
            # wait for either a button touch or a space press within the ISI
            latencies = self.waitfor_multiple(['button_pressed', 'space'], self.isi)
            if not latencies:
                response = 'none'
                wait_time = self.pause
                self.sound('buzz.wav', volume=0.5)
            else:
                response = latencies[0]
                wait_time = self.pause + self.isi - latencies[1]
                # score is proportional to how fast the (correct) response came
                if self.target[k] == 1 and response == 'button_pressed': # Check if values in dictionary are not empty
                    self.score += int(100 * (self.isi - latencies[1]) / self.isi)
                    self.sound('beep.wav', volume=0.5)
                elif self.target[k] == 0 and response == 'space':
                    self.score += int(10 * (self.isi - latencies[1]) / self.isi)
                    self.sound('beep.wav', volume=0.5)
                elif (self.target[k] == 1 and response == 'space') or (self.target[k] == 0 and response == 'button_pressed'):
                    self.score -= 5
                    if self.score < 0:
                        self.score = 0
                    self.sound('buzz.wav', volume=0.5)
            f.write(str(self.score) + '\n')
            try:
                btn.destroy()
            except:
                pass
            self.sleep(wait_time - 0.2)
        f.close()
        if not self.training:
            self.write('You successfully completed\none run of the experiment.\n\nThank you!', duration=5, align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
| {
"repo_name": "villawang/SNAP",
"path": "src/modules/Speech/obj_animals.py",
"copies": "2",
"size": "9177",
"license": "bsd-3-clause",
"hash": -5678417561880071000,
"line_mean": 49.7016574586,
"line_max": 227,
"alpha_frac": 0.5433148088,
"autogenerated": false,
"ratio": 3.5555986051917863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5098913413991786,
"avg_score": null,
"num_lines": null
} |
from framework.latentmodule import LatentModule
from panda3d.core import TextProperties, TextPropertiesManager
import random
import time
class Main(LatentModule):
    """
    Speech perception/production task (SNAP module, Python 2).

    On each trial a word is presented visually or auditorily together with a
    task cue: speak the word (overt), imagine speaking it (covert), or press
    the space bar (control). Responses and trial parameters are logged to a
    timestamp-named tab-separated file.

    NOTE(review): self.training and self.isi are read in run() but not set
    here -- presumably injected by the SNAP study configuration; confirm.
    """
    def __init__(self):
        """Load the stimulus word list and set default block parameters."""
        LatentModule.__init__(self)
        self.tasks = ['overt', 'covert', 'control']  # task cue names, indexed by trial[1]
        with open('studies/speech/stimuli.txt', 'r') as f:
            self.stimuli = f.readlines()
        self.conditions = ['visual', 'auditory']  # presentation modality, indexed by trial[2]
        self.n_blocks = 4       # number of blocks per run (reduced to 1 in training)
        self.pause = 0.5        # post-stimulus pause within each trial (seconds)

    def run(self):
        """Run the experiment: instructions (training only), then all blocks."""
        self.implicit_markers = False
        base.win.setClearColor((0, 0, 0, 1))
        self.marker(0) # Send one event to trigger the whole event sending process
        # Precache sounds
        self.file_list = ['studies/speech/stimuli/' + k.strip() + '_f.wav' for k in self.stimuli]
        for f in self.file_list:
            self.precache_sound(f)
        # Define text properties
        tp_gray = TextProperties()
        tp_gray.setTextColor(0.5, 0.5, 0.5, 1)
        tpMgr = TextPropertiesManager.getGlobalPtr()
        tpMgr.setProperties("gray", tp_gray)
        # Show instructions (only in training run)
        if self.training:
            self.n_blocks = 1
            self.write('This is a speech perception/production\nexperiment. You will complete\nseveral trials in each block.\n\n' +
                       '\1gray\1[Press Space to continue]\2', duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
            self.write('The experiment consists of three conditions.\nIn each trial, ' +
                       'you will be prompted to perform\none of these three tasks:\n' +
                       '(1) speak a word,\n' +
                       '(2) imagine speaking a word,\n' +
                       '(3) press the space bar.\n\n\1gray\1[Press Space to continue]\2', duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
            self.write('In each trial, you will see or hear a word.\n\nYou will also see ' +
                       'a visual cue\nto indicate which task to perform:\n\n\1gray\1[Press Space to continue]\2', duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
            # demonstrate each of the three task cues with its picture
            txt = self.write('(1) Speak a word\nwhen you see a speech bubble.\n\n\1gray\1[Press Space to continue]\2', duration=10000, block=False, align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
            pic = self.picture('studies/speech/overt.png', duration=10000, block=False, scale=0.25, pos=(0, 0.5))
            self.waitfor('space')
            txt.destroy()
            pic.destroy()
            txt = self.write('(2) Imagine speaking a word\nwhen you see a thought bubble.\n\n\1gray\1[Press Space to continue]\2', duration=10000, block=False, align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
            pic = self.picture('studies/speech/covert.png', duration=10000, block=False, scale=0.25, pos=(0, 0.5))
            self.waitfor('space')
            txt.destroy()
            pic.destroy()
            txt = self.write('(3) Press the space bar\nwhen you see a rectangle.\n\n\1gray\1[Press Space to continue]\2', duration=10000, block=False, align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
            pic = self.picture('studies/speech/control.png', duration=10000, block=False, scale=(0.25, 1, 0.1), pos=(0, 0.5))
            self.waitfor('space')
            txt.destroy()
            pic.destroy()
            self.write('When you are ready,\npress the space bar to begin.' +
                       '\n\n\1gray\1[Press Space to continue]\2', duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
        # Create log file (timestamp-named, tab-separated)
        t = time.localtime()
        t_str = '-'.join([str(k) for k in [t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec]])
        f = open('studies/speech/log/' + t_str + '.txt', 'w')
        self.sleep(4)
        for block in range(self.n_blocks):
            # Create randomized stimulus presentation sequence
            # (full crossing of stimulus x task x condition)
            items = []
            for i1 in range(len(self.stimuli)): # Stimulus
                for i2 in range(len(self.tasks)): # Task (overt, covert, control)
                    for i3 in range(len(self.conditions)): # Condition (visual, auditory)
                        items.append([i1, i2, i3])
            random.shuffle(items)
            if self.training:
                # keep only the first 32 items for the training block
                # NOTE(review): original comment said 16 but the slice is 32 -- confirm.
                items = items[:32] # Use only the first 16 items for the training block
            counter = 0
            for trial in items:
                if not self.training and counter == len(items)/2: # Break in the middle of each run
                    self.write('Time for a short break.\n\n' +
                               '\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
                    self.sleep(4)
                counter += 1
                f.write(self.stimuli[trial[0]].strip() + '\t' + self.tasks[trial[1]] + '\t' + self.conditions[trial[2]] + '\t')
                # show the task cue picture for this trial
                if self.tasks[trial[1]] == 'overt':
                    self.picture('studies/speech/overt.png',
                                 duration=self.isi-self.pause, block=False, scale=0.5, pos=(0, -0.05))
                elif self.tasks[trial[1]] == 'covert':
                    self.picture('studies/speech/covert.png',
                                 duration=self.isi-self.pause, block=False, scale=0.5, pos=(0, -0.05))
                else:
                    self.picture('studies/speech/control.png',
                                 duration=self.isi-self.pause, block=False, scale=(0.5, 1, 0.2))
                # Format: 1zyxx, xx: stimulus (0-35), y: task (0, 1, 2), z: condition (0, 1)
                self.marker(trial[0] + trial[1] * 100 + trial[2] * 1000 + 10000)
                if self.conditions[trial[2]] == 'visual':
                    self.write(self.stimuli[trial[0]], duration=self.isi-self.pause, block=False, scale=0.15, fg=(1, 1, 1, 1))
                else:
                    self.sound(self.file_list[trial[0]], volume=0.5)
                    self.write('+', duration=self.isi-self.pause, block=False, scale=0.15, fg=(1, 1, 1, 1))
                if self.watchfor('space', self.isi):
                    f.write('space\n') # Space bar was pressed
                else:
                    f.write('-\n') # Space bar was not pressed
            if block < self.n_blocks - 1: # If it's not the last block
                self.write('Time for a short break.\n\n' +
                           '\1gray\1[Press Space to continue]\2', fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
                self.sleep(4)
        f.close()
        if not self.training:
            self.write('You successfully completed\none run of the experiment.\n\nThank you!', duration=5, align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
| {
"repo_name": "sccn/SNAP",
"path": "src/modules/Speech/speech_randomized.py",
"copies": "2",
"size": "7080",
"license": "bsd-3-clause",
"hash": -750726982798328700,
"line_mean": 56.5609756098,
"line_max": 217,
"alpha_frac": 0.5312146893,
"autogenerated": false,
"ratio": 3.46718903036239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.499840371966239,
"avg_score": null,
"num_lines": null
} |
from framework.latentmodule import LatentModule
import random
import time
class Main(LatentModule):
    """
    Sample SNAP module: a ball bounces around the screen for 20 seconds while
    the subject presses the 'A' key whenever the ball hits the ground; the
    recorded key-press times are reported at the end.
    """
    def __init__(self):
        LatentModule.__init__(self)
        # set defaults for some configurable parameters:
        self.speed = 2  # initial horizontal velocity of the ball

    def run(self):
        """Show instructions, animate the bouncing ball, then report key presses."""
        self.write('In the next part, please hit the A key whenever the ball hits the ground!\nSpace when ready.','space')
        # start collecting 'a' key presses in the background
        watcher = self.watchfor_multiple_begin(['a'])
        vel = [self.speed,0] # velocity
        pos = [-0.7,0.7] # position
        ball = self.picture('ball.png',duration=10000,scale=0.03,pos=pos,block=False)
        now = time.time()
        t_end = now + 20  # animate for 20 seconds of wall-clock time
        while True:
            # calc amount of time passed
            dt = time.time() - now
            now = time.time()
            if now > t_end:
                break
            # move the ball (simple explicit Euler integration)
            pos[0] += vel[0] * dt
            pos[1] += vel[1] * dt
            ball.setPos(pos[0],0,pos[1])
            # decelerate the ball and apply gravity
            vel[0] = vel[0]*0.98**dt
            vel[1] = vel[1]*0.98**dt - 2*dt
            # bounce: reflect the velocity back toward the screen interior
            if abs(pos[0]) > base.getAspectRatio():
                vel[0] = abs(vel[0]) * (-1 if pos[0]>0 else +1)
            if abs(pos[1]) > 1:
                vel[1] = abs(vel[1]) * (-1 if pos[1]>0 else +1)
            self.sleep(0.01)
        ball.destroy()
        # stop watching and report the collected 'a' press timestamps
        results = self.watchfor_multiple_end(watcher)
        self.write('You pressed the A key at the following times:\n%s\n. Press space to end the experiment.' % str(results['a']),'space')
        self.write('You have successfully completed the experiment!')
| {
"repo_name": "villawang/SNAP",
"path": "src/modules/Sample3.py",
"copies": "2",
"size": "1728",
"license": "bsd-3-clause",
"hash": 2545337498854604300,
"line_mean": 35.7659574468,
"line_max": 137,
"alpha_frac": 0.5115740741,
"autogenerated": false,
"ratio": 3.5702479338842976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.998890038039736,
"avg_score": 0.0185843255173874,
"num_lines": 47
} |
from framework.latentmodule import LatentModule
import random
import time
class Main(LatentModule):
    """
    Flanker task with arrow stimuli (SNAP module, Python 2).

    Each trial optionally shows a flanker image for a variable delay, then a
    congruent or incongruent target arrow; the subject clicks the left or
    right mouse button to report the target direction. Accuracy-based
    feedback is shown between runs and every trial is logged to a
    timestamp-named tab-separated file.

    NOTE(review): self.training is read in run() but not set here --
    presumably injected by the SNAP study configuration; confirm.
    """
    def __init__(self):
        """Set default trial-timing, stimulus and feedback parameters."""
        LatentModule.__init__(self)
        self.conditions = ['congruent', 'incongruent'] # Congruent/incongruent right/left
        self.n_blocks = 3 # Number of blocks of 14 unique trials
        self.n_runs = 10 # Number of runs (i.e. the number of random blocks)
        self.stim_duration = 0.15       # target presentation time (seconds)
        self.pre_duration = [0, 0.033, 0.066, 0.1, 0.133, 0.166, 0.2]  # flanker-to-target delays
        self.trial_duration = 1.65      # total trial length (seconds)
        self.thresholds = [0.9, 0.6] # Thresholds of accuracy for congruent and incongruent
        self.stimulus_images = ['target_R_C.bmp', 'target_L_C.bmp', 'target_R_I.bmp', 'target_L_I.bmp']
        self.pre_images = ['flankers_R.bmp', 'flankers_L.bmp']

    def run(self):
        """Run all runs of randomized flanker trials with inter-run feedback."""
        self.implicit_markers = False
        base.win.setClearColor((1, 1, 1, 1)) # White background
        self.marker(0) # Send one event to trigger the whole event sending process
        # Precache images
        for f in self.stimulus_images:
            self.precache_picture(f)
        for f in self.pre_images:
            self.precache_picture(f)
        # Create log file
        t = time.localtime()
        t_str = '-'.join([str(k) for k in [t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec]])
        f = open('studies/flanker_arrows/log/' + t_str + '.txt', 'w')
        f.write('Trial\tCondition\tDelay\tResponse\tReaction time\n')
        # Show instructions
        if self.training:
            self.write('We\'re now going to show you three arrows\nwhich will quickly appear\non top of each other on the screen.\nIMPORTANT!\nOnly look at the arrow in the middle.',
                       duration='mouse1', align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
            self.picture(self.pre_images[0],
                         duration='mouse1', block=True, scale=(0.1, 0.2), pos=(0, 0))
            self.picture(self.pre_images[1],
                         duration='mouse1', block=True, scale=(0.1, 0.2), pos=(0, 0))
            self.write('Only click in the direction the arrow is pointing to.\nClick as quickly as you can.',
                       duration='mouse1', align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
            self.write('Click as quickly as you can\nand try not to make too many mistakes.\nIn the breaks, the computer\nwill tell you if you should click more quickly\nor more accurately.',
                       duration='mouse1', align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
            self.write('It\'s very important that you observe the feedback!\nIf you are requested to go faster, please do so.\nIf you are requested to be more accurate,\nplease be more careful in your responses.',
                       duration='mouse1', align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
            self.write('We\'re going to start in a minute.\nPlease sit as still as you can during the test.',
                       duration='mouse1', align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
        self.sleep(2)
        for run in range(self.n_runs): # Feedback is shown after each run
            # Generate randomized stimuli (full crossing of condition x delay,
            # repeated n_blocks times, then shuffled)
            stimuli = []
            for condition in range(len(self.conditions)):
                for delay in range(len(self.pre_duration)):
                    stimuli.append([condition, delay])
            stimuli *= self.n_blocks
            random.shuffle(stimuli)
            correct_congruent, correct_incongruent = 0.0, 0.0 # Number of correct responses
            number_congruent, number_incongruent = 0, 0
            for trial_number, trial in enumerate(stimuli):
                f.write(str(trial[0]) + '\t' + str(trial[1]) + '\t')
                # collect left/right mouse clicks during the whole trial
                watcher = self.watchfor_multiple_begin(['mouse1', 'mouse3'])
                left_right = random.randint(0, 1) # Randomly choose left or right arrow (target)
                print "Trial {0}, {1}, delay {2}, left/right {3}".format(trial_number + 1, trial[0], trial[1], left_right)
                # Show pre-stimulus picture
                if trial[1] > 0: # If the delay is 0, don't show the flanker image
                    if (left_right == 0 and trial[0] == 0) or (left_right == 1 and trial[0] == 1): # Right
                        self.picture(self.pre_images[0], duration=self.pre_duration[trial[1]], block=True, scale=(0.1, 0.2), pos=(0, 0))
                    else: # Left
                        self.picture(self.pre_images[1], duration=self.pre_duration[trial[1]], block=True, scale=(0.1, 0.2), pos=(0, 0))
                # Show stimulus
                if trial[0] == 0: # congruent
                    self.picture(self.stimulus_images[left_right], duration=self.stim_duration, block=True, scale=(0.1, 0.2), pos=(0, 0))
                    number_congruent += 1
                    self.marker(trial[1] + 10000)
                else: # incongruent
                    self.picture(self.stimulus_images[left_right + 2], duration=self.stim_duration, block=True, scale=(0.1, 0.2), pos=(0, 0))
                    number_incongruent += 1
                    self.marker(trial[1] + 11000)
                # Event markers: 1000x ... congruent, 1100x ... incongruent (x = 0...6 is the delay)
                self.crosshair(self.trial_duration - self.pre_duration[trial[1]] - self.stim_duration, block=True, pos=(0, 0), size=0.025, width=0.005)
                responses = self.watchfor_multiple_end(watcher)
                # keep only the first click of each button; pick the earlier one
                first_event_1 = responses['mouse1'][0] if responses['mouse1'] else None
                first_event_3 = responses['mouse3'][0] if responses['mouse3'] else None
                if first_event_1 == None and first_event_3 == None: # No response
                    reaction_time = None
                    mouse_button = None
                elif first_event_1 == None:
                    reaction_time = first_event_3
                    mouse_button = 3
                elif first_event_3 == None:
                    reaction_time = first_event_1
                    mouse_button = 1
                else:
                    reaction_time = first_event_1 if first_event_1 < first_event_3 else first_event_3
                    mouse_button = 1 if first_event_1 < first_event_3 else 3
                # mouse1 = left target expected, mouse3 = right target expected
                if mouse_button is None: # No response
                    f.write('Incorrect\t-\n')
                    print "Incorrect"
                elif (mouse_button == 1 and left_right == 1) or (mouse_button == 3 and left_right == 0): # Correct response
                    f.write('Correct\t' + str(reaction_time) + '\n')
                    print "Correct, {0}".format(reaction_time)
                    if trial[0] == 0:
                        correct_congruent += 1
                    else:
                        correct_incongruent += 1
                else: # Incorrect response
                    f.write('Incorrect\t' + str(reaction_time) + '\n')
                    print "Incorrect, {0}".format(reaction_time)
            # per-run accuracies; None when no trials of that condition occurred
            try:
                congruent_accuracy = correct_congruent/number_congruent
            except ZeroDivisionError:
                congruent_accuracy = None
            try:
                incongruent_accuracy = correct_incongruent/number_incongruent
            except ZeroDivisionError:
                incongruent_accuracy = None
            print '******', congruent_accuracy, incongruent_accuracy, '******'
            if run < self.n_runs - 1: # Show feedback after every run (but not after the last run)
                if congruent_accuracy is None:
                    congruent_accuracy = 0
                if incongruent_accuracy is None:
                    incongruent_accuracy = 0
                if ((congruent_accuracy > self.thresholds[0]) and
                        (self.thresholds[1] < incongruent_accuracy < self.thresholds[0]) and
                        (congruent_accuracy > incongruent_accuracy + 0.1)):
                    # NOTE(review): 'you''re' concatenates to "youre" in the
                    # displayed text -- likely meant "you're"; confirm.
                    self.write('Great! Continue exactly\nas you''re doing!', duration=5, align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
                elif (congruent_accuracy < self.thresholds[0]) or (incongruent_accuracy < self.thresholds[1]):
                    self.write('Good, but can you be\na bit more accurate, please!', duration=5, align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
                elif (congruent_accuracy > self.thresholds[0]) and (incongruent_accuracy > self.thresholds[1]):
                    self.write('Good, but can you go\na bit more quickly, please!', duration=5, align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
                else:
                    self.write('Good!', duration=5, align='center', pos=(0, 0), scale=0.1, fg=(0, 0, 0, 1))
        f.close()
| {
"repo_name": "sccn/SNAP",
"path": "src/modules/Flanker/flanker_arrows.py",
"copies": "2",
"size": "9202",
"license": "bsd-3-clause",
"hash": 3813377004170576400,
"line_mean": 57.6114649682,
"line_max": 214,
"alpha_frac": 0.5285807433,
"autogenerated": false,
"ratio": 3.7255060728744938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5254086816174494,
"avg_score": null,
"num_lines": null
} |
from framework.latentmodule import LatentModule
import random
class Main(LatentModule):
    """
    Motor imagery experiment: cues the subject to imagine a left-hand or
    right-hand movement (or to rest) on each trial, emitting event markers
    for every phase so the recording can be segmented offline.
    """
    def __init__(self):
        LatentModule.__init__(self)
        # defaults for the configurable parameters of this module
        self.training_trials = 10        # number of trial in training block
        self.trials_per_block = 30       # number of trials in each block
        self.blocks = 3                  # number of blocks to present
        self.pause_duration = 45         # duration of the pause between blocks
        self.fixation_duration = 1       # how long the fixation cross is displayed
        self.letter_duration = 3         # how long the cue letter is shown
        self.wait_duration = 1           # blank inter-trial wait at the end of each trial
        self.letter_scale = 0.3
        self.stimulus_set = ['L','R','O'] # the set of stimuli to present

    def run(self):
        """Top-level flow: instructions, practice block, then the main blocks."""
        self.marker(10) # emit an event marker to indicate the beginning of the experiment
        self.write('In this experiment you will be asked to imagine either a left-hand or a right-hand movement through a succession of trials. Each trial will begin with a fixation cross, followed either by the letter L (for left hand movement) or the letter R (for right hand movement), or O (for nothing -- relax). Please begin imagining the respective movement when the letter appears and keep going until the letter disappears (after about 3 seconds). If an O appears, please do nothing. When you are ready for a practice run, please press the space bar.',[1,'space'],wordwrap=30,pos=[0,0.3])
        # the practice block uses an offset marker range so it can be
        # separated from the main blocks offline
        self.markeroffset = 30
        self.run_block(self.training_trials)
        self.markeroffset = 0
        self.write('Please press the space bar when you are ready for the main experiment.',[1,'space'],wordwrap=30)
        for _ in range(self.blocks):
            self.run_block(self.trials_per_block)
            self.write('Pause. We will continue after the gong.',self.pause_duration)
            self.sound('nice_bell.wav')
            self.sleep(3)
        self.write('You successfully completed the experiment.')

    def run_block(self,numtrials):
        """Run one block of *numtrials* cued imagery trials."""
        self.marker(1+self.markeroffset)  # block-start marker
        for _ in range(numtrials):
            # fixation cross
            self.marker(2+self.markeroffset)
            self.crosshair(self.fixation_duration)
            # present one of the three cue letters (L / R / O)
            cue = random.choice([0,1,2])
            self.marker(cue+3+self.markeroffset)
            self.write(self.stimulus_set[cue],self.letter_duration,scale=self.letter_scale)
            # blank inter-trial interval
            self.marker(cue+10)
            self.sleep(self.wait_duration)
| {
"repo_name": "villawang/SNAP",
"path": "src/modules/BCI/MotorImag.py",
"copies": "2",
"size": "2873",
"license": "bsd-3-clause",
"hash": 5642058444660748000,
"line_mean": 56.48,
"line_max": 597,
"alpha_frac": 0.6425339367,
"autogenerated": false,
"ratio": 4.181950509461426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5824484446161426,
"avg_score": null,
"num_lines": null
} |
from framework.latentmodule import LatentModule
import threading
import rpyc, rpyc.core, rpyc.utils.classic, rpyc.utils.server
from pandac.PandaModules import *
from direct.task import Task
import pygame,time
import framework.ui_elements.ScrollPresenter, framework.ui_elements.TextPresenter, framework.ui_elements.ImagePresenter, framework.ui_elements.AudioPresenter, framework.ui_elements.WorldspaceGizmos
import direct.gui.OnscreenImage
try:
import framework.speech_io.speech
except Exception as e:
print "Could not import speech IO: ", e
#
# This is the client component of the LSE experiment implementation.
# This module is executed on the subjects' PCs. It awaits commands from the master.
#
client_version = '0.03' # this is just for the experimenter's interest
class Main(LatentModule):
    """Client-side module of the LSE experiment.

    Runs on each subject PC: starts an rpyc server through which the master
    process registers input callbacks and remote-controls stimulus
    presentation, and forwards local keyboard, joystick and speech events to
    the master via asynchronous callbacks.
    """

    def __init__(self):
        LatentModule.__init__(self)
        self.client_port = 3663 # port where this client waits for connections from the master
        self.client_id = 0      # 0 for the first client, 1 for the second
        # async callbacks registered by the master (None until it connects)
        self.keydown_mastercallback = None
        self.keyup_mastercallback = None
        self.joymove_mastercallback = None
        self.speech_mastercallback = None
        self.callbacks_connected = False
        self.localtesting = False # if both clients run on one machine -- then they need to use different input peripherals
        self.allow_speech = (self.client_id == 0) if self.localtesting else True # this is for debugging
        self.joystick = None
        # last reported joystick state; used to suppress redundant callbacks
        self.last_x = 0
        self.last_y = 0
        self.last_u = 0
        self.last_v = 0
        self.last_buttons = ()

    def run(self):
        """Hook up input devices, start the rpyc server, then idle forever."""
        moduleself = self

        class MainService(rpyc.core.SlaveService):
            """
            An rpyc service that exposes some features of this module:
            * allows the master to hook up callbacks to inform him of keystrokes
            * grants remote access to the Panda3d engine (and any other module for that matter)
            """
            def exposed_mastercallbacks(self,keydown_cbf,keyup_cbf,joymove_cbf,speech_cbf):
                # wrap the master's functions as async proxies so that local
                # event handlers never block on the network round-trip
                moduleself.keydown_mastercallback = rpyc.async(keydown_cbf)
                moduleself.keyup_mastercallback = rpyc.async(keyup_cbf)
                moduleself.joymove_mastercallback = rpyc.async(joymove_cbf)
                moduleself.speech_mastercallback = rpyc.async(speech_cbf)
                moduleself.callbacks_connected = True
            def exposed_stimpresenter(self):
                # give the master direct access to this module (and the engine)
                return moduleself

        # set up window title
        winprops = WindowProperties()
        winprops.setTitle('LSE GameClient '+client_version + ' @' + str(self.client_port))
        base.win.requestProperties(winprops)
        # hook up key events
        base.buttonThrowers[0].node().setButtonDownEvent('buttonDown')
        base.buttonThrowers[0].node().setButtonUpEvent('buttonUp')
        self.accept('buttonDown', self.on_keydown)
        self.accept('buttonUp', self.on_keyup)
        # init joystick control
        pygame.init()
        try:
            # in local-testing mode each client grabs its own joystick by id
            self.joystick = pygame.joystick.Joystick(self.client_id if self.localtesting else 0)
            self.joystick.init()
            taskMgr.add(self.update_joystick,'update_joystick')
            print "Initialized joystick."
        except:
            print "Warning: no joystick found!"
        # init speech control
        if self.allow_speech:
            try:
                framework.speech_io.speech.listenfor(['yes','no','skip','report','red','green','blue','yellow','north','south','east','west','front','back','left','right','alpha move here','bravo move here','alpha move in front of me','bravo move in front of me','alpha move to truck','bravo move to truck','alpha move behind me','bravo move behind me','alpha move to my left','bravo move to my left','alpha move to my right','bravo move to my right','suspicious object'],self.on_speech)
            except:
                print "Could not initialiate speech control; falling back to touch screen only."
        # initiate a server thread that listens for remote commands
        self.remote_server = rpyc.utils.server.ThreadedServer(MainService,port=self.client_port)
        self.remote_thread = threading.Thread(target=self.remote_server.start)
        self.remote_thread.setDaemon(True)  # do not block process shutdown
        self.remote_thread.start()
        # sleep forever, keeping the engine running in the background
        self.sleep(100000)

    def on_tick(self,dt):
        # throttle the engine loop (25 ms per tick)
        time.sleep(0.025)

    def on_keydown(self, keyname):
        """Forward a key press to the master, if one is connected."""
        if self.callbacks_connected:
            self.keydown_mastercallback(keyname)

    def on_keyup(self, keyname):
        """Forward a key release to the master, if one is connected."""
        if self.callbacks_connected:
            self.keyup_mastercallback(keyname)

    def on_speech(self,phrase,listener):
        # NOTE(review): unlike the key handlers this does not check
        # callbacks_connected first -- if speech fires before the master has
        # connected, self.speech_mastercallback is still None and this raises.
        self.speech_mastercallback(phrase)

    def update_joystick(self,task):
        """Panda3D task: poll joystick axes/buttons and report changes to the master."""
        if self.callbacks_connected and self.joystick is not None:
            # drain pygame's event queue so get_axis/get_button return fresh state
            for e in pygame.event.get(): pass
            x = self.joystick.get_axis(1)
            y = self.joystick.get_axis(0)
            if self.joystick.get_numaxes() >= 5:
                u = self.joystick.get_axis(3)
                v = self.joystick.get_axis(4)
            else:
                # fewer than 5 axes: report the secondary stick as centered
                u = 0
                v = 0
            buttons = (self.joystick.get_button(0),self.joystick.get_button(1),self.joystick.get_button(2),self.joystick.get_button(3))
            # only notify the master when something actually changed
            if not (self.last_x == x and self.last_y == y and self.last_u == u and self.last_v == v and self.last_buttons == buttons):
                self.joymove_mastercallback(x,y,u,v,buttons)
                self.last_x = x
                self.last_y = y
                self.last_u = u
                self.last_v = v
                self.last_buttons = buttons
        return Task.cont
| {
"repo_name": "villawang/SNAP",
"path": "src/modules/LSE/LSE_GameClient.py",
"copies": "2",
"size": "5988",
"license": "bsd-3-clause",
"hash": -7197025763520038000,
"line_mean": 43.6865671642,
"line_max": 487,
"alpha_frac": 0.622745491,
"autogenerated": false,
"ratio": 3.9214145383104126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5544160029310412,
"avg_score": null,
"num_lines": null
} |
from framework.latentmodule import LatentModule
class Main(LatentModule):
def __init__(self):
LatentModule.__init__(self)
# set defaults for some configurable parameters:
self.awake_duration = 10 # duration of the awake condition
self.snooze_duration = 10 # duration of the zone-out condition
self.wakeup_sound = 'nice_bell.wav' # sound to indicate the end of the zone-out condition
self.transition_duration = 1.5 # time that the subject has to come back
self.moviefile = 'big\\alpha_movie2.avi'
self.begintime = 0.0 # time into the movie where we begin playing
self.endtime = 3.5*60 # time into the movie where we end
def run(self):
self.marker(10) # emit an event marker to indicate the beginning of the experiment
self.write('This experiment is about high or low-intensity visual perception. You will be presented a sequence of trials, during half of which you will see a movie (with a fixation cross in the middle), and during the other half of which you will see just the fixation cross. When you see the movie, keep fixating, but focus on the content. When you see only the cross, try to defocus your vision, think nothing, and just wait for the bell that indicates the beginning of the next trial. Please press the space bar when you are ready.','space',wordwrap=30,pos=[0,0.3])
for k in [3,2,1]:
self.write('Experiment begins in '+str(k))
self.trials = int((self.endtime-self.begintime)/self.awake_duration)
for t in range(self.trials):
# show a piece of the movie, superimposed with a fixation cross
self.marker(1)
m = self.movie(self.moviefile, block=False, scale=[0.7,0.4],aspect=1.125,contentoffset=[0,0],volume=0.3,timeoffset=self.begintime+t*self.awake_duration,looping=True)
self.crosshair(self.awake_duration,size=0.2,width=0.005)
m[0].stop()
m[2].removeNode()
self.marker(2)
self.sleep(self.transition_duration)
# show just the cross-hair
self.marker(3)
self.crosshair(self.snooze_duration,size=0.2,width=0.005)
# play the "wakeup" sound
self.sound(self.wakeup_sound)
self.marker(4)
self.sleep(self.transition_duration)
self.write('You successfully completed the experiment!')
| {
"repo_name": "villawang/SNAP",
"path": "src/modules/BCI/AlphaCalibration.py",
"copies": "2",
"size": "2548",
"license": "bsd-3-clause",
"hash": -8956488353857043000,
"line_mean": 55.6222222222,
"line_max": 576,
"alpha_frac": 0.6310832025,
"autogenerated": false,
"ratio": 3.901990811638591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5533074014138591,
"avg_score": null,
"num_lines": null
} |
from framework.latentmodule import LatentModule
from serial import *
from direct.gui.OnscreenImage import OnscreenImage
import io, os, glob
#import matplotlib.pyplot as plt
from numpy import *
import time
import pylsl.pylsl as pylsl
class Main (LatentModule):
    """RSVP (rapid serial visual presentation) stimulus module.

    Reads image lists and markers from RSVP.txt files and flashes the images
    in quick succession over two trials, recording inter-stimulus timing.
    """

    def __init__ (self):
        LatentModule.__init__(self)

    def ReadData (self,f):
        """Parse an open RSVP.txt file into (image names, markers, count).

        Each line holds two whitespace-separated columns: image path, marker.
        Closes the file before returning.
        """
        data=[]
        marker=[]
        for columns in (raw.strip().split() for raw in f):
            data.append(columns[0])
            marker.append(columns[1])
        # trim boundary entries.
        # NOTE(review): list.remove() deletes the first element *equal* to the
        # given value -- with duplicate entries this may drop the wrong item;
        # positional deletion (del / pop) was probably intended here.
        data.remove(data[0])
        marker.remove(marker[0])
        marker.remove(marker[1])
        marker.remove(marker[-1])
        num=len(data) - 2
        f.close()
        return data,marker,num

    def PlotTime(self, time_interval):
        """Plot a histogram of the measured inter-stimulus intervals.

        NOTE(review): relies on matplotlib's `plt`, whose import is commented
        out at the top of this file -- calling this raises NameError as-is.
        """
        plt.hist(time_interval)
        plt.title('Histogram of actural time interval')
        plt.xlabel('Time interval (s)')
        plt.ylabel('Number')
        plt.show()

    #-----------------------------------display stimulus--------------------------
    def ImageStimulus (self,data):
        """Flash the images listed in `data` and return timing measurements.

        Returns (inter-stimulus intervals, absolute display timestamps), each
        trimmed of its first and last entries.
        """
        # self.write('We will present image stimulus.\n Press the space bar when you are ready','space')
        # self.marker(11)
        # self.write('We will display a crossshair on the screen, please fix it',duration=1)
        # self.crosshair(duration=0.1)
        # preloading images
        image=[]
        for i in range(0,len(data)):
            image.append(OnscreenImage(data[i]))
        # display images
        image_time=[] # get time
        ti=0.3 # timer interval
        for i in range(0,len(data)-1):
            image_time.append(time.time())
            if i != 0:
                self.marker(1)
            # NOTE(review): each iteration presents image[i] and image[i+1] and
            # then sets both slots to None, so the next iteration passes None
            # as image[i] -- confirm this is the intended presentation scheme.
            self.picture(image[i],duration=ti,scale=1.2)
            image[i] = None
            self.picture(image[i+1],duration=ti,scale=1)
            image[i+1] = None
        image_time=array(image_time)
        time_interval=diff(image_time)
        return time_interval[1:-1], image_time[1:-1]

    #-----------------------------------------------------------------------------
    def run (self):
        """Run two RSVP trials from hard-coded Windows paths and print timings."""
        # f=open('C:\\Users\\villa_000\\Dropbox\\python\\labstreaminglayer-master\\App\\SNAP\\src\\studies'
        #        '\\Image1\\RSVP.txt','rb')
        # data,marker,num=self.ReadData()
        # print '\n\nImage name:\n'
        # print data[1:-1]
        # print '\nMarker:\n'
        # print marker
        # time_interval,image_time, image=self.ImageStimulus(data)
        # print time_interval[1:-1], num
        # self.PlotTime(time_interval[1:])
        # print image_time[1:-1]
        # --------------------------------trial1-----------------------------------------------------
        # mac
        # f1=open('/Users/Villa/Dropbox/python/labstreaminglayer-master/App/SNAP/src/studies'
        #         '/trial1/RSVP.txt','rb')
        # win
        f1=open('C:\\Users\\Villa_000\\Dropbox\\python\\labstreaminglayer-master\\App\\SNAP\\src\\studies'
                '\\trial1\\RSVP.txt','rb')
        data,marker1,num=self.ReadData(f1)
        self.write('We will present image stimulus.\n Press the space bar when you are ready','space')
        self.write('Trial1',duration=2)
        self.crosshair(duration=0.1)
        time_interval1,image_time1=self.ImageStimulus(data)
        # ----------------------------------trial2-----------------------------------------------------
        # mac
        # f2=open('/Users/Villa/Dropbox/python/labstreaminglayer-master/App/SNAP/src/studies'
        #         '/trial2/RSVP.txt','rb')
        # win
        f2=open('C:\\Users\\Villa_000\\Dropbox\\python\\labstreaminglayer-master\\App\\SNAP\\src\\studies'
                '\\trial2\\RSVP.txt','rb')
        data,marker2,num=self.ReadData(f2)
        self.write('Trial2',duration=2)
        self.crosshair(duration=0.1)
        time_interval2,image_time2=self.ImageStimulus(data)
        print time_interval1, time_interval2
        #self.PlotTime([time_interval1,time_interval2])
| {
"repo_name": "villawang/SNAP",
"path": "src/modules/RSVP.py",
"copies": "1",
"size": "4138",
"license": "bsd-3-clause",
"hash": -2729318973683657700,
"line_mean": 28.8805970149,
"line_max": 107,
"alpha_frac": 0.5222329628,
"autogenerated": false,
"ratio": 3.7380307136404696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9685501440909616,
"avg_score": 0.01495244710617075,
"num_lines": 134
} |
from ..framework.load_file import load_file
def crossproduct(p, q):
    """Return the z-component of the 2-D cross product p x q."""
    return p[0] * q[1] - p[1] * q[0]


def subtract(p, q):
    """Return the component-wise difference p - q as a list."""
    return [x - y for x, y in zip(p, q)]


def same_side(p, q, a, b):
    """Return True if p and q lie on the same side of the line through a and b.

    Points exactly on the line (zero cross product) count as same-side.
    """
    ab = subtract(a, b)
    cp1 = crossproduct(ab, subtract(p, a))
    cp2 = crossproduct(ab, subtract(q, a))
    # Same sign (or on the line) <=> product is non-negative; return the
    # comparison directly instead of branching to True/False literals.
    return cp1 * cp2 >= 0
def solve(name: str='triangles.txt', relative: bool=True) -> int:
    """Count how many triangles in the input file contain the origin.

    Each input line lists six comma-separated integers: the x,y coordinates
    of the three vertices. A triangle contains the origin when the origin is
    on the same side as each vertex relative to the opposite edge.
    """
    raw = load_file(102, name, relative)
    triangles = []
    for line in raw.split('\n'):
        if not line:
            continue
        coords = [int(value) for value in line.split(',')]
        # pair up (x, y) values and pad each vertex with a zero z-component
        triangles.append(list(zip(coords[0::2], coords[1::2], [0] * 3)))
    origin = [0, 0, 0]
    return sum(
        1
        for a, b, c in triangles
        if same_side(origin, a, b, c)
        and same_side(origin, b, a, c)
        and same_side(origin, c, a, b)
    )
| {
"repo_name": "cryvate/project-euler",
"path": "project_euler/solutions/problem_102.py",
"copies": "1",
"size": "1035",
"license": "mit",
"hash": -474828046447023040,
"line_mean": 21.0212765957,
"line_max": 74,
"alpha_frac": 0.5207729469,
"autogenerated": false,
"ratio": 3.0441176470588234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4064890593958823,
"avg_score": null,
"num_lines": null
} |
from framework.logging.logger import Logger
from subprocess import Popen
from datetime import datetime
from xml.dom import DOMException
from xml.dom import minidom
from blessings import Terminal
t = Terminal()
class Debuggable(object):
    """Patches android:debuggable="true" into an APK and re-signs it.

    Decompiles the target APK with apktool, edits its AndroidManifest.xml,
    rebuilds the APK and signs it with a freshly generated keystore.
    """

    def __init__(self, directory, apk):
        # directory: name of the working/output directory under output/
        # apk: path to the target APK file
        super(Debuggable, self).__init__()
        self.directory = directory
        self.apk = apk

    def run_debuggable(self):
        """
        Decompile the target APK with apktool.jar, add the attribute
        android:debuggable="true" to the <application/> tag, then rebuild
        the APK from the output directory and sign it with a freshly
        generated JKS key so it can be deployed back to the target device.

        NOTE(review): every Popen call below interpolates self.apk /
        self.directory into a shell=True command string -- shell
        metacharacters in those paths would be executed; consider passing
        argument lists with shell=False.
        """
        print(t.green("[{0}] ".format(datetime.now())) +
              t.yellow("Decompiling : ") +
              self.apk)
        try:
            Popen(["java -jar apktool.jar d {0} -f -o output/{1}".format(self.apk, self.directory)],
                  shell=True).wait()
            print(t.green("[{0}] ".format(datetime.now())) +
                  t.yellow("Adding android:debuggable=\"true\""))
            with open("output/{0}/AndroidManifest.xml".format(self.directory), "r+") as manifest:
                # Using parseString() will prevent
                # XML file size issues
                #
                xml = minidom.parseString(manifest.read())
                # application if of
                # type NodeList
                #
                application = xml.getElementsByTagName("application")
                application[0].setAttribute("android:debuggable", "true")
                # We need to adjust the
                # position of where we begin
                # write and use truncation in order
                # not to corrupt the XML structure
                #
                manifest.seek(0)
                xml.writexml(manifest)
                manifest.truncate()
                # redundant inside the with-block, but harmless
                manifest.close()
        # NOTE(review): e.message exists only on Python 2 exceptions; on
        # Python 3 these handlers would raise AttributeError themselves.
        except OSError as e:
            print(t.red("[{0}]".format(datetime.now()) + "Process exception, check the logs"))
            Logger.run_logger(e.message)
        except IOError as e:
            print(t.red("[{0}]".format(datetime.now()) + "IO exception, check the logs"))
            Logger.run_logger(e.message)
        except DOMException as e:
            print(t.red("[{0}]".format(datetime.now()) + "XML exception, check the logs"))
            Logger.run_logger(e.message)
        try:
            print(t.green("[{0}] ".format(datetime.now())) +
                  t.yellow("Building APK : ") +
                  self.directory)
            Popen(["java -jar apktool.jar b output/{0} -o output/{0}/{0}.apk".format(self.directory)],
                  shell=True).wait()
            print(t.green("[{0}] ".format(datetime.now())) +
                  t.yellow("Building completed"))
            print(t.green("[{0}] ".format(datetime.now())) +
                  t.yellow("APK signing process initiated"))
            # generate a throwaway keystore, sign the rebuilt APK with it
            Popen(["keytool -genkey -v -keystore lobotomy-key.keystore "
                   "-alias lobotomy -keyalg RSA -keysize 2048 -validity 10000"],
                  shell=True).wait()
            Popen(["jarsigner -verbose -sigalg SHA1withRSA -digestalg SHA1 "
                   "-keystore lobotomy-key.keystore output/{0}/{0}.apk lobotomy".format(self.directory)],
                  shell=True).wait()
            print(t.green("[{0}] ".format(datetime.now())) +
                  t.yellow("Freshly signed APK is located at : ") +
                  "output/{0}/{0}.apk".format(self.directory))
            # We want to create a fresh keystore
            # every time we build and sign a new APK
            #
            Popen(["rm lobotomy-key.keystore"],
                  shell=True).wait()
        except OSError as e:
            print(t.red("[{0}]".format(datetime.now()) + "Process exception, check the logs"))
            Logger.run_logger(e.message)
| {
"repo_name": "0xr0ot/lobotomy",
"path": "framework/brains/apk/debuggable.py",
"copies": "4",
"size": "3998",
"license": "mit",
"hash": -2717993812182655500,
"line_mean": 35.3454545455,
"line_max": 105,
"alpha_frac": 0.5390195098,
"autogenerated": false,
"ratio": 4.312837108953614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008613291322846068,
"num_lines": 110
} |
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.ButtonMatrixElement import ButtonMatrixElement
from _Framework.SessionComponent import SessionComponent
from SpecialMixerComponent import SpecialMixerComponent
from PreciseButtonSliderElement import * # noqa
from consts import * # noqa
# 8-step value tables handed to the button sliders via set_value_map(): each
# entry is the normalized parameter value for one button of a slider column.
# NOTE(review): exact row-to-entry orientation depends on
# PreciseButtonSliderElement -- confirm there.
PAN_VALUE_MAP = (-1.0, -0.634921, -0.31746, 0.0, 0.0, 0.31746, 0.634921, 1.0)
VOL_VALUE_MAP = (0.0, 0.142882, 0.302414, 0.4, 0.55, 0.7, 0.85, 1.0)
SEND_VALUE_MAP = (0.0, 0.103536, 0.164219, 0.238439, 0.343664, 0.55, 0.774942, 1.0)
class SubSelectorComponent(ModeSelectorComponent):
    """ Class that handles different mixer modes """

    def __init__(self, matrix, side_buttons, session):
        # matrix: 8x8 button grid, used both for the mixer overview and as
        #   eight 8-step vertical button sliders (one per column)
        # side_buttons: 8 buttons; the first 4 select the mode, the last 4
        #   are handed to the individual modes
        # session: SessionComponent that this mixer is attached to
        assert isinstance(matrix, ButtonMatrixElement)
        assert ((matrix.width() == 8) and (matrix.height() == 8))
        assert isinstance(side_buttons, tuple)
        assert (len(side_buttons) == 8)
        assert isinstance(session, SessionComponent)
        ModeSelectorComponent.__init__(self)
        self._session = session
        self._mixer = SpecialMixerComponent(matrix.width())
        self._matrix = matrix
        self._sliders = []
        self._mixer.name = 'Mixer'
        self._mixer.master_strip().name = 'Master_Channel_strip'
        self._mixer.selected_strip().name = 'Selected_Channel_strip'
        for column in range(matrix.width()):
            self._mixer.channel_strip(column).name = 'Channel_Strip_' + str(column)
            # build one button slider per column; buttons are passed bottom-up
            # (row 7 first) so the slider's low end is the bottom of the grid
            self._sliders.append(PreciseButtonSliderElement(tuple([matrix.get_button(column, 7 - row) for row in range(8)])))
            self._sliders[-1].name = 'Button_Slider_' + str(column)
        self._side_buttons = side_buttons[4:]
        self._update_callback = None
        self._session.set_mixer(self._mixer)
        self.set_modes_buttons(side_buttons[:4])

    def disconnect(self):
        """Detach all listeners and drop references so the component can be released."""
        for button in self._modes_buttons:
            button.remove_value_listener(self._mode_value)
        self._session = None
        self._mixer = None
        for slider in self._sliders:
            slider.release_parameter()
            slider.set_disabled(True)
        self._sliders = None
        self._matrix = None
        self._side_buttons = None
        self._update_callback = None
        ModeSelectorComponent.disconnect(self)

    def set_update_callback(self, callback):
        # callback must be a bound method (checked via its im_func attribute);
        # it is invoked at the end of every successful update()
        assert (dir(callback).count("im_func") is 1)
        self._update_callback = callback

    def set_modes_buttons(self, buttons):
        """Install the mode-selection buttons (or None to clear them)."""
        assert ((buttons == None) or (isinstance(buttons, tuple) or (len(buttons) == self.number_of_modes())))
        identify_sender = True
        for button in self._modes_buttons:
            button.remove_value_listener(self._mode_value)
        self._modes_buttons = []
        if buttons != None:
            for button in buttons:
                assert isinstance(button, ButtonElement)
                self._modes_buttons.append(button)
                button.add_value_listener(self._mode_value, identify_sender)

    def set_mode(self, mode):
        """Switch to `mode` (-1 = mixer overview, 0..3 = slider modes) and refresh."""
        assert isinstance(mode, int)
        assert (mode in range(-1, self.number_of_modes()))
        # mode -1 always forces a refresh, even when already selected
        if ((self._mode_index != mode) or (mode == -1)):
            self._mode_index = mode
            self.update()

    def mode(self):
        """Return the current mode as a 1-based index, or 0 when disabled."""
        result = 0
        if self.is_enabled():
            result = self._mode_index + 1
        return result

    def number_of_modes(self):
        return 4

    def on_enabled_changed(self):
        # propagate the enabled state to sliders and mixer, then fall back to
        # the overview mode
        enabled = self.is_enabled()
        for index in range(self._matrix.width()):
            self._sliders[index].set_disabled(not enabled)
        self._mixer.set_enabled(enabled)
        self.set_mode(-1)

    def release_controls(self):
        """Unbind every control from the mixer/session so other components may use them."""
        for track in range(self._matrix.width()):
            for row in range(self._matrix.height()):
                self._matrix.get_button(track, row).set_on_off_values(127, LED_OFF)
            strip = self._mixer.channel_strip(track)
            strip.set_default_buttons(None, None, None, None)
            strip.set_mute_button(None)
            strip.set_solo_button(None)
            strip.set_arm_button(None)
            strip.set_send_controls((None, None))
            strip.set_pan_control(None)
            strip.set_volume_control(None)
        self._session.set_stop_track_clip_buttons(None)
        self._mixer.set_global_buttons(None, None, None)
        self._session.set_stop_all_clips_button(None)

    def update(self):
        """Light the mode buttons and (re)wire all controls for the active mode."""
        assert (self._modes_buttons != None)
        if self.is_enabled():
            if (self._modes_buttons != None):
                for index in range(len(self._modes_buttons)):
                    self._modes_buttons[index].set_on_off_values(GREEN_FULL, GREEN_THIRD)
                    if (index == self._mode_index):
                        self._modes_buttons[index].turn_on()
                    else:
                        self._modes_buttons[index].turn_off()
            for button in self._side_buttons:
                button.set_on_off_values(127, LED_OFF)
                button.turn_off()
            # sliders are only active outside the overview mode
            for index in range(self._matrix.width()):
                self._sliders[index].set_disabled((self._mode_index == -1))
            # suppress intermediate redraws while rebinding controls
            self._mixer.set_allow_update(False)
            self._session.set_allow_update(False)
            if self._mode_index == -1:
                self._setup_mixer_overview()
            elif self._mode_index == 0:
                self._setup_volume_mode()
            elif self._mode_index == 1:
                self._setup_pan_mode()
            elif self._mode_index == 2:
                self._setup_send1_mode()
            elif self._mode_index == 3:
                self._setup_send2_mode()
            else:
                assert False
            if (self._update_callback != None):
                self._update_callback()
            self._mixer.set_allow_update(True)
            self._session.set_allow_update(True)
        else:
            self.release_controls()

    def _setup_mixer_overview(self):
        """Overview mode: per-track buttons for defaults/stop/mute/solo/arm."""
        trkon_index = 5  # grid row holding the mute ("track on") buttons
        stop_buttons = []
        for track in range(self._matrix.width()):
            strip = self._mixer.channel_strip(track)
            strip.set_send_controls((None, None))
            strip.set_pan_control(None)
            strip.set_volume_control(None)
            self._sliders[track].release_parameter()
            for row in range(self._matrix.height()):
                full_value = GREEN_THIRD
                third_value = GREEN_FULL
                if row == trkon_index:
                    full_value = AMBER_FULL
                    third_value = AMBER_THIRD
                elif row > 3:
                    full_value = RED_FULL
                    third_value = RED_THIRD
                self._matrix.get_button(track, row).set_on_off_values(full_value, third_value)
            strip.set_default_buttons(self._matrix.get_button(track, 0), self._matrix.get_button(track, 1), self._matrix.get_button(track, 2), self._matrix.get_button(track, 3))
            stop_buttons.append(self._matrix.get_button(track, 4))
            strip.set_mute_button(self._matrix.get_button(track, 5))
            strip.set_solo_button(self._matrix.get_button(track, 6))
            strip.set_arm_button(self._matrix.get_button(track, 7))
        for button in self._side_buttons:
            if list(self._side_buttons).index(button) == trkon_index - 4:
                button.set_on_off_values(AMBER_FULL, AMBER_THIRD)
            else:
                button.set_on_off_values(RED_FULL, RED_THIRD)
            button.set_force_next_value()
            button.turn_off()
        self._session.set_stop_track_clip_buttons(tuple(stop_buttons))
        self._session.set_stop_all_clips_button(self._side_buttons[0])
        self._mixer.set_global_buttons(self._side_buttons[3], self._side_buttons[2], self._side_buttons[1])

    def _setup_volume_mode(self):
        """Volume mode: each column becomes a green volume slider."""
        for track in range(self._matrix.width()):
            strip = self._mixer.channel_strip(track)
            strip.set_default_buttons(None, None, None, None)
            strip.set_mute_button(None)
            strip.set_solo_button(None)
            strip.set_arm_button(None)
            strip.set_send_controls((None, None))
            strip.set_pan_control(None)
            for row in range(self._matrix.height()):
                self._matrix.get_button(track, row).set_on_off_values(GREEN_FULL, LED_OFF)
            self._sliders[track].set_mode(SLIDER_MODE_VOLUME)
            self._sliders[track].set_value_map(VOL_VALUE_MAP)
            strip.set_volume_control(self._sliders[track])
        self._session.set_stop_track_clip_buttons(None)
        self._session.set_stop_all_clips_button(None)
        self._mixer.set_global_buttons(None, None, None)

    def _setup_pan_mode(self):
        """Pan mode: each column becomes an amber pan slider."""
        for track in range(self._matrix.width()):
            strip = self._mixer.channel_strip(track)
            strip.set_default_buttons(None, None, None, None)
            strip.set_mute_button(None)
            strip.set_solo_button(None)
            strip.set_arm_button(None)
            strip.set_send_controls((None, None))
            strip.set_volume_control(None)
            for row in range(self._matrix.height()):
                self._matrix.get_button(track, row).set_on_off_values(AMBER_FULL, LED_OFF)
            self._sliders[track].set_mode(SLIDER_MODE_PAN)
            self._sliders[track].set_value_map(PAN_VALUE_MAP)
            strip.set_pan_control(self._sliders[track])
        self._session.set_stop_track_clip_buttons(None)
        self._session.set_stop_all_clips_button(None)
        self._mixer.set_global_buttons(None, None, None)

    def _setup_send1_mode(self):
        """Send-1 mode: each column becomes a red slider for the first send."""
        for track in range(self._matrix.width()):
            strip = self._mixer.channel_strip(track)
            strip.set_default_buttons(None, None, None, None)
            strip.set_mute_button(None)
            strip.set_solo_button(None)
            strip.set_arm_button(None)
            strip.set_volume_control(None)
            strip.set_pan_control(None)
            for row in range(self._matrix.height()):
                self._matrix.get_button(track, row).set_on_off_values(RED_FULL, LED_OFF)
            self._sliders[track].set_mode(SLIDER_MODE_VOLUME)
            self._sliders[track].set_value_map(SEND_VALUE_MAP)
            strip.set_send_controls((self._sliders[track], None))
        self._session.set_stop_track_clip_buttons(None)
        self._session.set_stop_all_clips_button(None)
        self._mixer.set_global_buttons(None, None, None)

    def _setup_send2_mode(self):
        """Send-2 mode: each column becomes a red slider for the second send."""
        for track in range(self._matrix.width()):
            strip = self._mixer.channel_strip(track)
            strip.set_default_buttons(None, None, None, None)
            strip.set_mute_button(None)
            strip.set_solo_button(None)
            strip.set_arm_button(None)
            strip.set_volume_control(None)
            strip.set_pan_control(None)
            for row in range(self._matrix.height()):
                self._matrix.get_button(track, row).set_on_off_values(RED_FULL, LED_OFF)
            self._sliders[track].set_mode(SLIDER_MODE_VOLUME)
            self._sliders[track].set_value_map(SEND_VALUE_MAP)
            strip.set_send_controls((None, self._sliders[track]))
        self._session.set_stop_track_clip_buttons(None)
        self._session.set_stop_all_clips_button(None)
        self._mixer.set_global_buttons(None, None, None)
| {
"repo_name": "jim-cooley/abletonremotescripts",
"path": "remote-scripts/samples/Launchpad95/SubSelectorComponent.py",
"copies": "1",
"size": "9754",
"license": "apache-2.0",
"hash": 1348479002346262300,
"line_mean": 34.9926199262,
"line_max": 168,
"alpha_frac": 0.6995078942,
"autogenerated": false,
"ratio": 2.8713570797762733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4070864973976273,
"avg_score": null,
"num_lines": null
} |
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.ButtonMatrixElement import ButtonMatrixElement
class DeviceControllerComponent(ModeSelectorComponent):
    """Mode selector that appoints device "presets" in the Live set.

    A preset is any device whose name matches ``p<N>`` (``p1``, ``p2``, ...);
    selecting mode button N-1 searches all tracks, return tracks and the
    master track for it and makes it the appointed device via the owning
    script.
    """

    def __init__(self, script, *a, **k):
        ModeSelectorComponent.__init__(self, *a, **k)
        self._script = script          # owning control-surface script (appoints devices)
        self._mode_index = 0
        self._number_of_modes = 0
        self._offset = 0               # added to the button index to form the preset number
        self._buttons = None           # buttons to (re)install when enabled
        self._last_preset = 0          # preset index currently lit

    def set_offset(self, offset = 0):
        """Change the preset-number offset and refresh bindings/LEDs."""
        assert isinstance(offset, int)
        self._offset = offset
        self.update()

    def assign_buttons(self, buttons, offset = 0):
        """Remember `buttons` as the mode buttons and refresh.

        NOTE(review): MonoButtonElement is not imported in this module, so
        the isinstance assertion below raises NameError whenever `buttons`
        is not None -- confirm the intended import.
        """
        assert isinstance(offset, int)
        self._offset = offset
        if(buttons != None):
            for button in buttons:
                assert isinstance(button, MonoButtonElement)
        self._buttons = buttons
        self.update()

    def set_mode_buttons(self, buttons):
        """Install value listeners on `buttons` and restore the LED state."""
        for button in self._modes_buttons:
            button.remove_value_listener(self._mode_value)
        self._modes_buttons = []
        if (buttons != None):
            identify_sender = True
            for button in buttons:
                # NOTE(review): `ButtonElement or MonoButtonElement` evaluates
                # to ButtonElement only; an isinstance tuple was probably meant.
                assert isinstance(button, ButtonElement or MonoButtonElement)
                button.add_value_listener(self._mode_value, identify_sender)
                self._modes_buttons.append(button)
        self._number_of_modes = len(self._modes_buttons) + self._offset
        for index in range(len(self._modes_buttons)):
            if (index + self._offset) == self._last_preset:
                self._modes_buttons[index].turn_on()
            else:
                self._modes_buttons[index].turn_off()

    def set_mode_toggle(self, button):
        """Install (or clear) the button that cycles through the modes."""
        assert ((button == None) or isinstance(button, ButtonElement or MonoButtonElement))
        if (self._mode_toggle != None):
            self._mode_toggle.remove_value_listener(self._toggle_value)
        self._mode_toggle = button
        if (self._mode_toggle != None):
            self._mode_toggle.add_value_listener(self._toggle_value)

    def number_of_modes(self):
        return self._number_of_modes

    def update(self):
        """Find the preset device for the current mode, appoint it, update LEDs."""
        # FIX: `match` was an unresolved global name (NameError at runtime);
        # bind re.match locally so the lookup below works.
        from re import match
        if not self.is_enabled():
            self.set_mode_buttons(None)
            return
        if len(self._modes_buttons) == 0:
            self.set_mode_buttons(self._buttons)
        key = str('p' + str(self._mode_index + 1 + self._offset))
        preset = None
        for track in self.song().tracks:
            for device in self.enumerate_track_device(track):
                if match(key, str(device.name)) != None:
                    preset = device
        for return_track in self.song().return_tracks:
            # FIX: compare against the device *name* as in the track loop
            # (str(device) is not the device's name).
            for device in self.enumerate_track_device(return_track):
                if match(key, str(device.name)) != None:
                    preset = device
        # FIX: pass the master track itself -- passing `.devices` made
        # enumerate_track_device return nothing (a list has no `.devices`).
        for device in self.enumerate_track_device(self.song().master_track):
            if match(key, str(device.name)) != None:
                preset = device
        if preset != None:
            self._script.set_appointed_device(preset)
            self._last_preset = self._mode_index + self._offset
        for index in range(len(self._modes_buttons)):
            # FIX: original read `self._modes_buttons[button]` with `button`
            # unbound (NameError) and then indexed the list with the button
            # object itself; index by position and call the button directly.
            button = self._modes_buttons[index]
            if isinstance(button, ButtonElement):
                if (index + self._offset) == self._last_preset:
                    button.turn_on()
                else:
                    button.turn_off()

    def enumerate_track_device(self, track):
        """Return a flat list of all devices on `track`, recursing into rack chains."""
        devices = []
        if hasattr(track, 'devices'):
            for device in track.devices:
                devices.append(device)
                if device.can_have_chains:
                    for chain in device.chains:
                        for chain_device in self.enumerate_track_device(chain):
                            devices.append(chain_device)
        return devices

    def on_enabled_changed(self):
        """Install/remove the mode buttons and restore LEDs when toggled."""
        if not self.is_enabled():
            self.set_mode_buttons(None)
            return
        if len(self._modes_buttons) == 0:
            self.set_mode_buttons(self._buttons)
        for index in range(len(self._modes_buttons)):
            if (index + self._offset) == self._last_preset:
                self._modes_buttons[index].turn_on()
            else:
                self._modes_buttons[index].turn_off()
"repo_name": "jim-cooley/abletonremotescripts",
"path": "remote-scripts/branches/VCM600_2/DeviceControllerComponent.py",
"copies": "1",
"size": "5075",
"license": "apache-2.0",
"hash": -2564608160334679000,
"line_mean": 42.0169491525,
"line_max": 95,
"alpha_frac": 0.5761576355,
"autogenerated": false,
"ratio": 4.136104319478402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004388079650647949,
"num_lines": 118
} |
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.ButtonMatrixElement import ButtonMatrixElement
class MainControllerComponent(ModeSelectorComponent):
def __init__(self, *a, **k):
ModeSelectorComponent.__init__(self, *a, **k)
self._mode_index = 0
self._number_of_modes = 0
self._offset = 0
self._buttons = None
self._last_preset = 0
def set_offset(self, offset = 0):
assert isinstance(offset, int)
self._offset = offset
self.update()
def assign_buttons(self, buttons, offset = 0):
assert isinstance(offset, int)
self._offset = offset
if(buttons != None):
for button in buttons:
assert isinstance(button, MonoButtonElement)
self._buttons = buttons
self.update()
def set_mode_buttons(self, buttons):
for button in self._modes_buttons:
button.remove_value_listener(self._mode_value)
self._modes_buttons = []
if (buttons != None):
for button in buttons:
assert isinstance(button, ButtonElement or MonoButtonElement)
identify_sender = True
button.add_value_listener(self._mode_value, identify_sender)
self._modes_buttons.append(button)
self._number_of_modes = len(self._modes_buttons) + self._offset
for index in range(len(self._modes_buttons)):
if (index + self._offset) == self._last_preset:
self._modes_buttons[index].turn_on()
else:
self._modes_buttons[index].turn_off()
def set_mode_toggle(self, button):
assert ((button == None) or isinstance(button, ButtonElement or MonoButtonElement))
if (self._mode_toggle != None):
self._mode_toggle.remove_value_listener(self._toggle_value)
self._mode_toggle = button
if (self._mode_toggle != None):
self._mode_toggle.add_value_listener(self._toggle_value)
def number_of_modes(self):
return self._number_of_modes
def update(self):
    """Appoint the device whose name matches the current preset key.

    When disabled, unbinds all mode buttons. When enabled, (re)binds the
    buttons if needed, searches every track / return track / the master
    track for a device whose name matches 'p<mode index + 1 + offset>',
    appoints the first-found... actually the *last* match wins (search
    order: tracks, returns, master), then refreshes the button LEDs.
    """
    if not self.is_enabled():
        self.set_mode_buttons(None)
    else:
        if len(self._modes_buttons) == 0:
            self.set_mode_buttons(self._buttons)
        # Preset devices are named 'p1', 'p2', ... relative to the offset.
        key = 'p' + str(self._mode_index + 1 + self._offset)
        preset = None
        for track in self.song().tracks:
            for device in self.enumerate_track_device(track):
                if match(key, str(device.name)) is not None:
                    preset = device
        # NOTE(review): return/master devices are matched against
        # str(device) while regular tracks use device.name — kept as in the
        # original, but this asymmetry looks unintended; verify.
        for return_track in self.song().return_tracks:
            for device in self.enumerate_track_device(return_track):
                if match(key, str(device)) is not None:
                    preset = device
        # BUGFIX: the original passed master_track.devices (a plain list,
        # which has no `.devices` attribute), so enumerate_track_device
        # always returned [] and master-track presets were never found.
        for device in self.enumerate_track_device(self.song().master_track):
            if match(key, str(device)) is not None:
                preset = device
        if preset is not None:
            self._script.set_appointed_device(preset)
            self._last_preset = self._mode_index + self._offset
        # BUGFIX: the original LED loop indexed with the unbound name
        # `button` (`self._modes_buttons[button]`), raising at runtime;
        # index with the loop counter as in on_enabled_changed().
        for index in range(len(self._modes_buttons)):
            button = self._modes_buttons[index]
            if isinstance(button, ButtonElement):
                if (index + self._offset) == self._last_preset:
                    button.turn_on()
                else:
                    button.turn_off()
def enumerate_track_device(self, track):
    """Recursively collect every device on *track*.

    Descends into rack devices (can_have_chains) and flattens the devices
    of all their chains into one list. Objects without a `devices`
    attribute yield an empty list.
    """
    found = []
    if hasattr(track, 'devices'):
        for device in track.devices:
            found.append(device)
            if device.can_have_chains:
                for chain in device.chains:
                    found.extend(self.enumerate_track_device(chain))
    return found
def on_enabled_changed(self):
    """Re-sync button bindings and LED state when the component is (dis)abled.

    Disabling detaches all mode buttons; enabling re-binds them if needed
    and lights only the button matching the last selected preset.
    """
    if not self.is_enabled():
        self.set_mode_buttons(None)
    else:
        # FIX: the original compared `len(...) is 0` — identity comparison
        # with an int literal is unreliable; use equality.
        if len(self._modes_buttons) == 0:
            self.set_mode_buttons(self._buttons)
        for index in range(len(self._modes_buttons)):
            if (index + self._offset) == self._last_preset:
                self._modes_buttons[index].turn_on()
            else:
                self._modes_buttons[index].turn_off()
def _on_timer(self):
if (self.is_enabled() and (self._track != None)):
if (self._toggle_fold_ticks_delay > -1):
assert self._track.is_foldable
if (self._toggle_fold_ticks_delay == 0):
self._track.fold_state = (not self._track.fold_state)
self._toggle_fold_ticks_delay -= 1 | {
"repo_name": "jim-cooley/abletonremotescripts",
"path": "remote-scripts/branches/VCM600_2/MainControllerComponent.py",
"copies": "1",
"size": "5413",
"license": "apache-2.0",
"hash": 8354018688216499000,
"line_mean": 42.312,
"line_max": 97,
"alpha_frac": 0.5704784777,
"autogenerated": false,
"ratio": 4.110098709187548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5180577186887547,
"avg_score": null,
"num_lines": null
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
import random
class BridgesAPI(OrchestratorBase):
    """Test-suite helper wrapping the orchestrator's node-bridge REST endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client
        # Bridges created through post_nodes_bridges, kept for later cleanup.
        self.createdbridges = []

    @catch_exception_decoration
    def get_nodes_bridges(self, nodeid):
        """List all bridges on node *nodeid*."""
        return self.orchestrator_client.nodes.ListBridges(nodeid=nodeid)

    @catch_exception_decoration
    def get_nodes_bridges_bridgeid(self, nodeid, bridgeid):
        """Fetch bridge *bridgeid* on node *nodeid*."""
        return self.orchestrator_client.nodes.GetBridge(nodeid=nodeid, bridgeid=bridgeid)

    @catch_exception_decoration_return
    def post_nodes_bridges(self, node_id, **kwargs):
        """Create a bridge on *node_id* with randomized defaults.

        kwargs override the generated payload; returns (response, data).
        """
        octet_a = random.randint(1, 254)
        octet_b = random.randint(1, 254)
        if 'networkMode' not in kwargs:  # idiomatic membership test, not .keys()
            kwargs['networkMode'] = self.random_choise(["none", "static", "dnsmasq"])
        # Per-mode network settings; only the chosen mode's entry is used.
        settings_draft = {
            "none": {},
            "static": {"cidr": "192.%i.%i.%i/24" % (random.randint(1, 254),
                                                    random.randint(1, 254),
                                                    random.randint(1, 254))},
            "dnsmasq": {"cidr": "192.%i.%i.1/24" % (octet_a, octet_b),
                        "start": "192.%i.%i.2" % (octet_a, octet_b),
                        "end": "192.%i.%i.254" % (octet_a, octet_b)},
        }
        data = {"name": self.random_string(),
                "hwaddr": self.randomMAC(),
                "networkMode": kwargs['networkMode'],
                "nat": self.random_choise([False, True]),
                "setting": settings_draft[kwargs['networkMode']]}
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.nodes.CreateBridge(nodeid=node_id,
                                                               data=data)
        if response.status_code == 201:
            self.createdbridges.append({"node": node_id, "name": data["name"]})
        return response, data

    @catch_exception_decoration
    def delete_nodes_bridges_bridgeid(self, nodeid, bridgeid):
        """Delete bridge *bridgeid* and drop it from the cleanup list."""
        response = self.orchestrator_client.nodes.DeleteBridge(nodeid=nodeid,
                                                               bridgeid=bridgeid)
        if response.status_code == 204:
            entry = {"node": nodeid, "name": bridgeid}
            # Guard the remove: the bridge may not have been created via
            # this API, and an unguarded list.remove raises ValueError.
            if entry in self.createdbridges:
                self.createdbridges.remove(entry)
        return response
| {
"repo_name": "g8os/grid",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/bridges_apis.py",
"copies": "2",
"size": "2413",
"license": "apache-2.0",
"hash": 8154722771491599000,
"line_mean": 46.3137254902,
"line_max": 139,
"alpha_frac": 0.5951098218,
"autogenerated": false,
"ratio": 3.6727549467275495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010301339724230114,
"num_lines": 51
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
import random
class StoragepoolsAPI(OrchestratorBase):
    """Test-suite helper wrapping the orchestrator's storage-pool endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client

    @catch_exception_decoration
    def get_storagepools(self, nodeid):
        """List the storage pools of node *nodeid*."""
        return self.orchestrator_client.nodes.ListStoragePools(nodeid=nodeid)

    @catch_exception_decoration_return
    def post_storagepools(self, node_id, free_devices, **kwargs):
        """Create a storage pool on one random device; returns (response, data)."""
        data = {"name": self.random_string(),
                "metadataProfile": 'single',
                "dataProfile": 'single',
                "devices": [random.choice(free_devices)]}
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.nodes.CreateStoragePool(nodeid=node_id, data=data)
        return response, data

    @catch_exception_decoration
    def get_storagepools_storagepoolname(self, nodeid, storagepoolname):
        """Fetch info for one storage pool."""
        return self.orchestrator_client.nodes.GetStoragePoolInfo(nodeid=nodeid, storagepoolname=storagepoolname)

    @catch_exception_decoration
    def delete_storagepools_storagepoolname(self, nodeid, storagepoolname):
        """Delete one storage pool."""
        return self.orchestrator_client.nodes.DeleteStoragePool(nodeid=nodeid, storagepoolname=storagepoolname)

    @catch_exception_decoration
    def get_storagepools_storagepoolname_devices(self, nodeid, storagepoolname):
        """List the devices backing a storage pool."""
        return self.orchestrator_client.nodes.ListStoragePoolDevices(nodeid=nodeid, storagepoolname=storagepoolname)

    @catch_exception_decoration
    def post_storagepools_storagepoolname_devices(self, nodeid, storagepoolname, data):
        """Add devices to a storage pool."""
        return self.orchestrator_client.nodes.CreateStoragePoolDevices(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                       data=data)

    @catch_exception_decoration
    def get_storagepools_storagepoolname_devices_deviceid(self, nodeid, storagepoolname, deviceuuid):
        """Fetch info for one pool device."""
        return self.orchestrator_client.nodes.GetStoragePoolDeviceInfo(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                       deviceuuid=deviceuuid)

    @catch_exception_decoration
    def delete_storagepools_storagepoolname_devices_deviceid(self, nodeid, storagepoolname, deviceuuid):
        """Remove one device from a storage pool."""
        return self.orchestrator_client.nodes.DeleteStoragePoolDevice(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                      deviceuuid=deviceuuid)

    @catch_exception_decoration
    def get_storagepools_storagepoolname_filesystems(self, nodeid, storagepoolname):
        """List the filesystems of a storage pool."""
        return self.orchestrator_client.nodes.ListFilesystems(nodeid=nodeid, storagepoolname=storagepoolname)

    @catch_exception_decoration_return
    def post_storagepools_storagepoolname_filesystems(self, node_id, storagepoolname, **kwargs):
        """Create a filesystem with a random name/quota; returns (response, data)."""
        data = {"name": self.random_string(),
                "quota": random.randint(0, 10),
                "readOnly": False}
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.nodes.CreateFilesystem(nodeid=node_id, storagepoolname=storagepoolname,
                                                                   data=data)
        return response, data

    @catch_exception_decoration
    def get_storagepools_storagepoolname_filesystems_filesystemname(self, nodeid, storagepoolname, filesystemname):
        """Fetch info for one filesystem."""
        return self.orchestrator_client.nodes.GetFilesystemInfo(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                filesystemname=filesystemname)

    @catch_exception_decoration
    def delete_storagepools_storagepoolname_filesystems_filesystemname(self, nodeid, storagepoolname, filesystemname):
        """Delete one filesystem."""
        return self.orchestrator_client.nodes.DeleteFilesystem(nodeid=nodeid, storagepoolname=storagepoolname,
                                                               filesystemname=filesystemname)

    @catch_exception_decoration
    def get_filesystem_snapshots(self, nodeid, storagepoolname, filesystemname):
        """List the snapshots of a filesystem."""
        return self.orchestrator_client.nodes.ListFilesystemSnapshots(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                      filesystemname=filesystemname)

    @catch_exception_decoration_return
    def post_filesystems_snapshots(self, nodeid, storagepoolname, filesystemname, **kwargs):
        """Snapshot a filesystem under a random name; returns (response, data)."""
        data = {'name': self.random_string()}
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.nodes.CreateSnapshot(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                 filesystemname=filesystemname,
                                                                 data=data)
        return response, data

    @catch_exception_decoration
    def get_filesystem_snapshots_snapshotname(self, nodeid, storagepoolname, filesystemname, snapshotname):
        """Fetch info for one snapshot."""
        return self.orchestrator_client.nodes.GetFilesystemSnapshotInfo(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                        filesystemname=filesystemname,
                                                                        snapshotname=snapshotname)

    @catch_exception_decoration
    def delete_filesystem_snapshots_snapshotname(self, nodeid, storagepoolname, filesystemname, snapshotname):
        """Delete one snapshot."""
        return self.orchestrator_client.nodes.DeleteFilesystemSnapshot(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                       filesystemname=filesystemname,
                                                                       snapshotname=snapshotname)

    @catch_exception_decoration
    def post_filesystem_snapshots_snapshotname_rollback(self, nodeid, storagepoolname, filesystemname, snapshotname,
                                                        data=None):
        """Roll a filesystem back to *snapshotname*.

        FIX: the default was the mutable literal `data={}`, shared across
        calls; use None and substitute a fresh dict per call instead.
        """
        if data is None:
            data = {}
        return self.orchestrator_client.nodes.RollbackFilesystemSnapshot(nodeid=nodeid, storagepoolname=storagepoolname,
                                                                         filesystemname=filesystemname,
                                                                         snapshotname=snapshotname,
                                                                         data=data)
| {
"repo_name": "zero-os/0-orchestrator",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/storagepools_apis.py",
"copies": "2",
"size": "6636",
"license": "apache-2.0",
"hash": 7003958070664350000,
"line_mean": 59.880733945,
"line_max": 120,
"alpha_frac": 0.6387884268,
"autogenerated": false,
"ratio": 4.630844382414515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6269632809214515,
"avg_score": null,
"num_lines": null
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
import random
class VDisksAPIs(OrchestratorBase):
    """Test-suite helper wrapping the orchestrator's vdisk-storage endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client

    @catch_exception_decoration
    def get_vdiskstorage(self):
        """List all vdisk storages."""
        return self.orchestrator_client.vdiskstorage.ListVdiskStorages()

    @catch_exception_decoration_return
    def post_vdiskstorage(self, storagecluster, **kwargs):
        """Create a vdisk storage backed by *storagecluster*; returns (response, data)."""
        data = {
            "id": self.random_string(),
            "blockCluster": storagecluster
        }
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.vdiskstorage.CreateNewVdiskStorage(data=data)
        return response, data

    @catch_exception_decoration
    def get_vdiskstorage_info(self, vdiskstorageid):
        """Fetch info for one vdisk storage."""
        return self.orchestrator_client.vdiskstorage.GetVdiskStorageInfo(vdiskstorageid=vdiskstorageid)

    @catch_exception_decoration
    def delete_vdiskstorage(self, vdiskstorageid):
        """Delete one vdisk storage."""
        return self.orchestrator_client.vdiskstorage.DeleteVdiskStorage(vdiskstorageid=vdiskstorageid)

    @catch_exception_decoration
    def get_import_images(self, vdiskstorageid):
        """List the images imported into a vdisk storage."""
        return self.orchestrator_client.vdiskstorage.ListImages(vdiskstorageid=vdiskstorageid)

    @catch_exception_decoration_return
    def post_import_image(self, vdiskstorageid, **kwargs):
        """Import the ubuntu-1604 template image; returns (response, data)."""
        size = random.randint(15, 100)
        block_size = 2 ** random.randint(9, 15)
        # Must be 1048576 for the template:ubuntu-1604 export.
        # FIX: this constant was computed but then duplicated as a literal
        # in the payload; reference the named value instead.
        export_block_size = 1048576
        data = {"imageName": self.random_string(),
                "exportName": "template:ubuntu-1604",
                "exportSnapshot": "default",
                "exportBlockSize": export_block_size,
                "size": size,
                "diskBlockSize": block_size,
                "url": "ftp://hub.gig.tech"
                }
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.vdiskstorage.ImportImage(vdiskstorageid=vdiskstorageid, data=data)
        return response, data

    @catch_exception_decoration
    def get_image_info(self, vdiskstorageid, imageid):
        """Fetch info for one imported image."""
        return self.orchestrator_client.vdiskstorage.GetImage(vdiskstorageid=vdiskstorageid, imageid=imageid)

    @catch_exception_decoration
    def delete_image(self, vdiskstorageid, imageid):
        """Delete one imported image."""
        return self.orchestrator_client.vdiskstorage.DeleteImage(vdiskstorageid=vdiskstorageid, imageid=imageid)

    @catch_exception_decoration
    def get_vdisks(self, vdiskstorageid):
        """List the vdisks of a vdisk storage."""
        return self.orchestrator_client.vdiskstorage.ListVdisks(vdiskstorageid=vdiskstorageid)

    @catch_exception_decoration_return
    def post_vdisks(self, vdiskstorageid, imageid="", **kwargs):
        """Create a vdisk with random size/blocksize/type; returns (response, data).

        Boot-type vdisks additionally reference *imageid*.
        """
        size = random.randint(15, 100)
        block_size = 2 ** random.randint(9, 15)
        data = {"id": self.random_string(),
                "size": size,
                "blocksize": block_size,
                "type": random.choice(['boot', 'db', 'cache', 'tmp']),
                "readOnly": random.choice([False, True])}
        data = self.update_default_data(default_data=data, new_data=kwargs)
        if data['type'] == 'boot':
            data['imageId'] = imageid
        response = self.orchestrator_client.vdiskstorage.CreateNewVdisk(vdiskstorageid=vdiskstorageid, data=data)
        return response, data

    @catch_exception_decoration
    def get_vdisks_vdiskid(self, vdiskstorageid, vdiskid):
        """Fetch info for one vdisk."""
        return self.orchestrator_client.vdiskstorage.GetVdiskInfo(vdiskstorageid=vdiskstorageid, vdiskid=vdiskid)

    @catch_exception_decoration
    def delete_vdisks_vdiskid(self, vdiskstorageid, vdiskid):
        """Delete one vdisk."""
        return self.orchestrator_client.vdiskstorage.DeleteVdisk(vdiskstorageid=vdiskstorageid, vdiskid=vdiskid)

    @catch_exception_decoration
    def post_vdisks_vdiskid_resize(self, vdiskstorageid, vdiskid, data):
        """Resize one vdisk."""
        return self.orchestrator_client.vdiskstorage.ResizeVdisk(vdiskstorageid=vdiskstorageid, vdiskid=vdiskid, data=data)

    @catch_exception_decoration
    def post_vdisks_vdiskid_rollback(self, vdiskstorageid, vdiskid, data):
        """Roll back one vdisk.

        BUGFIX: *vdiskid* was accepted but never forwarded, so the rollback
        targeted no specific vdisk; pass it through like ResizeVdisk does.
        """
        return self.orchestrator_client.vdiskstorage.RollbackVdisk(vdiskstorageid=vdiskstorageid, vdiskid=vdiskid,
                                                                   data=data)
| {
"repo_name": "zero-os/0-orchestrator",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/vdisks_apis.py",
"copies": "2",
"size": "4452",
"license": "apache-2.0",
"hash": 1938611394940791300,
"line_mean": 44.4285714286,
"line_max": 123,
"alpha_frac": 0.6992362983,
"autogenerated": false,
"ratio": 3.5027537372147917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5201990035514792,
"avg_score": null,
"num_lines": null
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
import random
class VmsAPI(OrchestratorBase):
    """Thin client-side wrapper for the orchestrator's node VM endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client

    @catch_exception_decoration
    def get_nodes_vms(self, nodeid):
        """List the VMs of node *nodeid*."""
        api = self.orchestrator_client.nodes
        return api.ListVMs(nodeid=nodeid)

    @catch_exception_decoration
    def get_nodes_vms_vmid(self, nodeid, vmid):
        """Fetch VM *vmid* on node *nodeid*."""
        api = self.orchestrator_client.nodes
        return api.GetVM(nodeid=nodeid, vmid=vmid)

    @catch_exception_decoration
    def get_nodes_vms_vmid_info(self, nodeid, vmid):
        """Fetch runtime info for VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.GetVMInfo(nodeid=nodeid, vmid=vmid)

    @catch_exception_decoration_return
    def post_nodes_vms(self, node_id, **kwargs):
        """Create a VM with randomized memory/cpu defaults; returns (response, data)."""
        defaults = {"id": self.random_string(),
                    "memory": random.randint(1, 16) * 1024,
                    "cpu": random.randint(1, 16),
                    "nics": [],
                    "disks": []}
        payload = self.update_default_data(default_data=defaults, new_data=kwargs)
        response = self.orchestrator_client.nodes.CreateVM(nodeid=node_id, data=payload)
        return response, payload

    @catch_exception_decoration
    def put_nodes_vms_vmid(self, nodeid, vmid, data):
        """Update the definition of VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.UpdateVM(nodeid=nodeid, vmid=vmid, data=data)

    @catch_exception_decoration
    def delete_nodes_vms_vmid(self, nodeid, vmid):
        """Delete VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.DeleteVM(nodeid=nodeid, vmid=vmid)

    @catch_exception_decoration
    def post_nodes_vms_vmid_start(self, nodeid, vmid):
        """Start VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.StartVM(nodeid=nodeid, vmid=vmid, data={})

    @catch_exception_decoration
    def post_nodes_vms_vmid_stop(self, nodeid, vmid):
        """Stop VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.StopVM(nodeid=nodeid, vmid=vmid, data={})

    @catch_exception_decoration
    def post_nodes_vms_vmid_pause(self, nodeid, vmid):
        """Pause VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.PauseVM(nodeid=nodeid, vmid=vmid, data={})

    @catch_exception_decoration
    def post_nodes_vms_vmid_resume(self, nodeid, vmid):
        """Resume a paused VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.ResumeVM(nodeid=nodeid, vmid=vmid, data={})

    @catch_exception_decoration
    def post_nodes_vms_vmid_shutdown(self, nodeid, vmid):
        """Gracefully shut down VM *vmid*."""
        api = self.orchestrator_client.nodes
        return api.ShutdownVM(nodeid=nodeid, vmid=vmid, data={})

    @catch_exception_decoration
    def post_nodes_vms_vmid_migrate(self, nodeid, vmid, data):
        """Migrate VM *vmid* to the node given in *data*."""
        api = self.orchestrator_client.nodes
        return api.MigrateVM(nodeid=nodeid, vmid=vmid, data=data)
| {
"repo_name": "zero-os/0-orchestrator",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/vms_apis.py",
"copies": "2",
"size": "2702",
"license": "apache-2.0",
"hash": 4579247441416696300,
"line_mean": 40.5692307692,
"line_max": 92,
"alpha_frac": 0.6980014804,
"autogenerated": false,
"ratio": 3.2205005959475566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4918502076347556,
"avg_score": null,
"num_lines": null
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
class ContainersAPI(OrchestratorBase):
    """Test-suite helper wrapping the orchestrator's container endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client

    @catch_exception_decoration
    def get_containers(self, nodeid):
        """List the containers of node *nodeid*."""
        return self.orchestrator_client.nodes.ListContainers(nodeid=nodeid)

    @catch_exception_decoration_return
    def post_containers(self, nodeid, **kwargs):
        """Create a container with default flist/storage; returns (response, data)."""
        data = {
            "name": self.random_string(),
            "hostname": self.random_string(),
            "flist": "https://hub.gig.tech/ah-elsayed/ubuntu.flist",
            "hostNetworking": False,
            "initProcesses": [],
            "filesystems": [],
            "ports": [],
            "nics": [],
            "storage": "ardb://hub.gig.tech:16379"
        }
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.nodes.CreateContainer(nodeid=nodeid, data=data)
        return response, data

    @catch_exception_decoration_return
    def update_container(self, nodeid, containername, **kwargs):
        """Update a container definition; returns (response, data).

        FIX: UpdateContainer was the only client call in this module made
        with positional arguments (data, containername, nodeid); normalized
        to the keyword style used everywhere else.
        """
        data = {"nics": []}
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.nodes.UpdateContainer(nodeid=nodeid, containername=containername,
                                                                  data=data)
        return response, data

    @catch_exception_decoration
    def delete_containers_containerid(self, nodeid, containername):
        """Delete one container."""
        response = self.orchestrator_client.nodes.DeleteContainer(nodeid=nodeid, containername=containername)
        return response

    @catch_exception_decoration
    def get_containers_containerid(self, nodeid, containername):
        """Fetch one container."""
        return self.orchestrator_client.nodes.GetContainer(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def post_containers_containerid_start(self, nodeid, containername):
        """Start one container."""
        return self.orchestrator_client.nodes.StartContainer(nodeid=nodeid, containername=containername, data={})

    @catch_exception_decoration
    def post_containers_containerid_stop(self, nodeid, containername):
        """Stop one container."""
        return self.orchestrator_client.nodes.StopContainer(nodeid=nodeid, containername=containername, data={})

    @catch_exception_decoration
    def post_containers_containerid_filesystem(self, nodeid, containername, data, params):
        """Upload a file into a container's filesystem."""
        return self.orchestrator_client.nodes.FileUpload(nodeid=nodeid, containername=containername, data=data,
                                                         query_params=params, content_type='multipart/form-data')

    @catch_exception_decoration
    def get_containers_containerid_filesystem(self, nodeid, containername, params):
        """Download a file from a container's filesystem."""
        return self.orchestrator_client.nodes.FileDownload(nodeid=nodeid, containername=containername,
                                                           query_params=params)

    @catch_exception_decoration
    def delete_containers_containerid_filesystem(self, nodeid, containername, data):
        """Delete a file from a container's filesystem."""
        return self.orchestrator_client.nodes.FileDelete(nodeid=nodeid, containername=containername, data=data)

    @catch_exception_decoration
    def get_containers_containerid_jobs(self, nodeid, containername):
        """List the jobs running in a container."""
        return self.orchestrator_client.nodes.ListContainerJobs(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def delete_containers_containerid_jobs(self, nodeid, containername):
        """Kill all jobs in a container."""
        return self.orchestrator_client.nodes.KillAllContainerJobs(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def get_containers_containerid_jobs_jobid(self, nodeid, containername, jobid):
        """Fetch one container job."""
        return self.orchestrator_client.nodes.GetContainerJob(nodeid=nodeid, containername=containername, jobid=jobid)

    @catch_exception_decoration
    def post_containers_containerid_jobs_jobid(self, nodeid, containername, jobid, data):
        """Send a signal to one container job."""
        return self.orchestrator_client.nodes.SendSignalToJob(nodeid=nodeid, containername=containername, jobid=jobid,
                                                              data=data)

    @catch_exception_decoration
    def delete_containers_containerid_jobs_jobid(self, nodeid, containername, jobid):
        """Kill one container job."""
        return self.orchestrator_client.nodes.KillContainerJob(nodeid=nodeid, containername=containername, jobid=jobid)

    @catch_exception_decoration
    def post_containers_containerid_ping(self, nodeid, containername):
        """Ping a container."""
        return self.orchestrator_client.nodes.PingContainer(nodeid=nodeid, containername=containername, data={})

    @catch_exception_decoration
    def get_containers_containerid_state(self, nodeid, containername):
        """Fetch a container's aggregated state."""
        return self.orchestrator_client.nodes.GetContainerState(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def get_containers_containerid_info(self, nodeid, containername):
        """Fetch a container's OS info (same endpoint as get_container_info)."""
        return self.orchestrator_client.nodes.GetContainerOSInfo(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def get_containers_containerid_processes(self, nodeid, containername):
        """List the processes of a container."""
        return self.orchestrator_client.nodes.ListContainerProcesses(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def post_containers_containerid_jobs(self, nodeid, containername, data):
        """Start a new job in a container."""
        return self.orchestrator_client.nodes.StartContainerJob(nodeid=nodeid, containername=containername, data=data)

    @catch_exception_decoration
    def get_containers_containerid_processes_processid(self, nodeid, containername, processid):
        """Fetch one container process."""
        return self.orchestrator_client.nodes.GetContainerProcess(nodeid=nodeid, containername=containername,
                                                                  processid=processid)

    @catch_exception_decoration
    def post_containers_containerid_processes_processid(self, nodeid, containername, processid, data):
        """Send a signal to one container process."""
        return self.orchestrator_client.nodes.SendSignalToProcess(nodeid=nodeid, containername=containername,
                                                                  processid=processid, data=data)

    @catch_exception_decoration
    def delete_containers_containerid_processes_processid(self, nodeid, containername, processid):
        """Kill one container process."""
        return self.orchestrator_client.nodes.KillContainerProcess(nodeid=nodeid, containername=containername,
                                                                   processid=processid)

    @catch_exception_decoration
    def get_container_nics(self, nodeid, containername):
        """Fetch a container's NIC info."""
        return self.orchestrator_client.nodes.GetContainerNicInfo(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def get_container_mem(self, nodeid, containername):
        """Fetch a container's memory info."""
        return self.orchestrator_client.nodes.GetContainerMemInfo(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def get_container_cpus(self, nodeid, containername):
        """Fetch a container's CPU info."""
        return self.orchestrator_client.nodes.GetContainerCPUInfo(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def get_container_disks(self, nodeid, containername):
        """Fetch a container's disk info."""
        return self.orchestrator_client.nodes.GetContainerDiskInfo(nodeid=nodeid, containername=containername)

    @catch_exception_decoration
    def get_container_info(self, nodeid, containername):
        """Fetch a container's OS info.

        NOTE(review): duplicates get_containers_containerid_info; both are
        kept because callers may use either name.
        """
        return self.orchestrator_client.nodes.GetContainerOSInfo(nodeid=nodeid, containername=containername)
"repo_name": "zero-os/0-orchestrator",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/containers_apis.py",
"copies": "2",
"size": "7546",
"license": "apache-2.0",
"hash": -8481647413316948000,
"line_mean": 51.4097222222,
"line_max": 119,
"alpha_frac": 0.7178637689,
"autogenerated": false,
"ratio": 3.996822033898305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004916218513600779,
"num_lines": 144
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
class GatewayAPI(OrchestratorBase):
    """Test-suite helper wrapping the orchestrator's node-gateway endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client

    @catch_exception_decoration
    def list_nodes_gateways(self, nodeid):
        """List the gateways of node *nodeid*."""
        return self.orchestrator_client.nodes.ListGateways(nodeid=nodeid)

    @catch_exception_decoration
    def get_nodes_gateway(self, nodeid, gwname):
        """Fetch gateway *gwname* on node *nodeid*."""
        return self.orchestrator_client.nodes.GetGateway(nodeid=nodeid, gwname=gwname)

    @catch_exception_decoration_return
    def post_nodes_gateway(self, node_id, **kwargs):
        """Create a gateway with random name/domain; returns (response, data)."""
        data = {
            "name": self.random_string(),
            "domain": self.random_string(),
            "nics": [],
            "portforwards": [],
            "httpproxies": []
        }
        data = self.update_default_data(default_data=data, new_data=kwargs)
        response = self.orchestrator_client.nodes.CreateGW(nodeid=node_id, data=data)
        return response, data

    @catch_exception_decoration
    def update_nodes_gateway(self, nodeid, gwname, data):
        """Update a gateway definition."""
        return self.orchestrator_client.nodes.UpdateGateway(nodeid=nodeid, gwname=gwname, data=data)

    @catch_exception_decoration
    def delete_nodes_gateway(self, nodeid, gwname):
        """Delete one gateway."""
        response = self.orchestrator_client.nodes.DeleteGateway(nodeid=nodeid, gwname=gwname)
        return response

    @catch_exception_decoration
    def list_nodes_gateway_forwards(self, nodeid, gwname):
        """List a gateway's port forwards."""
        return self.orchestrator_client.nodes.GetGWForwards(nodeid=nodeid, gwname=gwname)

    @catch_exception_decoration
    def post_nodes_gateway_forwards(self, nodeid, gwname, data):
        """Create a port forward on a gateway."""
        return self.orchestrator_client.nodes.CreateGWForwards(nodeid=nodeid, gwname=gwname, data=data)

    @catch_exception_decoration
    def delete_nodes_gateway_forward(self, nodeid, gwname, forwardid):
        """Delete one port forward."""
        return self.orchestrator_client.nodes.DeleteGWForward(nodeid=nodeid, gwname=gwname, forwardid=forwardid)

    @catch_exception_decoration
    def list_nodes_gateway_dhcp_hosts(self, nodeid, gwname, interface):
        """List the DHCP hosts of a gateway interface."""
        return self.orchestrator_client.nodes.ListGWDHCPHosts(nodeid=nodeid, gwname=gwname, interface=interface)

    @catch_exception_decoration
    def post_nodes_gateway_dhcp_host(self, nodeid, gwname, interface, data):
        """Add a DHCP host to a gateway interface."""
        return self.orchestrator_client.nodes.AddGWDHCPHost(nodeid=nodeid, gwname=gwname, interface=interface,
                                                            data=data)

    @catch_exception_decoration
    def delete_nodes_gateway_dhcp_host(self, nodeid, gwname, interface, macaddress):
        """Remove a DHCP host by MAC address."""
        return self.orchestrator_client.nodes.DeleteDHCPHost(nodeid=nodeid, gwname=gwname, interface=interface,
                                                             macaddress=macaddress)

    @catch_exception_decoration
    def get_nodes_gateway_advanced_http(self, nodeid, gwname):
        """Fetch a gateway's raw HTTP (caddy) config."""
        return self.orchestrator_client.nodes.GetGWHTTPConfig(nodeid=nodeid, gwname=gwname)

    @catch_exception_decoration
    def post_nodes_gateway_advanced_http(self, nodeid, gwname, data):
        """Replace a gateway's raw HTTP config."""
        return self.orchestrator_client.nodes.SetGWHTTPConfig(nodeid=nodeid, gwname=gwname, data=data)

    @catch_exception_decoration
    def get_nodes_gateway_advanced_firewall(self, nodeid, gwname):
        """Fetch a gateway's raw firewall config."""
        return self.orchestrator_client.nodes.GetGWFWConfig(nodeid=nodeid, gwname=gwname)

    @catch_exception_decoration
    def post_nodes_gateway_advanced_firewall(self, nodeid, gwname, data):
        """Replace a gateway's raw firewall config."""
        return self.orchestrator_client.nodes.SetGWFWConfig(nodeid=nodeid, gwname=gwname, data=data)

    @catch_exception_decoration
    def post_nodes_gateway_start(self, nodeid, gwname):
        """Start one gateway."""
        return self.orchestrator_client.nodes.StartGateway(nodeid=nodeid, gwname=gwname, data={})

    @catch_exception_decoration
    def post_nodes_gateway_stop(self, nodeid, gwname):
        """Stop one gateway."""
        return self.orchestrator_client.nodes.StopGateway(nodeid=nodeid, gwname=gwname, data={})

    @catch_exception_decoration
    def list_nodes_gateway_httpproxies(self, nodeid, gwname):
        """List a gateway's HTTP proxies."""
        return self.orchestrator_client.nodes.ListHTTPProxies(nodeid=nodeid, gwname=gwname)

    # FIX: this method was defined twice with identical bodies; the second
    # definition silently shadowed the first. One copy kept.
    @catch_exception_decoration
    def get_nodes_gateway_httpproxy(self, nodeid, gwname, proxyid):
        """Fetch one HTTP proxy."""
        return self.orchestrator_client.nodes.GetHTTPProxy(nodeid=nodeid, gwname=gwname, proxyid=proxyid)

    @catch_exception_decoration
    def post_nodes_gateway_httpproxy(self, nodeid, gwname, data):
        """Create an HTTP proxy on a gateway."""
        return self.orchestrator_client.nodes.CreateHTTPProxies(nodeid=nodeid, gwname=gwname, data=data)

    @catch_exception_decoration
    def delete_nodes_gateway_httpproxy(self, nodeid, gwname, proxyid):
        """Delete one HTTP proxy."""
        return self.orchestrator_client.nodes.DeleteHTTPProxies(nodeid=nodeid, gwname=gwname, proxyid=proxyid)
| {
"repo_name": "g8os/grid",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/gateways_apis.py",
"copies": "2",
"size": "5125",
"license": "apache-2.0",
"hash": -6566751271621048000,
"line_mean": 46.0183486239,
"line_max": 112,
"alpha_frac": 0.7194146341,
"autogenerated": false,
"ratio": 3.5247592847317746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002147414489636392,
"num_lines": 109
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
class Storageclusters(OrchestratorBase):
    """Client helper for the orchestrator's storage-cluster endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client

    @catch_exception_decoration_return
    def post_storageclusters(self, nodes, **kwargs):
        """Deploy a single-server block cluster on *nodes*; returns (response, data)."""
        defaults = {
            "label": self.random_string(),
            "servers": 1,
            "driveType": 'ssd',
            "clusterType": "block",
            "nodes": nodes
        }
        payload = self.update_default_data(default_data=defaults, new_data=kwargs)
        response = self.orchestrator_client.storageclusters.DeployNewCluster(data=payload)
        return response, payload

    @catch_exception_decoration
    def get_storageclusters(self):
        """List all storage clusters."""
        api = self.orchestrator_client.storageclusters
        return api.ListAllClusters()

    @catch_exception_decoration
    def get_storageclusters_label(self, label):
        """Fetch info for cluster *label*."""
        api = self.orchestrator_client.storageclusters
        return api.GetClusterInfo(label=label)

    @catch_exception_decoration
    def delete_storageclusters_label(self, label):
        """Tear down cluster *label*."""
        api = self.orchestrator_client.storageclusters
        return api.KillCluster(label=label)
| {
"repo_name": "zero-os/0-orchestrator",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/storageclusters_apis.py",
"copies": "2",
"size": "1272",
"license": "apache-2.0",
"hash": 5715017894091412000,
"line_mean": 37.5454545455,
"line_max": 87,
"alpha_frac": 0.6996855346,
"autogenerated": false,
"ratio": 3.7970149253731345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001079214463052847,
"num_lines": 33
} |
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
class ZerotiersAPI(OrchestratorBase):
    """Client helper for the orchestrator's node ZeroTier endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = self.orchestrator_driver.orchestrator_client

    @catch_exception_decoration
    def get_nodes_zerotiers(self, nodeid):
        """List the ZeroTier networks joined by node *nodeid*."""
        api = self.orchestrator_client.nodes
        return api.ListZerotier(nodeid=nodeid)

    @catch_exception_decoration
    def get_nodes_zerotiers_zerotierid(self, nodeid, zerotierid):
        """Fetch one ZeroTier network of node *nodeid*."""
        api = self.orchestrator_client.nodes
        return api.GetZerotier(nodeid=nodeid, zerotierid=zerotierid)

    @catch_exception_decoration_return
    def post_nodes_zerotiers(self, nodeid, **kwargs):
        """Join a ZeroTier network (nwid via kwargs); returns (response, data)."""
        payload = self.update_default_data(default_data={'nwid': ''}, new_data=kwargs)
        response = self.orchestrator_client.nodes.JoinZerotier(nodeid=nodeid, data=payload)
        return response, payload

    @catch_exception_decoration
    def delete_nodes_zerotiers_zerotierid(self, nodeid, zerotierid):
        """Leave ZeroTier network *zerotierid* on node *nodeid*."""
        api = self.orchestrator_client.nodes
        return api.ExitZerotier(nodeid=nodeid, zerotierid=zerotierid)
| {
"repo_name": "g8os/grid",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/zerotiers_apis.py",
"copies": "2",
"size": "1180",
"license": "apache-2.0",
"hash": -1363093664574696400,
"line_mean": 42.7037037037,
"line_max": 96,
"alpha_frac": 0.7440677966,
"autogenerated": false,
"ratio": 3.3714285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5115496368028571,
"avg_score": null,
"num_lines": null
} |
from framework.orchestrator_apis import *
class NodesAPI:
    """Thin wrapper exposing the orchestrator client's node-level endpoints."""

    def __init__(self, orchestrator_driver):
        self.orchestrator_driver = orchestrator_driver
        self.orchestrator_client = orchestrator_driver.orchestrator_client

    @catch_exception_decoration
    def get_nodes(self):
        """List every known node."""
        return self.orchestrator_client.nodes.ListNodes()

    @catch_exception_decoration
    def get_nodes_nodeid(self, node_id):
        """Fetch a single node."""
        return self.orchestrator_client.nodes.GetNode(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_jobs(self, node_id):
        """List the jobs running on a node."""
        return self.orchestrator_client.nodes.ListNodeJobs(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_jobs_jobid(self, node_id, job_id):
        """Fetch one job of a node."""
        return self.orchestrator_client.nodes.GetNodeJob(nodeid=node_id,
                                                         jobid=job_id)

    @catch_exception_decoration
    def get_nodes_mounts(self, node_id):
        """List a node's mount points."""
        return self.orchestrator_client.nodes.GetNodeMounts(nodeid=node_id)

    @catch_exception_decoration
    def delete_nodes_nodeid_jobs(self, node_id):
        """Kill every job on a node."""
        return self.orchestrator_client.nodes.KillAllNodeJobs(nodeid=node_id)

    @catch_exception_decoration
    def delete_nodes_nodeid_jobs_jobid(self, node_id, job_id):
        """Kill one job on a node."""
        return self.orchestrator_client.nodes.KillNodeJob(nodeid=node_id,
                                                          jobid=job_id)

    @catch_exception_decoration
    def post_nodes_nodeid_ping(self, node_id):
        """Ping a node (empty body)."""
        return self.orchestrator_client.nodes.PingNode(nodeid=node_id, data={})

    @catch_exception_decoration
    def get_nodes_nodeid_state(self, node_id):
        """Fetch a node's state."""
        return self.orchestrator_client.nodes.GetNodeState(nodeid=node_id)

    @catch_exception_decoration
    def post_nodes_nodeid_reboot(self, node_id):
        """Reboot a node (empty body)."""
        return self.orchestrator_client.nodes.RebootNode(nodeid=node_id,
                                                         data={})

    @catch_exception_decoration
    def get_nodes_nodeid_cpus(self, node_id):
        """Fetch a node's CPU information."""
        return self.orchestrator_client.nodes.GetCPUInfo(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_disks(self, node_id):
        """Fetch a node's disk information."""
        return self.orchestrator_client.nodes.GetDiskInfo(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_mem(self, node_id):
        """Fetch a node's memory information."""
        return self.orchestrator_client.nodes.GetMemInfo(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_nics(self, node_id):
        """Fetch a node's NIC information."""
        return self.orchestrator_client.nodes.GetNicInfo(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_info(self, node_id):
        """Fetch a node's OS information."""
        return self.orchestrator_client.nodes.GetNodeOSInfo(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_processes(self, node_id):
        """List the processes on a node."""
        return self.orchestrator_client.nodes.ListNodeProcesses(nodeid=node_id)

    @catch_exception_decoration
    def get_nodes_nodeid_processes_processid(self, node_id, process_id):
        """Fetch one process of a node."""
        return self.orchestrator_client.nodes.GetNodeProcess(
            nodeid=node_id, processid=process_id)

    @catch_exception_decoration
    def delete_nodes_nodeid_process_processid(self, node_id, process_id):
        """Kill one process on a node."""
        return self.orchestrator_client.nodes.KillNodeProcess(
            nodeid=node_id, processid=process_id)
| {
"repo_name": "g8os/grid",
"path": "tests/0_orchestrator/test_suite/framework/orchestrator_apis/nodes_apis.py",
"copies": "2",
"size": "3147",
"license": "apache-2.0",
"hash": -1548097922767783400,
"line_mean": 38.835443038,
"line_max": 99,
"alpha_frac": 0.7299014935,
"autogenerated": false,
"ratio": 3.281543274244004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5011444767744004,
"avg_score": null,
"num_lines": null
} |
from framework.pages.loginPage import loginPage
from framework.core.webdriverfactory import WebDriverFactory
from framework.core.configuration import webdriver_configuration
import random
class testLogin():
    """End-to-end test: admin login plus poll creation on the demo site."""

    baseUrl = "http://twiindan.pythonanywhere.com/admin"

    @classmethod
    def setup_class(cls):
        # Idiom fix: classmethods receive the class, conventionally named
        # ``cls`` (the original used ``self``). One shared driver and login
        # page object are created for the whole class.
        wdf = WebDriverFactory(webdriver_configuration)
        cls.driver = wdf.getWebDriverInstance()
        cls.login_page = loginPage(cls.driver)

    def setup(self):
        # Navigate back to the login page before each test method.
        self.login_page.navigate()

    def testCreatePoll(self):
        """Log in and create a poll with a randomized question text."""
        self.login_page.locate_elements()
        main_page = self.login_page.login(username='user1', password='selenium')
        main_page.locate_elements()
        add_question_page = main_page.addQuestion()
        add_question_page.locate_elements()
        # Random suffix keeps repeated runs from colliding on question text.
        random_number = random.randint(1, 100000)
        questionText = "Question {}".format(random_number)
        add_question_page.setQuestionText(questionText)
        add_question_page.setNow()
        add_question_page.setChoicesText("Selenium", "Python", "Webpages")
        add_question_page.setChoiceVotes(0, 3, 1)
        add_question_page.savePoll()

    @classmethod
    def teardown_class(cls):
        # Release the browser once all tests in the class have run.
        cls.driver.quit()
| {
"repo_name": "twiindan/selenium_lessons",
"path": "04_Selenium/framework/tests/testCreatePoll.py",
"copies": "1",
"size": "1254",
"license": "apache-2.0",
"hash": 6186244008843701000,
"line_mean": 27.5,
"line_max": 80,
"alpha_frac": 0.6866028708,
"autogenerated": false,
"ratio": 3.943396226415094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5129999097215094,
"avg_score": null,
"num_lines": null
} |
from ...framework.parser import Parser
from ...framework.core.itemdata import ItemData
from ...framework.core.settings import settings
from ...framework.core.request import Request
import re
import logging
class FundParser(Parser):
    """Parse fund value rows out of an eastmoney response.

    On the first result page it also enqueues Fetcher requests for every
    remaining page so the crawler retrieves the full history.
    """

    def __init__(self):
        Parser.__init__(self)

    def doWork(self, response):
        """Return ItemData rows parsed from *response* (plus follow-up
        Request tasks when this is page 1); return [] on any error."""
        # One <tr> of the fund value table: date / value / total value.
        pattern = r"\<tr\>\<td\>(?P<date>[\d-]+)\</td\>\<td[^\>]*?\>(?P<value>[\d\.]+)\</td\>\<td[^\>]*?\>(?P<totalvalue>[\d\.]+)\</td\>"
        ptnCurPage = r"curpage:(?P<cur>\d+)"
        ptnTotalPage = r"pages:(?P<total>\d+)"
        result = []
        groups = ["date", "value", "totalvalue"]
        try:
            p = re.compile(pattern)
            iterator = p.finditer(response.responsedata)
            for match in iterator:
                output = ItemData(response.identifier, "Saver")
                output.addTags(response.tags)
                content = {}
                for g in groups:
                    content[g] = match.group(g)
                output.build(content)
                result.append(output)
            # Get the current page number; only page 1 schedules the rest,
            # so each page is fetched exactly once.
            curMath = re.search(ptnCurPage, response.responsedata)
            if curMath:
                curPage = curMath.group("cur")
                if int(curPage) == 1:
                    logging.warning("got first page for " + response.tags["code"])
                    m = re.search(ptnTotalPage, response.responsedata)
                    if m:
                        totalPage = int(m.group("total"))
                        # range() instead of the Python-2-only xrange().
                        for x in range(2, totalPage + 1):
                            t = Request(response.tags["code"], "Fetcher")
                            t.build(settings["baseurl"] + response.tags["code"] + "&page=" + str(x))
                            t.addTags({"name": response.tags["name"]})
                            result.append(t)
                        logging.warning("add %s page for %s", totalPage - 1, response.tags["code"])
            logging.warning("%s parsed content for %s", self.__class__.__name__, response.identifier)
        except Exception as excep:
            logging.error("%s.doWork() error: %s", self.__class__.__name__, excep)
            return []
        return result
| {
"repo_name": "H0w13/WebGrabber",
"path": "crawler/crawler/workers/eastmoney/fund_parser.py",
"copies": "1",
"size": "2264",
"license": "mit",
"hash": 2483542748376939000,
"line_mean": 44.28,
"line_max": 137,
"alpha_frac": 0.5128091873,
"autogenerated": false,
"ratio": 4.123861566484518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136670753784518,
"avg_score": null,
"num_lines": null
} |
from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoCadastrar(Pedido):
    """Request object for registering a course (curso).

    Reads every required field from the request body and exposes them
    through accessors; a missing or malformed body maps to HTTP 400.
    """

    def __init__(self, variaveis_do_ambiente):
        super(PedidoCadastrar, self).__init__(variaveis_do_ambiente)
        try:
            self.mec = self.corpo['mec']
            self.nome = self.corpo['nome']
            self.codigo = self.corpo['codigo']
            self.id_grau = self.corpo['id_grau']
            self.id_campus = self.corpo['id_campus']
            self.permanencia_minima = self.corpo['permanencia_minima']
            self.permanencia_maxima = self.corpo['permanencia_maxima']
            self.creditos_formatura = self.corpo['creditos_formatura']
            self.creditos_optativos_concentracao = self.corpo['creditos_optativos_concentracao']
            self.creditos_optativos_conexa = self.corpo['creditos_optativos_conexa']
            self.creditos_livres_maximo = self.corpo['creditos_livres_maximo']
        except (KeyError, TypeError):
            # Narrowed from a bare except: only a missing field (KeyError)
            # or a body that is not a mapping (TypeError) is "bad request".
            raise ErroNoHTTP(400)

    # Plain accessors over the parsed body fields.
    def getNome(self):
        return self.nome

    def getCodigo(self):
        return self.codigo

    def getId_grau(self):
        return self.id_grau

    def getId_campus(self):
        return self.id_campus

    def getPermanencia_minima(self):
        return self.permanencia_minima

    def getPermanencia_maxima(self):
        return self.permanencia_maxima

    def getCreditos_formatura(self):
        return self.creditos_formatura

    def getCreditos_optativos_concentracao(self):
        return self.creditos_optativos_concentracao

    def getCreditos_optativos_conexa(self):
        return self.creditos_optativos_conexa

    def getCreditos_livres_maximo(self):
        return self.creditos_livres_maximo

    def getMec(self):
        return self.mec
| {
"repo_name": "AEDA-Solutions/matweb",
"path": "backend/Models/Curso/PedidoCadastrar.py",
"copies": "1",
"size": "1805",
"license": "mit",
"hash": -3373175212301272000,
"line_mean": 31.8181818182,
"line_max": 108,
"alpha_frac": 0.6443213296,
"autogenerated": false,
"ratio": 2.8515007898894154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3995822119489415,
"avg_score": null,
"num_lines": null
} |
from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoEditar(Pedido):
    """Request object for editing a user (usuario).

    Reads every required field from the request body and exposes them
    through accessors; a missing or malformed body maps to HTTP 400.
    """

    def __init__(self, variaveis_do_ambiente):
        super(PedidoEditar, self).__init__(variaveis_do_ambiente)
        try:
            self.id = self.corpo['id']
            self.nome = self.corpo['nome']
            self.matricula = self.corpo['matricula']
            self.cpf = self.corpo['cpf']
            self.perfil = self.corpo['perfil']
            self.email = self.corpo['email']
            self.sexo = self.corpo['sexo']
            self.nome_pai = self.corpo['nome_pai']
            self.nome_mae = self.corpo['nome_mae']
            self.ano_conclusao = self.corpo['ano_conclusao']
            self.identidade = self.corpo['identidade']
            self.id_curso = self.corpo['id_curso']
            self.senha = self.corpo['senha']
        except (KeyError, TypeError):
            # Narrowed from a bare except: only a missing field (KeyError)
            # or a body that is not a mapping (TypeError) is "bad request".
            raise ErroNoHTTP(400)

    # Plain accessors over the parsed body fields.
    def getId(self):
        return self.id

    def getNome(self):
        return self.nome

    def getMatricula(self):
        return self.matricula

    def getCpf(self):
        return self.cpf

    def getPerfil(self):
        return self.perfil

    def getEmail(self):
        return self.email

    def getSexo(self):
        return self.sexo

    def getNome_pai(self):
        return self.nome_pai

    def getNome_mae(self):
        return self.nome_mae

    def getAno_conclusao(self):
        return self.ano_conclusao

    def getSenha(self):
        return self.senha

    def getIdentidade(self):
        return self.identidade

    def getId_curso(self):
        return self.id_curso
| {
"repo_name": "AEDA-Solutions/matweb",
"path": "backend/Models/Usuario/PedidoEditar.py",
"copies": "1",
"size": "1370",
"license": "mit",
"hash": -9211824504648739000,
"line_mean": 21.0967741935,
"line_max": 59,
"alpha_frac": 0.6912408759,
"autogenerated": false,
"ratio": 2.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36912408759,
"avg_score": null,
"num_lines": null
} |
from _Framework.SessionComponent import SessionComponent
class SpecialSessionComponent(SessionComponent):
    """ Special session subclass that handles ConfigurableButtons """
    # Cleanup: the large blocks of commented-out fired/playing-slot listener
    # code (disconnect/_reassign_tracks drafts) were removed; `!= None`
    # comparisons replaced with the idiomatic `is not None`.

    def __init__(self, num_tracks, num_scenes, parent):
        self._osd = None
        self._parent = parent
        SessionComponent.__init__(self, num_tracks, num_scenes)

    def link_with_track_offset(self, track_offset):
        """Re-link the session so its grid starts at *track_offset*."""
        assert track_offset >= 0
        if self._is_linked():
            self._unlink()
        self.set_offsets(track_offset, 0)
        self._link()

    def set_osd(self, osd):
        # Optional on-screen-display object; may stay None.
        self._osd = osd

    def _update_OSD(self):
        """Push the visible track names to the on-screen display, if any."""
        if self._osd is not None:
            self._osd.mode = "Session"
            # Blank every slot first so stale names never linger.
            for i in range(self._num_tracks):
                self._osd.attribute_names[i] = " "
                self._osd.attributes[i] = " "
            tracks = self.tracks_to_use()
            idx = 0
            for i in range(len(tracks)):
                if idx < self._num_tracks and len(tracks) > i + self._track_offset:
                    track = tracks[i + self._track_offset]
                    if track is not None:
                        self._osd.attribute_names[idx] = str(track.name)
                    else:
                        self._osd.attribute_names[idx] = " "
                    self._osd.attributes[idx] = " "
                    idx += 1
            self._osd.info[0] = " "
            self._osd.info[1] = " "
            self._osd.update()

    def unlink(self):
        if self._is_linked():
            self._unlink()

    def update(self):
        SessionComponent.update(self)
        # Refresh the OSD only while session mode (mode index 0) is active.
        if self._parent._main_mode_index == 0:
            self._update_OSD()

    def set_enabled(self, enabled):
        SessionComponent.set_enabled(self, enabled)
        if self._parent._main_mode_index == 0:
            self._update_OSD()

    def _reassign_tracks(self):
        SessionComponent._reassign_tracks(self)
        if self._parent._main_mode_index == 0:
            self._update_OSD()

    def _update_stop_clips_led(self, index):
        """Light the stop-clip button at *index* from its track's state."""
        if self.is_enabled() and self._stop_track_clip_buttons is not None:
            button = self._stop_track_clip_buttons[index]
            tracks_to_use = self.tracks_to_use()
            track_index = index + self.track_offset()
            if 0 <= track_index < len(tracks_to_use):
                track = tracks_to_use[track_index]
                if track.fired_slot_index == -2:
                    # -2 marks a triggered (fired but not yet playing) stop.
                    button.send_value(self._stop_clip_triggered_value)
                elif track.playing_slot_index >= 0:
                    button.send_value(21)
                else:
                    button.turn_off()
            else:
                # No track behind this button.
                button.send_value(4)
| {
"repo_name": "jim-cooley/abletonremotescripts",
"path": "remote-scripts/samples/Launchpad95/SpecialSessionComponent.py",
"copies": "1",
"size": "4139",
"license": "apache-2.0",
"hash": 4962641708259696000,
"line_mean": 32.9262295082,
"line_max": 87,
"alpha_frac": 0.6767335105,
"autogenerated": false,
"ratio": 2.6515054452274183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3828238955727418,
"avg_score": null,
"num_lines": null
} |
from framework.utils import iso8601format
from dateutil import parser
from website.project.metadata.utils import serialize_meta_schema
EMBARGO = 'embargo'
IMMEDIATE = 'immediate'
def serialize_user(user):
    """Return a minimal JSON-safe dict describing *user*."""
    return dict(full_name=user.fullname,
                username=user.username,
                id=user._id)
# TODO: Write and use APIv2 serializer for this
def serialize_draft_registration(draft, json_safe=True):
    """Serialize a draft registration to a plain dict.

    Datetime fields become ISO-8601 strings when *json_safe* is true and
    stay raw datetimes otherwise.
    """
    def _when(dt):
        # Honour the json_safe switch for every datetime field.
        return iso8601format(dt) if json_safe else dt

    approval = draft.approval
    registration_choice = approval.meta.get('registration_choice', None)
    if registration_choice == EMBARGO:
        embargo = _when(parser.parse(approval.meta['embargo_end_date']))
    else:
        embargo = IMMEDIATE

    flags = draft.flags
    return {
        'pk': draft._id,
        'initiator': serialize_user(draft.initiator),
        'registration_metadata': draft.registration_metadata,
        'registration_schema': serialize_meta_schema(draft.registration_schema),
        'initiated': _when(draft.datetime_initiated),
        'updated': _when(draft.datetime_updated),
        'submitted': _when(approval.initiation_date),
        'requires_approval': draft.requires_approval,
        'is_pending_approval': draft.is_pending_review,
        'is_approved': draft.is_approved,
        'is_rejected': draft.is_rejected,
        'notes': draft.notes,
        'proof_of_publication': flags.get('proof_of_publication'),
        'payment_sent': flags.get('payment_sent'),
        'assignee': flags.get('assignee'),
        'title': draft.registration_metadata['q1']['value'],
        'embargo': embargo,
    }
| {
"repo_name": "GageGaskins/osf.io",
"path": "admin/pre_reg/serializers.py",
"copies": "1",
"size": "1771",
"license": "apache-2.0",
"hash": -658498399363973200,
"line_mean": 37.5,
"line_max": 116,
"alpha_frac": 0.6798418972,
"autogenerated": false,
"ratio": 3.8086021505376344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9984039970869789,
"avg_score": 0.0008808153735689968,
"num_lines": 46
} |
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
import time
import logging
DESCRIPTION = "Runs a chain of commands on an agent server via SBD -i.e. for IDS testing-"
def run(PluginInfo):
    """Run a chain of commands on an agent server over an sbd session.

    For every argument combination produced by plugin_params.GetArgs, a
    command built as COMMAND_PREFIX + TEST + COMMAND_SUFIX is executed in an
    interactive shell connected via the RCE_SBD_Connection resource.  The
    connection is reused across iterations when ISHELL_REUSE_CONNECTION is
    'yes'.  Returns the accumulated HTML report fragment.
    """
    # ServiceLocator.get_component("config").Show()
    Content = DESCRIPTION + " Results:<br />"
    Iteration = 1 # Iteration counter initialisation
    plugin_params = ServiceLocator.get_component("plugin_params")
    config = ServiceLocator.get_component("config")
    # GetArgs expands REPEAT_DELIM-separated values into one Args dict per run.
    for Args in plugin_params.GetArgs({
        'Description': DESCRIPTION,
        'Mandatory': {
            'RHOST': config.Get('RHOST_DESCRIP'),
            'SBD_PORT': config.Get('SBD_PORT_DESCRIP'),
            'SBD_PASSWORD': config.Get('SBD_PASSWORD_DESCRIP'),
            'COMMAND_PREFIX': 'The command string to be pre-pended to the tests (i.e. /usr/lib/firefox... http...)',
        },
        'Optional': {
            'TEST': 'The test to be included between prefix and suffix',
            'COMMAND_SUFIX': 'The URL to be appended to the tests (i.e. ...whatever)',
            'ISHELL_REUSE_CONNECTION': config.Get(
                'ISHELL_REUSE_CONNECTION_DESCRIP'),
            'ISHELL_EXIT_METHOD': config.Get(
                'ISHELL_EXIT_METHOD_DESCRIP'),
            'ISHELL_DELAY_BETWEEN_COMMANDS': config.Get(
                'ISHELL_DELAY_BETWEEN_COMMANDS_DESCRIP'),
            'ISHELL_COMMANDS_BEFORE_EXIT': config.Get(
                'ISHELL_COMMANDS_BEFORE_EXIT_DESCRIP'),
            'ISHELL_COMMANDS_BEFORE_EXIT_DELIM': config.Get(
                'ISHELL_COMMANDS_BEFORE_EXIT_DELIM_DESCRIP'),
            'REPEAT_DELIM': config.Get('REPEAT_DELIM_DESCRIP')
        }}, PluginInfo):
        plugin_params.SetConfig(Args) # Sets the auxiliary plugin arguments as config
        REUSE_CONNECTION = (Args['ISHELL_REUSE_CONNECTION'] == 'yes')
        #print "REUSE_CONNECTION=" + str(REUSE_CONNECTION)
        DELAY_BETWEEN_COMMANDS = Args['ISHELL_DELAY_BETWEEN_COMMANDS']
        #print "Args="+str(Args)
        #print "'ISHELL_COMMANDS_BEFORE_EXIT_DELIM'=" + Args['ISHELL_COMMANDS_BEFORE_EXIT_DELIM']
        #break
        # Open a fresh shell on the first iteration, and on every iteration
        # when the connection is not being reused.
        if Iteration == 1 or not REUSE_CONNECTION:
            ServiceLocator.get_component("interactive_shell").Open({
                'ConnectVia': config.GetResources('RCE_SBD_Connection')
                , 'InitialCommands': None
                #[ Args['BROWSER_PATH'] + ' about:blank']
                , 'ExitMethod': Args['ISHELL_EXIT_METHOD']
                , 'CommandsBeforeExit': Args[
                    'ISHELL_COMMANDS_BEFORE_EXIT']
                , 'CommandsBeforeExitDelim': Args[
                    'ISHELL_COMMANDS_BEFORE_EXIT_DELIM']
                , 'RHOST': Args['RHOST']
                , 'RPORT': Args['SBD_PORT']
            }, PluginInfo)
        else:
            OWTFLogger.log("Reusing initial connection..")
        # Execute the assembled command inside the remote shell.
        ServiceLocator.get_component("interactive_shell").Run(
            Args['COMMAND_PREFIX'] + Args['TEST'] + Args['COMMAND_SUFIX'])
        OWTFLogger.log("Sleeping " + DELAY_BETWEEN_COMMANDS + " second(s) (increases reliability)..")
        time.sleep(int(DELAY_BETWEEN_COMMANDS))
        if not REUSE_CONNECTION:
            ServiceLocator.get_component("interactive_shell").Close(PluginInfo)
        #Content += ServiceLocator.get_component("plugin_helper").DrawCommandDump('Test Command', 'Output', ServiceLocator.get_component("config").GetResources('LaunchExploit_'+Args['CATEGORY']+"_"+Args['SUBCATEGORY']), PluginInfo, "") # No previous output
        Iteration += 1 # Increase Iteration counter
    if not ServiceLocator.get_component("interactive_shell").IsClosed(): # Ensure clean exit if reusing connection
        ServiceLocator.get_component("interactive_shell").Close(PluginInfo)
    return Content
| {
"repo_name": "mikefitz888/owtf",
"path": "plugins/auxiliary/rce/BASH_CommandChainer@OWTF-ARCE-004.py",
"copies": "2",
"size": "5697",
"license": "bsd-3-clause",
"hash": 3870335204891407000,
"line_mean": 80.3857142857,
"line_max": 256,
"alpha_frac": 0.420221169,
"autogenerated": false,
"ratio": 5.541828793774319,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6962049962774319,
"avg_score": null,
"num_lines": null
} |
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
"""
GREP Plugin for Credentials transport over an encrypted channel (OWASP-AT-001)
https://www.owasp.org/index.php/Testing_for_credentials_transport_%28OWASP-AT-001%29
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
import logging
DESCRIPTION = "Searches transaction DB for credentials protections"
def run(PluginInfo):
    """Disabled grep plugin for OWASP-AT-001 (credential transport).

    The intended logic — grep the cached transaction DB for password
    fields (RESPONSE_REGEXP_FOR_PASSWORDS) and flag transactions whose
    URL scheme is not https, then render them via
    plugin_helper.DrawResponseMatchesTables — was drafted against a
    pre-refactor transaction API and never finished.  Until it is
    reimplemented, the plugin reports no findings.
    """
    # TODO: Needs fixing — rewrite the password-field/https check against
    # the current transaction DB API (the old draft kept here as a dead
    # docstring has been removed; see repository history to recover it).
    return []
| {
"repo_name": "DarKnight24/owtf",
"path": "plugins/web/grep/Credentials_transport_over_an_encrypted_channel@OWTF-AT-001.py",
"copies": "2",
"size": "1937",
"license": "bsd-3-clause",
"hash": 2078377776845594400,
"line_mean": 54.3428571429,
"line_max": 180,
"alpha_frac": 0.7465152297,
"autogenerated": false,
"ratio": 3.761165048543689,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02491794327222824,
"num_lines": 35
} |
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "robots.txt analysis through third party sites"
def run(PluginInfo):
    """Passive robots.txt analysis via third-party resources."""
    plugin_helper = ServiceLocator.get_component("plugin_helper")
    resource = ServiceLocator.get_component("resource")
    Content = plugin_helper.RequestLinkList(
        'Passive Analysis Results',
        resource.GetResources('PassiveRobotsAnalysisHTTPRequests'),
        PluginInfo)
    Content += plugin_helper.ResourceLinkList(
        'Online Resources',
        resource.GetResources('PassiveRobotsAnalysisLinks'))
    # Try to retrieve robots.txt through every configured resource,
    # numbering each successfully processed copy.
    retrieved = 0
    for label, robots_url in resource.GetResources('PassiveRobots'):
        # Split the URL around '/robots.txt' to build link prefix/suffix
        # chunks for the disallowed entries.
        link_start, link_finish = robots_url.split('/robots.txt')
        link_start = link_start.strip()
        link_finish = link_finish.strip()
        # Prefer the cache (first arg True) for speed.
        transaction = ServiceLocator.get_component("requester").GetTransaction(
            True, robots_url)
        if transaction is not None and transaction.Found:
            Content += plugin_helper.ProcessRobots(
                PluginInfo, transaction.GetRawResponseBody(),
                link_start, link_finish,
                'robots%s.txt' % str(retrieved))
            retrieved += 1
        else:
            # Not found or unknown request error.
            message = "Could not be retrieved using resource: %s" % robots_url
            OWTFLogger.log(message)
            Content += plugin_helper.TransactionTableForURLList(
                True, [robots_url])
    return Content
| {
"repo_name": "DarKnight24/owtf",
"path": "plugins/web/passive/Spiders_Robots_and_Crawlers@OWTF-IG-001.py",
"copies": "2",
"size": "1646",
"license": "bsd-3-clause",
"hash": -8762600269936934000,
"line_mean": 52.0967741935,
"line_max": 143,
"alpha_frac": 0.6992709599,
"autogenerated": false,
"ratio": 4.521978021978022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001122330652691184,
"num_lines": 31
} |
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Sends a bunch of URLs through selenium"
CATEGORIES = ['RCE', 'SQLI', 'XSS', 'CHARSET']
def run(PluginInfo):
    """Send the category-specific URL vector file through selenium."""
    Content = []
    config = ServiceLocator.get_component("config")
    OWTFLogger.log(
        "WARNING: This plugin requires a small selenium installation, please run '%s' if you have issues" %
        config.FrameworkConfigGet('INSTALL_SCRIPT'))
    plugin_params = ServiceLocator.get_component("plugin_params")
    arg_spec = {
        'Description': DESCRIPTION,
        'Mandatory': {
            'BASE_URL': 'The URL to be pre-pended to the tests',
            'CATEGORY': 'Category to use (i.e. ' + ', '.join(sorted(CATEGORIES)) + ')',
        },
        'Optional': {
            'REPEAT_DELIM': config.FrameworkConfigGet('REPEAT_DELIM_DESCRIP'),
        },
    }
    for arg_set in plugin_params.GetArgs(arg_spec, PluginInfo):
        plugin_params.SetConfig(arg_set)
        # Each category has its own vector file of URLs to launch.
        vector_file = config.FrameworkConfigGet(
            "SELENIUM_URL_VECTORS_" + arg_set['CATEGORY'])
        launcher = ServiceLocator.get_component("selenium_handler").CreateURLLauncher({
            'BASE_URL': arg_set['BASE_URL'],
            'INPUT_FILE': vector_file,
        })
        launcher.Run()
    return Content
| {
"repo_name": "DarKnight24/owtf",
"path": "plugins/auxiliary/selenium/Selenium_URL_Launcher@OWTF-ASEL-001.py",
"copies": "2",
"size": "1304",
"license": "bsd-3-clause",
"hash": 429165021757510800,
"line_mean": 38.5151515152,
"line_max": 118,
"alpha_frac": 0.6510736196,
"autogenerated": false,
"ratio": 3.927710843373494,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5578784462973494,
"avg_score": null,
"num_lines": null
} |
from framework.utils import OWTFLogger
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
import time
import logging
DESCRIPTION = "Runs a chain of commands on an agent server via SBD -i.e. for IDS testing-"
def run(PluginInfo):
    """Run a chain of commands on an agent server over an sbd session.

    For every argument combination produced by plugin_params.GetArgs, a
    command built as COMMAND_PREFIX + TEST + COMMAND_SUFIX is executed in an
    interactive shell connected via the RCE_SBD_Connection resource.  The
    connection is reused across iterations when ISHELL_REUSE_CONNECTION is
    'yes'.  Returns the accumulated HTML report fragment.
    """
    # ServiceLocator.get_component("config").Show()
    Content = DESCRIPTION + " Results:<br />"
    Iteration = 1 # Iteration counter initialisation
    plugin_params = ServiceLocator.get_component("plugin_params")
    config = ServiceLocator.get_component("config")
    # GetArgs expands REPEAT_DELIM-separated values into one Args dict per run.
    for Args in plugin_params.GetArgs({
        'Description': DESCRIPTION,
        'Mandatory': {
            'RHOST': config.Get('RHOST_DESCRIP'),
            'SBD_PORT': config.Get('SBD_PORT_DESCRIP'),
            'SBD_PASSWORD': config.Get('SBD_PASSWORD_DESCRIP'),
            'COMMAND_PREFIX': 'The command string to be pre-pended to the tests (i.e. /usr/lib/firefox... http...)',
        },
        'Optional': {
            'TEST': 'The test to be included between prefix and suffix',
            'COMMAND_SUFIX': 'The URL to be appended to the tests (i.e. ...whatever)',
            'ISHELL_REUSE_CONNECTION': config.Get(
                'ISHELL_REUSE_CONNECTION_DESCRIP'),
            'ISHELL_EXIT_METHOD': config.Get(
                'ISHELL_EXIT_METHOD_DESCRIP'),
            'ISHELL_DELAY_BETWEEN_COMMANDS': config.Get(
                'ISHELL_DELAY_BETWEEN_COMMANDS_DESCRIP'),
            'ISHELL_COMMANDS_BEFORE_EXIT': config.Get(
                'ISHELL_COMMANDS_BEFORE_EXIT_DESCRIP'),
            'ISHELL_COMMANDS_BEFORE_EXIT_DELIM': config.Get(
                'ISHELL_COMMANDS_BEFORE_EXIT_DELIM_DESCRIP'),
            'REPEAT_DELIM': config.Get('REPEAT_DELIM_DESCRIP')
        }}, PluginInfo):
        plugin_params.SetConfig(Args) # Sets the auxiliary plugin arguments as config
        REUSE_CONNECTION = (Args['ISHELL_REUSE_CONNECTION'] == 'yes')
        #print "REUSE_CONNECTION=" + str(REUSE_CONNECTION)
        DELAY_BETWEEN_COMMANDS = Args['ISHELL_DELAY_BETWEEN_COMMANDS']
        #print "Args="+str(Args)
        #print "'ISHELL_COMMANDS_BEFORE_EXIT_DELIM'=" + Args['ISHELL_COMMANDS_BEFORE_EXIT_DELIM']
        #break
        # Open a fresh shell on the first iteration, and on every iteration
        # when the connection is not being reused.
        if Iteration == 1 or not REUSE_CONNECTION:
            ServiceLocator.get_component("interactive_shell").Open({
                'ConnectVia': config.GetResources('RCE_SBD_Connection')
                , 'InitialCommands': None
                #[ Args['BROWSER_PATH'] + ' about:blank']
                , 'ExitMethod': Args['ISHELL_EXIT_METHOD']
                , 'CommandsBeforeExit': Args[
                    'ISHELL_COMMANDS_BEFORE_EXIT']
                , 'CommandsBeforeExitDelim': Args[
                    'ISHELL_COMMANDS_BEFORE_EXIT_DELIM']
                , 'RHOST': Args['RHOST']
                , 'RPORT': Args['SBD_PORT']
            }, PluginInfo)
        else:
            OWTFLogger.log("Reusing initial connection..")
        # Execute the assembled command inside the remote shell.
        ServiceLocator.get_component("interactive_shell").Run(
            Args['COMMAND_PREFIX'] + Args['TEST'] + Args['COMMAND_SUFIX'])
        OWTFLogger.log("Sleeping " + DELAY_BETWEEN_COMMANDS + " second(s) (increases reliability)..")
        time.sleep(int(DELAY_BETWEEN_COMMANDS))
        if not REUSE_CONNECTION:
            ServiceLocator.get_component("interactive_shell").Close(PluginInfo)
        #Content += ServiceLocator.get_component("plugin_helper").DrawCommandDump('Test Command', 'Output', ServiceLocator.get_component("config").GetResources('LaunchExploit_'+Args['CATEGORY']+"_"+Args['SUBCATEGORY']), PluginInfo, "") # No previous output
        Iteration += 1 # Increase Iteration counter
    if not ServiceLocator.get_component("interactive_shell").IsClosed(): # Ensure clean exit if reusing connection
        ServiceLocator.get_component("interactive_shell").Close(PluginInfo)
    return Content
| {
"repo_name": "DePierre/owtf",
"path": "plugins/auxiliary/rce/SBD_CommandChainer@OWTF-ARCE-003.py",
"copies": "2",
"size": "5736",
"license": "bsd-3-clause",
"hash": 1706015538630278100,
"line_mean": 79.7887323944,
"line_max": 256,
"alpha_frac": 0.4232914923,
"autogenerated": false,
"ratio": 5.526011560693641,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009569504775323702,
"num_lines": 71
} |
from framework.utils import OWTFLogger
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
"""
GREP Plugin for Credentials transport over an encrypted channel (OWASP-AT-001)
https://www.owasp.org/index.php/Testing_for_credentials_transport_%28OWASP-AT-001%29
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
import logging
DESCRIPTION = "Searches transaction DB for credentials protections"
def run(PluginInfo):
    """Disabled grep plugin for OWASP-AT-001 (credential transport).

    The intended logic — grep the cached transaction DB for password
    fields (RESPONSE_REGEXP_FOR_PASSWORDS) and flag transactions whose
    URL scheme is not https, then render them via
    plugin_helper.DrawResponseMatchesTables — was drafted against a
    pre-refactor transaction API and never finished.  Until it is
    reimplemented, the plugin reports no findings.
    """
    # TODO: Needs fixing — rewrite the password-field/https check against
    # the current transaction DB API (the old draft kept here as a dead
    # docstring has been removed; see repository history to recover it).
    return []
| {
"repo_name": "sharad1126/owtf",
"path": "plugins/web/grep/Credentials_transport_over_an_encrypted_channel@OWTF-AT-001.py",
"copies": "3",
"size": "1976",
"license": "bsd-3-clause",
"hash": -4314745082338454500,
"line_mean": 53.8888888889,
"line_max": 180,
"alpha_frac": 0.7489878543,
"autogenerated": false,
"ratio": 3.763809523809524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6012797378109525,
"avg_score": null,
"num_lines": null
} |
from francesinhas.apps.restaurants.models import User, Rating, Restaurant
from django.core.exceptions import ObjectDoesNotExist
from math import *
from operator import itemgetter
####
# Users
####
def sim_distance(user1, user2):
    """Similarity score between two users: 1 / (1 + sum of |r1 - r2|)
    over the restaurants both users have rated (ratings scaled to 0..1).
    Returns 0 when the users share no rated restaurant.
    """
    shared = []
    for rating in Rating.objects.filter(user=user1):
        try:
            other_rating = Rating.objects.filter(user=user2).get(restaurant=rating.restaurant)
        except ObjectDoesNotExist:
            continue
        shared.append(other_rating.restaurant)
    # No restaurants in common -> no basis for similarity
    if not shared:
        return 0
    # Accumulate the absolute rating differences over the shared restaurants
    total_diff = 0.0
    for restaurant in shared:
        v1 = float(Rating.objects.filter(user=user1).get(restaurant=restaurant).value) / 100.0
        v2 = float(Rating.objects.filter(user=user2).get(restaurant=restaurant).value) / 100.0
        total_diff += sqrt(pow(v1 - v2, 2))
    return 1 / (1 + total_diff)
# Gets recommendations for a user by using a weighted average
# of every other user's rankings
def get_recommendations(user, similarity=sim_distance):
totals = {}
simSums = {}
prefs = {}
prefs[user] = Rating.objects.filter(user=user)
for other in User.objects.all():
if other == user: # don't compare "me" (user) to "myself" (user = other)
continue
sim = similarity(user, other)
if sim <= 0: # ignore scores of zero or lower
continue
prefs[other] = Rating.objects.filter(user=other)
for item in prefs[other]:
r = Rating.objects.filter(user=other).get(restaurant=item.restaurant)
try: # only score restaurents "I" (user) haven't score
t = Rating.objects.filter(user=user).get(restaurant=item.restaurant)
except ObjectDoesNotExist:
# Similarity * Score
totals.setdefault(r.restaurant, 0)
totals[r.restaurant] += \
Rating.objects.filter(user=other).get(restaurant=r.restaurant).value * sim
# Sum of similarities
simSums.setdefault(r.restaurant, 0)
simSums[r.restaurant] += sim
# Create the normalized list
rankings = [(total/simSums[item], item) for item, total in totals.items()]
rankings.sort()
rankings.reverse()
return rankings
######
# Restaurants
#####
def rest_sim_distance(rest1, rest2):
    """Similarity score between two restaurants: 1 / (1 + sum of |r1 - r2|)
    over the users that rated both (ratings scaled to 0..1).
    Returns 0 when no user rated both restaurants.
    """
    users = []
    for item in Rating.objects.filter(restaurant=rest1):
        try:
            r = Rating.objects.filter(restaurant=rest2).get(user=item.user)
        except ObjectDoesNotExist:
            pass
        else:
            # (debug `print r.user` removed)
            users.append(r.user)
    # if they have no ratings in common, return 0
    if len(users) == 0:
        return 0
    # Add up the absolute rating differences over the common users
    sumSq = 0.0
    for user in users:
        r1 = float(Rating.objects.filter(restaurant=rest1).get(user=user).value) / 100.0
        r2 = float(Rating.objects.filter(restaurant=rest2).get(user=user).value) / 100.0
        sumSq += sqrt(pow(r1 - r2, 2))
    return 1 / (1 + sumSq)
def calculate_similar_restaurants(n=5):
    """Build a dict mapping each restaurant to its `n` most similar
    other restaurants, as (restaurant, similarity) pairs sorted by
    decreasing similarity.
    """
    result = {}
    all_restaurants = Restaurant.objects.all()
    for rest in all_restaurants:
        # Score every other restaurant against this one
        similarities = {}
        for candidate in all_restaurants:
            if candidate == rest:
                continue
            similarities[candidate] = rest_sim_distance(rest, candidate)
        ranked = sorted(similarities.iteritems(), key=itemgetter(1), reverse=True)
        result[rest] = ranked[0:n]
    return result
def get_recommended_restaurants(user, sims):
    """Recommend unrated restaurants for `user` from the item-item
    similarity table `sims` (as built by calculate_similar_restaurants).
    Returns (score, restaurant) tuples, best first.
    """
    already_rated = [rating.restaurant for rating in Rating.objects.filter(user=user)]
    weighted_scores = {}
    similarity_totals = {}
    for rest in already_rated:
        for rest2, sim in sims[rest]:
            # Skip restaurants the user already rated and zero similarities
            if rest2 in already_rated or sim == 0.0:
                continue
            # Weighted sum of rating times similarity
            weighted_scores.setdefault(rest2, 0)
            user_rating = Rating.objects.get(user=user, restaurant=rest)
            weighted_scores[rest2] += sim * user_rating.value
            # Sum of all the similarities
            similarity_totals.setdefault(rest2, 0)
            similarity_totals[rest2] += sim
    # Normalize each total by its similarity weight to get an average
    rankings = [(score / similarity_totals[item], item)
                for item, score in weighted_scores.items()]
    rankings.sort()
    rankings.reverse()
    return rankings
"repo_name": "lrei/magical_code",
"path": "francesinhas.py",
"copies": "1",
"size": "5013",
"license": "mit",
"hash": -2925858826258733000,
"line_mean": 33.1088435374,
"line_max": 90,
"alpha_frac": 0.6130061839,
"autogenerated": false,
"ratio": 3.699630996309963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9674161786093654,
"avg_score": 0.027695078823261693,
"num_lines": 147
} |
from frappe.installer import create_user_settings_table
from frappe.model.utils.user_settings import update_user_settings
import frappe, json
from six import iteritems
def execute():
    """Patch: rename the legacy __ListSettings table to __UserSettings and
    migrate stored list settings into the new per-view format, then rebuild
    each system user's saved list columns from their defaults.
    """
    if frappe.db.table_exists("__ListSettings"):
        for us in frappe.db.sql('''select user, doctype, data from __ListSettings''', as_dict=True):
            try:
                data = json.loads(us.data)
            except Exception:
                # skip rows with corrupt / non-JSON data
                continue
            if 'List' in data:
                # already migrated to the new nested format
                continue
            if 'limit' in data:
                # 'limit' was renamed to 'page_length'
                data['page_length'] = data['limit']
                del data['limit']
            new_data = dict(List=data)
            new_data = json.dumps(new_data)
            frappe.db.sql('''update __ListSettings
                set data=%(new_data)s
                where user=%(user)s
                and doctype=%(doctype)s''',
                {'new_data': new_data, 'user': us.user, 'doctype': us.doctype})
        frappe.db.sql("RENAME TABLE __ListSettings to __UserSettings")
    elif not frappe.db.table_exists("__UserSettings"):
        create_user_settings_table()

    for user in frappe.db.get_all('User', {'user_type': 'System User'}):
        defaults = frappe.defaults.get_defaults_for(user.name)
        for key, value in iteritems(defaults):
            if key.startswith('_list_settings:'):
                doctype = key.replace('_list_settings:', '')
                columns = ['`tab{1}`.`{0}`'.format(*c) for c in json.loads(value)]
                # Drop aliased name columns.  The original removed items from
                # `columns` while iterating it, which silently skips elements;
                # filter with a comprehension instead.
                columns = [col for col in columns if "name as" not in col]
                update_user_settings(doctype, {'fields': columns})
"repo_name": "bcornwellmott/frappe",
"path": "frappe/patches/v8_0/rename_listsettings_to_usersettings.py",
"copies": "10",
"size": "1392",
"license": "mit",
"hash": -2839899411395433500,
"line_mean": 29.9555555556,
"line_max": 94,
"alpha_frac": 0.6587643678,
"autogenerated": false,
"ratio": 3.2222222222222223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8880986590022222,
"avg_score": null,
"num_lines": null
} |
from frappe.model.utils.user_settings import update_user_settings
import frappe, json
from six import iteritems
def execute():
    """Patch: rename the legacy __ListSettings table to __UserSettings and
    migrate stored list settings into the new per-view format, then rebuild
    each system user's saved list columns from their defaults.
    """
    if frappe.db.table_exists("__ListSettings"):
        for us in frappe.db.sql('''select user, doctype, data from __ListSettings''', as_dict=True):
            try:
                data = json.loads(us.data)
            except Exception:
                # skip rows with corrupt / non-JSON data
                continue
            if 'List' in data:
                # already migrated to the new nested format
                continue
            if 'limit' in data:
                # 'limit' was renamed to 'page_length'
                data['page_length'] = data['limit']
                del data['limit']
            new_data = dict(List=data)
            new_data = json.dumps(new_data)
            frappe.db.sql('''update __ListSettings
                set data=%(new_data)s
                where user=%(user)s
                and doctype=%(doctype)s''',
                {'new_data': new_data, 'user': us.user, 'doctype': us.doctype})
        frappe.db.sql("RENAME TABLE __ListSettings to __UserSettings")
    elif not frappe.db.table_exists("__UserSettings"):
        # NOTE(review): this copy calls frappe.db.create_user_settings_table();
        # other copies of this patch import create_user_settings_table from
        # frappe.installer — confirm frappe.db actually exposes this method.
        frappe.db.create_user_settings_table()

    for user in frappe.db.get_all('User', {'user_type': 'System User'}):
        defaults = frappe.defaults.get_defaults_for(user.name)
        for key, value in iteritems(defaults):
            if key.startswith('_list_settings:'):
                doctype = key.replace('_list_settings:', '')
                columns = ['`tab{1}`.`{0}`'.format(*c) for c in json.loads(value)]
                # Drop aliased name columns.  The original removed items from
                # `columns` while iterating it, which silently skips elements;
                # filter with a comprehension instead.
                columns = [col for col in columns if "name as" not in col]
                update_user_settings(doctype, {'fields': columns})
"repo_name": "RicardoJohann/frappe",
"path": "frappe/patches/v8_0/rename_listsettings_to_usersettings.py",
"copies": "1",
"size": "1351",
"license": "mit",
"hash": -492434394178933200,
"line_mean": 29.0444444444,
"line_max": 94,
"alpha_frac": 0.6506291636,
"autogenerated": false,
"ratio": 3.171361502347418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9162375507279861,
"avg_score": 0.03192303173351124,
"num_lines": 45
} |
from frappe.__version__ import __version__

# App metadata
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Web Notes Technologies Pvt. Ltd."
app_description = "Full Stack Web Application Framework in Python"  # fixed typo: "Framwork"
app_icon = "assets/frappe/images/frappe.svg"
app_version = __version__
app_color = "#3498db"
app_email = "support@frappe.io"

# Install hooks
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"

# website
app_include_js = "assets/js/frappe.min.js"
app_include_css = [
    "assets/frappe/css/splash.css",
    "assets/css/frappe.css"
]
web_include_js = [
    "assets/js/frappe-web.min.js",
    "website_script.js"
]
web_include_css = [
    "assets/css/frappe-web.css",
    "style_settings.css"
]
website_clear_cache = "frappe.website.doctype.website_group.website_group.clear_cache"
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
website_generators = ["Web Page", "Blog Post", "Website Group", "Blog Category"]

# permissions
permission_query_conditions = {
    "Event": "frappe.core.doctype.event.event.get_permission_query_conditions",
    "ToDo": "frappe.core.doctype.todo.todo.get_permission_query_conditions",
    "User": "frappe.core.doctype.user.user.get_permission_query_conditions"
}
has_permission = {
    "Event": "frappe.core.doctype.event.event.has_permission",
    "ToDo": "frappe.core.doctype.todo.todo.has_permission",
    "User": "frappe.core.doctype.user.user.has_permission"
}

# document lifecycle hooks ("*" applies to every doctype)
doc_events = {
    "*": {
        "after_insert": "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts",
        "validate": "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts",
        "on_update": [
            "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
            "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts"
        ],
        "after_rename": "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
        "on_submit": "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts",
        "on_cancel": [
            "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
            "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts"
        ],
        "on_trash": "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications"
    },
    "Website Route Permission": {
        "on_update": "frappe.website.doctype.website_group.website_group.clear_cache_on_doc_event"
    }
}

# scheduler jobs by frequency
scheduler_events = {
    "all": [
        "frappe.utils.email_lib.bulk.flush",
        # NOTE(review): "multitenanct" looks misspelled, but this string must
        # match the actual function name in frappe.utils.email_lib.bulk —
        # verify before renaming.
        "frappe.utils.email_lib.bulk.multitenanct"
    ],
    "daily": [
        "frappe.utils.email_lib.bulk.clear_outbox",
        "frappe.core.doctype.notification_count.notification_count.clear_notifications",
        "frappe.core.doctype.event.event.send_event_digest",
        "frappe.sessions.clear_expired_sessions",
        "frappe.core.doctype.email_alert.email_alert.trigger_daily_alerts",
    ],
    "hourly": [
        "frappe.website.doctype.website_group.website_group.clear_event_cache"
    ]
}

mail_footer = "frappe.core.doctype.outgoing_email_settings.outgoing_email_settings.get_mail_footer"
| {
"repo_name": "rohitw1991/smarttailorfrappe",
"path": "frappe/hooks.py",
"copies": "1",
"size": "3169",
"license": "mit",
"hash": -1079961956064160600,
"line_mean": 33.4456521739,
"line_max": 106,
"alpha_frac": 0.7390343957,
"autogenerated": false,
"ratio": 3.006641366223909,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4245675761923909,
"avg_score": null,
"num_lines": null
} |
from frasco_admin import AdminBlueprint
from frasco import current_app, current_context, abort, request, redirect, url_for
from frasco_models.admin import create_model_admin_blueprint
from frasco_forms.form import wtfields
import inflection
def create_admin_blueprint(users):
    """Build the admin blueprint for the users feature.

    Delegates listing to the generic model admin blueprint, but overrides the
    create/edit views so that user creation goes through users.signup() and
    adds a password-reset action.
    """
    username_col = users.options['username_column']
    email_col = users.options['email_column']
    list_columns = [username_col]
    form_fields = [username_col]
    search_query_default_field = [username_col]
    # Show/search/edit the email separately only when it is a distinct column
    if username_col != email_col:
        for collection in (list_columns, form_fields, search_query_default_field):
            collection.append(email_col)
    list_columns += [
        ('signup_at', 'Signup date'),
        ('last_login_at', 'Last login date')]
    form_fields.append(('password', wtfields.PasswordField()))
    bp = create_model_admin_blueprint("users", __name__, users.model, title="Users", menu="Users",
        icon="fa-users", with_create=False, can_create=True, with_edit=False, can_edit=True,
        search_query_default_field=search_query_default_field, list_columns=list_columns,
        form_fields=form_fields)

    @bp.view("/create", template="admin/models_default/create.html", methods=['GET', 'POST'])
    def create():
        # Create via users.signup() so signup side effects apply, without logging in
        form = bp.get_form_class()()
        current_context['form'] = form
        if form.validate_on_submit():
            new_user = users.signup(form=form, login_user=False)
            return redirect(url_for('.edit', id=new_user.id))

    @bp.view("/<id>", template="admin/models_default/edit.html", methods=['GET', 'POST'])
    def edit(id):
        user = users.query.get_or_404(id)
        form = bp.get_form_class()(obj=user)
        current_context['obj'] = user
        current_context['admin_section_title'] = "Edit user #%s" % user.id
        current_context['form'] = form
        current_context['edit_actions'] = [
            ('Reset password', '.reset_password', {}),
            ('Delete user', '.delete', {'style': 'danger'})
        ]
        if form.validate_on_submit():
            users.update_user_from_form(user, form)

    @bp.route("/<id>/reset-password")
    def reset_password(id):
        user = users.query.get_or_404(id)
        users.gen_reset_password_token(user)
        return redirect(url_for('.edit', id=id))

    return bp
"repo_name": "frascoweb/frasco-users",
"path": "frasco_users/admin.py",
"copies": "1",
"size": "2251",
"license": "mit",
"hash": 2079617371318357200,
"line_mean": 38.5087719298,
"line_max": 98,
"alpha_frac": 0.6277210129,
"autogenerated": false,
"ratio": 3.5062305295950154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9616859233929405,
"avg_score": 0.003418461713122131,
"num_lines": 57
} |
from frasco_admin import AdminBlueprint
from frasco import current_app, current_context, abort, request, redirect, url_for
from frasco_models.form import create_form_class_from_model
import inflection
def create_model_admin_blueprint(name, package, model, title=None, menu=None, icon=None, template_folder=None,
                                 list_columns=None, search_query_default_field=None, edit_actions=None,
                                 with_create=True, with_edit=True, with_delete=True, url_prefix=None,
                                 form_fields=None, form_fields_specs=None, form_exclude_fields=None,
                                 filters=None, list_actions=None, can_edit=None, can_create=None):
    """Create a generic CRUD admin blueprint for `model`.

    Registers an index (list) view, and optionally create/edit/delete views,
    under `url_prefix` (defaults to "/<name>").  Several options can be
    overridden per-model via class attributes (__admin_list_columns__,
    __admin_filters__, __admin_form_fields__, ...).  Returns the blueprint;
    a `get_form_class` helper is attached to it so callers can reuse the
    auto-generated form (see frasco_users.admin).
    """
    if not url_prefix:
        url_prefix = "/%s" % name
    bp = AdminBlueprint("admin_%s" % name, package, url_prefix=url_prefix,
        template_folder=template_folder)
    # Custom template folders use per-blueprint templates; otherwise fall back
    # to the shared "models_default" templates.
    tpl_dir = name if template_folder else "models_default"

    def get_form_class():
        # Lazily build (and cache on the model) a WTForms class from the model
        if not getattr(model, '__admin_form__', None):
            model.__admin_form__ = create_form_class_from_model(model,
                fields=getattr(model, '__admin_form_fields__', form_fields),
                fields_specs=getattr(model, '__admin_form_fields_specs__', form_fields_specs),
                exclude_fields=getattr(model, '__admin_form_exclude_fields__', form_exclude_fields))
        return model.__admin_form__
    bp.get_form_class = get_form_class

    # Model-level overrides take precedence over the keyword arguments
    if hasattr(model, '__admin_search_query_default_field__'):
        search_query_default_field = model.__admin_search_query_default_field__
    if hasattr(model, '__admin_list_columns__'):
        list_columns = model.__admin_list_columns__
    if hasattr(model, '__admin_filters__'):
        filters = model.__admin_filters__
    if not edit_actions:
        edit_actions = []
    if not list_actions:
        list_actions = []
    if not filters:
        filters = {}
    # can_create / can_edit hold the endpoint name of the target view
    # (True means "use the view registered by this blueprint")
    if can_create is True or (can_create is None and with_create):
        can_create = ".create"
    if can_edit is True or (can_edit is None and with_edit):
        can_edit = ".edit"
    if with_delete:
        edit_actions.append(('Delete', '.delete', {'style': 'danger'}))

    @bp.view("/", template="admin/%s/index.html" % tpl_dir, admin_title=title, admin_menu=menu, admin_menu_icon=icon)
    def index():
        columns = list_columns
        if not columns:
            # No explicit columns: derive them from the model's fields.
            # NOTE: the loop variable `name` shadows the outer `name` parameter.
            columns = []
            for name, _ in current_app.features.models.backend.inspect_fields(model):
                columns.append((name, inflection.humanize(name)))
        q = dict(order_by=request.args.get('sort', 'id'), **filters)
        s = request.args.get('search')
        if s:
            # "#<id>" searches by primary key, anything else is a text search
            if s.startswith('#'):
                q['id'] = s[1:]
            else:
                q['search_query'] = s
                q['search_query_default_field'] = search_query_default_field
        current_context['actions'] = []
        if can_create:
            current_context['actions'].append(('Create', url_for(can_create)))
        for label, url in list_actions:
            if callable(url):
                url = url()
            current_context['actions'].append((label, url))
        current_context['objs'] = current_app.features.models.find_all(model, paginate=15, **q)
        current_context['model_fields'] = [i[0] if isinstance(i, tuple) else i for i in columns]
        current_context['table_headers'] = [i[1] if isinstance(i, tuple) else inflection.humanize(i) for i in columns]
        current_context['can_create'] = can_create
        current_context['can_edit'] = can_edit

    if with_create:
        @bp.view("/create", template="admin/%s/create.html" % tpl_dir, methods=['GET', 'POST'])
        def create():
            form = get_form_class()()
            current_context['form'] = form
            if form.validate_on_submit():
                obj = current_app.features.models.save_from_form(model=model, form=form)
                return redirect(url_for('.edit', id=obj.id))

    if with_edit:
        @bp.view("/<id>", template="admin/%s/edit.html" % tpl_dir, methods=['GET', 'POST'])
        def edit(id):
            obj = current_app.features.models.query(model).get_or_404(id)
            form = get_form_class()(obj=obj)
            current_context['obj'] = obj
            current_context['admin_section_title'] = "Edit #%s" % obj.id
            current_context['form'] = form
            current_context['edit_actions'] = edit_actions
            if form.validate_on_submit():
                current_app.features.models.save_from_form(obj=obj, form=form)

    if with_delete:
        @bp.route("/<id>/delete")
        def delete(id):
            obj = current_app.features.models.query(model).get_or_404(id)
            current_app.features.models.backend.remove(obj)
            return redirect(url_for('.index'))

    return bp
"repo_name": "frascoweb/frasco-models",
"path": "frasco_models/admin/__init__.py",
"copies": "1",
"size": "4885",
"license": "mit",
"hash": 6168638680183471000,
"line_mean": 46.4368932039,
"line_max": 118,
"alpha_frac": 0.5881269191,
"autogenerated": false,
"ratio": 3.7576923076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9832479804329453,
"avg_score": 0.002667884492570966,
"num_lines": 103
} |
from frasco.ext import get_extension_state
from frasco.models import as_transaction
from frasco.billing.invoicing import create_invoice
from frasco.billing.eu_vat import is_eu_country, get_vat_rate, should_charge_vat
import datetime
def create_invoice_from_charge(charge, obj=None, lines=None, tax_rate=None, tax_amount=None):
    """Create a local invoice record from a Stripe Charge.

    :param charge: Stripe Charge object; amounts are in the smallest currency
                   unit (cents), hence the / 100.0 conversions below
    :param obj: optional customer object used to fill billing details
    :param lines: optional list of line-item dicts (amount, description, ...)
    :param tax_rate: percentage, or the string "eu_vat" to derive it from the
                     customer's country (assumes prices are VAT-inclusive)
    :param tax_amount: explicit tax amount; takes precedence over tax_rate
    """
    state = get_extension_state('frasco_stripe')
    with create_invoice(**state.options['invoice_ref_kwargs']) as (invoice, InvoiceItem):
        invoice.currency = charge.currency.upper()
        invoice.subtotal = charge.amount / 100.0
        invoice.total = charge.amount / 100.0
        invoice.description = charge.description
        invoice.issued_at = datetime.datetime.fromtimestamp(charge.created)
        invoice.charge_id = charge.id
        invoice.paid = charge.status == "succeeded"
        if obj is not None:
            _fill_invoice_from_obj(invoice, obj)
        # NOTE(review): this branch reads obj.eu_vat_number, so it assumes obj
        # is not None whenever tax_rate == "eu_vat" — confirm with callers.
        if tax_rate == "eu_vat":
            if is_eu_country(invoice.country):
                tax_rate = get_vat_rate(invoice.country)
                if not should_charge_vat(invoice.country, obj.eu_vat_number):
                    tax_rate = None
            else:
                tax_rate = None
        if tax_amount:
            invoice.tax_amount = tax_amount
            invoice.subtotal = invoice.total - tax_amount
        elif tax_rate:
            # Total is tax-inclusive: back out the pre-tax subtotal
            invoice.subtotal = invoice.total * (100 / (100 + tax_rate));
            invoice.tax_amount = invoice.total - invoice.subtotal
        if lines:
            for line in lines:
                item = InvoiceItem()
                item.amount = line['amount']
                item.quantity = line.get('quantity', 1)
                item.currency = line.get('currency', charge.currency.upper())
                item.description = line['description']
                item.tax_amount = line.get('tax_amount', (line['amount'] * tax_rate / 100) if tax_rate else 0)
                item.tax_rate = line.get('tax_rate', tax_rate or 0)
                invoice.items.append(item)
def create_invoice_from_stripe(obj, stripe_invoice):
    """Mirror a Stripe Invoice (and its line items) as a local invoice record.

    :param obj: customer object used to fill billing details
    :param stripe_invoice: Stripe Invoice object; amounts are in the smallest
                           currency unit (cents), hence the / 100.0 conversions
    """
    state = get_extension_state('frasco_stripe')
    with create_invoice(**state.options['invoice_ref_kwargs']) as (invoice, InvoiceItem):
        _fill_invoice_from_obj(invoice, obj)
        invoice.external_id = stripe_invoice.id
        invoice.currency = stripe_invoice.currency.upper()
        invoice.subtotal = stripe_invoice.subtotal / 100.0
        invoice.total = stripe_invoice.total / 100.0
        invoice.tax_amount = stripe_invoice.tax / 100.0 if stripe_invoice.tax else None
        invoice.description = stripe_invoice.description
        invoice.issued_at = datetime.datetime.fromtimestamp(stripe_invoice.created)
        invoice.paid = stripe_invoice.paid
        invoice.charge_id = stripe_invoice.charge
        for line in stripe_invoice.lines.data:
            item = InvoiceItem()
            item.external_id = line.id
            item.amount = line.amount / 100.0
            item.tax_amount = 0
            item.tax_rate = 0
            item.quantity = line.quantity
            item.currency = line.currency
            item.description = line.description or ''
            if line.tax_amounts:
                # NOTE(review): unlike every other amount in this function,
                # tax_amounts[0].amount is NOT divided by 100 here — if Stripe
                # reports it in cents this stores a value 100x too large; confirm.
                item.tax_amount = line.tax_amounts[0].amount
                item.tax_rate = line.tax_rates[0].amount if line.tax_rates else stripe_invoice.default_tax_rates[0].percentage
            invoice.items.append(item)
def _fill_invoice_from_obj(invoice, obj):
invoice.customer = obj
invoice.email = getattr(obj, obj.__stripe_email_property__)
if getattr(obj, '__has_stripe_billing_fields__', False):
invoice.name = obj.billing_name
invoice.address_line1 = obj.billing_address_line1
invoice.address_line2 = obj.billing_address_line2
invoice.address_city = obj.billing_address_city
invoice.address_state = obj.billing_address_state
invoice.address_zip = obj.billing_address_zip
invoice.address_country = obj.billing_address_country
if obj.billing_country:
invoice.country = obj.billing_country.upper()
elif obj.billing_address_country:
invoice.country = obj.billing_address_country.upper()
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/billing/stripe/invoice.py",
"copies": "1",
"size": "4228",
"license": "mit",
"hash": 6085663300027028000,
"line_mean": 44.4623655914,
"line_max": 126,
"alpha_frac": 0.630794702,
"autogenerated": false,
"ratio": 3.9003690036900367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015033863292605388,
"num_lines": 93
} |
from frasco.ext import get_extension_state
from frasco.models import db
from ..password import check_password
# Registered authentication handlers, tried in order by authenticate()
AUTH_HANDLERS = []


def register_authentification_handler(func=None, only=False):
    """Register an authentication handler.

    Usable as a plain decorator (@register_authentification_handler), as a
    decorator factory (@register_authentification_handler(only=True)), or as
    a direct call.  When `only` is true, previously registered handlers are
    discarded first.  The handler is returned unchanged.
    """
    def decorator(f):
        if only:
            del AUTH_HANDLERS[:]
        AUTH_HANDLERS.append(f)
        return f
    return decorator(func) if func else decorator
def authenticate(identifier, password):
    """Authenticate a user by identifier (email or username) and password.

    Custom handlers registered via register_authentification_handler() are
    tried first; password authentication is then attempted unless disabled.
    Returns the matching user or None.
    """
    state = get_extension_state('frasco_users')
    for handler in AUTH_HANDLERS:
        matched_user = handler(identifier, password)
        if matched_user:
            return matched_user
    if state.options["disable_password_authentication"]:
        return
    if state.options['allow_email_or_username_login'] and hasattr(state.Model, 'username'):
        # Match the identifier against either the username or the email
        query = state.Model.query.filter(db.or_(
            db.func.lower(state.Model.username) == identifier.strip().lower(),
            state.Model.email == identifier.strip().lower()))
    else:
        query = state.Model.query_by_identifier(identifier)
    user = query.first()
    if user and check_password(user, password):
        return user
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/users/auth/__init__.py",
"copies": "1",
"size": "1100",
"license": "mit",
"hash": -2985754697417031000,
"line_mean": 30.4285714286,
"line_max": 114,
"alpha_frac": 0.6354545455,
"autogenerated": false,
"ratio": 3.9855072463768115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5120961791876811,
"avg_score": null,
"num_lines": null
} |
from frasco.ext import *
from flask_assets import Environment as BaseEnvironment, FlaskResolver, _webassets_cmd
from flask import Blueprint, current_app, _request_ctx_stack, has_request_context
from flask.cli import with_appcontext, cli
from flask.signals import Namespace as SignalNamespace
from ..helpers import url_for as frasco_url_for
import logging
import click
import os
import shutil
# Signals emitted around the asset build lifecycle.
_signals = SignalNamespace()
before_build_assets = _signals.signal('before_build_assets')
# NOTE(review): variable is "after_clean_assets" but the signal name string is
# 'before_clean_assets' — confirm which one is intended before renaming either.
after_clean_assets = _signals.signal('before_clean_assets')
auto_build_assets = _signals.signal('auto_build_assets')
class Resolver(FlaskResolver):
    """Asset URL resolver that routes URL generation through frasco's
    cdn_url_for() instead of Flask's url_for()."""

    def convert_item_to_flask_url(self, ctx, item, filepath=None):
        directory, rel_path, endpoint = self.split_prefix(ctx, item)
        filename = rel_path if filepath is None else filepath[len(directory) + 1:]
        # Outside of a request, push a temporary request context so url
        # building works
        temp_ctx = None
        if not _request_ctx_stack.top:
            temp_ctx = ctx.environment._app.test_request_context()
            temp_ctx.push()
        try:
            url = cdn_url_for(endpoint, filename=filename)
            # Drop the scheme to make the URL protocol-relative
            if url and url.startswith('http:'):
                url = url[5:]
            return url
        finally:
            if temp_ctx:
                temp_ctx.pop()
class Environment(BaseEnvironment):
    # Flask-Assets environment wired to the CDN-aware Resolver above.
    resolver_class = Resolver
class FrascoAssetsState(ExtensionState):
    def register(self, *args, **kwargs):
        """Proxy bundle registration to the underlying webassets Environment."""
        return self.env.register(*args, **kwargs)
class FrascoAssets(Extension):
    """Frasco extension wrapping Flask-Assets: sets up the webassets
    environment, CDN-aware url_for helpers, js package static routes and the
    asset build CLI command."""
    name = 'frasco_assets'
    state_class = FrascoAssetsState
    prefix_extra_options = 'ASSETS_'
    defaults = {'js_packages_path': {},
                'copy_files_from_js_packages': {},
                'cdn_scheme': 'https',
                'cdn_endpoints': ['static']}

    def _init_app(self, app, state):
        state.env = Environment(app)
        state.env.debug = app.debug
        # Override Jinja's url_for with the CDN-aware variants
        app.jinja_env.globals['url_for'] = cdn_url_for
        app.jinja_env.globals['url_for_static'] = cdn_url_for_static
        app.jinja_env.macros.register_file(os.path.join(os.path.dirname(__file__), "macros.html"), alias="frasco_assets.html")
        if state.options['copy_files_from_js_packages']:
            register_assets_builder(lambda: copy_files_from_js_packages(state.options['copy_files_from_js_packages']))
        @app.cli.command()
        @with_appcontext
        def build_all_assets():
            """Build assets from all extensions."""
            if state.options['js_packages_path']:
                register_js_packages_blueprint(app, state.options['js_packages_path'])
            before_build_assets.send()
            _webassets_cmd('build')
        # Serve js packages directly when auto-building or debugging
        if state.options['js_packages_path'] and (state.env.config["auto_build"] or app.debug):
            register_js_packages_blueprint(app, state.options['js_packages_path'])
        if state.env.config["auto_build"]:
            @app.before_first_request
            def before_first_request():
                auto_build_assets.send(self)

    @ext_stateful_method
    def register(self, state, *args, **kwargs):
        # Delegate to FrascoAssetsState.register() (webassets bundle registration)
        return state.register(*args, **kwargs)
class AssetsBlueprint(Blueprint):
    """Blueprint exposing a package's static files under /static/vendor/<name>."""

    def __init__(self, name, import_name, **kwargs):
        vendor_defaults = {'static_url_path': '/static/vendor/%s' % name,
                           'static_folder': 'static'}
        for option, value in vendor_defaults.items():
            kwargs.setdefault(option, value)
        super(AssetsBlueprint, self).__init__(name, import_name, **kwargs)
def expose_package(app, name, import_name):
    """Register `import_name`'s static folder on `app` and return the
    created AssetsBlueprint."""
    blueprint = AssetsBlueprint(name, import_name)
    app.register_blueprint(blueprint)
    return blueprint
def register_assets_builder(func=None):
    """Register a callable to run on both the build and auto-build signals.

    Usable as a plain decorator, a decorator factory, or a direct call.
    Returns the registered function so decorated functions keep their name
    bound (the original decorator returned None, which replaced any
    @register_assets_builder-decorated function with None).
    """
    def decorator(func):
        before_build_assets.connect(lambda sender: func(), weak=False)
        auto_build_assets.connect(lambda sender: func(), weak=False)
        return func
    if func:
        return decorator(func)
    return decorator
def register_js_packages_blueprint(app, js_packages_path):
    """Expose each configured js package directory as /static/<name>."""
    for pkg_name, pkg_path in js_packages_path.items():
        if pkg_name in app.blueprints:
            continue
        blueprint = Blueprint(pkg_name, __name__,
                              static_folder=os.path.abspath(pkg_path),
                              static_url_path='/static/%s' % pkg_name)
        app.register_blueprint(blueprint)
def copy_files_from_js_packages(files):
    """Copy files or directories out of configured js packages into the app's
    static folder.

    :param files: mapping of "package/relative/path" -> destination relative
                  to the app's static folder (a trailing '/' on the
                  destination means "copy into this directory")
    """
    state = get_extension_state('frasco_assets')
    packages = state.options['js_packages_path']
    logger = logging.getLogger('frasco.assets')
    for src, dest in files.items():
        package, filename = src.split('/', 1)
        filename = os.path.join(packages.get(package, current_app.root_path), filename)
        # Check the resolved source path, not the raw "package/..." key
        # (the original checked os.path.exists(src), which tested a path
        # relative to the current working directory and always failed).
        if not os.path.exists(filename):
            logger.warning("Cannot copy file from js packages: %s" % src)
            continue
        target = os.path.join(current_app.static_folder, dest)
        if os.path.isdir(filename) and os.path.exists(target):
            if dest.endswith('/'):
                # Copy the directory *into* the existing target directory
                target = os.path.join(target, os.path.basename(filename))
            else:
                # Replace the existing target entirely
                logger.debug("Removing target of js package file copy: %s" % target)
                if os.path.isdir(target):
                    shutil.rmtree(target)
                else:
                    os.unlink(target)
        logger.debug("Copying js package file from '%s' to '%s'" % (filename, target))
        if os.path.isdir(filename):
            shutil.copytree(filename, target)
        else:
            if not os.path.exists(os.path.dirname(target)):
                os.makedirs(os.path.dirname(target))
            shutil.copyfile(filename, target)
def cdn_url_for(endpoint, **values):
    """url_for() variant that builds an absolute URL on the configured CDN
    domain for CDN-served endpoints, falling back to frasco's url_for()."""
    state = get_extension_state('frasco_assets', app=values.pop('_app', None))
    if state.options.get('cdn_domain') and is_cdn_endpoint(endpoint):
        # _scheme overrides the configured CDN scheme when provided
        scheme = values.pop('_scheme', state.options['cdn_scheme'])
        adapter = current_app.url_map.bind(state.options['cdn_domain'], url_scheme=scheme)
        return adapter.build(endpoint, values=values, force_external=True)
    return frasco_url_for(endpoint, **values)
def cdn_url_for_static(filename, **kwargs):
    """Shortcut for cdn_url_for('static', filename=filename)."""
    kwargs['filename'] = filename
    return cdn_url_for('static', **kwargs)
def is_cdn_endpoint(endpoint):
    """Return True when `endpoint` (or its blueprint-qualified form,
    e.g. "bp.static") is configured to be served from the CDN."""
    cdn_endpoints = get_extension_state('frasco_assets').options['cdn_endpoints']
    if endpoint in cdn_endpoints:
        return True
    return any(endpoint.endswith('.%s' % candidate) for candidate in cdn_endpoints)
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/assets/__init__.py",
"copies": "1",
"size": "6556",
"license": "mit",
"hash": -1793886649051790600,
"line_mean": 36.0395480226,
"line_max": 126,
"alpha_frac": 0.6294996949,
"autogenerated": false,
"ratio": 3.748427672955975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48779273678559754,
"avg_score": null,
"num_lines": null
} |
from frasco.ext import *
from frasco.assets import expose_package, register_assets_builder
from frasco.utils import join_url_rule
from flask import render_template
import os
import json
import re
import htmlmin
import codecs
class FrascoAngular(Extension):
name = "frasco_angular"
defaults = {"static_dir": None, # defaults to app.static_folder
"static_url_path": None, # defaults to app.static_url_path
"angular_template": "angular_layout.html",
"app_dir": "app",
"services_module": "services",
"services_name": "%s",
"templates_file": None,
"templates_module": "templatesCache",
"templates_search_paths": [],
"disable_templates_cache": None, # app.debug
"templates_matcher": r".*\.html$",
"add_app_dir_in_babel_extract": True}
def _init_app(self, app, state):
require_extension('frasco_assets', app)
expose_package(app, "frasco_angular", __name__)
if not state.options["static_dir"]:
state.options["static_dir"] = app.static_folder
if not state.options["static_url_path"]:
state.options["static_url_path"] = app.static_url_path
state.options['templates_search_paths'].append(
(os.path.join(state.options["static_dir"], state.options['app_dir']), state.options["static_url_path"] + '/' + state.options['app_dir'])
)
if state.options['templates_file']:
register_assets_builder(self.build_templates)
if has_extension('frasco_babel', app) and state.options['add_app_dir_in_babel_extract']:
app.extensions.frasco_babel.add_extract_dir(os.path.join(state.options['static_dir'], state.options['app_dir']),
'.', ['frasco.angular.babel.AngularCompatExtension'], [('javascript:**.js', {})])
@ext_stateful_method
def add_route(self, state, endpoint, rule, decorators=None, **options):
rules = rule if isinstance(rule, (list, tuple)) else [rule]
def func(*args, **kwargs):
return self.angular_view_response()
if decorators:
for decorator in reversed(decorators):
func = decorator(func)
for rule in rules:
self.get_app().add_url_rule(rule, endpoint, func, **options)
def angular_view_response(self):
return render_template(get_extension_state('frasco_angular').options['angular_template'])
    @ext_stateful_method
    def register_service_builder(self, state, api_version, filename):
        """Register an assets builder that writes an angular services module.

        The generated JS file declares one angular factory per API service,
        each backed by frascoServiceFactory with the service's endpoint map.
        """
        def builder():
            module = ("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
                "\n(function() {\n\nvar services = angular.module('%s', ['frasco']);\n") % state.options["services_module"]
            for service in api_version.iter_services():
                endpoints = {}
                for rule, endpoint, func, options in service.iter_endpoints():
                    # collect the request parameter names exposed by the view
                    args = []
                    if hasattr(func, 'request_params'):
                        for p in reversed(func.request_params):
                            args.extend(p.names)
                    # [url with :arg placeholders, [arg names]]
                    endpoints[endpoint] = [_convert_url_args(join_url_rule(service.url_prefix, rule)), args]
                module += ("\nservices.factory('%s', ['frascoServiceFactory', function(frascoServiceFactory) {\n"
                    "return frascoServiceFactory.make('%s', '%s', [], %s);\n}]);\n") % \
                    (state.options['services_name'] % service.name, service.name, api_version.url_prefix,
                        json.dumps(endpoints, indent=2))
            module += "\n})();"
            _write_file(os.path.join(state.options["static_dir"], state.options["app_dir"], filename), module)
        register_assets_builder(builder)
    @ext_stateful_method
    def build_templates(self, state):
        """Assets builder: bundle matching templates into a $templateCache JS file.

        Walks every registered (dir, url) search path and embeds each matching
        file's minified content keyed by its URL-relative name.
        """
        module = [("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
            "\nangular.module('%s', []).run(['$templateCache', function($templateCache) {") % state.options["templates_module"]]
        matcher = re.compile(state.options["templates_matcher"], re.I)
        done = set()  # absolute pathnames already embedded (dedupes overlapping paths)

        def process_file(filename, path, relpath, url_path):
            pathname = os.path.join(path, filename)
            # URL-style key: url_path + path relative to the search root + filename
            relname = "/".join([p for p in [url_path, os.path.relpath(path, relpath), filename] if p])
            if pathname not in done and matcher.match(relname):
                with codecs.open(pathname, 'r', 'utf-8') as f:
                    content = f.read()
                module.append(" $templateCache.put('%s', %s);" % (relname, json.dumps(htmlmin.minify(content))))
                done.add(pathname)

        disable = state.options["disable_templates_cache"]
        # cache templates when explicitly enabled (False) or by default outside debug
        if (disable is None and not self.get_app().debug) or disable is False:
            for templates_dir, url_path in state.options['templates_search_paths']:
                for path, dirnames, filenames in os.walk(templates_dir):
                    for filename in filenames:
                        process_file(filename, path, templates_dir, url_path)
        module = "\n".join(module) + "\n}]);"
        filename = os.path.join(state.options["static_dir"], state.options["app_dir"], state.options['templates_file'])
        _write_file(filename, module)
# Matches Flask URL placeholders: <name>, <converter:name> and
# <converter(arg=...):name>. The original pattern only handled lowercase
# names and bare converters, so rules like "<int(min=1):page>" or
# "<userId>" were left unconverted.
_url_arg_re = re.compile(r"<(?:[a-zA-Z_][a-zA-Z0-9_]*(?:\(.*?\))?:)?([a-zA-Z0-9_]+)>")

def _convert_url_args(url):
    """Convert Flask-style URL placeholders to angular-style ":name" params."""
    return _url_arg_re.sub(r":\1", url)
def _write_file(filename, source):
    """Write *source* to *filename* as UTF-8, creating parent dirs as needed.

    Guards against a bare filename with no directory component: the original
    called os.makedirs('') in that case, which raises FileNotFoundError.
    """
    dirname = os.path.dirname(filename)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    with codecs.open(filename, "w", "utf-8") as f:
        f.write(source)
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/angular/__init__.py",
"copies": "1",
"size": "5809",
"license": "mit",
"hash": -6553291051234746000,
"line_mean": 47.4083333333,
"line_max": 148,
"alpha_frac": 0.5863315545,
"autogenerated": false,
"ratio": 3.7917754569190603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9864477483121534,
"avg_score": 0.002725905659505176,
"num_lines": 120
} |
from frasco.ext import *
from frasco.i18n import lazy_translate
from frasco.utils import populate_obj, extract_unmatched_items
from flask import render_template
from flask_login import LoginManager, logout_user, login_required, login_url, login_fresh, confirm_login, fresh_login_required, user_logged_in
import datetime
import os
from .user import *
from .model import *
from .jinja_ext import *
from .forms import *
from .tokens import *
from .tokens import TOKEN_NS_ACCESS_TOKEN
from .signals import *
from .password import *
from .blueprint import users_blueprint
from . import captcha
class FrascoUsersState(ExtensionState):
    """Runtime state for the frasco_users extension.

    Holds the flask-login manager plus the validator and request-loader
    callback lists that application code can register against.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.manager = LoginManager()
        # validation callbacks, run at the relevant points of the user flows
        self.user_validators = []
        self.override_builtin_user_validation = False
        self.login_validators = []
        self.password_validators = []
        self.captcha_validator = None
        # callbacks that try to resolve a user from an incoming request
        self.user_request_loaders = []

    def user_request_loader(self, func):
        """Decorator registering *func* as a request-based user loader."""
        self.user_request_loaders.append(func)
        return func
class FrascoUsers(Extension):
    """Frasco extension providing user accounts on top of flask-login.

    Covers signup, login (with optional 2FA), password reset, email
    validation, captcha, oauth hooks and access tokens. Behaviour is driven
    by the large ``defaults`` option table below.
    """
    name = "frasco_users"
    state_class = FrascoUsersState
    defaults = {
        # email
        "must_provide_email": True,
        "email_is_unique": True,
        "email_allowed_domains": None,
        # username
        "must_provide_username": True,
        "username_is_unique": True,
        "forbidden_usernames": [],
        "min_username_length": 1,
        "allow_spaces_in_username": False,
        "username_case_sensitive": False,
        # password
        "validate_password_regexps": None,
        "prevent_password_reuse": False,
        "max_password_reuse_saved": None,
        "min_time_between_password_change": None,
        "expire_password_after": None,
        "max_password_length": 500, # used to prevent DOS attacks
        # login
        "allow_login": True,
        "enable_2fa": False,
        "login_view": "users.login",
        "login_redirect": None, # redirect to url instead of login page
        "login_form_class": LoginWithEmailForm,
        "login_2fa_form_class": Login2FAForm,
        "allow_email_or_username_login": True,
        "remember_days": 365,
        "redirect_after_login": "index",
        "redirect_after_login_disallowed": None,
        "2fa_issuer_name": None, # default is app.config['TITLE']
        "2fa_remember_days": 60,
        "2fa_remember_cookie_options": {},
        # signup
        "signup_redirect": None, # redirect to url instead of signup page
        "allow_signup": True,
        "signup_form_class": SignupForm,
        "send_welcome_email": False,
        "login_user_on_signup": True,
        "rate_limit_count": None,
        "rate_limit_period": 60,
        "redirect_after_signup": "index",
        "redirect_after_signup_disallowed": None, # go to login
        # captcha
        "debug_captcha": False,
        "recaptcha_key": None,
        "recaptcha_secret": None,
        "hcaptcha_key": None,
        "hcaptcha_secret": None,
        # reset password
        "reset_password_redirect": None, # redirect to url instead of reset password page
        "allow_reset_password": True,
        "send_reset_password_form_class": SendResetPasswordForm,
        "reset_password_form_class": ResetPasswordForm,
        "send_reset_password_email": True,
        "reset_password_ttl": 3600, # 1h
        "login_user_on_reset_password": True,
        "redirect_after_reset_password_token": False,
        "redirect_after_reset_password": "index",
        "redirect_after_reset_password_disallowed": "users.login",
        # email validation
        "redirect_after_email_validated": "index",
        "email_validation_ttl": None,
        "block_non_email_validated_users": False,
        "send_email_validation_email": False,
        # logout
        "redirect_after_logout": "index",
        # oauth
        "oauth_signup_only": False,
        "oauth_login_only": False,
        "oauth_must_signup": False,
        "oauth_must_provide_password": False,
        # auth
        "disable_password_authentication": False,
        "default_auth_provider_name": "app",
        # access tokens
        "enable_access_tokens": False,
        "access_tokens_ttl": None,
        "enable_access_tokens_web_flow": True,
        "access_tokens_web_flow_allowed_redirects": [],
        # messages
        "login_error_message": lazy_translate("Invalid email or password"),
        "login_disallowed_message": None,
        "login_2fa_error_message": lazy_translate("Invalid two factor authentification code"),
        "login_required_message": lazy_translate("Please log in to access this page"),
        "fresh_login_required_message": lazy_translate("Please reauthenticate to access this page"),
        "password_expired_message": lazy_translate("Your password has expired, please enter a new one"),
        "must_provide_username_message": lazy_translate("A username must be provided"),
        "password_reused_message": lazy_translate("You cannot use a password which you have previously used"),
        "min_time_between_password_change_message": lazy_translate("You have changed your password too recently"),
        "validate_password_regexps_message": lazy_translate("The password does not respect the following rule: {rule}"),
        "max_password_length_message": lazy_translate("The password is too long"),
        "must_provide_email_message": lazy_translate("An email address must be provided"),
        "signup_disallowed_message": None,
        "username_taken_message": lazy_translate("An account using the same username already exists"),
        "email_taken_message": lazy_translate("An account using the same email already exists"),
        "username_too_short_message": lazy_translate("The username is too short"),
        "username_has_spaces_message": lazy_translate("The username cannot contain spaces"),
        "password_confirm_failed_message": lazy_translate("The two passwords do not match"),
        "bad_signup_code_message": lazy_translate("The provided code is not valid"),
        "rate_limit_reached_message": lazy_translate("Too many accounts have been created from this location in a too short period. Please, try again later"),
        "reset_password_token_error_message": lazy_translate("This email does not exist in our database"),
        "reset_password_token_success_message": lazy_translate("An email has been sent to your email address with a link to reset your password"),
        "reset_password_error_message": lazy_translate("Invalid or expired link to reset your password"),
        "reset_password_success_message": lazy_translate("Password successfully resetted"),
        "reset_password_disallowed_message": lazy_translate("You are not allowed to reset your password"),
        "update_password_error_message": lazy_translate("Invalid current password"),
        "update_user_email_error_message": lazy_translate("An account using the same email already exists"),
        "oauth_user_denied_login": lazy_translate("Login was denied"),
        "oauth_user_already_exists_message": lazy_translate("This {provider} account has already been used on a different account"),
        "oauth_error": lazy_translate("An error occured while authentifying you with the remote provider"),
        "captcha_fail_message": lazy_translate("The captcha validation has failed"),
        "email_validation_success_message": None,
        "enable_admin": True
    }

    def _init_app(self, app, state):
        """Configure flask-login, blueprints, captcha and user loaders on *app*."""
        state.Model = state.import_option('model')
        state.LoginModel = state.import_option('login_model', required=False)
        app.config.setdefault("REMEMBER_COOKIE_DURATION", datetime.timedelta(days=state.options["remember_days"]))
        app.register_blueprint(users_blueprint)
        app.jinja_env.add_extension(LoginRequiredExtension)
        app.jinja_env.add_extension(AnonymousOnlyExtension)
        state.manager.init_app(app)
        state.manager.login_view = state.options['login_view']
        state.manager.login_message_category = "error"
        # forward every option not declared in `defaults` straight to the
        # flask-login manager (e.g. session_protection)
        populate_obj(state.manager, extract_unmatched_items(state.options, self.defaults))
        if state.options['recaptcha_key']:
            state.captcha_validator = captcha.validate_recaptcha
        elif state.options['hcaptcha_key']:
            state.captcha_validator = captcha.validate_hcaptcha
        if has_extension("frasco_mail", app):
            app.extensions.frasco_mail.add_templates_from_package(__name__)
        if has_extension("frasco_babel", app):
            app.extensions.frasco_babel.add_extract_dir(os.path.dirname(__file__), ["templates"])

        @state.manager.user_loader
        def user_loader(id):
            # the session cookie may identify users by a column other than the
            # primary key (Model.__session_cookie_identifier__)
            return state.Model.query.filter(getattr(state.Model, getattr(state.Model, '__session_cookie_identifier__', 'id')) == id).first()

        @state.manager.request_loader
        def request_loaders(request):
            # first registered loader returning a user wins
            for loader in state.user_request_loaders:
                user = loader(request)
                if user:
                    return user

        if state.options['enable_access_tokens']:
            @state.user_request_loader
            def access_token_user_loader(request):
                # token may come from ?access_token= or an Authorization: Bearer header
                access_token = request.args.get('access_token')
                if 'Authorization' in request.headers:
                    authz = request.headers['Authorization']
                    if authz.startswith('Bearer '):
                        access_token = authz[7:]
                if access_token:
                    return read_user_token(access_token, TOKEN_NS_ACCESS_TOKEN, state.options['access_tokens_ttl'])

        if state.options['block_non_email_validated_users']:
            @app.before_request
            def block_non_email_validated_users():
                # NOTE(review): relies on `current_user` and `request` being in
                # scope in this module (imports not visible in this chunk) -- confirm.
                if current_user.is_authenticated and not current_user.email_validated and \
                        request.endpoint not in ('users.logout', 'users.send_email_validation_email', 'users.validate_email'):
                    return render_template("users/non_email_validated_users_block_page.html")

    @ext_stateful_method
    def user_validator(self, state, func):
        """Decorator registering a user/signup validation callback."""
        state.user_validators.append(func)
        return func

    @ext_stateful_method
    def login_validator(self, state, func):
        """Decorator registering a login validation callback."""
        state.login_validators.append(func)
        return func

    @ext_stateful_method
    def password_validator(self, state, func):
        """Decorator registering a password validation callback."""
        state.password_validators.append(func)
        return func
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/users/__init__.py",
"copies": "1",
"size": "10598",
"license": "mit",
"hash": 3913719076004854000,
"line_mean": 46.5246636771,
"line_max": 158,
"alpha_frac": 0.6480468013,
"autogenerated": false,
"ratio": 4.040411742279832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036070123191798828,
"num_lines": 223
} |
from frasco.ext import *
from frasco.mail import send_mail
from frasco.models import db, transaction
from flask.signals import Namespace as SignalNamespace
import datetime
import click
from contextlib import contextmanager
from .model import *
# Signal emitted once an invoice has been created (see create_invoice()).
_signals = SignalNamespace()
invoice_issued = _signals.signal('invoice_issued')
class FrascoInvoicing(Extension):
    """Frasco extension handling invoice creation and invoice emails."""
    name = "frasco_invoicing"
    defaults = {"send_email": None}

    def _init_app(self, app, state):
        """Resolve the invoice models and wire the optional mail integration."""
        state.Model = state.import_option('model')
        state.ItemModel = state.import_option('item_model')
        # default ref generator; replaceable via the ref_creator decorator
        state.ref_creator_func = create_invoice_ref
        if has_extension("frasco_mail", app):
            app.extensions.frasco_mail.add_templates_from_package(__name__)
            # emails default to enabled only when frasco_mail is available
            if state.options['send_email'] is None:
                state.options['send_email'] = True

        @app.cli.command('send-invoice-email')
        @click.argument('invoice_id')
        @click.option('--email')
        def send_email_command(invoice_id, email=None):
            """CLI: (re)send the invoice email for the given invoice id."""
            invoice = state.Model.query.get(invoice_id)
            send_invoice_mail(email or invoice.email, invoice)

    @ext_stateful_method
    def ref_creator(self, state, func):
        """Decorator registering a custom invoice reference generator."""
        state.ref_creator_func = func
        return func
@contextmanager
def create_invoice(**create_invoice_ref_kwargs):
    """Context manager creating a new invoice inside a transaction.

    Yields ``(invoice, ItemModel)`` so the caller can populate the invoice and
    attach items; on exit the invoice is persisted, the invoice_issued signal
    is sent and, if configured, the invoice email is dispatched.
    """
    state = get_extension_state('frasco_invoicing')
    with transaction():
        invoice = state.Model()
        # Use the configured ref creator rather than calling create_invoice_ref
        # directly, so overrides registered via FrascoInvoicing.ref_creator
        # actually take effect (the original bypassed them).
        invoice.ref = state.ref_creator_func(**create_invoice_ref_kwargs)
        yield (invoice, state.ItemModel)
        db.session.add(invoice)
        invoice_issued.send(invoice)
        if state.options['send_email'] and invoice.email:
            send_invoice_mail(invoice.email, invoice)
def create_invoice_ref(category=None, counter=None, separator='-', merge_date=True):
    """Build an invoice reference from today's date, a category and a counter.

    With merge_date the date collapses to one unpadded "YMD" token; otherwise
    year, month and day are separate tokens. When *counter* is None it
    defaults to the current invoice count + 1.
    """
    today = datetime.date.today()
    date_tokens = [str(today.year), str(today.month), str(today.day)]
    tokens = ["".join(date_tokens)] if merge_date else date_tokens
    if category:
        tokens.append(category)
    if counter is None:
        counter = get_extension_state('frasco_invoicing').Model.query.count() + 1
    tokens.append(str(counter))
    return separator.join(tokens)
def send_invoice_mail(email, invoice, **kwargs):
    """Send the 'invoice.html' mail template for *invoice* to *email*."""
    line_items = [(item.description, item.quantity, item.amount) for item in invoice.items]
    send_mail(email, 'invoice.html',
              invoice=invoice,
              invoice_date=invoice.issued_at,
              invoice_items=line_items,
              invoice_currency=invoice.currency.upper(),
              invoice_total=invoice.total,
              invoice_tax=invoice.tax_amount,
              invoice_tax_rate=invoice.tax_rate,
              **kwargs)
def send_failed_invoice_mail(email, invoice, **kwargs):
    """Send the 'failed_invoice.html' template for a failed payment.

    Amounts are divided by 100 and the date is a unix timestamp -- presumably
    a payment-provider (e.g. Stripe) invoice object; confirm with callers.
    """
    line_items = [(line.description or '', line.quantity, line.amount / 100.0)
                  for line in invoice.lines.data]
    send_mail(email, 'failed_invoice.html',
              invoice_date=datetime.datetime.fromtimestamp(invoice.date),
              invoice_items=line_items,
              invoice_currency=invoice.currency.upper(),
              invoice_total=invoice.total / 100.0, **kwargs)
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/billing/invoicing/__init__.py",
"copies": "1",
"size": "3112",
"license": "mit",
"hash": 1975369080052575500,
"line_mean": 33.1978021978,
"line_max": 84,
"alpha_frac": 0.661311054,
"autogenerated": false,
"ratio": 3.700356718192628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9841177919379915,
"avg_score": 0.004097970562542596,
"num_lines": 91
} |
from frasco.ext import *
from frasco.upload import url_for_upload
from frasco.helpers import url_for
from frasco.utils import slugify
from flask import current_app, request
import sqlalchemy as sqla
import hashlib
import urllib.request, urllib.parse, urllib.error
import math
import random
import base64
import requests
def svg_to_base64_data(svg):
    """Return *svg* markup as a base64 data: URI string.

    Accepts str or bytes. The original passed a str straight to
    base64.b64encode (TypeError on Python 3) and concatenated the resulting
    bytes to a str; encode/decode explicitly instead.
    """
    if isinstance(svg, str):
        svg = svg.encode('utf-8')
    return 'data:image/svg+xml;base64,' + base64.b64encode(svg).decode('ascii')
class UserAvatarModelMixin(object):
    """Model mixin adding an avatar filename column and a computed avatar URL."""
    # filename of an uploaded avatar; when unset, url_for_avatar() falls back
    # to gravatar / generated-letter avatars
    avatar_filename = sqla.Column(sqla.String)

    @property
    def avatar_url(self):
        """Resolved avatar URL for this user (see url_for_avatar)."""
        return url_for_avatar(self)
class FrascoUsersAvatars(Extension):
    """Frasco extension serving user avatars.

    Supports uploaded avatars, gravatar lookups and generated "first letter"
    SVG avatars (flavatars); see url_for_avatar() for the resolution order.
    """
    name = "frasco_users_avatars"
    defaults = {"url": None,
                "avatar_size": 80,
                "add_flavatar_route": False,
                "try_gravatar": True,
                "force_gravatar": False,
                "gravatar_size": None,
                "gravatar_email_column": None,
                "gravatar_default": "mm",
                "force_flavatar": False,
                "flavatar_size": "100%",
                "flavatar_name_column": None,
                "flavatar_font_size": 80,
                "flavatar_text_dy": "0.32em",
                "flavatar_length": 1,
                "flavatar_text_color": "#ffffff",
                "flavatar_bg_colors": ["#5A8770", "#B2B7BB", "#6FA9AB", "#F5AF29", "#0088B9", "#F18636", "#D93A37", "#A6B12E", "#5C9BBC", "#F5888D", "#9A89B5", "#407887", "#9A89B5", "#5A8770", "#D33F33", "#A2B01F", "#F0B126", "#0087BF", "#F18636", "#0087BF", "#B2B7BB", "#72ACAE", "#9C8AB4", "#5A8770", "#EEB424", "#407887"]}

    def _init_app(self, app, state):
        app.add_template_global(url_for_avatar)

        def flavatar(name, bgcolorstr=None):
            """View: serve a generated first-letter SVG avatar."""
            if bgcolorstr is None:
                bgcolorstr = request.args.get('bgcolorstr')
            svg = generate_first_letter_avatar_svg(name, bgcolorstr, request.args.get('size'))
            return svg, 200, {
                'Content-Type': 'image/svg+xml',
                'Cache-Control': 'public, max-age=31536000'
            }

        @app.route('/avatar/<hash>/<name>')
        def avatar(hash, name):
            # try gravatar first (when enabled), falling back to a flavatar
            if state.options['try_gravatar']:
                size = state.options['gravatar_size'] or state.options["avatar_size"]
                try:
                    r = requests.get(url_for_gravatar(hash, size=size, default=404))
                    if r.status_code != 404:
                        return r.content, 200, {'Content-Type': r.headers['content-type']}
                except Exception:
                    # best-effort: network failures just fall through to flavatar
                    pass
            return flavatar(name, hash)

        if state.options['add_flavatar_route']:
            app.add_url_rule('/flavatar/<name>.svg', 'flavatar', flavatar)
            app.add_url_rule('/flavatar/<name>/<bgcolorstr>.svg', 'flavatar', flavatar)
def url_for_avatar(user):
    """Return the avatar URL for *user*.

    Resolution order: uploaded avatar file, then (depending on options)
    forced flavatar, forced gravatar, a configured URL template, and finally
    the extension's own /avatar/<hash>/<name> route.
    """
    state = get_extension_state('frasco_users_avatars')
    if getattr(user, 'avatar_filename', None):
        return url_for_upload(user.avatar_filename)
    hash = None
    username = getattr(user, state.options["flavatar_name_column"] or 'username', None)
    if username:
        username = slugify(username.lower())
        hash = hashlib.md5(username.encode('utf-8')).hexdigest()
    email = getattr(user, state.options["gravatar_email_column"] or 'email', None)
    if email:
        # the email hash takes precedence over the username hash
        hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
        if not username:
            username = slugify(email.split('@')[0])
    if state.options["force_flavatar"] and username:
        if state.options['add_flavatar_route']:
            return url_for('flavatar', name=username, bgcolorstr=hash, _external=True)
        # no route registered: embed the SVG as a data: URI
        return svg_to_base64_data(generate_first_letter_avatar_svg(username, hash))
    if state.options["force_gravatar"] and email:
        return url_for_gravatar(email)
    if state.options['url'] and email:
        return state.options["url"].format(email=email, email_hash=hash, username=username)
    return url_for('avatar', hash=hash, name=username, _external=True)
def url_for_gravatar(email, size=None, default=None):
    """Build the gravatar URL for *email*, with size/default from the options."""
    state = get_extension_state('frasco_users_avatars')
    email_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
    params = {
        's': size or state.options['gravatar_size'] or state.options["avatar_size"],
        'd': default or state.options['gravatar_default'],
    }
    query = urllib.parse.urlencode({key: value for key, value in params.items() if value is not None})
    return "https://www.gravatar.com/avatar/%s?%s" % (email_hash, query)
def generate_first_letter_avatar_svg(name, bgcolorstr=None, size=None):
    """Build an SVG avatar showing the first letter(s) of *name*.

    The background color is chosen deterministically from *bgcolorstr* when
    given (e.g. an email hash), otherwise derived from the first character
    (random for characters below 'A').
    """
    state = get_extension_state('frasco_users_avatars')
    size = size or state.options['flavatar_size'] or state.options["avatar_size"]
    if size and isinstance(size, int):
        size = "%spx" % size
    svg_tpl = ('<svg xmlns="http://www.w3.org/2000/svg" pointer-events="none" viewBox="0 0 100 100" '
        'width="%(w)s" height="%(h)s" style="background-color: %(bgcolor)s;">%(letter)s</svg>')
    char_svg_tpl = ('<text text-anchor="middle" y="50%%" x="50%%" dy="%(dy)s" '
        'pointer-events="auto" fill="%(fgcolor)s" font-family="'
        'HelveticaNeue-Light,Helvetica Neue Light,Helvetica Neue,Helvetica, Arial,Lucida Grande, sans-serif" '
        'style="font-weight: 400; font-size: %(size)spx">%(char)s</text>')
    if not name:
        text = '?'
    else:
        # first flavatar_length characters of the name
        text = name[0:min(state.options['flavatar_length'], len(name))]
    colors_len = len(state.options['flavatar_bg_colors'])
    if bgcolorstr:
        # deterministic color index from the string's character codes
        bgcolor = sum([ord(c) for c in bgcolorstr]) % colors_len
    elif ord(text[0]) < 65:
        # characters before 'A' (digits, punctuation): random color
        bgcolor = random.randint(0, colors_len - 1)
    else:
        bgcolor = int(math.floor((ord(text[0]) - 65) % colors_len))
    return svg_tpl % {
        'bgcolor': state.options['flavatar_bg_colors'][bgcolor],
        'w': size,
        'h': size,
        'letter': char_svg_tpl % {
            'dy': state.options['flavatar_text_dy'],
            'fgcolor': state.options['flavatar_text_color'],
            'size': state.options['flavatar_font_size'],
            'char': text
        }
    }
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/users/avatars.py",
"copies": "1",
"size": "6180",
"license": "mit",
"hash": -491401150490872450,
"line_mean": 40.4765100671,
"line_max": 325,
"alpha_frac": 0.592394822,
"autogenerated": false,
"ratio": 3.3369330453563717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4429327867356372,
"avg_score": null,
"num_lines": null
} |
from frasco.ext import *
from frasco.utils import import_class
from flask import send_from_directory
from flask.wrappers import Request
from werkzeug.datastructures import FileStorage
from io import BytesIO
from tempfile import TemporaryFile
import uuid
import os
from .backend import StorageBackend, split_backend_from_filename
from .utils import *
def _get_file_stream(self, total_content_length, content_type, filename=None, content_length=None):
    """Werkzeug stream factory: buffer small uploads in memory, spill big ones.

    Uploads over 500KB go to a temp file (honoring FRASCO_UPLOAD_TMP_DIR);
    anything smaller stays in a BytesIO.
    """
    spill_threshold = 1024 * 500
    if total_content_length <= spill_threshold:
        return BytesIO()
    return TemporaryFile('wb+', dir=os.environ.get('FRASCO_UPLOAD_TMP_DIR'))
Request._get_file_stream = _get_file_stream
class FrascoUploadError(ExtensionError):
    """Base error raised by the frasco_upload extension."""
    pass
class FrascoUploadState(ExtensionState):
    """Runtime state for frasco_upload: lazily instantiated storage backends."""

    def __init__(self, *args):
        super(FrascoUploadState, self).__init__(*args)
        # backend name -> StorageBackend instance cache
        self.backends = {}

    def _get_backend_options(self, name):
        """Resolve effective options for backend *name*.

        Starts from the global options, then recursively follows 'backend'
        aliases declared under options['backends'] so a named backend can
        inherit (and override) another one's configuration.
        """
        options = dict(self.options)
        options.pop('backends', None)
        options.pop('backend', None) # make sure this key does not exists
        backend = name
        if name in self.options['backends']:
            if 'backend' in self.options['backends'][name]:
                # recurse into the aliased backend's options first
                options = self._get_backend_options(self.options['backends'][name]['backend'])
                backend = options['backend'] # make sure to keep the deepest level backend to resolve aliases properly
            options.update(self.options['backends'][name])
        options['backend'] = backend
        return options

    def get_backend(self, name=None):
        """Return the (cached) StorageBackend for *name*.

        *name* may already be a StorageBackend instance (returned as-is) or
        None (falls back to the default_backend option).
        """
        if isinstance(name, StorageBackend):
            return name
        if name is None:
            name = self.options['default_backend']
        if name not in self.backends:
            options = self._get_backend_options(name)
            backend_class = import_class(options['backend'], StorageBackend, "frasco.upload.backends")
            self.backends[name] = backend_class(name, options)
        return self.backends[name]
class FrascoUpload(Extension):
    """Frasco extension managing file uploads across pluggable storage backends."""
    name = 'frasco_upload'
    state_class = FrascoUploadState
    defaults = {"default_backend": "local",
                "backends": {},
                "upload_dir": "uploads",
                "upload_url": "/uploads",
                "upload_tmp_dir": None,
                "uuid_prefixes": True,
                "uuid_prefix_path_separator": False,
                "keep_filenames": True,
                "subfolders": False}

    def _init_app(self, app, state):
        """Register template helpers and the local static-upload route."""
        app.add_template_global(url_for_upload)
        app.add_template_global(format_file_size)

        def send_uploaded_file(filename):
            # serve files saved by the local backend from upload_dir
            return send_from_directory(os.path.abspath(state.options["upload_dir"]), filename)

        app.add_url_rule(state.options["upload_url"] + "/<path:filename>",
            endpoint="static_upload",
            view_func=send_uploaded_file)
def save_uploaded_file(file, filename=None, backend=None, **kwargs):
    """Save *file* (FileStorage or raw stream) to a storage backend.

    When *filename* is omitted one is generated from the upload's own name.
    Passing backend=True (or return_filename_with_backend=True) makes the
    return value a "backend://filename" string instead of a bare filename.
    Extra kwargs are forwarded to generate_filename().
    """
    state = get_extension_state('frasco_upload')
    # backend=True is a shorthand for return_filename_with_backend=True
    return_filename_with_backend = kwargs.pop('return_filename_with_backend', False) or backend is True
    if not isinstance(file, FileStorage):
        file = FileStorage(file)
    if not filename:
        filename = generate_filename(file.filename, **kwargs)
    if not backend or backend is True:
        # the filename itself may carry a "backend://" prefix
        backend, filename = split_backend_from_filename(filename)
    state.get_backend(backend).save(file, filename)
    if return_filename_with_backend:
        return "%s://%s" % (backend or state.options['default_backend'], filename)
    return filename
def upload_stream(stream, filename, target_filename=None, **kwargs):
    """Save a raw *stream* (named *filename*) through save_uploaded_file()."""
    wrapped = FileStorage(stream, filename)
    return save_uploaded_file(wrapped, target_filename, **kwargs)
def upload_file(pathname, filename=None, **kwargs):
    """Upload the file at *pathname*; the name defaults to the path itself."""
    with open(pathname, 'rb') as fileobj:
        return upload_stream(fileobj, filename or pathname, **kwargs)
def delete_uploaded_file(filename, backend=None, **kwargs):
    """Delete *filename* from its storage backend.

    Without an explicit *backend*, a "backend://" prefix in the filename
    selects it (falling back to the default backend).
    """
    target_backend = backend
    if not target_backend:
        target_backend, filename = split_backend_from_filename(filename)
    state = get_extension_state('frasco_upload')
    state.get_backend(target_backend).delete(filename, **kwargs)
def url_for_upload(filename, backend=None, **kwargs):
    """Return the public URL for an uploaded *filename*.

    Without an explicit *backend*, a "backend://" prefix in the filename
    selects it (falling back to the default backend).
    """
    target_backend = backend
    if not target_backend:
        target_backend, filename = split_backend_from_filename(filename)
    state = get_extension_state('frasco_upload')
    return state.get_backend(target_backend).url_for(filename, **kwargs)
def open_uploaded_file(filename, backend=None, **kwargs):
    """Open an uploaded *filename* for reading via its storage backend.

    Without an explicit *backend*, a "backend://" prefix in the filename
    selects it (falling back to the default backend).
    """
    target_backend = backend
    if not target_backend:
        target_backend, filename = split_backend_from_filename(filename)
    state = get_extension_state('frasco_upload')
    return state.get_backend(target_backend).open(filename, **kwargs)
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/upload/__init__.py",
"copies": "1",
"size": "4607",
"license": "mit",
"hash": -3874969661928312300,
"line_mean": 37.3916666667,
"line_max": 118,
"alpha_frac": 0.6598654222,
"autogenerated": false,
"ratio": 3.9511149228130362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5110980345013036,
"avg_score": null,
"num_lines": null
} |
from frasco.ext import *
from frasco.utils import import_string
from frasco.models import delayed_tx_calls
from flask_rq2 import RQ
from flask_rq2 import cli
from rq import get_current_job
from rq.timeouts import JobTimeoutException
from .job import FrascoJob
import redis.exceptions
import functools
import logging
logger = logging.getLogger('frasco.tasks')
class FrascoTasks(Extension):
    """Frasco extension running background tasks via RQ (flask-rq2)."""
    name = 'frasco_tasks'
    # app.config keys prefixed RQ_ are forwarded to flask-rq2
    prefix_extra_options = 'RQ_'
    defaults = {"tasks_timeout": RQ.default_timeout,
                "scheduled_tasks_timeout": 300}

    def _init_app(self, app, state):
        """Configure flask-rq2 on *app* and install the task exception handler."""
        # reuse the frasco_redis connection URL when none is configured
        if not app.config.get('RQ_REDIS_URL') and has_extension('frasco_redis', app):
            app.config['RQ_REDIS_URL'] = app.extensions.frasco_redis.options['url']
        app.config.setdefault('RQ_JOB_CLASS', 'frasco.tasks.job.FrascoJob')
        if app.testing:
            # run jobs synchronously under tests
            app.config.setdefault('RQ_ASYNC', False)
        state.rq = RQ(app, default_timeout=state.options['tasks_timeout'])
        state.rq.exception_handler(per_task_exception_handler)
def per_task_exception_handler(job, *exc_info):
    """RQ exception handler dispatching to the task's own handler, if any.

    Tasks opt in by carrying a __task_exception_handler__ attribute (set via
    task_exception_handler()); returns None when no handler is registered.
    """
    handler = getattr(job.func, '__task_exception_handler__', None)
    if handler is not None:
        return handler(job, *exc_info)
def enqueue_task_now(func, *args, **kwargs):
    """Enqueue *func* immediately with positional/keyword call arguments."""
    return enqueue_now(func, args=args, kwargs=kwargs)
def enqueue_now(func, **options):
    """Enqueue *func* on its RQ queue immediately.

    Options attached to the function via @task(...) override the call-site
    options; the 'queue' option may be a callable returning the queue name.
    """
    task_options = getattr(func, '__task_options__', None)
    if task_options:
        options.update(task_options)
    queue_name = options.pop('queue', None)
    if callable(queue_name):
        queue_name = queue_name()
    queue = get_extension_state('frasco_tasks').rq.get_queue(queue_name)
    return queue.enqueue_call(func, **options)
def enqueue_task(func, *args, **kwargs):
    """Enqueue *func* through the transaction-aware enqueue() proxy."""
    return enqueue(func, args=args, kwargs=kwargs)
@delayed_tx_calls.proxy
def enqueue(func, **options):
    """Transaction-aware enqueue (proxied by delayed_tx_calls; presumably
    deferred until the surrounding transaction commits -- confirm)."""
    return enqueue_now(func, **options)
def get_enqueued_job(id):
    """Fetch a previously enqueued FrascoJob by its RQ job id."""
    connection = get_extension_state('frasco_tasks').rq.connection
    return FrascoJob.fetch(id, connection=connection)
def task(**options):
    """Decorator marking a function as an RQ task.

    Stores *options* on the function and attaches convenience attributes:
    .enqueue / .enqueue_now to run it, and .exception_handler /
    .timeout_handler decorators to register failure handlers.
    """
    def wrapper(func):
        func.__task_options__ = options
        func.enqueue = functools.partial(enqueue_task, func)
        func.enqueue_now = functools.partial(enqueue_task_now, func)
        func.exception_handler = task_exception_handler(func)
        func.timeout_handler = task_exception_handler(func, JobTimeoutException)
        return func
    return wrapper
def task_exception_handler(task_func, exc_type=None):
    """Return a decorator registering an exception handler on *task_func*.

    When *exc_type* is given the handler only fires for that exception type.
    """
    def decorator(handler_func):
        handler = handler_func
        if exc_type:
            handler = _wrap_exc_handler_for_type(handler_func, exc_type)
        task_func.__task_exception_handler__ = handler
        return handler_func
    return decorator
def _wrap_exc_handler_for_type(handler_func, exc_type):
    """Wrap *handler_func* so it only runs for exceptions of *exc_type*.

    Uses issubclass rather than the original identity check so subclasses of
    exc_type are handled too, matching normal except-clause semantics; returns
    None (unhandled) for non-matching or malformed exc_info.
    """
    @functools.wraps(handler_func)
    def wrapper(job, *exc_info):
        if exc_info and isinstance(exc_info[0], type) and issubclass(exc_info[0], exc_type):
            return handler_func(job, *exc_info)
    return wrapper
def schedule_task(pattern, import_name_or_func):
    """Register a cron-scheduled task with the RQ scheduler.

    *pattern* is a cron expression; *import_name_or_func* is either a dotted
    import path or the function itself. Logs and returns None when redis is
    unreachable instead of raising.
    """
    state = get_extension_state('frasco_tasks')
    if isinstance(import_name_or_func, str):
        func = import_string(import_name_or_func)
        import_name = import_name_or_func
    else:
        func = import_name_or_func
        import_name = "%s.%s" % (func.__module__, func.__name__)
    try:
        logger.info("Scheduling task %s at %s" % (import_name, pattern))
        # stable job id derived from the import path so re-registration replaces
        return state.rq.get_scheduler().cron(pattern, func,
            id="cron-%s" % import_name.replace('.', '-'),
            timeout=state.options['scheduled_tasks_timeout'])
    except redis.exceptions.ConnectionError:
        logger.error("Cannot initialize scheduled tasks as no redis connection is available")
def clear_all_scheduled_tasks():
    """Cancel every job currently registered with the RQ scheduler."""
    scheduler = get_extension_state('frasco_tasks').rq.get_scheduler()
    for scheduled_job in scheduler.get_jobs():
        scheduler.cancel(scheduled_job)
# Wrap flask-rq2's `scheduler` CLI command so tasks declared in the
# extension's `schedule` option are (re)registered before the scheduler runs.
_rq2_scheduler = cli.scheduler

@functools.wraps(_rq2_scheduler)
def _scheduler(*args, **kwargs):
    state = get_extension_state('frasco_tasks')
    # drop previously registered cron jobs to avoid stale/duplicate entries
    clear_all_scheduled_tasks()
    for import_name, pattern in state.options.get('schedule', {}).items():
        schedule_task(pattern, import_name)
    _rq2_scheduler(*args, **kwargs)

# replace the command flask-rq2 registered with our wrapper
cli._commands['scheduler'] = _scheduler
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/tasks/__init__.py",
"copies": "1",
"size": "4229",
"license": "mit",
"hash": -2148198761658438400,
"line_mean": 32.832,
"line_max": 127,
"alpha_frac": 0.6753369591,
"autogenerated": false,
"ratio": 3.469237079573421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9638520822856969,
"avg_score": 0.0012106431632904393,
"num_lines": 125
} |
from frasco.ext import require_extension, get_extension_state
from flask import current_app
import os
import json
def generate_cache_service_worker(cache_name, assets, dynamic_urls=None, template_filename=None,
        offline_fallback=None, offline_fallback_ignore_paths=None, domains=None):
    """Build the JS source of a cache service worker.

    Emits a header of CACHE_* constants followed by the template file's body.
    Asset names starting with '@' are expanded to the URLs of the matching
    webassets bundle.
    """
    if not template_filename:
        # default template ships next to this module
        template_filename = os.path.join(os.path.dirname(__file__), 'service-worker.js')
    if not domains:
        domains = [current_app.config['SERVER_NAME']]
    files = []
    for asset_name in assets:
        if asset_name.startswith('@'):
            # '@bundle' -> all URLs of the named assets bundle
            files.extend(current_app.extensions.frasco_assets.env[asset_name[1:]].urls())
        else:
            files.append(asset_name)
    sw = "\n".join([
        'const CACHE_NAME = "%s";' % cache_name,
        'const CACHE_DOMAINS = %s;' % json.dumps(domains),
        'const CACHE_FILES = %s;' % json.dumps(files),
        'const CACHE_DYNAMIC_URLS = %s' % json.dumps(dynamic_urls or []),
        # NOTE(review): when offline_fallback is None this emits the JS string
        # "None" rather than null -- confirm the template treats that as unset.
        'const CACHE_OFFLINE_FALLBACK = "%s";' % offline_fallback,
        'const CACHE_OFFLINE_FALLBACK_IGNORE_PATHS = %s;' % json.dumps(offline_fallback_ignore_paths or [])
    ])
    with open(template_filename) as f:
        sw += f.read()
    return sw
def generate_cache_service_worker_response(*args, **kwargs):
    """Like generate_cache_service_worker() but returns a (body, headers) pair
    suitable as a Flask response, with caching disabled."""
    headers = {
        'Content-type': 'text/javascript',
        'Cache-control': 'no-cache',
        'Expires': '0',
    }
    body = generate_cache_service_worker(*args, **kwargs)
    return body, headers
def create_cache_service_worker_route(app, *args, **kwargs):
    """Register a route serving the generated cache service worker on *app*.

    The URL defaults to /cache-sw.js (overridable via the
    CACHE_SERVICE_WORKER_URL config key); the generated response is memoized
    on the frasco_assets extension state.
    """
    require_extension('frasco_assets', app)

    @app.route(app.config.get('CACHE_SERVICE_WORKER_URL', '/cache-sw.js'))
    def cache_worker():
        state = get_extension_state('frasco_assets')
        # build once, then reuse the cached (body, headers) pair
        sw = getattr(state, 'cache_service_worker', None)
        if not sw:
            sw = generate_cache_service_worker_response(*args, **kwargs)
            setattr(state, 'cache_service_worker', sw)
        return sw
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/assets/sw_cache.py",
"copies": "1",
"size": "1966",
"license": "mit",
"hash": -3231289928819499000,
"line_mean": 36.8076923077,
"line_max": 107,
"alpha_frac": 0.6317395727,
"autogenerated": false,
"ratio": 3.5107142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4642453858414286,
"avg_score": null,
"num_lines": null
} |
from frasco import abort
import operator
def Q(**filters):
    """Build a group of (field, value) filter pairs from keyword arguments."""
    return filters.items()
def and_(*clauses):
    """Combine filter clauses with a logical AND."""
    return {"$and": clauses}
def or_(*clauses):
    """Combine filter clauses with a logical OR."""
    return {"$or": clauses}
class QueryError(Exception):
    """Base class for errors raised while executing a Query."""
    pass
class MultipleResultError(QueryError):
    """Raised when a single-result query matches more than one row."""
    pass
class NoResultError(QueryError):
    """Raised when a single-result query matches no row."""
    pass
class Query(object):
    """Chainable query builder executed by a pluggable storage *backend*.

    Chaining methods (select/filter/order_by/offset/limit) each return a new
    Query via clone(); terminal methods (all/first/one/count/update/delete)
    delegate to the backend.
    """
    ASC = "ASC"
    DESC = "DESC"

    def __init__(self, model, backend):
        self.model = model
        self.backend = backend
        self._fields = set()
        self._filters = []
        self._order_by = []
        self._offset = None
        self._limit = None

    def get(self, id):
        """Return the object with id *id*, or None."""
        return self.backend.find_by_id(self.model, id)

    def get_or_404(self, id):
        """Like get() but aborts with a 404 when not found."""
        obj = self.backend.find_by_id(self.model, id)
        if obj is None:
            abort(404)
        return obj

    def select(self, *args):
        """Return a new query restricted to the given fields."""
        return self.clone(_fields=args)

    def filter(self, *grouped_filters, **filters):
        """Return a new query with extra filters (groups and/or field=value)."""
        q = self.clone()
        q._filters.extend(grouped_filters)
        q._filters.extend(filters.items())
        return q

    def order_by(self, field, direction=None):
        """Return a new query ordered by *field*.

        *field* may be None (clears the ordering), a comma-separated string
        such as "name DESC, age", or a list of names / (name, direction)
        tuples; *direction* is the default when none is embedded.
        """
        q = self.clone()
        if field is None:
            q._order_by = []
            return q
        if not isinstance(field, (list, tuple)):
            field = map(str.strip, field.split(','))
        for f in field:
            d = direction or self.ASC
            if isinstance(f, tuple):
                (f, d) = f
            elif " " in f:
                # "name desc" -> ("name", "desc")
                (f, d) = f.rsplit(" ", 1)
            q._order_by.append((f, d.upper()))
        return q

    def offset(self, offset):
        """Return a new query skipping the first *offset* rows."""
        return self.clone(_offset=offset)

    def limit(self, limit):
        """Return a new query capped at *limit* rows."""
        return self.clone(_limit=limit)

    def clone(self, **overrides):
        """Return a copy of this query, shallow-copying mutable state."""
        attr_to_clone = ('_fields', '_filters', '_order_by', '_offset', '_limit')
        data = {}
        for attr in attr_to_clone:
            v = getattr(self, attr)
            if isinstance(v, dict):
                data[attr] = dict(**v)
            elif isinstance(v, list):
                data[attr] = list(v)
            else:
                data[attr] = v
        data.update(overrides)
        q = self.__class__(self.model, self.backend)
        q.__dict__.update(data)
        return q

    def all(self):
        """Execute the query and return all matching objects."""
        return self.backend.find_all(self)

    def first(self):
        """Execute the query and return the first match, or None."""
        return self.backend.find_first(self)

    def first_or_404(self):
        """Like first() but aborts with a 404 when nothing matches."""
        obj = self.first()
        if obj is None:
            abort(404)
        return obj

    def one(self):
        """Return exactly one match (backend may raise No/MultipleResultError)."""
        return self.backend.find_one(self)

    def count(self):
        """Return the number of matching rows."""
        return self.backend.count(self)

    def update(self, data):
        """Apply *data* to every matching row via the backend."""
        return self.backend.update(self, data)

    def delete(self):
        """Delete every matching row via the backend."""
        return self.backend.delete(self)

    def for_json(self):
        """Return a dict describing this query.

        NOTE(review): model.__class__.__name__ yields the metaclass name when
        model is a class (possibly should be model.__name__), and _fields is a
        set, which is not directly JSON-serializable -- confirm with callers.
        """
        return {"model": self.model.__class__.__name__,
                "fields": self._fields,
                "filters": self._filters,
                "order_by": self._order_by,
                "offset": self._offset,
                "limit": self._limit}

    def __iter__(self):
        return iter(self.all())

    def __len__(self):
        return self.count()

    def __repr__(self):
        return "Query(fields=%s, filters=%s, order_by=%s, limit=%s, offset=%s)" %\
            (self._fields, self._filters, self._order_by, self._limit, self._offset)
# Operator suffixes understood in "field__op" filter expressions.
known_operators = ('eq', 'ne', 'lt', 'lte', 'gt', 'gte', 'in', 'nin', 'contains',
                   'incr', 'push')

# Subset of operators that map directly to Python comparison callables.
operators_mapping = {
    'eq': operator.eq,
    'ne': operator.ne,
    'lt': operator.lt,
    'lte': operator.le,
    'gt': operator.gt,
    'gte': operator.ge
}
def split_field_operator(field, check_operator=True, with_python_operator=False):
    """Split a "field__op" expression into field name and operator.

    Defaults to 'eq' when no "__" suffix is present. Raises QueryError for
    unknown operators unless check_operator is False. With
    with_python_operator, also returns the matching callable from
    operators_mapping (None when there is no Python equivalent).
    """
    # local renamed from `operator` to avoid shadowing the operator module
    op = 'eq'
    if '__' in field:
        field, op = field.split('__', 1)
        if check_operator and op not in known_operators:
            raise QueryError("Unknown operator '%s'" % op)
    if with_python_operator:
        return field, op, operators_mapping.get(op)
    return field, op
"repo_name": "frascoweb/frasco-models",
"path": "frasco_models/query.py",
"copies": "1",
"size": "4072",
"license": "mit",
"hash": -1762320416871391500,
"line_mean": 24.1419753086,
"line_max": 84,
"alpha_frac": 0.5370825147,
"autogenerated": false,
"ratio": 3.7255260750228727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9759557361236186,
"avg_score": 0.0006102456973372486,
"num_lines": 162
} |
from frasco import Blueprint, current_context, pass_feature, ActionsView, hook, current_app, abort
import inflection
class AdminView(ActionsView):
    """ActionsView variant for admin pages.

    Understands extra admin_* options (title, description, sidebar menu
    entry) and exposes the section title/description in the template context.
    """

    def __init__(self, *args, **kwargs):
        # admin-specific options are popped before delegating to ActionsView
        self.title = kwargs.pop("admin_title", None)
        self.description = kwargs.pop("admin_desc", None)
        self.sidebar_menu = kwargs.pop("admin_menu", None)
        self.sidebar_menu_icon = kwargs.pop("admin_menu_icon", None)
        super(AdminView, self).__init__(*args, **kwargs)

    def register(self, target):
        """Register the view and, when configured, add it to the admin sidebar menu."""
        if self.sidebar_menu:
            endpoint = self.name
            if isinstance(target, Blueprint):
                # endpoints on a blueprint are namespaced with the blueprint name
                endpoint = target.name + "." + self.name
            current_app.features.menu["admin"].add_child(endpoint, self.sidebar_menu,
                endpoint, icon=self.sidebar_menu_icon)
        super(AdminView, self).register(target)

    def dispatch_request(self, *args, **kwargs):
        """Populate the admin section title/description before dispatching."""
        current_context["admin_section_title"] = self.title or inflection.humanize(self.name)
        current_context["admin_section_desc"] = self.description
        return super(AdminView, self).dispatch_request(*args, **kwargs)
class AdminBlueprint(Blueprint):
    """Blueprint whose views are AdminView and which enforces admin access."""

    view_class = AdminView

    def __init__(self, *args, **kwargs):
        super(AdminBlueprint, self).__init__(*args, **kwargs)

    def is_user_allowed(self, user):
        """Extension hook: subclasses may restrict access further. Defaults to True."""
        return True

    @hook('before_request')
    @pass_feature("admin", "users")
    def init_admin(self, admin, users):
        """Require a logged-in admin user; 404 otherwise (hides the admin area)."""
        users.login_required()
        if not admin.is_admin(users.current) or not self.is_user_allowed(users.current):
            abort(404)
| {
"repo_name": "frascoweb/frasco-admin",
"path": "frasco_admin/view.py",
"copies": "1",
"size": "1627",
"license": "mit",
"hash": 7650426167595637000,
"line_mean": 37.7380952381,
"line_max": 98,
"alpha_frac": 0.6379840197,
"autogenerated": false,
"ratio": 3.7749419953596286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4912926015059629,
"avg_score": null,
"num_lines": null
} |
from frasco import Blueprint, redirect, url_for, request, flash
from werkzeug import url_quote
def create_blueprint(app):
    """Build the blueprint handling Trello OAuth login for the users feature."""
    bp = Blueprint("trello_login", __name__)
    feature = app.features.trello
    users = app.features.users

    @bp.route('/login/trello')
    def login():
        # send the user to Trello's authorization page; Trello redirects back
        # to our callback with the "next" target preserved
        callback_url = url_for('.callback', next=request.args.get('next'), _external=True)
        return feature.api.authorize(callback_url,
            name=feature.options["app_name"],
            scope=feature.options["scope"],
            expiration=feature.options["expiration"])

    @bp.route('/login/trello/callback')
    def callback():
        resp = feature.api.authorized_response()
        if resp is None:
            # the user denied the authorization request
            flash(feature.options["user_denied_login_message"], "error")
            return redirect(url_for("users.login"))
        # fetch the Trello profile to identify/create the local account
        me = feature.api.get('members/me',
            headers={'Accept': 'application/json'},
            data={'key': feature.options['api_key'], 'token': resp['oauth_token']},
            token=resp['oauth_token'])
        attrs = {"trello_oauth_token": resp['oauth_token'],
                 "trello_oauth_token_secret": resp['oauth_token_secret'],
                 "trello_user_id": me.data['id'],
                 "trello_username": me.data['username']}
        defaults = {}
        if feature.options["use_username"] and users.options['email_column'] != users.options['username_column']:
            defaults[users.options["username_column"]] = me.data['username']
        return users.oauth_login("trello", "trello_user_id", me.data['id'], attrs, defaults)

    return bp
"repo_name": "frascoweb/frasco-trello",
"path": "frasco_trello/blueprint.py",
"copies": "1",
"size": "1612",
"license": "mit",
"hash": -821899939538414700,
"line_mean": 38.3414634146,
"line_max": 113,
"alpha_frac": 0.6048387097,
"autogenerated": false,
"ratio": 3.8380952380952382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49429339477952383,
"avg_score": null,
"num_lines": null
} |
from frasco import Blueprint, redirect, url_for, request, flash
def create_blueprint(app):
    """Build the blueprint handling Twitter OAuth login for the users feature."""
    bp = Blueprint("twitter_login", __name__)
    feature = app.features.twitter
    users = app.features.users

    @bp.route('/login/twitter')
    def login():
        # redirect to Twitter's authorization page, preserving "next"
        callback_url = url_for('.callback', next=request.args.get('next'), _external=True)
        return feature.api.authorize(callback=callback_url)

    @bp.route('/login/twitter/callback')
    def callback():
        resp = feature.api.authorized_response()
        if resp is None:
            # the user denied the authorization request
            flash(feature.options["user_denied_login_message"], "error")
            return redirect(url_for("users.login"))
        attrs = {"twitter_screenname": resp['screen_name'],
                 "twitter_oauth_token": resp['oauth_token'],
                 "twitter_oauth_token_secret": resp['oauth_token_secret']}
        defaults = {}
        if feature.options["use_screenname_as_username"]:
            defaults[users.options["username_column"]] = resp["screen_name"]
        return users.oauth_login("twitter", "twitter_screenname", resp["screen_name"], attrs, defaults)

    return bp
"repo_name": "frascoweb/frasco-twitter",
"path": "frasco_twitter/blueprint.py",
"copies": "1",
"size": "1141",
"license": "mit",
"hash": -6060923051783742000,
"line_mean": 35.8387096774,
"line_max": 103,
"alpha_frac": 0.6292725679,
"autogenerated": false,
"ratio": 4.003508771929824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003146047500886211,
"num_lines": 31
} |
from frasco import Blueprint, session, redirect, url_for, request, flash
def create_blueprint(app):
    """Build the blueprint handling Facebook OAuth login for the users feature."""
    bp = Blueprint("facebook_login", __name__, static_folder="static")
    feature = app.features.facebook
    users = app.features.users

    @bp.route('/login/facebook')
    def login():
        # redirect to Facebook's authorization page, preserving "next"
        callback_url = url_for('.callback', next=request.args.get('next'), _external=True)
        return feature.api.authorize(callback=callback_url)

    @bp.route('/login/facebook/callback')
    def callback():
        resp = feature.api.authorized_response()
        if resp is None:
            # the user denied the authorization request
            flash(feature.options["user_denied_login_message"], "error")
            return redirect(url_for("users.login"))
        # fetch the Facebook profile to identify/create the local account
        me = feature.api.get("/me", token=[resp['access_token']])
        attrs = {"facebook_access_token": resp['access_token'],
                 "facebook_token_expires": resp['expires'],
                 "facebook_id": str(me.data["id"]),
                 "facebook_name": me.data["name"],
                 "facebook_email": me.data["email"]}
        defaults = {}
        if feature.options["use_email"]:
            defaults[users.options["email_column"]] = me.data["email"]
        if feature.options["use_name_as_username"] and users.options['email_column'] != users.options['username_column']:
            defaults[users.options["username_column"]] = me.data["name"]
        # copy any extra requested profile fields into the signup defaults
        for k in feature.options["save_data"]:
            if k in me.data:
                defaults[k] = me.data[k]
        return users.oauth_login("facebook", "facebook_id", str(me.data["id"]), attrs, defaults)

    return bp
"repo_name": "frascoweb/frasco-facebook",
"path": "frasco_facebook/blueprint.py",
"copies": "1",
"size": "1610",
"license": "mit",
"hash": 7215520231077575000,
"line_mean": 40.3076923077,
"line_max": 121,
"alpha_frac": 0.599378882,
"autogenerated": false,
"ratio": 3.936430317848411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027286689121558665,
"num_lines": 39
} |
from frasco import (Blueprint, with_actions, redirect, request, current_app,\
url_for, flash, current_context, pass_feature, session,\
populate_obj, ContextExitException)
from frasco.expression import compile_expr
import datetime
import requests
bp = Blueprint("users", __name__, template_folder="templates")
def make_redirect_url(value):
    """Treat value as a raw URL when absolute, otherwise as an endpoint name."""
    is_absolute = value.startswith(('http://', 'https://'))
    return value if is_absolute else url_for(value)
@bp.view('/login', template="users/login.html")
@with_actions([{"form": "LoginForm"}])
@with_actions("form_submitted", ["users.login"])
@pass_feature("users")
def login(users):
    """Login page; the attached actions perform the actual authentication on POST."""
    if not users.options["allow_login"]:
        if users.options["login_disallowed_message"]:
            flash(users.options["login_disallowed_message"], "error")
        return redirect(url_for(users.options.get("redirect_after_login_disallowed") or\
            "index", next=request.args.get("next")))
    redirect_url = request.args.get("next") or make_redirect_url(users.options["redirect_after_login"])
    if users.logged_in() or request.method == "POST":
        # force a password reset when the password expired or a reset is flagged
        if (users.options['expire_password_after'] and users.current.last_password_change_at and \
                (datetime.datetime.utcnow() - users.current.last_password_change_at).total_seconds() > users.options['expire_password_after']) \
                or users.current.must_reset_password_at_login:
            token = users._gen_reset_password_token(users.current)
            users.logout()
            flash(users.options['password_expired_message'], 'error')
            return redirect(url_for('.reset_password', token=token))
        return redirect(redirect_url)
@bp.view('/logout')
@with_actions(["users.logout"])
def logout():
    """Log the user out (done by the action) then redirect if configured."""
    redirect_to = current_app.features.users.options["redirect_after_logout"]
    if redirect_to:
        return redirect(make_redirect_url(redirect_to))
@bp.view('/signup', template="users/signup.html")
@with_actions([
    {"form": {"name": "SignupForm", "obj": "$session[oauth_user_defaults]", "validate_on_submit": False}}])
@pass_feature("users")
def signup(users):
    """Signup page handling both direct and OAuth-initiated registrations."""
    if request.method == "GET" and not "oauth" in request.args:
        # signup was accessed directly so we ensure that oauth
        # params stored in session are cleaned. this can happen
        # if a user started to login using an oauth provider but
        # didn't complete the process
        session.pop("oauth_user_defaults", None)
        session.pop("oauth_user_attrs", None)
    current_context["must_provide_password"] = "oauth_user_attrs" not in session \
        or users.options["oauth_must_provide_password"]
    redirect_url = request.args.get("next") or make_redirect_url(users.options["redirect_after_signup"])
    if users.logged_in():
        return redirect(redirect_url)
    allow_signup = users.options["allow_signup"]
    if users.options["oauth_signup_only"] and "oauth_user_attrs" not in session:
        allow_signup = False
    if not allow_signup:
        if users.options["signup_disallowed_message"]:
            flash(users.options["signup_disallowed_message"], "error")
        return redirect(url_for(users.options.get("redirect_after_signup_disallowed") or\
            "users.login", next=request.args.get("next")))
    if current_context["form"].is_submitted() and current_context["form"].validate():
        # server-side reCAPTCHA verification (skipped in debug/testing)
        if users.options['recaptcha_secret'] and not current_app.debug and not current_app.testing:
            if 'g-recaptcha-response' not in request.form:
                return redirect(url_for('users.signup', next=request.args.get('next')))
            r = requests.post('https://www.google.com/recaptcha/api/siteverify', data={
                'secret': users.options['recaptcha_secret'],
                'response': request.form['g-recaptcha-response'],
                'remote_ip': request.remote_addr
            })
            if r.status_code != 200 or not r.json().get('success'):
                if users.options['recaptcha_fail_message']:
                    flash(users.options['recaptcha_fail_message'], 'error')
                return redirect(url_for('users.signup', next=request.args.get('next')))
        user = users.model()
        if "oauth_user_defaults" in session:
            # pre-fill the new user with attributes coming from the oauth provider
            populate_obj(user, session["oauth_user_defaults"] or {})
        # optional invitation-code gate
        if users.options['require_code_on_signup'] and 'code' in current_context['form'] and\
                not users.check_signup_code(current_context['form'].code.data):
            if users.options['bad_signup_code_message']:
                flash(users.options['bad_signup_code_message'], 'error')
            return redirect(url_for('users.signup', next=request.args.get('next')))
        users.signup(user, form=current_context["form"],
            must_provide_password=current_context["must_provide_password"], **session.get("oauth_user_attrs", {}))
        session.pop("oauth_user_defaults", None)
        session.pop("oauth_user_attrs", None)
        return redirect(redirect_url)
@bp.route('/signup/oauth')
@pass_feature("users", "models")
def oauth_signup(users, models):
    """Complete a signup initiated through an OAuth provider.

    Falls back to the regular signup form when OAuth data is missing, the
    feature requires an explicit signup step, or auto-signup fails validation.
    """
    if "oauth_user_attrs" not in session or users.options["oauth_must_signup"]:
        oauth = 1 if users.options["oauth_must_signup"] else 0
        return redirect(url_for(".signup", oauth=oauth, next=request.args.get("next")))
    signup_url = url_for(".signup", oauth=1, next=request.args.get("next"))
    user = users.model()
    populate_obj(user, session["oauth_user_defaults"] or {})
    if not users.validate_user(user, flash_messages=False, raise_error=False):
        return redirect(signup_url)
    try:
        users.signup(user, must_provide_password=False, **session["oauth_user_attrs"])
    except ContextExitException:
        # an action aborted the signup flow; let the user complete it manually
        return redirect(signup_url)
    del session["oauth_user_defaults"]
    del session["oauth_user_attrs"]
    return redirect(request.args.get("next") or make_redirect_url(users.options["redirect_after_login"]))
@bp.view('/login/reset-password', template="users/send_reset_password.html")
@with_actions([{"form": "SendResetPasswordForm"}])
@with_actions("form_submitted", ["users.gen_reset_password_token"])
def send_reset_password():
    """Page requesting a password reset; the action generates and sends the token."""
    msg = current_app.features.users.options["reset_password_token_success_message"]
    redirect_to = current_app.features.users.options["redirect_after_reset_password_token"]
    if request.method == "POST":
        if msg:
            flash(msg, "success")
        if redirect_to:
            return redirect(make_redirect_url(redirect_to))
@bp.view('/login/reset-password/<token>', methods=('GET', 'POST'), template="users/reset_password.html")
@with_actions([{"form": "ResetPasswordForm"}])
@with_actions("form_submitted", ["users.reset_password"])
def reset_password(token):
    """Page setting a new password from a reset token; the action applies it."""
    msg = current_app.features.users.options["reset_password_success_message"]
    redirect_to = current_app.features.users.options["redirect_after_reset_password"]
    if request.method == "POST":
        if msg:
            flash(msg, "success")
        if redirect_to:
            return redirect(make_redirect_url(redirect_to))
| {
"repo_name": "frascoweb/frasco-users",
"path": "frasco_users/blueprint.py",
"copies": "1",
"size": "7144",
"license": "mit",
"hash": -6895259469417876000,
"line_mean": 45.6928104575,
"line_max": 138,
"alpha_frac": 0.6562150056,
"autogenerated": false,
"ratio": 3.7481636935991607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9887853136659155,
"avg_score": 0.0033051125080011366,
"num_lines": 153
} |
from frasco import current_app, json
from frasco.utils import unknown_value
from frasco.templating import jinja_fragment_extension
import re
import inspect
import functools
# Public API of this module.
__all__ = ('CacheFragmentExtension', 'PartialObject', 'redis_get_set', 'redis_get_set_as_json',
           'redis_cached_function', 'redis_cached_function_as_json', 'unknown_value', 'redis_cached_property',
           'redis_cached_property_as_json', 'redis_cached_method', 'redis_cached_method_as_json', 'RedisHash',
           'JSONRedisHash', 'RedisList', 'JSONRedisList', 'RedisSet', 'JSONRedisSet', 'build_object_key')
@jinja_fragment_extension("cache")
def CacheFragmentExtension(name=None, caller=None, timeout=None, key=None, model=None, facets=None, ns=None):
    """Jinja {% cache %} fragment: render once and store the result in redis.

    The cache key can derive from a model instance (its cache_key), an
    explicit key, a namespace and extra facets; `caller` renders the fragment
    body on a cache miss.
    """
    conn = current_app.features.redis.connection
    if model:
        if not getattr(model, 'cache_key', None):
            current_app.features.redis.update_model_cache_key(model)
        key = model.cache_key
    if not facets:
        facets = []
    if name:
        facets.insert(0, name)
    key = current_app.features.redis.make_request_cache_key(key, ns, facets)
    rv = conn.get(key)
    if rv is None:
        # miss: render the fragment and store it with the configured timeout
        timeout = timeout or current_app.features.redis.options["fragment_cache_timeout"]\
            or current_app.features.redis.options["view_cache_timeout"]
        rv = caller()
        conn.setex(key, timeout, rv)
    return rv
class PartialObject(object):
    """Lazy proxy around an object produced by a loader callable.

    Attribute reads hit `cached_attrs` first; any other access loads the real
    object (once) and delegates to it. Writing an attribute drops any cached
    copy and writes through to the real object.
    """

    def __init__(self, loader, cached_attrs=None):
        # object.__setattr__ is used to bypass our own __setattr__ override
        object.__setattr__(self, '_loader', loader)
        object.__setattr__(self, "_obj", None)
        object.__setattr__(self, "_cached_attrs", dict(cached_attrs or {}))

    def _load(self):
        """Instantiate the proxied object on first use and return it."""
        # BUG FIX: was `self.loader()`; no such attribute exists, so the
        # lookup fell through to __getattr__, which calls _load() again,
        # recursing forever. The callable is stored under `_loader`.
        # NOTE(review): a falsy loaded object is re-loaded on every access
        # (`if not self._obj`) — confirm loaders never return falsy values.
        if not self._obj:
            object.__setattr__(self, "_obj", self._loader())
        return self._obj

    def __getattr__(self, name):
        if name in self._cached_attrs:
            return self._cached_attrs[name]
        return getattr(self._load(), name)

    def __setattr__(self, name, value):
        if name in self._cached_attrs:
            # invalidate the cached copy so future reads hit the real object
            del self._cached_attrs[name]
        setattr(self._load(), name, value)
def redis_get_set(key, callback, ttl=None, coerce=None, serializer=None, redis=None):
    """Return the value stored at key, computing and storing it on a miss.

    On a hit the raw value is returned after optional `coerce`; on a miss
    `callback()` is invoked, its result stored (optionally serialized, with
    an optional ttl) and returned unserialized.
    """
    conn = redis or current_app.features.redis.connection
    if conn.exists(key):
        cached = conn.get(key)
        return coerce(cached) if coerce else cached
    fresh = callback()
    stored = serializer(fresh) if serializer else fresh
    if ttl:
        conn.setex(key, ttl, stored)
    else:
        conn.set(key, stored)
    return fresh
def redis_get_set_as_json(key, callback, **kwargs):
    """redis_get_set() with values serialized to and parsed from JSON."""
    kwargs.update(serializer=json.dumps, coerce=json.loads)
    return redis_get_set(key, callback, **kwargs)
def build_object_key(obj=None, name=None, key=None, at_values=None, values=None, super_key=None):
    """Build a redis cache key from a template and an object's attributes.

    The template may contain `{attr}` placeholders (filled from obj),
    `{@name}` placeholders (filled from at_values, typically call arguments)
    and the special `{__name__}` / `{__super__}` placeholders. Placeholder
    values exposing __redis_cache_id__() are replaced by that id.
    """
    cls = None
    if obj:
        super_key = getattr(obj, '__redis_cache_key__', None)
        if inspect.isclass(obj):
            cls = obj
        else:
            cls = obj.__class__
    elif not key:
        raise ValueError('obj or key is needed for build_object_key()')
    if key and '{__super__}' in key and super_key is not None:
        key = key.replace('{__super__}', super_key)
    elif not key and super_key:
        key = super_key
    elif not key:
        # default template: "ClassName:{__name__}"
        key = '%s:{__name__}' % cls.__name__
    if name is None and cls:
        name = cls.__name__
    if values is None:
        values = {}
    else:
        values = dict(**values)
    # resolve every placeholder actually present in the key template
    for attr in re.findall(r'\{(@?[a-z0-9_]+)[^}]*\}', key, re.I):
        value = unknown_value
        if attr == '__name__' and name is not None:
            value = name
        elif attr.startswith('@') and at_values:
            value = at_values.get(attr[1:], '')
        elif obj:
            value = getattr(obj, attr)
        if value is not unknown_value:
            # objects can expose a stable cache id via __redis_cache_id__()
            cache_id = getattr(value, '__redis_cache_id__', None)
            if cache_id:
                value = cache_id()
            values[attr] = value
    return key.format(**values)
def redis_cached_function(key, **opts):
    """Decorator caching the function's result in redis under a templated key.

    The call arguments (via inspect.getcallargs) are made available to the
    key template; extra options are forwarded to redis_get_set().
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            k = build_object_key(None, func.__name__, key, values=inspect.getcallargs(func, *args, **kwargs))
            return redis_get_set(k, lambda: func(*args, **kwargs), **opts)
        return wrapper
    return decorator
def redis_cached_function_as_json(key, **opts):
    """redis_cached_function() preconfigured with JSON serialization."""
    opts.update(serializer=json.dumps, coerce=json.loads)
    return redis_cached_function(key, **opts)
class RedisCachedAttribute(object):
    """Base descriptor for attributes whose value is cached in redis.

    Handles (de)serialization, coercion and TTL management; subclasses
    implement the property/method access protocols.
    """

    def __init__(self, func, redis=None, key=None, ttl=None, coerce=None,\
            serializer=None, name=None):
        self.func = func
        self.__name__ = func.__name__
        self.__doc__ = func.__doc__
        self._redis = redis       # explicit connection; falls back to the app's
        self.key = key            # key template (see build_object_key)
        self.ttl = ttl
        self.coerce = coerce
        self.serializer = serializer  # object providing dumps()/loads()
        self.name = name or self.__name__
        self.cached_property_name = self.__name__ + '_cached'
        # runtime flags the wrapped function may toggle to influence caching
        self.cache_disabled = False
        self.cache_ignore_current = False
        self.cache_current_ttl = None

    @property
    def redis(self):
        """The explicit connection, or the current app's redis connection."""
        return self._redis or current_app.features.redis.connection

    def _set_cached_value(self, key, value, default_ttl=None):
        """Store value under key, honoring the per-call then default TTL."""
        if self.serializer:
            value = self.serializer.dumps(value)
        ttl = self.cache_current_ttl
        if ttl is None:
            ttl = default_ttl
        if ttl is not None:
            self.redis.setex(key, ttl, value)
        else:
            self.redis.set(key, value)

    def _get_cached_value(self, key):
        """Return the deserialized cached value, or unknown_value on a miss."""
        if not self.redis.exists(key):
            return unknown_value
        value = self.redis.get(key)
        if value is None:
            return None
        if self.serializer:
            value = self.serializer.loads(value)
        if self.coerce:
            value = self.coerce(value)
        return value

    def _call_func(self, obj, *args, **kwargs):
        """Invoke the wrapped function after resetting per-call cache flags."""
        self.cache_ignore_current = False
        self.cache_current_ttl = self.ttl
        return self.func(obj, *args, **kwargs)
class RedisCachedProperty(RedisCachedAttribute):
    """Property descriptor caching its computed value in redis.

    Values are additionally memoized on the instance (under `<name>_cached`)
    for the object's lifetime; setting or deleting the property invalidates
    the redis key.
    """

    def __init__(self, func, fset=None, fdel=None, **kwargs):
        super(RedisCachedProperty, self).__init__(func, **kwargs)
        self.fset = fset
        self.fdel = fdel

    def __get__(self, obj, cls):
        if obj is None:
            return self
        value = obj.__dict__.get(self.cached_property_name, unknown_value)
        if value is unknown_value:
            key = None
            if not self.cache_disabled:
                # cache errors are logged and treated as misses
                try:
                    key = self.build_key(obj)
                    value = self._get_cached_value(key)
                except Exception as e:
                    current_app.log_exception(e)
                    value = unknown_value
            if value is unknown_value:
                value = self.get_fresh(obj)
                if not self.cache_disabled and not self.cache_ignore_current and key:
                    self._set_cached_value(key, value,
                        getattr(obj, '__redis_cache_ttl__', None))
            obj.__dict__[self.cached_property_name] = value
        return value

    def __set__(self, obj, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        self.fset(obj, value)
        self.invalidate(obj)

    def __delete__(self, obj):
        if self.fdel is None:
            raise AttributeError("can't delete attribute")
        self.fdel(obj)
        self.invalidate(obj)

    def build_key(self, obj):
        """Build the redis key for this property on obj."""
        return build_object_key(obj, self.name, self.key)

    def get_cached(self, obj):
        """Return the cached value only (unknown_value on miss or key error)."""
        try:
            key = self.build_key(obj)
        except Exception as e:
            current_app.log_exception(e)
            return unknown_value
        return self._get_cached_value(key)

    def get_fresh(self, obj):
        """Compute the value by calling the wrapped function."""
        return self._call_func(obj)

    def require_fresh(self, obj):
        """Drop the per-instance memoized value so the next read recomputes."""
        obj.__dict__.pop(self.cached_property_name, None)

    def invalidate(self, obj):
        """Delete the redis key for this property on obj."""
        try:
            key = self.build_key(obj)
        except Exception as e:
            current_app.log_exception(e)
            return
        self.redis.delete(key)

    def setter(self, fset):
        """Register the setter callable (property-style decorator)."""
        self.fset = fset
        return self

    def deleter(self, fdel):
        """Register the deleter callable (property-style decorator)."""
        self.fdel = fdel
        return self
def redis_cached_property(fget=None, **kwargs):
    """Create a RedisCachedProperty; usable bare or parameterized with options."""
    def build(f):
        return RedisCachedProperty(f, **kwargs)
    return build(fget) if fget else build
def redis_cached_property_as_json(fget=None, **kwargs):
    """redis_cached_property() preconfigured with JSON serialization."""
    kwargs.update(serializer=json)
    return redis_cached_property(fget, **kwargs)
class RedisCachedMethod(RedisCachedAttribute):
    """Method descriptor caching call results in redis keyed on the arguments.

    NOTE(review): __get__ stores the bound instance on the descriptor itself
    (self.obj), which is shared across instances and not thread-safe; callers
    can pass __obj__ explicitly to target a specific instance — confirm this
    is the intended design.
    """

    def __get__(self, obj, cls=None):
        self.obj = obj
        return self

    def __call__(self, *args, **kwargs):
        obj = kwargs.pop('__obj__', self.obj)
        value = unknown_value
        if not self.cache_disabled:
            key = None
            # cache errors are logged and treated as misses
            try:
                key = self.build_key(args, kwargs, obj)
                value = self._get_cached_value(key)
            except Exception as e:
                current_app.log_exception(e)
                value = unknown_value
        if value is unknown_value:
            value = self._call_func(obj, *args, **kwargs)
            if not self.cache_disabled and not self.cache_ignore_current and key:
                # NOTE(review): reads the TTL from self.obj, not the obj
                # resolved above — these can differ when __obj__ is passed.
                self._set_cached_value(key, value,
                    getattr(self.obj, '__redis_cache_ttl__', None))
        return value

    def cached(self, *args, **kwargs):
        """Return the cached value only (unknown_value on miss or key error)."""
        obj = kwargs.pop('__obj__', self.obj)
        try:
            key = self.build_key(args, kwargs, obj)
        except Exception as e:
            current_app.log_exception(e)
            return unknown_value
        return self._get_cached_value(key)

    def fresh(self, *args, **kwargs):
        """Call the wrapped method, bypassing the cache entirely."""
        obj = kwargs.pop('__obj__', self.obj)
        return self._call_func(obj, *args, **kwargs)

    def invalidate(self, *args, **kwargs):
        """Delete the redis key matching the given call arguments."""
        obj = kwargs.pop('__obj__', self.obj)
        try:
            key = self.build_key(args, kwargs, obj)
        except Exception as e:
            current_app.log_exception(e)
            return
        self.redis.delete(key)

    def build_key(self, args=None, kwargs=None, obj=None):
        """Build the redis key, exposing call arguments as {@arg} placeholders."""
        if not obj:
            obj = self.obj
        if not args:
            args = []
        if not kwargs:
            kwargs = {}
        at_values = inspect.getcallargs(self.func, obj, *args, **kwargs)
        return build_object_key(obj, self.name, self.key, at_values)
def redis_cached_method(func=None, **kwargs):
    """Create a RedisCachedMethod; usable bare or parameterized with options."""
    def build(f):
        return RedisCachedMethod(f, **kwargs)
    return build(func) if func else build
def redis_cached_method_as_json(func=None, **kwargs):
    """redis_cached_method() preconfigured with JSON serialization."""
    kwargs.update(serializer=json)
    return redis_cached_method(func, **kwargs)
class RedisObject(object):
    """Base class for redis-backed containers bound to a single key.

    `serializer` (an object with dumps/loads) and `coerce` (a callable)
    control how values are converted when written to / read from redis.
    """

    def __init__(self, key, serializer=None, coerce=None, redis=None):
        self.key = key
        self.serializer = serializer
        self.coerce = coerce
        self.redis = redis or current_app.features.redis.connection

    def _to_redis(self, value):
        """Serialize a value before storing it."""
        return self.serializer.dumps(value) if self.serializer else value

    def _from_redis(self, value):
        """Deserialize then coerce a value read from redis."""
        if self.serializer:
            value = self.serializer.loads(value)
        return self.coerce(value) if self.coerce else value

    def clear(self):
        """Remove the backing key entirely."""
        self.redis.delete(self.key)

    def expire(self, ttl):
        """Set a time-to-live on the backing key."""
        self.redis.expire(self.key, ttl)
class RedisHash(RedisObject):
    """A redis hash exposing a Python dict-like interface.

    NOTE: relies on dict.iteritems(), i.e. this module targets Python 2.
    """

    def __setitem__(self, key, value):
        self.redis.hset(self.key, key, self._to_redis(value))

    def __getitem__(self, key):
        return self._from_redis(self.redis.hget(self.key, key))

    def __delitem__(self, key):
        return self.redis.hdel(self.key, key)

    def __contains__(self, key):
        return key in self.keys()

    def keys(self):
        return self.redis.hkeys(self.key)

    def items(self):
        """Return all entries as a real dict with deserialized values."""
        return {k: self._from_redis(v) for k, v in self.redis.hgetall(self.key).iteritems()}

    def values(self):
        return self.items().values()

    def update(self, dct):
        """Set several fields in a single pipeline round-trip."""
        pipe = self.redis.pipeline()
        for k, v in dct.iteritems():
            pipe.hset(self.key, k, self._to_redis(v))
        pipe.execute()
class JSONRedisHash(RedisHash):
    """RedisHash whose values are stored as JSON."""

    def __init__(self, key, **kwargs):
        kwargs.update(serializer=json)
        super(JSONRedisHash, self).__init__(key, **kwargs)
class RedisList(RedisObject):
    """A redis list exposing a Python list-like interface."""

    def __setitem__(self, index, value):
        self.redis.lset(self.key, index, self._to_redis(value))

    def __getitem__(self, index):
        if isinstance(index, slice):
            # BUG FIX: the original read start/stop/step from the builtin
            # `slice` type (descriptor objects, always truthy) instead of
            # the `index` slice instance, so plain slices were mishandled.
            if index.step is not None:
                # stepped slices fall back to item-by-item access
                # (range instead of xrange keeps this working on Python 3)
                return [self[i] for i in range(*index.indices(len(self)))]
            # NOTE(review): redis LRANGE stop is inclusive while a Python
            # slice stop is exclusive — confirm callers expect redis semantics.
            return [self._from_redis(v) for v in \
                self.redis.lrange(self.key, index.start or 0, index.stop or -1)]
        elif isinstance(index, int):
            return self._from_redis(self.redis.lindex(self.key, index))
        else:
            raise TypeError("Invalid argument type.")

    def __len__(self):
        return self.redis.llen(self.key)

    def __iter__(self):
        for value in self.redis.lrange(self.key, 0, -1):
            yield self._from_redis(value)

    def __contains__(self, value):
        return value in list(self)

    def append(self, value):
        """Push a value at the right end of the list."""
        self.redis.rpush(self.key, self._to_redis(value))

    def extend(self, lst):
        """Push all values of lst in a single pipeline round-trip."""
        pipe = self.redis.pipeline()
        for v in lst:
            pipe.rpush(self.key, self._to_redis(v))
        pipe.execute()

    def remove(self, value):
        # NOTE(review): recent redis-py versions expect lrem(name, count, value);
        # this two-argument call only matches older clients — verify.
        self.redis.lrem(self.key, self._to_redis(value))
class JSONRedisList(RedisList):
    """RedisList whose values are stored as JSON."""

    def __init__(self, key, **kwargs):
        kwargs.update(serializer=json)
        super(JSONRedisList, self).__init__(key, **kwargs)
class RedisSet(RedisObject):
    """A redis set exposing a Python set-like interface."""

    def __iter__(self):
        for value in self.redis.smembers(self.key):
            yield self._from_redis(value)

    def __contains__(self, value):
        # BUG FIX: redis has no `ismember` command; membership is SISMEMBER.
        # NOTE(review): value is not serialized here, unlike add()/remove() —
        # confirm whether raw comparison is intended.
        return self.redis.sismember(self.key, value)

    def add(self, value):
        self.redis.sadd(self.key, self._to_redis(value))

    def update(self, lst):
        """Add all values of lst in a single pipeline round-trip."""
        pipe = self.redis.pipeline()
        for v in lst:
            pipe.sadd(self.key, self._to_redis(v))
        pipe.execute()

    def remove(self, value):
        self.redis.srem(self.key, self._to_redis(value))

    def pop(self):
        """Remove and return a random member."""
        return self._from_redis(self.redis.spop(self.key))

    def move(self, destination, value):
        """Move value into another set (RedisSet instance or raw key name)."""
        if isinstance(destination, RedisSet):
            destination = destination.key
        # BUG FIX: was `self.destination`, an attribute that never exists
        # and raised AttributeError on every call.
        self.redis.smove(self.key, destination, self._to_redis(value))

    def diff(self, *other_keys):
        return self._cmp('sdiff', other_keys)

    def inter(self, *other_keys):
        return self._cmp('sinter', other_keys)

    def union(self, *other_keys):
        # BUG FIX: the redis-py method is `sunion`, not `union`.
        return self._cmp('sunion', other_keys)

    def _cmp(self, op, other_keys):
        """Yield deserialized members of the named set operation vs other_keys."""
        keys = []
        for k in other_keys:
            if isinstance(k, RedisSet):
                keys.append(k.key)
            else:
                keys.append(k)
        for value in getattr(self.redis, op)(self.key, *keys):
            yield self._from_redis(value)
class JSONRedisSet(RedisSet):
    """RedisSet whose values are stored as JSON."""

    def __init__(self, key, **kwargs):
        kwargs.update(serializer=json)
        super(JSONRedisSet, self).__init__(key, **kwargs)
"repo_name": "frascoweb/frasco-redis",
"path": "frasco_redis/utils.py",
"copies": "1",
"size": "15720",
"license": "mit",
"hash": -18935361817248580,
"line_mean": 30.50501002,
"line_max": 110,
"alpha_frac": 0.5786895674,
"autogenerated": false,
"ratio": 3.736629427145234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9813031871985507,
"avg_score": 0.00045742451194537206,
"num_lines": 499
} |
from frasco import current_app
from frasco.ctx import ContextStack
from contextlib import contextmanager
import logging
bulk_connection_context = ContextStack()
class BulkConnection(object):
    """Thin connection wrapper exposing send() around an arbitrary sender callable."""

    def __init__(self, sender):
        # callable invoked for each message
        self.sender = sender

    def send(self, msg):
        """Forward msg to the underlying sender."""
        self.sender(msg)
class MailProvider(object):
    """Base class for mail delivery backends.

    Subclasses must implement send(); they may override the bulk-connection
    hooks to reuse a single transport for many messages.
    """

    def __init__(self, state, options):
        self.state = state      # the mail extension state (for global options)
        self.options = options  # provider-specific options

    def send(self, msg):
        raise NotImplementedError()

    def start_bulk_connection(self):
        """Open a reusable connection; default just wraps send()."""
        return BulkConnection(self.send)

    def stop_bulk_connection(self, bulk_connection):
        """Close a connection from start_bulk_connection(); no-op by default."""
        pass

    @contextmanager
    def bulk_connection(self):
        """Context manager pushing a bulk connection onto the context stack.

        If opening the connection fails and silent_failures is enabled, the
        error is logged and a per-message fallback connection is used; in
        that case stop_bulk_connection() is skipped on exit.
        """
        failed_establish = False
        try:
            conn = self.start_bulk_connection()
        except Exception as e:
            if not self.state.options['silent_failures']:
                raise e
            current_app.log_exception(e)
            logging.getLogger('frasco.mail').warning("Failed establishing bulk connection")
            conn = BulkConnection(self.send)
            failed_establish = True
        bulk_connection_context.push(conn)
        try:
            yield conn
        finally:
            if not failed_establish:
                self.stop_bulk_connection(conn)
            bulk_connection_context.pop()
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/mail/provider.py",
"copies": "1",
"size": "1339",
"license": "mit",
"hash": -1966089178510094600,
"line_mean": 25.78,
"line_max": 91,
"alpha_frac": 0.6198655713,
"autogenerated": false,
"ratio": 4.375816993464053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5495682564764053,
"avg_score": null,
"num_lines": null
} |
from frasco import current_app
from frasco.ext import get_extension_state
from suds.client import Client as SudsClient
from suds import WebFault
import xml.etree.ElementTree as ET
import requests
import datetime
# EU member states mapped to their national currency (ISO 3166 -> ISO 4217).
EU_COUNTRIES = {
    "AT": "EUR", # Austria
    "BE": "EUR", # Belgium
    "BG": "BGN", # Bulgaria
    "DE": "EUR", # Germany
    "CY": "EUR", # Cyprus
    "CZ": "CZK", # Czech Republic
    "DK": "DKK", # Denmark
    "EE": "EUR", # Estonia
    "ES": "EUR", # Spain
    "FI": "EUR", # Finland
    "FR": "EUR", # France,
    "GR": "EUR", # Greece
    "HR": "HRK", # Croatia
    "HU": "HUF", # Hungary
    "IE": "EUR", # Ireland
    "IT": "EUR", # Italy
    "LT": "EUR", # Lithuania
    "LV": "EUR", # Latvia
    "LU": "EUR", # Luxembourg
    "MT": "EUR", # Malta
    "NL": "EUR", # Netherlands
    "PL": "PLN", # Poland
    "PT": "EUR", # Portugal
    "RO": "RON", # Romania
    "SE": "SEK", # Sweden
    "SI": "EUR", # Slovenia
    "SK": "EUR" # Slovakia
}
# Hardcoded standard VAT rates (percent) used as a fallback when the TIC
# web service is unreachable (see get_vat_rate()). These are a snapshot and
# may be outdated; the service is the authoritative source.
KNOWN_VAT_RATES = {
    "AT": 20.0, # Austria
    "BE": 21.0, # Belgium
    "BG": 20.0, # Bulgaria
    "DE": 19.0, # Germany
    "CY": 19.0, # Cyprus
    "CZ": 21.0, # Czech Republic
    "DK": 25.0, # Denmark
    "EE": 20.0, # Estonia
    "ES": 21.0, # Spain
    "FI": 24.0, # Finland
    "FR": 20.0, # France,
    "GR": 23.0, # Greece
    "HR": 25.0, # Croatia
    "HU": 27.0, # Hungary
    "IE": 23.0, # Ireland
    "IT": 22.0, # Italy
    "LT": 21.0, # Lithuania
    "LV": 21.0, # Latvia
    "LU": 15.0, # Luxembourg
    "MT": 18.0, # Malta
    "NL": 21.0, # Netherlands
    "PL": 23.0, # Poland
    "PT": 23.0, # Portugal
    "RO": 24.0, # Romania
    "SE": 25.0, # Sweden
    "SI": 22.0, # Slovenia
    "SK": 20.0 # Slovakia
}
# Daily ECB euro foreign-exchange reference rates (XML feed) and its namespace.
ECB_EUROFXREF_URL = 'http://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml'
ECB_EUROFXREF_XML_NS = 'http://www.ecb.int/vocabulary/2002-08-01/eurofxref'
# EU SOAP services: VIES (VAT number validation) and TIC (VAT rates).
VIES_SOAP_WSDL_URL = 'http://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl'
TIC_SOAP_WSDL_URL = 'http://ec.europa.eu/taxation_customs/tic/VatRateWebService.wsdl'
def is_eu_country(country_code):
    """Return a truthy value when *country_code* (case-insensitive ISO alpha-2)
    is an EU member state; falsy inputs are passed through unchanged."""
    if not country_code:
        return country_code
    return country_code.upper() in EU_COUNTRIES
def should_charge_vat(country_code, eu_vat_number=None):
    """Decide whether VAT applies to a sale.

    VAT is charged for EU customers that are either in the seller's own
    country or did not supply an EU VAT number (reverse charge otherwise).
    Falsy results of is_eu_country are passed through unchanged.
    """
    in_eu = is_eu_country(country_code)
    if not in_eu:
        return in_eu
    own_country = get_extension_state('frasco_eu_vat').options['own_country']
    return own_country == country_code or not eu_vat_number
_exchange_rates_cache = {}
_vat_rates_cache = {}
# Process-wide lazily-created SOAP client for the VIES service.
VIESClient = None
def get_vies_soap_client():
    """Return the shared suds client for the EU VIES VAT-check service,
    creating it on first use (building a suds client parses the WSDL)."""
    global VIESClient
    client = VIESClient
    if not client:
        client = VIESClient = SudsClient(VIES_SOAP_WSDL_URL)
    return client
# Process-wide lazily-created SOAP client for the TIC VAT-rates service.
TICClient = None
def get_ticc_soap_client():
    """Return the shared suds client for the EU TIC VAT-rate service,
    creating it on first use."""
    global TICClient
    client = TICClient
    if not client:
        client = TICClient = SudsClient(TIC_SOAP_WSDL_URL)
    return client
class EUVATError(Exception):
    """Raised for EU-VAT specific failures: non-EU country codes, VAT numbers
    that are too short, or currencies missing from the ECB rate feed."""
def get_vat_rate(country_code, rate_type='standard'):
    """Return the VAT rate (percent) of *rate_type* for an EU country.

    Rates are fetched once per country from the EU TIC SOAP service and
    memoized in _vat_rates_cache. On a fetch error the empty cache entry is
    discarded (so a later call retries) and the hard-coded KNOWN_VAT_RATES
    fallback is returned instead. Returns None when *rate_type* is unknown
    for that country. Raises EUVATError for non-EU country codes.
    """
    country_code = country_code.upper()
    if not is_eu_country(country_code):
        raise EUVATError('Not an EU country')
    if country_code not in _vat_rates_cache:
        _vat_rates_cache[country_code] = {}
        try:
            # NOTE(review): getRates is called with a single dict argument —
            # presumably matching the TIC WSDL request message; confirm.
            r = get_ticc_soap_client().service.getRates(dict(memberState=country_code,
                requestDate=datetime.date.today().isoformat()))
            for rate in r.ratesResponse.rate:
                _vat_rates_cache[country_code][rate.type.lower()] = float(rate.value)
        except Exception as e:
            current_app.logger.debug(e)
            # drop the half-initialized entry so the next call retries
            _vat_rates_cache.pop(country_code)
            return KNOWN_VAT_RATES.get(country_code)
    return _vat_rates_cache[country_code].get(rate_type.lower())
def validate_vat_number(vat_number, invalid_format_raise_error=False):
    """Check an EU VAT number (country prefix + digits) against VIES.

    Numbers shorter than 3 characters are rejected immediately — either by
    returning False or, when *invalid_format_raise_error* is set, by raising
    EUVATError. SOAP faults from the remote service are treated as invalid.
    """
    if len(vat_number) < 3:
        if invalid_format_raise_error:
            raise EUVATError('VAT number too short')
        return False
    country_prefix = vat_number[:2].upper()
    number = vat_number[2:]
    try:
        return get_vies_soap_client().service.checkVat(country_prefix, number).valid
    except WebFault:
        return False
def fetch_exchange_rates():
    """Fetch today's EUR-based exchange rates from the ECB daily XML feed.

    Results are cached per calendar day. The returned mapping always
    contains 'EUR': 1.0; on any fetch/parse error the partial dict is
    returned without being cached, so the next call retries.
    """
    today = datetime.date.today()
    cached = _exchange_rates_cache.get(today)
    if cached is not None:
        return cached
    rates = {'EUR': 1.0}
    try:
        response = requests.get(ECB_EUROFXREF_URL)
        document = ET.fromstring(response.text)
        namespaces = {'eu': ECB_EUROFXREF_XML_NS}
        for node in document.findall('eu:Cube/eu:Cube/eu:Cube', namespaces):
            rates[node.attrib['currency']] = float(node.attrib['rate'])
        _exchange_rates_cache[today] = rates
    except Exception as e:
        current_app.logger.debug(e)
    return rates
def get_exchange_rate(country_code, src_currency='EUR'):
    """Return the factor converting *src_currency* amounts into the national
    currency of the given EU country (1.0 when the currencies match).

    Raises EUVATError for non-EU countries or source currencies that are
    not in the ECB feed.
    """
    if not is_eu_country(country_code):
        raise EUVATError('Not an EU country')
    dest_currency = EU_COUNTRIES[country_code]
    ecb_rates = fetch_exchange_rates()
    if src_currency == dest_currency:
        return 1.0
    if src_currency == 'EUR':
        return ecb_rates.get(dest_currency, 1.0)
    if src_currency not in ecb_rates:
        raise EUVATError('Can only use a currency listed in the ECB rates')
    # source -> EUR -> destination, rounded to 5 decimals
    return round(1.0 / ecb_rates[src_currency] * ecb_rates.get(dest_currency, 1.0), 5)
| {
"repo_name": "frascoweb/frasco",
"path": "frasco/billing/eu_vat/data.py",
"copies": "1",
"size": "5059",
"license": "mit",
"hash": -8591441768103965000,
"line_mean": 28.9349112426,
"line_max": 105,
"alpha_frac": 0.6066416288,
"autogenerated": false,
"ratio": 2.824678950307091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39313205791070904,
"avg_score": null,
"num_lines": null
} |
from frasco import current_app
from frasco_forms import TemplateForm
from frasco_forms.form import field_type_map
from frasco.utils import unknown_value
from wtforms.fields.core import UnboundField
import inflection
import datetime
import inspect
import fields
__all__ = ('create_form_from_model', 'create_form_class_from_model')
class ModelFormGenerationError(Exception):
    """Raised when a form cannot be generated from a model, e.g. when a
    requested field name is unknown to the backend's introspection."""
# Maps Python value types to the form field type names registered in
# frasco_forms' field_type_map (Python 2: both str and unicode are text).
model_type_map = {
    str: "text",
    unicode: "text",
    int: "int",
    float: "float",
    datetime.datetime: "datetime5",
    datetime.date: "date5",
    bool: "checkbox",
}
def create_form_from_model(model, **kwargs):
    """Build and instantiate a form for *model*.

    *model* may be a class or an instance; an instance is also passed to the
    form as its bound object. Extra kwargs go to create_form_class_from_model.
    """
    bound_obj = model if not inspect.isclass(model) else None
    form_class = create_form_class_from_model(model, **kwargs)
    return form_class(obj=bound_obj)
def create_form_class_from_model(model, backend=None, template=unknown_value, fields=None,
        fields_specs=None, exclude_fields=None):
    """Dynamically build a TemplateForm subclass for *model*.

    model -- model class or instance
    backend -- models backend used for field introspection (defaults to the
        current app's models backend)
    template -- form template; when left unset, a bootstrap-flavoured
        template is used if the bootstrap feature is loaded
    fields -- explicit field names to include; items may be (name, spec)
        tuples overriding the inspected spec; defaults to all inspected fields
    fields_specs -- mapping of field name -> spec overriding introspection
    exclude_fields -- field names to skip

    Raises ModelFormGenerationError for names introspection cannot resolve.
    NOTE(review): the *fields* parameter shadows the module-level ``fields``
    import inside this function — confirm the module is only used elsewhere.
    """
    if not backend:
        backend = current_app.features.models.backend
    if template is unknown_value:
        # prefer the bootstrap template when that feature is available
        if 'bootstrap' in current_app.features:
            template = "model_bs_form_template.html"
        else:
            template = "model_form_template.html"
    model_name = model.__name__ if inspect.isclass(model) else model.__class__.__name__
    form_name = model_name + 'Form'
    form_class = type(form_name, (TemplateForm,), {"name": form_name, "template": template})
    names = []
    specs = fields_specs or {}
    inspected_fields = backend.inspect_fields(model)
    if fields:
        for f in fields:
            if isinstance(f, tuple):
                # (name, spec) tuple: remember both the name and its override
                specs[f[0]] = f[1]
                names.append(f[0])
            else:
                names.append(f)
    else:
        names = [n for n, _ in inspected_fields]
    for name in names:
        if exclude_fields and name in exclude_fields:
            continue
        spec = specs.get(name)
        if not spec:
            spec = dict(inspected_fields).get(name)
        if not spec:
            raise ModelFormGenerationError("Unknown field '%s'" % name)
        setattr(form_class, name, create_form_field_from_model_field(model, name, spec))
    return form_class
def create_form_field_from_model_field(model, name, spec):
    """Build a wtforms field for model attribute *name*.

    *spec* is either an already-unbound wtforms field (returned as-is) or a
    dict describing the column ('form_field', 'type', 'label', 'description',
    'form_field_kwargs' keys are honoured). Falls back to a text field.
    """
    if isinstance(spec, UnboundField):
        return spec
    if spec.get('form_field'):
        field_type = spec['form_field']
    elif spec.get('type'):
        field_type = model_type_map.get(spec['type'], "text")
    else:
        field_type = 'text'
    kwargs = {"label": spec.get('label', inflection.humanize(name))}
    description = spec.get('description')
    if description:
        kwargs['description'] = description
    kwargs.update(spec.get('form_field_kwargs', {}))
    return field_type_map[field_type](**kwargs)
| {
"repo_name": "frascoweb/frasco-models",
"path": "frasco_models/form/__init__.py",
"copies": "1",
"size": "2763",
"license": "mit",
"hash": 956701608249489900,
"line_mean": 31.1279069767,
"line_max": 92,
"alpha_frac": 0.6275787188,
"autogenerated": false,
"ratio": 3.6742021276595747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48017808464595746,
"avg_score": null,
"num_lines": null
} |
from frasco import current_app
from frasco.utils import ContextStack, DelayedCallsContext
from contextlib import contextmanager
import functools
from werkzeug.local import LocalProxy
__all__ = ('transaction', 'current_transaction', 'as_transaction', 'delayed_tx_calls')
# Nesting tracker for transaction() — only the outermost context touches the
# models backend. NOTE(review): exact ContextStack(False, True) semantics come
# from frasco.utils; confirm before relying on this description.
_transaction_ctx = ContextStack(False, True)
# Proxy resolving to the current transaction context (falsy outside one).
current_transaction = _transaction_ctx.make_proxy()
# Calls deferred until the outermost transaction commits; dropped on rollback.
delayed_tx_calls = DelayedCallsContext()
@contextmanager
def transaction():
    """Context manager providing nestable transactions over the models backend.

    Only the outermost context commits (or rolls back); inner contexts merely
    flush so their changes become visible to the enclosing transaction.
    Delayed calls queued via delayed_tx_calls run on the outermost commit and
    are dropped on rollback. Any exception is re-raised after rollback.
    """
    if not _transaction_ctx.top:
        current_app.logger.debug('BEGIN TRANSACTION')
    _transaction_ctx.push()
    delayed_tx_calls.push()
    try:
        yield
        # pop before testing .top so it reflects the *enclosing* context
        _transaction_ctx.pop()
        if not _transaction_ctx.top:
            current_app.logger.debug('COMMIT TRANSACTION')
            current_app.features.models.backend.commit_transaction()
        else:
            current_app.features.models.backend.flush_transaction()
        delayed_tx_calls.pop()
    except:
        _transaction_ctx.pop()
        if not _transaction_ctx.top:
            current_app.logger.debug('ROLLBACK TRANSACTION')
            current_app.features.models.backend.rollback_transaction()
        delayed_tx_calls.pop(drop_calls=True)
        raise
def as_transaction(func):
    """Decorator executing the wrapped callable inside a transaction()
    context; commits on return, rolls back on exception."""
    @functools.wraps(func)
    def run_in_transaction(*args, **kwargs):
        with transaction():
            return func(*args, **kwargs)
    return run_in_transaction
| {
"repo_name": "frascoweb/frasco-models",
"path": "frasco_models/transaction.py",
"copies": "1",
"size": "1373",
"license": "mit",
"hash": -2265721369065204000,
"line_mean": 29.5111111111,
"line_max": 86,
"alpha_frac": 0.6758922068,
"autogenerated": false,
"ratio": 3.968208092485549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00210727969348659,
"num_lines": 45
} |
from frasco import current_app, url_for
from flask import safe_join
import os
#: Registry mapping backend name -> backend class.
upload_backends = {}
def file_upload_backend(cls):
    """Class decorator registering *cls* in upload_backends under its
    `name` attribute; returns the class unchanged."""
    upload_backends[cls.name] = cls
    return cls
class StorageBackend(object):
    """Abstract base class for upload storage backends.

    Subclasses must implement save(), url_for() and delete(); *options* is a
    backend-specific configuration dict kept on the instance.
    """
    def __init__(self, options):
        self.options = options
    def save(self, file, filename):
        """Persist *file* under *filename*."""
        raise NotImplementedError
    def url_for(self, filename, **kwargs):
        """Return a public URL serving *filename*."""
        raise NotImplementedError
    def delete(self, filename):
        """Remove *filename* from storage."""
        raise NotImplementedError
@file_upload_backend
class LocalStorageBackend(StorageBackend):
    """Stores uploads on the local filesystem under options["upload_dir"];
    relative upload dirs are anchored at the application root."""
    name = 'local'
    def _absolute_path(self, filename):
        # Join safely below upload_dir, then anchor relative results at the
        # app root path.
        path = safe_join(self.options["upload_dir"], filename)
        if not os.path.isabs(path):
            path = os.path.join(current_app.root_path, path)
        return path
    def save(self, file, filename):
        target = self._absolute_path(filename)
        directory = os.path.dirname(target)
        if not os.path.exists(directory):
            os.makedirs(directory)
        file.save(target)
    def url_for(self, filename, **kwargs):
        return url_for("static_upload", filename=filename, **kwargs)
    def delete(self, filename):
        target = self._absolute_path(filename)
        if os.path.exists(target):
            os.unlink(target)
@file_upload_backend
class HttpStorageBackend(StorageBackend):
    """Pseudo-backend: stored 'filenames' are host/path strings served over
    plain HTTP; no save/delete support."""
    name = 'http'
    def url_for(self, filename, **kwargs):
        return 'http://' + filename
@file_upload_backend
class HttpsStorageBackend(StorageBackend):
    """Pseudo-backend: stored 'filenames' are host/path strings served over
    HTTPS; no save/delete support."""
    name = 'https'
    def url_for(self, filename, **kwargs):
        return 'https://' + filename
"repo_name": "frascoweb/frasco-upload",
"path": "frasco_upload/backends.py",
"copies": "1",
"size": "1658",
"license": "mit",
"hash": -6426801043074675000,
"line_mean": 24.5230769231,
"line_max": 68,
"alpha_frac": 0.6525934861,
"autogenerated": false,
"ratio": 3.928909952606635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00042735042735042735,
"num_lines": 65
} |
from frasco import Feature, action, ActionsView, current_app, Markup
from frasco.utils import remove_yaml_frontmatter, wrap_in_markup
from frasco.templating import render_layout, jinja_fragment_extension, get_template_source
import markdown
import os
class MarkdownView(ActionsView):
    """Actions view whose template is a markdown file, rendered to HTML and
    optionally wrapped in a layout template."""
    def __init__(self, layout=None, **kwargs):
        super(MarkdownView, self).__init__(**kwargs)
        self.layout = layout
    def render(self, **kwargs):
        """Return the converted HTML, or None when no template is set."""
        if self.template is None:
            return None
        raw = get_template_source(current_app, self.template)
        html = current_app.features.markdown.convert(remove_yaml_frontmatter(raw))
        if self.layout is None:
            return html
        return render_layout(self.layout, html, **kwargs)
@jinja_fragment_extension("markdown")
def MarkdownJinjaExtension(caller, **kwargs):
    """{% markdown %}...{% endmarkdown %} Jinja block converting its body
    to HTML through the markdown feature."""
    body = caller()
    return current_app.features.markdown.convert(body, **kwargs)
class MarkdownFeature(Feature):
    """Frasco feature exposing markdown conversion as an action, a Jinja
    filter/extension and a *.md view type."""
    name = "markdown"
    view_files = [("*.md", MarkdownView)]
    defaults = {"extensions": {},
                "output_format": "html5",
                "safe_mode": False,
                "html_replacement_text": "[HTML REMOVED]"}
    def init_app(self, app):
        app.jinja_env.add_extension(MarkdownJinjaExtension)
        app.add_template_filter(wrap_in_markup(self.convert), "markdown")
    @action("markdown_to_html", default_option="content")
    def convert(self, content, **kwargs):
        """Convert markdown *content* to HTML; feature options provide the
        defaults for any conversion kwarg not given explicitly."""
        kwargs.setdefault('extensions', self.options["extensions"].keys())
        kwargs.setdefault('extension_configs', self.options["extensions"])
        for option in ('output_format', 'safe_mode', 'html_replacement_text'):
            kwargs.setdefault(option, self.options[option])
        return markdown.markdown(content, **kwargs)
"repo_name": "frascoweb/frasco-markdown",
"path": "frasco_markdown.py",
"copies": "1",
"size": "1893",
"license": "mit",
"hash": -653922890435198300,
"line_mean": 37.6530612245,
"line_max": 90,
"alpha_frac": 0.6714210248,
"autogenerated": false,
"ratio": 4.010593220338983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5182014245138983,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, AttrDict, current_app, signal, command, request, current_context, json, Response
from elasticsearch import Elasticsearch
import math
class SearchFeature(Feature):
    """Elasticsearch integration: document/model indexing, paginated search,
    automatic model synchronisation and completion-based autocomplete."""
    name = "search"
    defaults = {"hosts": [{"host": "localhost"}],
                "pagination_per_page": 10,
                "index_models": {},
                "autocomplete": {},
                "autocomplete_url": "/_autocomplete/<doc_type>/<field>",
                "autocomplete_endpoint": "autocomplete",
                "indexes": {},
                "use_only_one_index": False,
                "default_index": "frasco",
                "create_indexes": True,
                "indexes_settings": {},
                "mapping": {},
                "use_redis_partial_models": False}
    def init_app(self, app):
        """Connect to ES, wire model change signals and register the
        autocomplete HTTP endpoint."""
        self.es = Elasticsearch(self.options["hosts"])
        if self.options["index_models"]:
            signal("model_saved").connect(self._index_model_on_change)
            signal("model_deleted").connect(self._delete_model_on_change)
        if self.options["create_indexes"]:
            self.create_all()
        if self.options["autocomplete_url"]:
            app.add_url_rule(self.options["autocomplete_url"], self.options["autocomplete_endpoint"],
                self.autocomplete_endpoint)
    def get_index(self, doc_type, index=None):
        """Resolve the index name(s) for a (possibly comma-separated)
        doc_type; unmapped doc types get an index named after themselves."""
        if index:
            return index
        if self.options["use_only_one_index"]:
            return self.options["default_index"]
        indexes = set()
        for dt in doc_type.split(","):
            given = False
            for index, doc_types in self.options["indexes"].iteritems():
                if dt.strip() in doc_types:
                    indexes.add(index)
                    given = True
                    break
            if not given:
                indexes.add(doc_type.lower())
        return ",".join(indexes)
    @command()
    def create_all(self):
        """Create every configured index and mapping (existing indexes are
        ignored via HTTP 400)."""
        indexes = set(self.options["indexes"].keys())
        for model in self.options["index_models"]:
            indexes.add(self.get_index(model))
        mappings = self.options["mapping"]
        for doc_type in self.options["autocomplete"]:
            if doc_type not in mappings:
                mappings[doc_type] = {}
            indexes.add(self.get_index(doc_type))
        for index in indexes:
            # ignore=400: index already exists
            self.create_index(index, self.options["indexes_settings"].get(index, {}), 400)
        for doc_type, doc_mapping in mappings.iteritems():
            self.create_mapping(doc_type.lower(), doc_mapping, ignore_conflicts=True)
    @command()
    def delete_all(self):
        """Delete every configured index."""
        for index in self.options["indexes"]:
            self.delete_index(index)
        for doc_type in self.options["mapping"]:
            self.delete_index(self.get_index(doc_type))
    @command()
    @action("create_search_index", default_option="name")
    def create_index(self, name, settings=None, ignore=None):
        """Create index *name* with optional settings; *ignore* is an HTTP
        status (or list) to silently accept."""
        self.es.indices.create(index=name,
            body={"settings": settings or {}}, ignore=ignore)
    @command()
    @action("delete_search_index", default_option="name")
    def delete_index(self, name):
        """Delete index *name*."""
        self.es.indices.delete(index=name)
    @command()
    @action("create_search_mapping")
    def create_mapping(self, name, mapping, index=None, with_autocomplete=True, ignore_conflicts=False):
        """Put the doc_type mapping, adding suggest_* completion fields for
        doc types configured for autocomplete."""
        if name in self.options["autocomplete"]:
            mapping.setdefault("properties", {})
            for prop in self.options["autocomplete"][name]:
                if isinstance(prop, list):
                    prop = prop[0]
                mapping["properties"]["suggest_%s" % prop] = {
                    "type": "completion",
                    "index_analyzer": "simple",
                    "search_analyzer": "simple",
                    "payloads": True}
        self.es.indices.put_mapping(index=self.get_index(name, index),
            doc_type=name, body=dict([(name, mapping)]), ignore_conflicts=ignore_conflicts)
    @command()
    def reindex_models(self, models=None):
        """Re-index all configured models, or a comma-separated subset."""
        if models:
            models = models.split(",")
        else:
            models = self.options["index_models"].keys()
        for name in models:
            command.echo("Indexing %s..." % name)
            for obj in current_app.features.models.query(name).all():
                self.index_model(obj)
    @action("index_doc_for_search")
    def index(self, doc_type, id, doc, index=None, refresh=True, **kwargs):
        """Index a document, adding suggest_* completion inputs when the doc
        type is configured for autocomplete."""
        doc = dict(doc)
        if doc_type in self.options["autocomplete"]:
            for props in self.options["autocomplete"][doc_type]:
                # BUG FIX: the isinstance test was inverted — a plain string
                # prop was iterated character by character (KeyError) and list
                # props were double-wrapped (TypeError). Only wrap non-lists.
                if not isinstance(props, list):
                    props = [props]
                input = [doc[k] for k in props]
                output = doc[props[0]]
                payload = {"id": id}
                doc["suggest_%s" % props[0]] = {"input": input,
                    "output": output, "payload": payload}
        index = self.get_index(doc_type, index)
        current_app.logger.debug("Indexing document %s in %s/%s" % (id, index, doc_type))
        return self.es.index(index=index, doc_type=doc_type, id=id, body=doc, refresh=refresh, **kwargs)
    @action("index_model_for_search", default_option="obj")
    def index_model(self, obj, fields=None, extra_doc=None, **kwargs):
        """Index a model instance, optionally restricted to *fields* and
        merged with *extra_doc*."""
        model = obj.__class__.__name__
        doc_type = model.lower()
        doc = obj.for_json()
        if not fields and model in self.options["index_models"]:
            fields = self.options["index_models"][model]
        if fields:
            doc = dict((k, doc[k]) for k in fields)
        if extra_doc:
            doc.update(extra_doc)
        return self.index(doc_type, str(obj.id), doc, **kwargs)
    def _index_model_on_change(self, backend, obj):
        # model_saved signal handler: re-index configured models on save
        for name, fields in self.options["index_models"].iteritems():
            if obj.__class__.__name__ == name:
                return self.index_model(obj)
    @action("delete_search_doc")
    def delete(self, doc_type, id, index=None, **kwargs):
        """Remove a document from its index."""
        index = self.get_index(doc_type, index)
        current_app.logger.debug("Deindexing document %s in %s/%s" % (id, index, doc_type))
        return self.es.delete(index=index, doc_type=doc_type, id=id, **kwargs)
    @action("delete_search_model", default_option="obj")
    def delete_model(self, obj, **kwargs):
        """Remove a model instance's document from its index."""
        return self.delete(obj.__class__.__name__.lower(), obj.id, **kwargs)
    def _delete_model_on_change(self, backend, obj):
        # model_deleted signal handler: deindex configured models on delete
        for name, fields in self.options["index_models"].iteritems():
            if obj.__class__.__name__ == name:
                return self.delete_model(obj)
    @action("fetch_search_doc")
    def fetch(self, doc_type, id, index=None, **kwargs):
        """Fetch a single document by id."""
        index = self.get_index(doc_type, index)
        return self.es.get(index=index, doc_type=doc_type, id=id, **kwargs)
    @action(as_="search_results")
    def search(self, doc_type, index=None, paginate=False, page=None, obj_loader=None, **kwargs):
        """Run a search and wrap the response in SearchResults; ES errors are
        logged and yield an empty result set instead of raising."""
        index = self.get_index(doc_type, index)
        per_page = 10 # pyelasticsearch's default
        if not kwargs:
            kwargs["q"] = request.args.get("q")
        if paginate:
            if not page:
                page = int(request.args.get("page", 1))
            per_page = self.options["pagination_per_page"] if isinstance(paginate, bool) else paginate
            kwargs["from_"] = (page - 1) * per_page
            kwargs["size"] = per_page
        current_app.logger.debug("Performing search %s in %s/%s" % (kwargs, index, doc_type))
        try:
            res = self.es.search(index=index, **kwargs)
        except Exception as e:
            current_app.log_exception(e)
            res = None
        return SearchResults(res, kwargs.get("q"), per_page, page, obj_loader)
    @action(as_="search_results", default_option="model")
    def search_model(self, model, **kwargs):
        """Search a model's doc type, loading hits back as model instances
        (optionally from the redis partial-model cache)."""
        if isinstance(model, str):
            model = current_app.features.models[model]
        def obj_loader(hit):
            if self.options["use_redis_partial_models"]:
                return current_app.features.redis.get_partial_model_from_cache(model, hit['_id'])
            return current_app.features.models.query(model).get(hit["_id"])
        return self.search(model.__name__.lower(), _source=False, obj_loader=obj_loader, **kwargs)
    @action(as_="suggestions")
    def autocomplete(self, doc_type, field, text, index=None):
        """Return completion suggestions for *text* on *field* as a list of
        (text, id) tuples; errors are logged and yield an empty list."""
        index = self.get_index(doc_type, index)
        # BUG FIX: the original referenced the undefined name `prop` instead
        # of `field` (NameError), and built the request body as a set literal
        # containing a dict (TypeError) instead of a dict.
        field = "suggest_%s" % field
        body = {"autocomplete": {"text": text, "completion": {"field": field}}}
        try:
            res = self.es.suggest(index=index, body=body)
        except Exception as e:
            current_app.log_exception(e)
            return []
        if "autocomplete" not in res:
            return []
        suggestions = []
        for option in res["autocomplete"][0]["options"]:
            suggestions.append((option["text"], option["payload"]["id"]))
        return suggestions
    def autocomplete_endpoint(self, doc_type=None, field=None, text=None):
        """HTTP endpoint wrapping autocomplete(); missing args fall back to
        the request query string."""
        doc_type = doc_type or request.args["doc_type"]
        field = field or request.args["field"]
        text = text or request.args["text"]
        return Response(json.dumps(self.autocomplete(doc_type, field, text)), mimetype="application/json")
class SearchResults(object):
    """Wrapper around a raw Elasticsearch response with pagination helpers.

    Iterating yields either the objects produced by *obj_loader* (falsy
    results are skipped, e.g. stale index entries) or AttrDict views of the
    raw hits. A None *results* behaves as an empty result set.
    """
    def __init__(self, results, q=None, per_page=10, page=1, obj_loader=None):
        self.results = results
        self.q = q
        self.total = results["hits"]["total"] if results else 0
        self.per_page = per_page
        self.nb_pages = int(math.ceil(float(self.total) / float(per_page)))
        self.page = page
        self.hits = results["hits"]["hits"] if results else []
        self.obj_loader = obj_loader
    def __iter__(self):
        for hit in self.hits:
            if self.obj_loader:
                obj = self.obj_loader(hit)
                # skip hits whose object could not be loaded (deleted rows)
                if obj:
                    yield obj
            else:
                yield AttrDict(hit)
    def for_json(self):
        return list(self)
    @property
    def prev_page(self):
        """Previous page number, or None on the first page."""
        return self.page - 1 if self.page > 1 else None
    @property
    def next_page(self):
        """Next page number, or None on the last page."""
        return self.page + 1 if self.page < self.nb_pages else None
    # code from Flask-Sqlalchemy
    # https://github.com/mitsuhiko/flask-sqlalchemy/
    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield page numbers for a pagination widget, with None marking
        elided gaps between the edges and the window around the current page."""
        last = 0
        # BUG FIX: xrange is Python-2 only; range iterates identically here
        # and keeps the class usable on Python 3.
        for num in range(1, self.nb_pages + 1):
            if num <= left_edge or \
               (num > self.page - left_current - 1 and \
                num < self.page + right_current) or \
               num > self.nb_pages - right_edge:
                if last + 1 != num:
                    yield None
                yield num
                last = num
| {
"repo_name": "frascoweb/frasco-search",
"path": "frasco_search.py",
"copies": "1",
"size": "10930",
"license": "mit",
"hash": -8295283454229887000,
"line_mean": 39.7835820896,
"line_max": 116,
"alpha_frac": 0.5652333028,
"autogenerated": false,
"ratio": 3.9049660593068953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4970199362106895,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, Blueprint, current_app, hook, signal
from frasco.utils import import_string
from .view import AdminView, AdminBlueprint
from blueprint import admin_bp
import os
class AdminFeature(Feature):
    """Admin backend feature: mounts the admin blueprint, lets other features
    register their own admin pages, and manages dashboard counters."""
    name = "admin"
    requires = ["bootstrap"]
    blueprints = [admin_bp]
    view_files = [("admin/*", AdminView)]
    defaults = {"url_prefix": "/admin",
                "subdomain": None}
    # fired after all features had a chance to register their admin pages
    init_admin_signal = signal('init_admin')
    def init_app(self, app):
        """Register admin assets, macros and internal state on the app."""
        self.app = app
        self.admin_checker_func = None
        app.features.menu.ensure("admin")
        app.assets.register({
            "admin": [
                "@jquery-bootstrap-all-cdn",
                "@font-awesome-cdn",
                "admin/layout.css",
                "admin/admin.js"]})
        app.jinja_env.macros.register_file(
            os.path.join(os.path.dirname(__file__), "macros.html"), "admin.html")
        self.dashboard_counters = []
    def admin_checker(self, func):
        """Decorator registering a custom is-admin predicate."""
        self.admin_checker_func = func
    def is_admin(self, user):
        """Return whether *user* may access the admin; falls back to the
        user's own `is_admin` attribute when no checker is registered."""
        if self.admin_checker_func:
            return self.admin_checker_func(user)
        return getattr(user, 'is_admin', False)
    def init_blueprints(self, app):
        """Register the admin blueprint, then let every feature exposing
        init_admin() add its own admin pages before firing the signal."""
        self.register_blueprint(admin_bp)
        for feature in app.features:
            if hasattr(feature, "init_admin"):
                feature.init_admin(self, app)
        self.init_admin_signal.send(app, admin=self)
    def register_blueprint(self, bp):
        """Register a blueprint (or an import path to one) under the admin
        url prefix/subdomain."""
        if isinstance(bp, str):
            bp = import_string(bp)
        self.app.register_blueprint(bp, **self.get_blueprint_options(bp))
    def get_blueprint_options(self, bp=None):
        """Return url_prefix/subdomain options, nesting the blueprint's own
        prefix under the feature's url_prefix."""
        url_prefix = self.options["url_prefix"]
        if bp and bp.url_prefix:
            url_prefix = (url_prefix + "/" + bp.url_prefix.lstrip("/")).rstrip("/")
        return dict(url_prefix=url_prefix, subdomain=self.options["subdomain"])
    def register_dashboard_counter(self, label, value_func, icon, color='blue'):
        """Add a (label, icon, color, value_func) counter to the dashboard."""
        self.dashboard_counters.append((label, icon, color, value_func))
"repo_name": "frascoweb/frasco-admin",
"path": "frasco_admin/__init__.py",
"copies": "1",
"size": "2109",
"license": "mit",
"hash": -3567897263967139300,
"line_mean": 33.0322580645,
"line_max": 83,
"alpha_frac": 0.6031294452,
"autogenerated": false,
"ratio": 3.7931654676258995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48962949128259,
"avg_score": null,
"num_lines": null
} |
from frasco import (Feature, action, Blueprint, View, render_template,\
current_context, command, hook, current_app, signal)
from frasco.utils import remove_yaml_frontmatter
from frasco.templating import get_template_source
import os
import json
import re
import hashlib
import uuid
import json
from jinja2 import PackageLoader
from jinja2.ext import Extension
import htmlmin
import codecs
class AngularView(View):
    """View whose template is rendered client-side through Angular's router;
    the server always responds with the configured angular layout.

    Keyword arguments that are not View attributes are collected into
    route_options and later emitted into the generated $routeProvider config.
    """
    def __init__(self, name=None, url=None, template=None, layout=None, angular_url=None, **kwargs):
        view_attrs = ('methods', 'url_rules')
        self.angular_url = angular_url
        # BUG FIX: snapshot the keys with list() before popping — iterating
        # kwargs.keys() while mutating kwargs only works on Python 2 (where
        # keys() returns a list) and raises RuntimeError on Python 3.
        self.route_options = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k not in view_attrs}
        super(AngularView, self).__init__(name=name, url=url, **kwargs)
        self.template = template
        self.layout = layout
    def dispatch_request(self, *args, **kwargs):
        """Ignore request args: render the angular layout, which bootstraps
        the client-side application."""
        layout = self.layout or current_app.features.angular.options['views_layout']
        return render_template(layout, **current_context.vars)
# Matches the closing {% endmacro %} tag (optional whitespace-control dash).
_endmacro_re = re.compile(r"\{%-?\s*endmacro\s*%\}")
# Matches an inline {# ngdirective: {...} #} JSON configuration comment.
_ngdirective_re = re.compile(r"\{#\s*ngdirective:(.+)#\}")
# Matches Flask URL parameters such as <id> or <int:id>.
_url_arg_re = re.compile(r"<([a-z]+:)?([a-z0-9_]+)>")
def convert_url_args(url):
    """Rewrite Flask-style URL parameters (``<int:id>``) to the Angular
    route syntax (``:id``), dropping any converter prefix."""
    def to_angular(match):
        return ":" + match.group(2)
    return _url_arg_re.sub(to_angular, url)
class AngularFeature(Feature):
name = "angular"
requires = ["assets"]
view_files = [("*.ng.html", AngularView)]
ignore_attributes = ['assets']
defaults = {"export_macros": [],
"static_dir": None, # defaults to app.static_folder
"static_url_path": None, # defaults to app.static_url_path
"auto_assets": True,
"use_layout": True,
"base_layout": "frasco_layout.html",
"app_dir": "app",
"app_file": "app.js", # set to False to not generate an app.js
"app_module": "app",
"app_var": "app",
"app_deps": [],
"partials_dir": "partials",
"directives_file": "directives/auto.js",
"directives_module": "directives",
"directives_name": "%s",
"auto_add_directives_module": True,
"views_dir": "views",
"routes_file": "routes.js",
"routes_module": "routes",
"routes": [],
"auto_add_routes_module": True,
"views_layout": "angular_layout.html",
"services_file": "services/auto.js",
"services_module": "services",
"services_name": "%s",
"auto_add_services_module": True,
"templates_file": None,
"templates_module": "templatesCache",
"disable_templates_cache": None, # app.debug
"auto_add_templates_module": True,
"append_version_to_template_names": True,
"templates_matcher": r".*\.html$",
"disable_reloading_endpoints": False,
"angular_version": "1.3.3",
"add_app_dir_in_babel_extract": True}
build_all_signal = signal('angular_build_all')
before_build_write_signal = signal('angular_before_build_write')
before_clean_signal = signal('angular_before_clean')
    def init_app(self, app):
        """Register the angular CDN asset packages, the auto-generated assets
        bundle, layout aliases, the partial-reload endpoint and (optionally)
        babel extraction for the client-side app directory."""
        self.app = app
        self.built = False
        # default the generated-files location to the Flask static folder
        if not self.options["static_dir"]:
            self.options["static_dir"] = app.static_folder
        if not self.options["static_url_path"]:
            self.options["static_url_path"] = app.static_url_path
        app.features.assets.expose_package("frasco_angular", __name__)
        version = self.options['angular_version']
        app.assets.register({
            "angular-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular.min.js" % version],
            "angular-route-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular-route.min.js" % version],
            "angular-resource-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular-resource.min.js" % version],
            "angular-animate-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular-animate.min.js" % version],
            "angular-cookies-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular-cookies.min.js" % version],
            "angular-loader-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular-loader.min.js" % version],
            "angular-sanitize-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular-sanitize.min.js" % version],
            "angular-touch-cdn": [
                "https://cdnjs.cloudflare.com/ajax/libs/angular.js/%s/angular-touch.min.js" % version],
            "angular-frasco": [
                {"output": "angular-frasco.min.js", "filters": "jsmin",
                 "contents": ["frasco_angular/angular-frasco.js"]}]})
        app.jinja_env.loader.bottom_loaders.append(PackageLoader(__name__))
        if self.options["use_layout"]:
            app.jinja_env.loader.set_layout_alias("angular_app_layout.html")
        # bundle that build_*() methods append generated files to
        self.auto_assets_pkg = app.assets.register("angular-auto-assets",
            {"output": "frasco-auto-angular",
             "contents": [{"filters": "jsmin", "contents": ["frasco_angular/angular-frasco.js"]}]})
        if self.options['auto_assets']:
            app.features.assets.add_default("@angular-cdn", "@angular-route-cdn",
                "@angular-auto-assets")
            app.features.assets.register_assets_builder(self.build)
        if not self.options["disable_reloading_endpoints"]:
            # adding the url rule ensure that we don't need to reload the app to regenerate the
            # partial file. partial files are still generated when the app starts but will then
            # be served by this endpoint and be generated on the fly
            # note: we don't need to the same for views as a change triggers the reloader
            app.add_url_rule(self.options["static_url_path"] + "/" + self.options["partials_dir"] + "/<macro>.html",
                endpoint="angular_partial", view_func=self.extract_macro)
        if app.features.exists('babel') and self.options['add_app_dir_in_babel_extract']:
            app.features.babel.add_extract_dir(os.path.join(self.options['static_dir'], self.options['app_dir']),
                '.', ['frasco_angular.AngularCompatExtension'], [('javascript:**.js', {})])
@command()
def build(self):
files = self.build_all()
self.before_build_write_signal.send(self, files=files)
for filename, source in files:
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with codecs.open(filename, "w", "utf-8") as f:
f.write(source)
@command()
def clean(self):
files = self.build_all()
self.before_clean_signal.send(self, files=files)
for filename, source in files:
if os.path.exists(filename):
os.unlink(filename)
def build_all(self, version=None):
if not version:
version = hashlib.sha1(str(uuid.uuid4())).hexdigest()[:8]
files = []
files.extend(self.build_directives(version))
files.extend(self.build_routes(version))
files.extend(self.build_services(version))
files.extend(self.build_templates(version))
files.extend(self.build_app(version))
self.build_all_signal.send(self, files=files, version=version)
return files
    def _iter_angular_views(self):
        """Yield (url_prefix, view) for every AngularView registered on the
        app itself (prefix None) or on one of its frasco blueprints.
        Uses Python 2 dict iteration (itervalues/iteritems)."""
        for v in self.app.views.itervalues():
            if isinstance(v, AngularView):
                yield (None, v)
        for name, bp in self.app.blueprints.iteritems():
            if isinstance(bp, Blueprint):
                for v in bp.views.itervalues():
                    if isinstance(v, AngularView):
                        yield (bp.url_prefix, v)
    def build_routes(self, version):
        """Generate the ngRoute module wiring AngularViews and the static
        `routes` option into $routeProvider; returns (filename, source)
        tuples including any exported view templates."""
        if not self.options['routes_file']:
            return []
        files = []
        base_url = self.options["static_url_path"] + "/" + self.options['app_dir'] + "/" + self.options["views_dir"] + "/"
        when_tpl = "$routeProvider.when('%s', %s);"
        routes = []
        for url_prefix, view in self._iter_angular_views():
            spec = dict(view.route_options)
            if view.template:
                # copy the view template into the static app dir and point at it
                files.append(self.export_view(view.template))
                spec['templateUrl'] = base_url + view.template
            if 'templateUrl' in spec:
                # cache-bust template urls with the build version
                spec['templateUrl'] = spec['templateUrl'] + '?' + version
            if view.angular_url:
                routes.append(when_tpl % (view.angular_url, json.dumps(spec)))
            else:
                for url, options in view.url_rules:
                    if url_prefix:
                        url = url_prefix + url
                    routes.append(when_tpl % (convert_url_args(url), json.dumps(spec)))
        for spec in self.options["routes"]:
            if not isinstance(spec, dict) or 'url' not in spec:
                raise Exception('Wrong format for route definition')
            if 'templateUrl' in spec:
                spec['templateUrl'] = spec['templateUrl'] + '?' + version
            routes.append(when_tpl % (spec.pop('url'), json.dumps(spec)))
        # unknown urls fall back to a full server-side page load
        routes.append("$routeProvider.otherwise({redirectTo: function(params, path, search) { window.location.href = path; }});")
        module = ("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n\n"
            "function versionizeUrl(url) {\n  return url + '?%s';\n};\n\n"
            "angular.module('%s', ['ngRoute']).config(['$routeProvider', '$locationProvider',\n"
            "  function($routeProvider, $locationProvider) {\n    $locationProvider.html5Mode(true);\n"
            "    %s\n  }\n]);\n") % (version, self.options["routes_module"], "\n    ".join(routes))
        filename = os.path.join(self.options['app_dir'], self.options["routes_file"])
        files.append((os.path.join(self.options["static_dir"], filename), module))
        self.auto_assets_pkg.append({"filters": "jsmin", "contents": [filename]})
        if self.options['auto_add_routes_module']:
            self.options["app_deps"].append(self.options["routes_module"])
        return files
def export_view(self, filename):
source = remove_yaml_frontmatter(get_template_source(self.app, filename))
dest = os.path.join(self.options["static_dir"], self.options['app_dir'],
self.options["views_dir"], filename)
return (dest, source)
def build_directives(self, version):
    """Export configured Jinja macros as an angular directives module.

    Returns a list of (filename, content) tuples: one exported partial per
    macro plus the generated directives module file, or an empty list when
    no macros are configured.
    """
    files = []
    directives = {}
    for macro in self.options["export_macros"]:
        filename, source, directives[macro] = self.export_macro(macro)
        files.append((filename, source))
    if not files:
        return files
    module = ("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
        "\n(function() {\n\nvar directives = angular.module('%s', []);\n\n") % self.options["directives_module"]
    for name, options in directives.iteritems():
        # A "name" key in the directive options overrides the macro name.
        name = options.pop("name", name)
        module += "directives.directive('%s', function() {\nreturn %s;\n});\n\n" % \
            (self.options['directives_name'] % name, json.dumps(options, indent=4))
    module += "})();"
    filename = os.path.join(self.options["app_dir"], self.options["directives_file"])
    files.append((os.path.join(self.options["static_dir"], filename), module))
    # Bug fix: auto_assets_pkg is a list; the original code called it like a
    # function (self.auto_assets_pkg({...})) which raised TypeError. Use
    # append(), consistent with build_app/build_services/build_templates.
    self.auto_assets_pkg.append({"filters": "jsmin", "contents": [filename]})
    if self.options['auto_add_directives_module']:
        self.options["app_deps"].append(self.options["directives_module"])
    return files
def export_macro(self, macro):
    """Export a single Jinja macro as an angular partial.

    Returns (partial filename, stripped html source, directive options); the
    options are augmented with a templateUrl pointing at the exported partial.
    """
    partial, options = self.extract_macro(macro, True)
    opts = self.options
    html_name = macro + ".html"
    filename = os.path.join(opts["static_dir"], opts['app_dir'],
        opts["partials_dir"], html_name)
    options["templateUrl"] = "/".join([opts["static_url_path"],
        opts['app_dir'], opts["partials_dir"], html_name])
    return (filename, partial.strip(), options)
def extract_macro(self, macro, with_options=False):
    """Extract the body of a Jinja macro from its template source.

    Returns the macro body, or (body, options) when with_options is true;
    options come from an embedded ng-directive marker, which is removed
    from the body. Raises Exception when the macro cannot be located.
    """
    template = self.app.jinja_env.macros.resolve_template(macro)
    if not template:
        raise Exception("Macro '%s' cannot be exported to angular because it does not exist" % macro)
    source = get_template_source(self.app, template)
    m = re.search(r"\{%\s*macro\s+" + re.escape(macro), source)
    if not m:
        raise Exception("Macro '%s' not found in template %s" % (macro, template))
    # Body starts right after the closing "%}" of the macro tag.
    start = source.find("%}", m.start()) + 2
    endm = _endmacro_re.search(source, start)
    # Bug fix: a missing endmacro tag used to crash with AttributeError on
    # None; fail with an explicit error instead.
    if not endm:
        raise Exception("Macro '%s' in template %s is missing an endmacro tag" % (macro, template))
    end = endm.start()
    partial = source[start:end]
    options = {}
    m = _ngdirective_re.search(partial)
    if m:
        options = json.loads(m.group(1))
        partial = partial.replace(m.group(0), "")
    if with_options:
        return (partial, options)
    return partial
def build_app(self, version):
    """Generate the main angular application module file.

    Returns a list containing a single (path, content) tuple, or an empty
    list when no app file is configured. The generated file is registered
    with the auto assets package.
    """
    opts = self.options
    if not opts["app_file"]:
        return []
    deps_js = "',\n '".join(opts["app_deps"])
    header = "/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
    module = (header + "\nvar %s = angular.module('%s', [\n '%s'\n]);\n") % (
        opts['app_var'], opts["app_module"], deps_js)
    filename = os.path.join(opts['app_dir'], opts['app_file'])
    self.auto_assets_pkg.append({"filters": "jsmin", "contents": [filename]})
    return [(os.path.join(opts["static_dir"], filename), module)]
def build_services(self, version):
    # Generates a javascript module exposing one angular factory per frasco
    # service, each wrapping the service's endpoints via frascoServiceFactory.
    # Returns [] when no services file is configured, otherwise a single
    # (path, content) tuple in a list.
    if not self.options["services_file"]:
        return []
    filename = os.path.join(self.options["app_dir"], self.options["services_file"])
    module = ("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
        "\n(function() {\n\nvar services = angular.module('%s', ['frasco']);\n") % self.options["services_module"]
    for name, srv in self.app.services.iteritems():
        endpoints = {}
        for view in srv.views:
            args = []
            if hasattr(view.func, 'request_params'):
                # reversed() — presumably request_params is stored in reverse
                # declaration order; TODO confirm against frasco core.
                for p in reversed(view.func.request_params):
                    args.extend(p.names)
            # Only the last registered URL rule of the view is exported.
            endpoints[view.name] = [convert_url_args(view.url_rules[-1][0]), args]
        module += ("\nservices.factory('%s', ['frascoServiceFactory', function(frascoServiceFactory) {\n"
            "return frascoServiceFactory.make('%s', '%s', [], %s);\n}]);\n") % \
            (self.options['services_name'] % name, name, self.app.services_url_prefix,\
            json.dumps(endpoints, indent=2))
    module += "\n})();";
    self.auto_assets_pkg.append({"filters": "jsmin", "contents": [filename]})
    if self.options["auto_add_services_module"]:
        self.options["app_deps"].append(self.options["services_module"])
    return [(os.path.join(self.options["static_dir"], filename), module)]
def build_templates(self, version):
    # Pre-populates angular's $templateCache with the app's html templates so
    # partials do not trigger extra HTTP requests at runtime.
    if not self.options["templates_file"]:
        return []
    module = [("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
        "\nangular.module('%s', []).run(['$templateCache', function($templateCache) {") % self.options["templates_module"]]
    matcher = re.compile(self.options["templates_matcher"], re.I)
    # Tracks absolute paths already cached, so a file is processed only once.
    done = set()
    def process_file(filename, path=None, content=None):
        # Appends one $templateCache.put() entry for the given template file,
        # keyed by its static URL. Files not matching templates_matcher or
        # already processed are skipped; content may be supplied directly to
        # avoid re-reading exported views from disk.
        if not path:
            pathname = filename
            path = os.path.dirname(filename)
            filename = os.path.basename(filename)
        else:
            pathname = os.path.join(path, filename)
        relname = self.options["static_url_path"] + "/" + os.path.relpath(path, self.options["static_dir"]) + "/" + filename
        if pathname not in done and matcher.match(relname):
            if not content:
                with codecs.open(pathname, 'r', 'utf-8') as f:
                    content = f.read()
            if self.options['append_version_to_template_names']:
                # Cache-busting query string matching the routes' templateUrl.
                relname += "?%s" % version
            module.append(" $templateCache.put('%s', %s);" % (relname, json.dumps(htmlmin.minify(content))))
            done.add(pathname)
    disable = self.options["disable_templates_cache"]
    # The cache is built unless explicitly disabled; by default (None) it is
    # only skipped in debug mode.
    if (disable is None and not self.app.debug) or disable is False:
        for url_prefix, view in self._iter_angular_views():
            if view.template:
                dest, source = self.export_view(view.template)
                process_file(dest, content=source)
        for path, dirnames, filenames in os.walk(os.path.join(self.options["static_dir"], self.options['app_dir'])):
            for filename in filenames:
                process_file(filename, path)
    module = "\n".join(module) + "\n}]);"
    filename = os.path.join(self.options["app_dir"], self.options["templates_file"])
    self.auto_assets_pkg.append({"filters": "jsmin", "contents": [filename]})
    if self.options["auto_add_templates_module"]:
        self.options["app_deps"].append(self.options["templates_module"])
    return [(os.path.join(self.options["static_dir"], filename), module)]
class AngularCompatExtension(Extension):
    """Jinja extension that does the bare minimum into making angular templates
    parsable by Jinja so gettext strings can be extracted.
    Removes angular one-time binding indicators and javascript ternary operator.
    """
    # Matches quoted strings (no capture group, left untouched) or, in group 1,
    # object literals and short javascript operator runs Jinja cannot parse.
    special_chars_re = re.compile(r"'[^']*'|\"[^\"]+\"|(\{[^{]+\}|[?:!&|$=]{1,3})")
    # Jinja-friendly replacements for the javascript operators.
    replacements = {'!': ' not ', '$': '', '=': '=', '==': '==',
        '===': '==', '!=': '!=', '!==': '!=', '&&': ' and ', '||': ' or '}

    def process_expression(self, source, start):
        """Sanitize one {{ ... }} expression beginning at *start* (the index
        just past the opening braces).

        Returns (modified source, index just past the closing braces).
        """
        p = start
        end = p
        while True:
            end = source.find('}}', p)
            if end == -1:
                # Bug fix: an unterminated "{{" previously produced position
                # end+2 == 1, making preprocess() rescan the same opener
                # forever. Skip to the end of the source instead.
                return source, len(source)
            m = self.special_chars_re.search(source, p, end)
            if not m:
                break
            if m.group(1) is None:
                # Quoted string: leave untouched, continue after it.
                p = m.end(0)
                continue
            if m.group(1).startswith('{'):
                # Object literal: replace with a harmless constant.
                repl = 'True'
            else:
                # Unknown operator runs (e.g. "?", ":") default to ' or '.
                repl = self.replacements.get(m.group(1), ' or ')
            p = m.start(1) + len(repl)
            source = source[:m.start(1)] + repl + source[m.end(1):]
        return source, end + 2

    def preprocess(self, source, name, filename=None):
        """Jinja preprocess hook: strip one-time binding markers ("{{::") and
        sanitize every {{ ... }} expression in *source*."""
        source = source.replace('{{::', '{{')
        p = 0
        while True:
            p = source.find('{{', p)
            if p == -1:
                break
            source, p = self.process_expression(source, p + 2)
        return source
"repo_name": "frascoweb/frasco-angular",
"path": "frasco_angular/__init__.py",
"copies": "1",
"size": "19402",
"license": "mit",
"hash": 7207776451617335000,
"line_mean": 47.3865336658,
"line_max": 133,
"alpha_frac": 0.5628801155,
"autogenerated": false,
"ratio": 3.804313725490196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4867193840990196,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, cached_property
import boto
import os
from tempfile import NamedTemporaryFile
class AwsFeature(Feature):
    """Feature wrapping AWS S3 uploads via boto.

    Exposes actions to upload and delete files on an S3 bucket; credentials
    and upload behavior are driven by the feature options.
    """
    name = 'aws'
    ignore_attributes = ['s3_connection']
    defaults = {'upload_bucket': None,
        'upload_filename_prefix': '',
        'upload_acl': 'public-read',
        'upload_async': False,
        'upload_signed_url': False,
        'upload_s3_urls_ttl': 3600,
        # NOTE(review): option name contains a typo ("dispotion") but is kept
        # as-is for backward compatibility with existing configurations.
        'set_content_dispotion_header_with_filename': True,
        'charset': None,
        'use_sig_v4': False,
        'region_name': None,
        'connect_params': {},
        'set_contents_headers': {}}

    @cached_property
    def s3_connection(self):
        # Lazily-created boto S3 connection, cached for the feature's lifetime.
        kwargs = {'aws_access_key_id': self.options.get('access_key'),
            'aws_secret_access_key': self.options.get('secret_key')}
        if self.options['use_sig_v4']:
            # Signature v4 must be enabled globally in boto's config and
            # needs an explicit endpoint host.
            if not boto.config.get('s3', 'use-sigv4'):
                boto.config.add_section('s3')
                boto.config.set('s3', 'use-sigv4', 'True')
            if self.options['region_name']:
                kwargs['host'] = 's3.%s.amazonaws.com' % self.options['region_name']
            else:
                kwargs['host'] = 's3.amazonaws.com'
        kwargs.update(self.options['connect_params'])
        if self.options['region_name']:
            return boto.s3.connect_to_region(self.options['region_name'], **kwargs)
        return boto.connect_s3(**kwargs)

    @action()
    def upload_file_to_s3(self, stream_or_filename, filename, bucket=None, prefix=None,
            acl=None, mimetype=None, charset=None, delete_source=False,
            content_disposition_filename=None):
        """Upload a local file or readable stream to S3 under prefix+filename.

        Content-Disposition / Content-Type headers are set according to the
        arguments and options; when delete_source is true and a filename was
        given, the local file is removed after the upload.
        """
        b = self.s3_connection.get_bucket(bucket or self.options['upload_bucket'])
        prefix = prefix or self.options.get('upload_filename_prefix', '')
        k = b.new_key(prefix + filename)
        acl = acl or self.options['upload_acl']
        headers = {}
        if self.options['set_content_dispotion_header_with_filename']:
            headers['Content-Disposition'] = 'attachment;filename="%s"' % (content_disposition_filename or filename)
        if mimetype:
            headers['Content-Type'] = mimetype
        if charset or self.options['charset']:
            if 'Content-Type' not in headers:
                headers['Content-Type'] = 'binary/octet-stream' # S3 default mimetype
            headers['Content-Type'] += '; charset=%s' % (charset or self.options['charset'])
        headers.update(self.options['set_contents_headers'])
        # Python 2: accept both str and unicode as a local file path.
        is_filename = isinstance(stream_or_filename, (str, unicode))
        if is_filename:
            k.set_contents_from_filename(stream_or_filename, headers, policy=acl)
        else:
            k.set_contents_from_string(stream_or_filename.read(), headers, policy=acl)
        if is_filename and delete_source:
            os.remove(stream_or_filename)

    @action(default_option='filename')
    def delete_s3_file(self, filename, bucket=None, prefix=None):
        """Delete prefix+filename from the given (or default upload) bucket."""
        b = self.s3_connection.get_bucket(bucket or self.options['upload_bucket'])
        prefix = prefix or self.options.get('upload_filename_prefix', '')
        b.delete_key(prefix + filename)
# Optional integration: expose the upload module when it is importable,
# silently skipping it otherwise.
try:
    import upload
except ImportError:
    pass
| {
"repo_name": "frascoweb/frasco-aws",
"path": "frasco_aws/__init__.py",
"copies": "1",
"size": "3356",
"license": "mit",
"hash": 1614320799267788300,
"line_mean": 42.0256410256,
"line_max": 116,
"alpha_frac": 0.5884982122,
"autogenerated": false,
"ratio": 3.8887601390498263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4977258351249826,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, command, request, session, current_app, current_context, hook
import pygeoip
import os
COUNTRY_DB_URL = "http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz"
CITY_DB_URL = "http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz"
class GeoipFeature(Feature):
    """GeoIP lookups (country / city) backed by the MaxMind GeoLite databases,
    with per-session caching of the results."""
    name = "geoip"
    defaults = {"country_db": "GeoIP.dat",
        "city_db": "GeoLiteCity.dat",
        "auto_geolocate": True}

    @hook()
    def before_request(self, *args, **kwargs):
        # Optionally resolve the visitor's country on every request.
        if self.options["auto_geolocate"]:
            self.geolocate_country()

    def _geolocate(self, db_option, context_key, lookup, addr, use_session_cache):
        # Shared implementation for the country/city lookups (they were
        # previously duplicated): read from the session cache when allowed,
        # otherwise query the GeoIP database and optionally cache the result.
        # The value is always exposed on the current context as *context_key*.
        if use_session_cache and context_key in session:
            value = session[context_key]
        else:
            gi = pygeoip.GeoIP(self.options[db_option])
            value = lookup(gi, addr or self.get_remote_addr())
            if use_session_cache:
                session[context_key] = value
        setattr(current_context.data, context_key, value)
        return value

    @action(default_option="addr", as_="geo_country_code")
    def geolocate_country(self, addr=None, use_session_cache=True):
        """Return the country code for *addr* (default: the request address)."""
        return self._geolocate("country_db", "geo_country_code",
            lambda gi, a: gi.country_code_by_addr(a), addr, use_session_cache)

    @action(default_option="addr", as_="geo_city")
    def geolocate_city(self, addr=None, use_session_cache=True):
        """Return the GeoIP city record for *addr* (default: the request address)."""
        return self._geolocate("city_db", "geo_city",
            lambda gi, a: gi.record_by_addr(a), addr, use_session_cache)

    @action("clear_geo_cache")
    def clear_cache(self):
        """Drop any cached geolocation results from the session."""
        session.pop("geo_country_code", None)
        session.pop("geo_city", None)

    def get_remote_addr(self):
        # In debug mode the address can be overridden with ?__geoaddr= for testing.
        if current_app.debug and "__geoaddr" in request.values:
            return request.values["__geoaddr"]
        return request.remote_addr

    @command('dlcountries')
    def download_country_db(self):
        """Download and unpack the GeoLite country database."""
        # NOTE(review): shell command built by string interpolation; the URL is
        # a module constant and the path comes from trusted config, but quoting
        # the path would be safer.
        os.system("wget -O - %s | gunzip -c > %s" % (COUNTRY_DB_URL, self.options["country_db"]))

    @command('dlcities')
    def download_city_db(self):
        """Download and unpack the GeoLite city database."""
        os.system("wget -O - %s | gunzip -c > %s" % (CITY_DB_URL, self.options["city_db"]))
"repo_name": "frascoweb/frasco-geoip",
"path": "frasco_geoip.py",
"copies": "1",
"size": "2356",
"license": "mit",
"hash": 8333822921712202000,
"line_mean": 36.4126984127,
"line_max": 97,
"alpha_frac": 0.6116298812,
"autogenerated": false,
"ratio": 3.505952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46175822621523804,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, current_app, current_context, g, hook, request, signal
from redis import StrictRedis
from werkzeug.local import LocalProxy
import time
import inspect
from .utils import *
class RedisFeature(Feature):
    """Feature exposing a Redis connection, the redis command set as actions,
    and helpers for caching views and model attributes."""
    name = "redis"
    ignore_attributes = ("connection",)
    defaults = {"url": "redis://localhost:6379/0",
        "view_cache_key_tpl": "{prefix}:{endpoint}",
        "view_cache_key_prefix": "views",
        "view_cache_timeout": 3600,
        "fragment_cache_timeout": None, # same as view cache
        "auto_model_cache_key": True,
        "cache_model_attrs": {}}

    def init_app(self, app):
        self.connection = StrictRedis.from_url(self.options["url"])
        app.jinja_env.add_extension(CacheFragmentExtension)
        self.register_redis_actions(app)
        if self.options["auto_model_cache_key"]:
            # weak=False: the lambdas would otherwise be garbage-collected
            # immediately and the signal handlers silently dropped.
            signal("model_inserted").connect(lambda _, obj: self.update_model_cache_key(obj), weak=False)
            signal("before_model_updated").connect(lambda _, obj: self.update_model_cache_key(obj, False), weak=False)

    def register_redis_actions(self, app):
        # Expose (nearly) the whole redis command set as actions named
        # "redis_<command>", each bound to this feature's connection.
        redis_ops = ("append", "bitop", "decr", "delete", "dump", "exists", "expire", "expireat", "get",
            "getbit", "getrange", "getset", "incr", "incrby", "incrbyfloat", "keys", "mget",
            "mset", "msetnx", "move", "persist", "pexpire", "pexpireat", "psetex", "pttl",
            "randomkey", "rename", "renamenx", "restore", "set", "setbit", "setex", "setnx",
            "setrange", "strlen", "substr", "ttl", "type", "blpop", "brpop", "brpoplpush",
            "lindex", "linsert", "llen", "lpop", "lpush", "lpushx", "lrange", "lrem", "lset",
            "ltrim", "rpop", "rpoplpush", "rpush", "rpushx", "sort", "scan", "sscan", "hscan",
            "zscan", "sadd", "scard", "sdiff", "sdiffstore", "sinter", "sinterstore", "sismember",
            "smembers", "smove", "spop", "srandmember", "srem", "sunion", "sunionstore", "zadd",
            "zcard", "zcount", "zincrby", "zinterstore", "zlexcount", "zrange", "zrangebylex",
            "zrangebyscore", "zrank", "zrem", "zremrangebylex", "zremrangebyrank", "zremrangebyscore",
            "zrevrange", "zrevrangebyscore", "zrevrank", "zscore", "zunionstore", "pfadd", "pfcount",
            "pfmerge", "hdel", "hexists", "hget", "hgetall", "hincrby", "hincrbyfloat", "hkeys",
            "hlen", "hset", "hsetnx", "hmset", "hmget", "hvals", "publish", "eval", "evalsha",
            "script_exists", "script_flush", "script_kill", "script_load")
        for op in redis_ops:
            app.actions.register(action("redis_" + op)(getattr(self.connection, op)))

    def update_model_cache_key(self, obj, save=True):
        """Assign a fresh cache_key to *obj* and refresh its cached attributes."""
        obj.cache_key = "%s:%s" % (obj.id, time.time())
        self.cache_model_attributes(obj)
        if save:
            obj.save()

    def cache_model_attributes(self, obj):
        # Store the configured attributes of obj in a redis hash keyed by
        # class name and id.
        key = "models_attrs:%s:%s" % (obj.__class__.__name__, obj.id)
        attrs = dict((k, getattr(obj, k)) for k in self.options["cache_model_attrs"].get(obj.__class__.__name__, []))
        if attrs:
            self.connection.hmset(key, attrs)

    def get_cached_model_attributes(self, model, id):
        """Return the cached attribute dict for (model, id), or {} when absent."""
        if inspect.isclass(model):
            model = model.__name__
        key = "models_attrs:%s:%s" % (model, id)
        return self.connection.hgetall(key) or {}

    def get_partial_model_from_cache(self, model, id):
        """Return a PartialObject pre-filled with cached attributes that lazily
        loads the full model when a missing attribute is accessed."""
        cached_attrs = self.get_cached_model_attributes(model, id)
        def loader():
            return current_app.features.models.query(model).get(id)
        return PartialObject(loader, dict(cached_attrs, id=id))

    def make_cache_key(self, key, ns=None, facets=None):
        """Build a colon-separated cache key from key parts, an optional
        namespace and optional facets (a list or a dict)."""
        if isinstance(key, (list, tuple)):
            key = ":".join(map(str, key))
        if isinstance(ns, (list, tuple)):
            ns = ":".join(map(str, ns))
        if ns:
            key = "%s:%s" % (ns, key)
        if facets:
            if isinstance(facets, dict):
                facets = ["%s=%s" % (k, v) for k, v in facets.iteritems()]
            key += ":%s" % ":".join(map(str, facets))
        return key

    def make_request_cache_key(self, key=None, ns=None, facets=None):
        """Build the cache key for the current request, formatting the
        configured template (or *key*) with request data."""
        key = key or self.options["view_cache_key_tpl"]
        key = key.format(
            prefix=self.options["view_cache_key_prefix"],
            endpoint=request.endpoint,
            path=request.path,
            method=request.method)
        # Bug fix: pass ns/facets by keyword. They used to be passed
        # positionally swapped here AND at both call sites below, which only
        # produced correct keys because the two swaps cancelled each other.
        return self.make_cache_key(key, ns=ns, facets=facets)

    @action("cached", default_option="timeout")
    def cache_view(self, timeout=None, key=None, ns=None, facets=None):
        """Serve the current request from cache if a cached body exists;
        otherwise flag the request so after_request() stores the response."""
        key = self.make_request_cache_key(key, ns=ns, facets=facets)
        data = self.connection.get(key)
        if data is not None:
            # Cache hit: abort the action context with the cached body.
            current_context.exit(data)
        g.redis_cache_view = (key, timeout or self.options["view_cache_timeout"])
        return key

    @hook()
    def after_request(self, response):
        # Store the response body when cache_view() flagged this request.
        if "redis_cache_view" in g:
            response.freeze()
            self.connection.setex(g.redis_cache_view[0], g.redis_cache_view[1], "\n".join(response.response))
        return response

    @action(default_option="key")
    def clear_request_cache(self, key=None, ns=None, facets=None):
        """Delete the cached response stored for the current request's key."""
        self.connection.delete(self.make_request_cache_key(key, ns=ns, facets=facets))
def get_current_redis():
    # Accessor used by the LocalProxy below: resolves the redis feature's
    # connection of the current app at call time.
    return current_app.features.redis.connection

# Request-time proxy to the current app's Redis connection.
redis = LocalProxy(get_current_redis)
| {
"repo_name": "frascoweb/frasco-redis",
"path": "frasco_redis/__init__.py",
"copies": "1",
"size": "5712",
"license": "mit",
"hash": -5385026747469490000,
"line_mean": 46.2066115702,
"line_max": 118,
"alpha_frac": 0.5723039216,
"autogenerated": false,
"ratio": 3.4020250148898152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9461844060128644,
"avg_score": 0.0024969752722345164,
"num_lines": 121
} |
from frasco import Feature, action, current_app, hook, request, command, current_context
from flask import has_request_context
import socketio
import os
import urlparse
import uuid
import json
import hashlib
from itsdangerous import URLSafeTimedSerializer, BadSignature
class PushFeature(Feature):
    """Feature running a socket.io push server as a managed side process and
    exposing helpers to emit events and issue signed client tokens."""
    name = 'push'
    command_group = False
    defaults = {"redis_url": None,
        "server_url": None,
        "server_port": 8888,
        "server_secured": False,
        "channel": "socketio",
        "sio_client_version": "1.4.5",
        "skip_self": False,
        "secret": None,
        "prefix_event_with_room": True,
        "default_current_user_handler": True}

    def init_app(self, app):
        if self.options['secret'] is None:
            self.options["secret"] = app.config['SECRET_KEY']
        if not self.options['redis_url'] and 'redis' in app.features:
            # Reuse the redis feature's connection URL when none is configured.
            self.options['redis_url'] = app.features.redis.options['url']
        args = ["python", "-m", "frasco_push",
            "--channel", self.options["channel"],
            "--redis", self.options["redis_url"],
            "--port", self.options["server_port"]]
        if self.options['secret']:
            args.extend(["--secret", self.options["secret"]])
        # The push server runs as a side process managed by the app.
        app.processes.append(("push", args))
        if not self.options["server_url"]:
            server_name = app.config.get('SERVER_NAME') or 'localhost'
            self.options["server_url"] = "%s://%s:%s" % (
                "https" if self.options['server_secured'] else "http",
                server_name.split(':')[0], self.options['server_port'])
        if app.features.exists('assets'):
            app.assets.register('socketio', [
                'https://cdn.socket.io/socket.io-%s.js' % self.options['sio_client_version']])
        self.token_serializer = URLSafeTimedSerializer(self.options['secret'])
        # Write-only manager used to publish events to the server process.
        self.manager = socketio.RedisManager(self.options['redis_url'],
            channel=self.options['channel'], write_only=True)
        self.current_user_handler = None
        if 'users' in app.features and self.options['default_current_user_handler']:
            self.current_user_handler = self.default_current_user_handler

    def current_user(self, func):
        """Decorator registering a custom (user_id, info, allowed_rooms) provider."""
        self.current_user_handler = func
        return func

    def default_current_user_handler(self):
        """Return (user_id, user_info, allowed_rooms) for the logged-in user,
        or (None, {"guest": True}, None) for anonymous visitors."""
        if not current_app.features.users.logged_in():
            return None, {"guest": True}, None
        info = {"guest": False}
        current = current_app.features.users.current
        allowed_rooms = None
        if hasattr(current, 'get_allowed_push_rooms'):
            allowed_rooms = current.get_allowed_push_rooms()
        if hasattr(current, 'for_json'):
            return current.get_id(), current.for_json(), allowed_rooms
        if current_app.features.users.options['username_column'] != current_app.features.users.options['email_column']:
            # Only expose the username when it is not the email address.
            info['username'] = getattr(current, current_app.features.users.options['username_column'])
        if 'users_avatar' in current_app.features:
            info['avatar_url'] = current.avatar_url
        return current.get_id(), info, allowed_rooms

    def get_direct_event(self, user_id):
        """Return the per-user event name used for direct messages."""
        if not self.options['secret']:
            raise Exception('A secret must be set to use emit_direct()')
        return hashlib.sha1(str(user_id) + self.options['secret']).hexdigest()

    @hook()
    def before_request(self):
        # Issue a socket.io token for the current user and export the
        # client-side connection variables.
        if self.options['secret']:
            user_id = None
            user_info = None
            allowed_rooms = None
            if self.current_user_handler:
                user_id, user_info, allowed_rooms = self.current_user_handler()
            current_context['socketio_token'] = self.create_token(user_info, allowed_rooms)
            if user_id:
                current_context['socketio_user_event'] = self.get_direct_event(user_id)
        if current_app.features.exists('assets'):
            current_app.config['EXPORTED_JS_VARS'].update({
                'SOCKETIO_URL': self.options['server_url'],
                'SOCKETIO_TOKEN': current_context.get('socketio_token'),
                'SOCKETIO_USER_EVENT': current_context.get('socketio_user_event')
            })

    @command('emit_push_event')
    @action('emit_push_event')
    def emit(self, event, data=None, skip_self=None, room=None, **kwargs):
        """Emit *event* to *room* (or globally), optionally skipping the
        emitting client identified by the x-socketio-sid request header."""
        if self.options['prefix_event_with_room'] and room:
            event = "%s:%s" % (room, event)
        if skip_self is None:
            skip_self = self.options['skip_self']
        if skip_self and has_request_context() and 'x-socketio-sid' in request.headers:
            kwargs['skip_sid'] = request.headers['x-socketio-sid']
        return self.manager.emit(event, data=data, room=room, **kwargs)

    def emit_to_user(self, user_id, data=None, **kwargs):
        """Emit directly to one user via their personal event name."""
        return self.emit(self.get_direct_event(user_id), data=data, **kwargs)

    @action('create_push_token', default_option='user_info', as_='token')
    def create_token(self, user_info=None, allowed_rooms=None):
        """Serialize (user_info, allowed_rooms) into a signed client token."""
        return self.token_serializer.dumps([user_info, allowed_rooms])
class PresenceRedisManager(socketio.RedisManager):
    """RedisManager subclass tracking which sids are in which rooms and the
    per-member info, broadcasting join/leave/update events to room peers."""
    def __init__(self, *args, **kwargs):
        # Unique id per server session so stale presence keys from a previous
        # run can be identified and cleaned up.
        self.presence_session_id = kwargs.pop('presence_session_id', str(uuid.uuid4()).split('-')[-1])
        self.presence_key_prefix = "presence%s:" % self.presence_session_id
        super(PresenceRedisManager, self).__init__(*args, **kwargs)

    def enter_room(self, sid, namespace, room):
        super(PresenceRedisManager, self).enter_room(sid, namespace, room)
        # Each sid also has a personal room named after itself: skip those.
        if room and room != sid:
            self.redis.sadd("%s%s:%s" % (self.presence_key_prefix, namespace, room), sid)
            self.server.emit('%s:joined' % room, {"sid": sid, "info": self.get_member_info(sid, namespace)},
                room=room, skip_sid=sid)

    def leave_room(self, sid, namespace, room):
        super(PresenceRedisManager, self).leave_room(sid, namespace, room)
        if room and room != sid:
            self.redis.srem("%s%s:%s" % (self.presence_key_prefix, namespace, room), sid)
            self.server.emit('%s:left' % room, sid, room=room, skip_sid=sid)

    def get_room_members(self, namespace, room):
        """Return the set of sids currently present in *room*."""
        return self.redis.smembers("%s%s:%s" % (self.presence_key_prefix, namespace, room))

    def set_member_info(self, sid, namespace, info):
        """Store *info* for *sid* and notify all of its rooms of the update."""
        self.redis.set("%s%s@%s" % (self.presence_key_prefix, namespace, sid), json.dumps(info))
        for room in self.get_rooms(sid, namespace):
            if not room or room == sid:
                continue
            self.server.emit('%s:member_updated' % room, {"sid": sid, "info": info}, room=room, skip_sid=sid)

    def get_member_info(self, sid, namespace):
        """Return the stored info dict for *sid*, or {} when absent or corrupt."""
        data = self.redis.get("%s%s@%s" % (self.presence_key_prefix, namespace, sid))
        if data:
            try:
                return json.loads(data)
            # Bug fix: was a bare "except:" that also swallowed system-exiting
            # exceptions; only malformed JSON should be ignored here.
            except ValueError:
                pass
        return {}

    def disconnect(self, sid, namespace):
        super(PresenceRedisManager, self).disconnect(sid, namespace)
        self.redis.delete("%s%s@%s" % (self.presence_key_prefix, namespace, sid))

    def cleanup_presence_keys(self):
        # Delete all presence keys of this session in a single pipeline.
        keys = self.redis.keys('%s*' % self.presence_key_prefix)
        pipe = self.redis.pipeline()
        for key in keys:
            pipe.delete(key)
        pipe.execute()
def create_app(redis_url='redis://', channel='socketio', secret=None, token_max_age=None):
    """Build the socket.io WSGI application with presence tracking.

    When *secret* is set, clients must present a valid signed token (as
    issued by PushFeature.create_token) in the "token" query-string
    parameter; the token carries the member info and allowed rooms.
    """
    mgr = PresenceRedisManager(redis_url, channel=channel)
    sio = socketio.Server(client_manager=mgr, async_mode='eventlet')
    token_serializer = URLSafeTimedSerializer(secret)
    default_ns = '/'
    @sio.on('connect')
    def connect(sid, env):
        if not secret:
            # No secret configured: accept any connection, anonymously.
            return
        try:
            qs = urlparse.parse_qs(env['QUERY_STRING'])
            if not 'token' in qs:
                return False
            user_info, allowed_rooms = token_serializer.loads(qs['token'][0], max_age=token_max_age)
            # Remember room restrictions for this connection; checked in join().
            env['allowed_rooms'] = allowed_rooms
            if user_info:
                mgr.set_member_info(sid, default_ns, user_info)
        except BadSignature as e:
            return False
    @sio.on('members')
    def get_room_members(sid, data):
        # Only members of a room may list that room's members.
        if not data.get('room') or data['room'] not in mgr.get_rooms(sid, default_ns):
            return []
        return {sid: mgr.get_member_info(sid, default_ns) for sid in mgr.get_room_members(default_ns, data['room'])}
    @sio.on('join')
    def join(sid, data):
        if sio.environ[sid].get('allowed_rooms') and data['room'] not in sio.environ[sid]['allowed_rooms']:
            return False
        sio.enter_room(sid, data['room'])
        # Returning the member list lets the client initialize its roster.
        return get_room_members(sid, data)
    @sio.on('broadcast')
    def room_broadcast(sid, data):
        sio.emit("%s:%s" % (data['room'], data['event']), data.get('data'), room=data['room'], skip_sid=sid)
    @sio.on('leave')
    def leave(sid, data):
        sio.leave_room(sid, data['room'])
    @sio.on('set')
    def set(sid, data):
        mgr.set_member_info(sid, default_ns, data)
    @sio.on('get')
    def get(sid, data):
        return mgr.get_member_info(data['sid'], default_ns)
    return socketio.Middleware(sio)
def _get_env_var(wsgi_env, name, default=None):
return wsgi_env.get(name, os.environ.get(name, default))
# Lazily-created application singleton; built on the first request.
_wsgi_app = None

def wsgi_app(environ, start_response):
    """WSGI entry point: build the socket.io app on first call (configured
    from SIO_* variables in the WSGI or process environment), then delegate."""
    global _wsgi_app
    if not _wsgi_app:
        _wsgi_app = create_app(_get_env_var(environ, 'SIO_REDIS_URL', 'redis://'),
            _get_env_var(environ, 'SIO_CHANNEL', 'socketio'), _get_env_var(environ, 'SIO_SECRET'))
    return _wsgi_app(environ, start_response)
def cleanup_wsgi_app():
    # Remove this session's presence keys from redis (no-op when the app was
    # never created).
    if _wsgi_app:
        _wsgi_app.engineio_app.manager.cleanup_presence_keys()
def run_server(port=8888, **kwargs):
    """Run the push server with eventlet on *port*; extra kwargs are passed
    to the app as SIO_* environ variables (e.g. redis_url -> SIO_REDIS_URL)."""
    from eventlet import wsgi
    import eventlet
    eventlet.sleep()
    eventlet.monkey_patch()
    env = dict([("SIO_%s" % k.upper(), v) for k, v in kwargs.items()])
    wsgi.server(eventlet.listen(('', port)), wsgi_app, environ=env)
    # Server stopped: drop this session's presence keys.
    cleanup_wsgi_app()
if __name__ == '__main__':
    import argparse
    # Bug fix: prog/description said "tornadopush" — a copy-paste leftover
    # from another project; this is the frasco-push server.
    argparser = argparse.ArgumentParser(prog='frasco-push',
        description='Start frasco-push server')
    argparser.add_argument('-p', '--port', default=8888, type=int,
        help='Port number')
    argparser.add_argument('-r', '--redis', default=os.environ.get('SIO_REDIS_URL', 'redis://'), type=str,
        help='Redis URL')
    argparser.add_argument('-c', '--channel', default=os.environ.get('SIO_CHANNEL', 'socketio'), type=str,
        help='Channel')
    argparser.add_argument('-s', '--secret', default=os.environ.get('SIO_SECRET'), type=str,
        help='Secret')
    args = argparser.parse_args()
    run_server(args.port, redis_url=args.redis, channel=args.channel, secret=args.secret)
"repo_name": "frascoweb/frasco-push",
"path": "frasco_push.py",
"copies": "1",
"size": "10950",
"license": "mit",
"hash": 2212900664044784600,
"line_mean": 40.1691729323,
"line_max": 119,
"alpha_frac": 0.60456621,
"autogenerated": false,
"ratio": 3.5889872173058013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4693553427305801,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, current_app, Markup, hook, session
from werkzeug.local import LocalProxy
from .api import ShareJsApi
from redis import StrictRedis
import os
import hashlib
import uuid
class SharejsFeature(Feature):
    """Feature running a ShareJS collaborative-editing server as a side
    process and managing per-session document access tokens in redis."""
    name = 'sharejs'
    ignore_attributes = ['current_token']
    defaults = {"mongodb_url": "mongodb://localhost:27017/sharejs?auto_reconnect",
        "server_url": None,
        "server_port": 3000,
        "server_host": "127.0.0.1",
        "api_url": None,
        "token_ttl": 3600,
        "rest_api": False,
        "use_websocket": False,
        "local_rest_api": False,
        "local_rest_api_port": 3100,
        "redis_host": "localhost",
        "redis_port": 6379}

    def init_app(self, app):
        sharejs_path = os.path.join(os.path.dirname(__file__), 'sharejs')
        self.serverjs_path = os.path.join(sharejs_path, 'server.js')
        args = ["--mongodb-url", self.options['mongodb_url'],
            "--redis-host", self.options['redis_host'],
            "--redis-port", self.options['redis_port']]
        sharejs_args = ["node", self.serverjs_path, "--port",
            self.options['server_port'], "--host", self.options['server_host']] + args
        if self.options['rest_api']:
            sharejs_args.append('--rest')
        if self.options['use_websocket']:
            sharejs_args.append('--websocket')
        app.processes.append(("sharejs", sharejs_args))
        if self.options['server_url'] is None:
            self.options['server_url'] = 'http://%s:%s/channel' % (
                app.config.get('SERVER_NAME') or 'localhost',
                self.options['server_port'])
        if self.options['local_rest_api']:
            # Separate rest-only server process used for server-side API calls.
            app.processes.append(("sharejs_api", ["node", self.serverjs_path,
                "--rest-only", "--port", self.options['local_rest_api_port']] + args))
            if self.options['api_url'] is None:
                self.options['api_url'] = 'http://localhost:%s' % \
                    self.options['local_rest_api_port']
        self.api = ShareJsApi(self.options['api_url'])
        self.redis = StrictRedis(self.options['redis_host'], self.options['redis_port'])
        app.jinja_env.globals['current_sharejs_token'] = current_sharejs_token
        app.jinja_env.globals['sharejs_js_vars'] = LocalProxy(
            lambda: self.get_sharejs_js_vars())
        if app.features.exists('assets'):
            app.features.assets.expose_package('sharejs', __name__)
            assets = ['sharejs/bcsocket.js', 'sharejs/text.js', 'sharejs/json0.js',
                'sharejs/share.js', 'sharejs/connect.js']
            if self.options['use_websocket']:
                assets.append('sharejs/reconnecting-websocket.js')
            app.assets.register('sharejs', {
                "contents": [{"filters": "jsmin", "contents": assets}],
                "output": 'sharejs'})

    @hook()
    def before_request(self):
        # Export the connection settings needed by the sharejs client code.
        current_app.config['EXPORTED_JS_VARS'].update({
            'SHAREJS_SERVER_URL': self.options['server_url'],
            'SHAREJS_TOKEN': self.current_token,
            'SHAREJS_USE_WEBSOCKET': self.options['use_websocket']
        })

    @action('create_sharejs_token', default_option='doc_id', as_='sharejs_token')
    def create_token(self, doc_id=None, *docs):
        """Create a random token authorized for the given document ids."""
        token = hashlib.sha1(str(uuid.uuid4())).hexdigest()
        self.authorize_docs(doc_id or 0, *docs, token=token)
        return token

    @property
    def current_token(self):
        # Session-bound token, created lazily; its redis TTL is refreshed on
        # every access.
        if 'sjstoken' not in session:
            token = self.create_token()
            session['sjstoken'] = token
        else:
            self.redis.expire('sharejs:%s' % session['sjstoken'],
                self.options["token_ttl"])
        return session['sjstoken']

    @action('authorize_sharejs_doc', default_option='doc_id')
    def authorize_docs(self, doc_id, *docs, **kwargs):
        """Grant a token (kwargs['token'] or the session token) access to
        *doc_id* and any additional *docs*; returns the token."""
        token = kwargs.get('token') or self.current_token
        key = 'sharejs:%s' % token
        # Bug fix: list() accepts a single iterable, so the original
        # list([doc_id], *docs) raised TypeError whenever extra docs were
        # passed; concatenate the ids instead.
        self.redis.sadd(key, *map(str, [doc_id] + list(docs)))
        self.redis.expire(key, self.options["token_ttl"])
        return token
current_sharejs_token = LocalProxy(lambda: current_app.features.sharejs.current_token)
| {
"repo_name": "frascoweb/frasco-sharejs",
"path": "frasco_sharejs/__init__.py",
"copies": "1",
"size": "4310",
"license": "mit",
"hash": -1084792616485833200,
"line_mean": 40.4423076923,
"line_max": 88,
"alpha_frac": 0.5754060325,
"autogenerated": false,
"ratio": 3.652542372881356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4727948405381356,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, current_app
from statsd import StatsClient
class StatsdFeature(Feature):
    """Feature exposing statsd metrics (counters, gauges, sets, timers) as
    frasco actions. When no host is configured every operation is a no-op
    (apart from the optional logging)."""
    name = "statsd"
    defaults = {"host": "localhost",
        "port": 8125,
        "prefix": None,
        "maxudpsize": 512,
        "log": False}

    def init_app(self, app):
        # Bug fix: always define self.client — it used to stay undefined when
        # no host was configured, making every action raise AttributeError on
        # its "if self.client" check.
        self.client = None
        if self.options['host']:
            self.client = StatsClient(
                host=self.options['host'],
                port=self.options['port'],
                prefix=self.options['prefix'],
                maxudpsize=self.options['maxudpsize'])
        # Stack of running timers used by start_timer/stop_timer.
        self.timer_stack = []

    def _log(self, action, stat, value):
        # Optionally mirror every metric operation to the app logger.
        if self.options['log']:
            current_app.logger.info('STATSD: %s: %s = %s' % (action, stat, value))

    @action('statsd_incr', default_option='stat')
    def incr(self, stat, value=1, rate=1):
        """Increment a counter."""
        if self.client:
            self.client.incr(stat, value, rate)
        self._log('incr', stat, value)

    @action('statsd_decr', default_option='stat')
    def decr(self, stat, value=1, rate=1):
        """Decrement a counter."""
        if self.client:
            self.client.decr(stat, value, rate)
        self._log('decr', stat, value)

    @action('statsd_gauge')
    def gauge(self, stat, value, rate=1, delta=False):
        """Set (or, with delta=True, adjust) a gauge."""
        if self.client:
            self.client.gauge(stat, value, rate, delta)
        self._log('gauge', stat, value)

    @action('statsd_gauge_incr', default_option='stat')
    def gauge_incr(self, stat, value=1, rate=1):
        """Increment a gauge (delta update)."""
        self.gauge(stat, value, rate, True)

    @action('statsd_set')
    def set(self, stat, value, rate=1):
        """Add a value to a statsd set."""
        if self.client:
            self.client.set(stat, value, rate)
        self._log('set', stat, value)

    @action('statsd_timing')
    def timing(self, stat, delta, rate=1):
        """Record a timing value."""
        if self.client:
            self.client.timing(stat, delta, rate)
        self._log('timing', stat, delta)

    @action('statsd_timer')
    def timer(self, stat, rate=1):
        """Return a (not yet started) timer object, or None without a client."""
        if self.client:
            return self.client.timer(stat, rate)

    @action('statsd_start_timer', default_option='stat')
    def start_timer(self, stat, rate=1):
        """Start a timer, push it on the stack and return it (None without a
        client)."""
        # Consistency fix: guard against a missing client like every other
        # action, instead of raising AttributeError.
        if not self.client:
            return None
        timer = self.client.timer(stat, rate)
        self.timer_stack.append(timer)
        timer.start()
        return timer

    @action('statsd_stop_timer')
    def stop_timer(self):
        """Stop and return the most recently started timer, if any."""
        if self.timer_stack:
            timer = self.timer_stack.pop()
            timer.stop()
            return timer
"repo_name": "frascoweb/frasco-statsd",
"path": "frasco_statsd.py",
"copies": "1",
"size": "2445",
"license": "mit",
"hash": 2643800093455001000,
"line_mean": 30.7662337662,
"line_max": 82,
"alpha_frac": 0.5619631902,
"autogenerated": false,
"ratio": 3.5693430656934306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46313062558934304,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, current_app, pass_feature, copy_extra_feature_options, ContextExitException, signal
import ldap
from ldap.filter import escape_filter_chars
class UsersLdapFeature(Feature):
    """Authenticate users against an LDAP directory.

    Registers an authentification handler on the users feature; a
    successful LDAP bind returns the matching local user, which is
    created on first login (see _get_or_create_user_from_ldap).
    """
    name = "users_ldap"
    requires = ["users"]
    # NOTE(review): 'track_uuid' appears to double as the name of the LDAP
    # attribute holding the directory UUID (falsy disables tracking), while
    # 'track_uuid_attr' is the local model column it is stored in — confirm.
    defaults = {"server": None,
                "use_tls": False,
                "tls_cacert_dir": None,
                "tls_cacert_file": None,
                "tls_cert_file": None,
                "tls_key_file": None,
                "tls_require_cert": None,
                "tls_demand": False,
                "bind_dn": None,
                "bind_password": None,
                "user_dn": '',
                "user_filter": "(&(objectClass=inetOrgPerson)(uid=%(user)s))",
                "username_attr": "uid",
                "append_username_domain": None,
                "strip_username_domain": True,
                "email_attr": "mail",
                "additional_attrs": {},
                "group_flags": {},
                "group_dn": '',
                "group_filter": "(&(objectclass=groupOfNames)(cn=%(group)s))",
                "group_member_attr": "member",
                "group_member_uid_user_attr": None,
                "track_uuid": False,
                "track_uuid_attr": "ldap_uuid"}

    # Signals emitted on LDAP login / first-time auto-signup.
    ldap_login = signal('users_ldap_login')
    ldap_signup = signal('users_ldap_signup')

    def init_app(self, app):
        """Hook LDAP authentication into the users feature."""
        app.features.users.add_authentification_handler(self.authentify)
        if self.options['track_uuid']:
            # Make sure the user model has a column to store the LDAP UUID.
            app.features.models.ensure_model(app.features.users.model, **dict([
                (self.options['track_uuid_attr'], str)]))

    def connect(self, bind=True):
        """Open a connection to the configured LDAP server.

        Applies the TLS options, forwards extra feature options as raw
        ldap.OPT_* settings, then optionally binds with the service
        account and starts TLS.
        """
        if self.options['tls_require_cert']:
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
        elif self.options['tls_require_cert'] is False:
            # Explicit False disables cert checking; None keeps the library
            # default untouched.
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        if self.options['tls_cacert_dir']:
            ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, self.options['tls_cacert_dir'])
        if self.options['tls_cacert_file']:
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.options['tls_cacert_file'])
        if self.options['tls_cert_file']:
            ldap.set_option(ldap.OPT_X_TLS_CERTFILE, self.options['tls_cert_file'])
        if self.options['tls_key_file']:
            ldap.set_option(ldap.OPT_X_TLS_KEYFILE, self.options['tls_key_file'])
        conn = ldap.initialize(self.options['server'])
        if self.options['tls_demand']:
            conn.set_option(ldap.OPT_REFERRALS, 0)
            conn.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
            conn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)
            conn.set_option(ldap.OPT_X_TLS_DEMAND, True)
        ldap_opts = {}
        copy_extra_feature_options(self, ldap_opts)
        for key, value in ldap_opts.iteritems():
            # Extra feature options map to ldap.OPT_* constants by name.
            conn.set_option(getattr(ldap, 'OPT_%s' % key.upper()), value)
        if bind and self.options['bind_dn']:
            conn.simple_bind_s(self.options['bind_dn'].encode('utf-8'),
                self.options['bind_password'].encode('utf-8'))
        if self.options['use_tls']:
            conn.start_tls_s()
        return conn

    def search_objects(self, base_dn, filter, conn=None):
        """Run a subtree search and return the raw result list."""
        if not conn:
            conn = self.connect()
        return conn.search_s(base_dn, ldap.SCOPE_SUBTREE, filter)

    def search_user(self, id, conn=None):
        """Return the first (dn, attrs) entry matching the user filter, or None."""
        filter = self.options['user_filter'] % {'user': escape_filter_chars(id)}
        rs = self.search_objects(self.options['user_dn'], filter, conn)
        if rs:
            return rs[0]

    def search_group(self, id, conn=None):
        """Return the first (dn, attrs) entry matching the group filter, or None."""
        filter = self.options['group_filter'] % {'group': escape_filter_chars(id)}
        rs = self.search_objects(self.options['group_dn'], filter, conn)
        if rs:
            return rs[0]

    def is_member_of(self, group_dn, user_dn, member_attr=None, conn=None, ignore_errors=True):
        """Check group membership via an LDAP compare operation.

        Returns False on LDAP errors unless ignore_errors is False.
        """
        if not conn:
            conn = self.connect()
        if not member_attr:
            member_attr = self.options['group_member_attr']
        try:
            return bool(conn.compare_s(group_dn, member_attr, user_dn))
        except ldap.LDAPError as e:
            if not ignore_errors:
                raise
            return False

    def authentify(self, username, password):
        """Authentification handler: bind as the user to verify the password.

        Returns the local user on success; None (implicitly) on failure or
        LDAP error.
        """
        try:
            conn = self.connect()
            if self.options['append_username_domain'] and "@" not in username:
                username += "@" + self.options['append_username_domain']
            ldap_user = self.search_user(username, conn=conn)
            if ldap_user:
                dn, attrs = ldap_user
                # Re-bind on a fresh, unbound connection with the user's own
                # credentials; raises on bad password.
                self.connect(bind=False).simple_bind_s(dn, password)
                return self._get_or_create_user_from_ldap(dn, attrs, conn=conn)
        except ldap.LDAPError as e:
            current_app.log_exception(e)

    @pass_feature('users')
    def _get_or_create_user_from_ldap(self, dn, attrs, users, conn=None):
        """Find the local user matching an LDAP entry, creating it if needed."""
        filters = {}
        if self.options['track_uuid']:
            filters[self.options['track_uuid_attr']] = attrs[self.options['track_uuid']][0]
        else:
            filters[users.options['email_column']] = attrs[self.options['email_attr']][0].lower()
        user = users.query.filter(**filters).first()
        if user:
            self.ldap_login.send(self, user=user, dn=dn, attrs=attrs, conn=conn)
            return user
        username = attrs[self.options['username_attr']][0]
        if "@" in username and self.options['strip_username_domain']:
            username = username.split('@')[0]
        user = users.model()
        user.email = attrs[self.options['email_attr']][0]
        user.username = username
        if self.options['track_uuid']:
            setattr(user, self.options['track_uuid_attr'],
                attrs[self.options['track_uuid']][0])
        for target, src in self.options['additional_attrs'].iteritems():
            if src in attrs:
                setattr(user, target, attrs[src][0])
        # Map configured group memberships onto boolean flags of the user,
        # caching each group lookup.
        memberships = {}
        for flag, group_dn in self.options['group_flags'].iteritems():
            member = getattr(user, self.options['group_member_uid_user_attr'])\
                if self.options['group_member_uid_user_attr'] else dn
            if group_dn not in memberships:
                memberships[group_dn] = self.is_member_of(group_dn, member, conn=conn)
            setattr(user, flag, memberships[group_dn])
        try:
            users.signup(user, must_provide_password=False, provider='ldap')
        except ContextExitException:
            # Signup aborted by a handler; do not authenticate the user.
            return None
        self.ldap_signup.send(self, user=user, dn=dn, attrs=attrs, conn=conn)
        return user
| {
"repo_name": "frascoweb/frasco-users-ldap",
"path": "frasco_users_ldap.py",
"copies": "1",
"size": "6796",
"license": "mit",
"hash": 7928991458768226000,
"line_mean": 40.950617284,
"line_max": 119,
"alpha_frac": 0.5681283108,
"autogenerated": false,
"ratio": 3.6774891774891776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4745617488289178,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, current_app, request, abort, listens_to, current_context
from frasco.utils import (AttrDict, import_string, populate_obj, RequirementMissingError,\
find_classes_in_module, slugify)
from frasco.expression import compile_expr, eval_expr
from frasco.templating import FileLoader, FileSystemLoader
from werkzeug.local import LocalProxy
from .backend import *
from .utils import *
from .query import *
from .transaction import *
import inspect
import os
import inflection
# Module-level handle on the active backend's db object; assigned by
# ModelsFeature.init_app() and exposed through the `db` proxy below.
_db = None

def get_current_db():
    """Return the db object of the currently initialized models backend."""
    return _db

db = LocalProxy(get_current_db)

# Form helpers are optional: the feature degrades gracefully when their
# dependencies are not installed.
form_imported = False
try:
    from .form import *
    form_imported = True
except ImportError:
    pass
class ModelsFeature(Feature):
    """Models feature: wraps a pluggable persistence backend and exposes
    query/CRUD actions to the rest of the application.
    """
    name = "models"
    defaults = {"backend": None,
                "pagination_per_page": 10,
                "scopes": {},
                "import_models": True,
                "ensure_schema": True,
                "admin_models": []}

    def init_app(self, app):
        """Instantiate the backend and auto-import the app's models package."""
        if not self.options["backend"]:
            raise Exception("Missing backend")
        self.backend_cls = self.get_backend_class(self.options["backend"])
        self.backend = self.backend_cls(app, self.options)
        self.scopes = compile_expr(self.options["scopes"])
        self.models = {}
        self.delayed_tx_calls = delayed_tx_calls
        global _db
        self.db = _db = self.backend.db
        if self.options["import_models"]:
            # import_models may be a package name or just a truthy flag
            # (defaulting to "<app>.models").
            models_pkg = self.options['import_models']
            if not isinstance(self.options['import_models'], str):
                models_pkg = "models"
            if app.import_name != "__main__":
                models_pkg = app.import_name + "." + models_pkg
            try:
                __import__(models_pkg)
            except ImportError as e:
                # Only swallow "module does not exist" for the package we
                # tried; re-raise genuine import errors inside it.
                if "No module named %s" % models_pkg.split('.')[-1] not in e.message:
                    raise
        if form_imported:
            app.jinja_env.loader.bottom_loaders.append(FileLoader(
                os.path.join(os.path.dirname(__file__), "form_template.html"), "model_form_template.html"))
            app.jinja_env.loader.bottom_loaders.append(FileLoader(
                os.path.join(os.path.dirname(__file__), "bs_form_template.html"), "model_bs_form_template.html"))

    def init_admin(self, admin, app):
        """Register an admin blueprint (and optional counter) per admin model."""
        from .admin import create_model_admin_blueprint
        app.jinja_env.loader.bottom_loaders.append(FileSystemLoader(
            os.path.join(os.path.dirname(__file__), "admin/templates")))
        for model in self.options['admin_models']:
            kwargs = {}
            if isinstance(model, dict):
                model, kwargs = model.items()[0]
            model = self.ensure_model(model)
            with_counter = kwargs.pop('with_counter', False)
            counter_filters = kwargs.pop('counter_filters', {})
            title = inflection.pluralize(inflection.humanize(model.__name__))
            kwargs.setdefault('title', title)
            kwargs.setdefault('menu', title)
            name = inflection.pluralize(inflection.underscore(model.__name__))
            admin.register_blueprint(create_model_admin_blueprint(name, __name__, model, **kwargs))
            if with_counter:
                # Fix: bind model/counter_filters as defaults so each
                # counter counts its own model; the late-binding closure
                # previously made every counter use the last loop values.
                admin.register_dashboard_counter(title,
                    lambda model=model, counter_filters=counter_filters:
                        self.query(model).filter(**counter_filters).count(),
                    icon=kwargs.get('icon'))

    def get_backend_class(self, name):
        """Resolve a backend name/import path to a Backend subclass."""
        try:
            backend_cls = import_string("frasco_models.backends.%s" % name)
        except ImportError:
            backend_cls = import_string(name)
        if inspect.ismodule(backend_cls):
            # Gives the possibility to reference a module and auto-discover the Backend class
            classes = find_classes_in_module(backend_cls, (Backend,))
            if not classes:
                raise ImportError("Cannot find a Backend class in module '%s'" % name)
            if len(classes) > 1:
                raise ImportError("Model backend '%s' references a module with multiple backends" % name)
            backend_cls = classes[0]
        elif not issubclass(backend_cls, Backend):
            raise ImportError("Class '%s' is not a subclass of Backend" % name)
        return backend_cls

    def require_backend(self, name):
        """Raise unless the active backend is the named one."""
        if self.backend.name != name:
            raise RequirementMissingError("A models backend named '%s' is required but '%s' is used" % (name, self.backend.name))

    def ensure_model(self, model_name, **fields):
        """Return (and lazily create) the model class, ensuring its schema."""
        if inspect.isclass(model_name):
            model_name = model_name.__name__
        if model_name not in self.models:
            self.models[model_name] = self.backend.ensure_model(model_name)
        if fields and self.options['ensure_schema']:
            for k, v in fields.iteritems():
                # Normalize shorthand "name=type" into the dict form.
                if not isinstance(v, dict):
                    fields[k] = dict(type=v)
            self.backend.ensure_schema(model_name, fields)
        return self.models[model_name]

    def __getitem__(self, name):
        return self.ensure_model(name)

    def __setitem__(self, name, model):
        self.models[name] = model

    def __contains__(self, name):
        return name in self.models

    def query(self, model):
        """Build a Query over a model via the active backend."""
        return Query(self.ensure_model(model), self.backend)

    def transaction(self, *args, **kwargs):
        return transaction(*args, **kwargs)

    def scoped_query(self, model, scope=None):
        """Build a query pre-filtered by context scopes and named scopes."""
        q = self.query(model)
        if "model_scopes" in current_context.data:
            q = q.filter(**current_context.data.model_scopes.get(model.__name__, {}))
        if scope:
            scopes = scope if isinstance(scope, list) else [scope]
            for s in scopes:
                if s not in self.scopes:
                    raise QueryError("Missing model scope '%s'" % s)
                q = q.filter(**eval_expr(self.scopes[s], current_context.vars))
        return q

    @action("build_model_query")
    def build_query(self, model, scope=None, filter_from=None, search_query=None, search_query_default_field=None,
                    order_by=None, limit=None, offset=None, **kwargs):
        """Compose a query from scopes, request data, filters and paging options."""
        q = self.scoped_query(model, scope)
        filters = {}
        if filter_from == "form":
            filters.update(dict([(f.name, f.data) for f in current_context.data.form]))
        elif filter_from == "url":
            filters.update(dict([(k, v) for k, v in request.values.items()]))
        elif filter_from == "args":
            filters.update(dict([(k, v) for k, v in request.view_args.items()]))
        if 'filters_or' in kwargs:
            q = q.filter(or_(*kwargs.pop('filters_or')))
        # Remaining kwargs are treated as filters unless 'filters' is given.
        filters.update(kwargs.get("filters", kwargs))
        if filters:
            q = q.filter(**filters)
        if search_query:
            q = q.filter(*parse_search_query(search_query, search_query_default_field))
        if order_by:
            q = q.order_by(order_by)
        if limit:
            q = q.limit(limit)
        if offset:
            q = q.offset(offset)
        return q

    @action("paginate_query")
    def paginate(self, query, page=None, per_page=None, check_bounds=True):
        """Return (paged query, Pagination); raises PageOutOfBoundError."""
        if page is None:
            page = int(page or request.values.get("page", 1))
        if per_page is None:
            per_page = self.options["pagination_per_page"]
        # Count over the unordered, unbounded query.
        total = query.order_by(None).offset(None).limit(None).count()
        pagination = Pagination(page, per_page, total)
        if check_bounds and pagination.nb_pages > 0 and (page < 1 or page > pagination.nb_pages):
            raise PageOutOfBoundError()
        return query.offset(pagination.offset).limit(per_page), pagination

    @action("find_model")
    def find_first(self, model, not_found_404=True, **query):
        """Return the first matching object; 404 by default when missing."""
        model = self.ensure_model(model)
        obj = self.build_query(model, **query).first()
        if obj is None and not_found_404:
            abort(404)
        if not self.find_first.as_:
            self.find_first.as_ = as_single_model(model)
        current_context.data.model = obj
        return obj

    @action("find_models", default_option="model")
    def find_all(self, model, paginate=False, page=None, pagination_var="pagination", **query):
        """Return a query of matching objects, optionally paginated."""
        model = self.ensure_model(model)
        q = self.build_query(model, **query)
        if paginate:
            per_page = paginate if not isinstance(paginate, bool) else None
            try:
                q, pagination = self.paginate(q, page, per_page)
            except PageOutOfBoundError:
                abort(404)
            current_context.vars[pagination_var] = pagination
        if not self.find_all.as_:
            self.find_all.as_ = as_many_models(model)
        current_context.data.models = q
        return q

    @action("count_models", default_option="model")
    def count(self, model, **query):
        """Count matching objects."""
        model = self.ensure_model(model)
        count = self.build_query(model, **query).count()
        if not self.count.as_:
            self.count.as_ = "%s_count" % as_single_model(model)
        return count

    @action("create_model", default_option="model")
    def create(self, model, **attrs):
        """Instantiate (without saving) a model object."""
        obj = self.ensure_model(model)(**clean_kwargs_proxy(attrs))
        if not self.create.as_:
            self.create.as_ = as_single_model(obj.__class__)
        return obj

    @action("save_model", default_option="obj")
    @as_transaction
    def save(self, obj=None, model=None, **attrs):
        """Persist an object, creating it from `model` when obj is None."""
        auto_assign = False
        obj = clean_proxy(obj)
        if obj is None:
            obj = self.ensure_model(model)()
            auto_assign = True
        if attrs:
            populate_obj(obj, clean_kwargs_proxy(attrs))
        self.backend.add(obj)
        if not self.save.as_ and auto_assign:
            self.save.as_ = as_single_model(obj.__class__)
        return obj

    @action("create_model_from_form", default_option="model", requires=["form"])
    def create_from_form(self, model, form=None, **attrs):
        """Instantiate a model populated from a form (not saved)."""
        form = form or current_context.data.form
        obj = self.ensure_model(model)()
        form.populate_obj(obj)
        populate_obj(obj, clean_kwargs_proxy(attrs))
        if not self.create_from_form.as_:
            self.create_from_form.as_ = as_single_model(obj.__class__)
        return obj

    @action("save_form_model", default_option="model", requires=["form"])
    @as_transaction
    def save_from_form(self, obj=None, model=None, form=None, **attrs):
        """Populate an object from a form and persist it."""
        form = form or current_context.data.form
        obj = clean_proxy(obj)
        auto_assign = False
        if obj is None:
            if isinstance(model, str):
                obj = self.ensure_model(model)()
                auto_assign = True
            else:
                obj = model()
        form.populate_obj(obj)
        populate_obj(obj, clean_kwargs_proxy(attrs))
        self.backend.add(obj)
        if not self.save_from_form.as_ and auto_assign:
            self.save_from_form.as_ = as_single_model(obj.__class__)
        return obj

    @action("delete_model", default_option="obj")
    @as_transaction
    def delete(self, obj):
        """Delete an object through the backend."""
        self.backend.remove(obj)

    @action("create_form_from_model", default_option="model", requires=["form"])
    def create_form_from_model(self, model, **kwargs):
        return create_form_from_model(model, **kwargs)

    @action("check_model_not_exists")
    def check_not_exists(self, model, error_message=None, **query):
        """Exit the action group when a matching object already exists."""
        q = self.build_query(model, **query)
        if q.count() > 0:
            if error_message:
                # Fix: flash is not imported at this module's top level,
                # which made this branch raise NameError; import it locally.
                from frasco import flash
                flash(error_message, "error")
            current_context.exit(trigger_action_group="model_exists")

    @action("define_model_scope")
    def define_scope(self, model, **filters):
        """Accumulate per-model filters in the current context."""
        current_context.data.setdefault("model_scopes", {})
        current_context.data.model_scopes.setdefault(model, {})
        current_context.data.model_scopes[model].update(filters.get('filters', filters))

    @action(as_="slug")
    def create_unique_slug(self, value, model, column="slug", **kwargs):
        """Slugify a value and make it unique for the model column."""
        slug = slugify(value)
        return ensure_unique_value(model, column, slug, **kwargs)
def save_model(model):
    """Persist *model* through the app's active models backend."""
    backend = current_app.features.models.backend
    backend.add(model)
def delete_model(model):
    """Remove *model* through the app's active models backend."""
    backend = current_app.features.models.backend
    backend.remove(model)
| {
"repo_name": "frascoweb/frasco-models",
"path": "frasco_models/__init__.py",
"copies": "1",
"size": "12408",
"license": "mit",
"hash": -8926344580636542000,
"line_mean": 38.141955836,
"line_max": 129,
"alpha_frac": 0.5942134107,
"autogenerated": false,
"ratio": 3.861811391223156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4956024801923156,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, current_app, request, url_for
import hashlib
import urllib
import math
import random
import base64
import requests
try:
from frasco_upload import url_for_upload
except ImportError:
pass
def svg_to_base64_data(svg):
    """Return *svg* markup as a ``data:`` URI (base64-encoded).

    Fix/generalization: accepts either text or bytes. On Python 3,
    b64encode requires bytes and returns bytes, so text input is UTF-8
    encoded first and the result decoded back to str; on Python 2 the
    behavior is unchanged.
    """
    if not isinstance(svg, bytes):
        svg = svg.encode('utf-8')
    encoded = base64.b64encode(svg)
    if not isinstance(encoded, str):  # Python 3: bytes -> str
        encoded = encoded.decode('ascii')
    return 'data:image/svg+xml;base64,' + encoded
class UsersAvatarFeature(Feature):
    """Compute avatar URLs for users.

    get_avatar_url() resolves in order: uploaded avatar file, forced
    first-letter SVG ("flavatar"), forced gravatar, custom URL template,
    then the built-in /avatar route which tries gravatar and falls back
    to the generated SVG.
    """
    name = "users_avatar"
    requires = ["users"]
    defaults = {"avatar_column": "avatar_filename",
                "url": None,
                "avatar_size": 80,
                "url_scheme": "",
                "add_flavatar_route": False,
                "add_avatar_route": True,
                "try_gravatar": True,
                "force_gravatar": False,
                "gravatar_size": None,
                "gravatar_email_column": None,
                "gravatar_default": "mm",
                "force_flavatar": False,
                "flavatar_size": "100%",
                "flavatar_name_column": None,
                "flavatar_font_size": 80,
                "flavatar_text_dy": "0.32em",
                "flavatar_length": 1,
                "flavatar_text_color": "#ffffff",
                "flavatar_bg_colors": ["#5A8770", "#B2B7BB", "#6FA9AB", "#F5AF29", "#0088B9", "#F18636", "#D93A37", "#A6B12E", "#5C9BBC", "#F5888D", "#9A89B5", "#407887", "#9A89B5", "#5A8770", "#D33F33", "#A2B01F", "#F0B126", "#0087BF", "#F18636", "#0087BF", "#B2B7BB", "#72ACAE", "#9C8AB4", "#5A8770", "#EEB424", "#407887"]}

    def init_app(self, app):
        """Ensure the avatar column, expose user.avatar_url, add routes."""
        user_model = app.features.models.ensure_model(app.features.users.model,
            **dict([(self.options["avatar_column"], str)]))
        if not hasattr(user_model, 'avatar_url'):
            user_model.avatar_url = property(self.get_avatar_url)

        def flavatar(name, bgcolorstr=None):
            # View serving the generated first-letter SVG.
            if bgcolorstr is None:
                bgcolorstr = request.args.get('bgcolorstr')
            return self.generate_first_letter_avatar_svg(
                name, bgcolorstr, request.args.get('size')), 200, {'Content-Type': 'image/svg+xml'}

        if self.options['add_avatar_route']:
            @app.route('/avatar/<hash>/<name>')
            def avatar(hash, name):
                # Proxy gravatar first (when enabled); fall back to the
                # generated SVG when gravatar has no image or errors.
                if self.options['try_gravatar']:
                    size = self.options['gravatar_size'] or self.options["avatar_size"]
                    try:
                        r = requests.get(self._format_gravatar_url(hash, s=size, d=404, _scheme='http'))
                        if r.status_code != 404:
                            return r.content, 200, {'Content-Type': r.headers['content-type']}
                    except Exception:
                        pass
                return flavatar(name, hash)
        if self.options['add_flavatar_route']:
            app.add_url_rule('/flavatar/<name>.svg', 'flavatar', flavatar)
            app.add_url_rule('/flavatar/<name>/<bgcolorstr>.svg', 'flavatar', flavatar)

    def get_avatar_url(self, user):
        """Return the avatar URL for a user, following the resolution order
        documented on the class; may return None when nothing applies.
        """
        filename = getattr(user, self.options["avatar_column"], None)
        if filename:
            return url_for_upload(filename)
        hash = None
        username = getattr(user, self.options["flavatar_name_column"] or
            current_app.features.users.options["username_column"], None)
        if username:
            if isinstance(username, unicode):
                username = username.lower().encode('utf-8')
            else:
                username = username.lower()
            hash = hashlib.md5(username).hexdigest()
        email = getattr(user, self.options["gravatar_email_column"] or
            current_app.features.users.options["email_column"], None)
        if email:
            # Email hash wins over username hash when both exist.
            hash = hashlib.md5(email.lower()).hexdigest()
        if self.options["force_flavatar"] and (email or username):
            if self.options['add_flavatar_route']:
                return url_for('flavatar', name=username, bgcolorstr=hash, _external=True,
                    _scheme=self.options['url_scheme'])
            return svg_to_base64_data(self.generate_first_letter_avatar_svg(username or email, hash))
        if self.options["force_gravatar"] and email:
            return self.get_gravatar_url(email)
        if self.options['url'] and email:
            return self.options["url"].format(email=email, email_hash=hash, username=username)
        if self.options['add_avatar_route']:
            return url_for('avatar', hash=hash, name=(username or email), _external=True,
                _scheme=self.options['url_scheme'])

    @action("gravatar_url", default_option="email", as_="gravatar_url")
    def get_gravatar_url(self, email, size=None, default=None):
        """Build a gravatar URL for an email address."""
        hash = hashlib.md5(email.lower()).hexdigest()
        size = size or self.options['gravatar_size'] or self.options["avatar_size"]
        default = default or self.options['gravatar_default']
        return self._format_gravatar_url(hash, s=size, d=default)

    def _format_gravatar_url(self, hash, _scheme=None, **kwargs):
        # None-valued query params are dropped; lstrip(':') yields a
        # scheme-relative URL when no scheme is configured.
        return ("%s://www.gravatar.com/avatar/%s?%s" % (self.options["url_scheme"] or _scheme, hash,
            urllib.urlencode({k: v for k, v in kwargs.items() if v is not None}))).lstrip(':')

    def generate_first_letter_avatar_svg(self, name, bgcolorstr=None, size=None):
        """Render an SVG avatar showing the first letter(s) of *name*.

        The background color is picked deterministically from bgcolorstr
        when given, otherwise derived from the first character.
        """
        size = size or self.options['flavatar_size'] or self.options["avatar_size"]
        if size and isinstance(size, int):
            size = "%spx" % size
        svg_tpl = ('<svg xmlns="http://www.w3.org/2000/svg" pointer-events="none" viewBox="0 0 100 100" '
                   'width="%(w)s" height="%(h)s" style="background-color: %(bgcolor)s;">%(letter)s</svg>')
        char_svg_tpl = ('<text text-anchor="middle" y="50%%" x="50%%" dy="%(dy)s" '
                        'pointer-events="auto" fill="%(fgcolor)s" font-family="'
                        'HelveticaNeue-Light,Helvetica Neue Light,Helvetica Neue,Helvetica, Arial,Lucida Grande, sans-serif" '
                        'style="font-weight: 400; font-size: %(size)spx">%(char)s</text>')
        if not name:
            text = '?'
        else:
            text = name[0:min(self.options['flavatar_length'], len(name))]
        colors_len = len(self.options['flavatar_bg_colors'])
        if bgcolorstr:
            bgcolor = sum([ord(c) for c in bgcolorstr]) % colors_len
        elif ord(text[0]) < 65:
            # Non-letter first char: pick a random color.
            bgcolor = random.randint(0, colors_len - 1)
        else:
            bgcolor = int(math.floor((ord(text[0]) - 65) % colors_len))
        return svg_tpl % {
            'bgcolor': self.options['flavatar_bg_colors'][bgcolor],
            'w': size,
            'h': size,
            'letter': char_svg_tpl % {
                'dy': self.options['flavatar_text_dy'],
                'fgcolor': self.options['flavatar_text_color'],
                'size': self.options['flavatar_font_size'],
                'char': text
            }
        }
"repo_name": "frascoweb/frasco-users-avatar",
"path": "frasco_users_avatar.py",
"copies": "1",
"size": "6908",
"license": "mit",
"hash": 7948590781712802000,
"line_mean": 45.3691275168,
"line_max": 325,
"alpha_frac": 0.5592067169,
"autogenerated": false,
"ratio": 3.5755693581780537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9620391013898144,
"avg_score": 0.002877012235981724,
"num_lines": 149
} |
from frasco import (Feature, action, current_context, hook, listens_to, command,
signal, flash, request, redirect, current_app, OptionMissingError,
InvalidOptionError, populate_obj, Markup, html_tag, url_for, session,
lazy_translate, copy_extra_feature_options, translate, current_app)
from frasco.utils import ContextStack
from frasco_models import transaction, as_transaction, save_model
import flask_login
from flask_login import _get_user, login_required, make_secure_token
from flask_bcrypt import Bcrypt
from flask_oauthlib.client import OAuth
from flask import has_request_context, _request_ctx_stack
from werkzeug.local import LocalProxy
from itsdangerous import URLSafeTimedSerializer, BadSignature
from contextlib import contextmanager
import uuid
import datetime
import os
import re
from .blueprint import bp
from .jinja_ext import LoginRequiredExtension, AnonymousOnlyExtension
class UserMixin(flask_login.UserMixin):
    """Mixin injected into the user model.

    Extends flask_login's mixin so a model can opt out of being active
    through an ``active_account`` attribute.
    """

    @property
    def is_active(self):
        # Absence of the flag (or None) means the account is active.
        flag = getattr(self, 'active_account', None)
        return True if flag is None else flag

    def get_auth_token(self):
        # Serializer is attached to the model by UsersFeature.init_app.
        return self.auth_token_serializer.dumps(self.get_id())
class PasswordValidationFailedException(Exception):
    """Raised when a candidate password fails validation.

    Carries the failure *reason* and, optionally, the offending *rule*.
    """

    def __init__(self, reason, rule=None):
        super(PasswordValidationFailedException, self).__init__()
        self.reason = reason
        self.rule = rule
class SignupValidationFailedException(Exception):
    """Raised when a signup attempt fails validation; carries the *reason*."""

    def __init__(self, reason):
        super(SignupValidationFailedException, self).__init__()
        self.reason = reason
# Proxies so application code can access the current user / login state
# without going through the feature registry explicitly.
current_user = LocalProxy(lambda: current_app.features.users.current)
is_user_logged_in = LocalProxy(lambda: current_app.features.users.logged_in())
class UsersFeature(Feature):
"""User management
"""
name = "users"
blueprints = (bp,)
requires = ["forms", "models"]
defaults = {"login_view": "users.login",
"model": "User",
"username_column": "email",
"password_column": "password",
"email_column": "email",
"username_is_unique": True,
"email_is_unique": True,
"must_provide_username": True,
"must_provide_email": True,
"allow_email_or_username_login": True,
"allow_login": True,
"allow_signup": True,
"forbidden_usernames": [],
"min_username_length": 1,
"allow_spaces_in_username": False,
"username_case_sensitive": False,
"validate_password_regexps": None,
"prevent_password_reuse": False,
"max_password_reuse_saved": None,
"min_time_between_password_change": None,
"expire_password_after": None,
"require_code_on_signup": False,
"allowed_signup_codes": [],
"recaptcha_key": None,
"recaptcha_secret": None,
"rate_limit_count": None,
"rate_limit_period": 60,
"oauth_signup_only": False,
"oauth_login_only": False,
"oauth_must_signup": False,
"oauth_must_provide_password": False,
"login_user_on_signup": True,
"login_user_on_reset_password": True,
"disable_default_authentication": False,
"default_auth_provider_name": "app",
"remember_days": 365,
"reset_password_ttl": 86400,
"redirect_after_login": "index",
"redirect_after_login_disallowed": None,
"redirect_after_signup": "index",
"redirect_after_signup_disallowed": None, # go to login
"redirect_after_logout": "index",
"redirect_after_reset_password_token": False,
"redirect_after_reset_password": "index",
"send_welcome_email": False,
"send_reset_password_email": True,
"login_error_message": lazy_translate(u"Invalid email or password"),
"login_disallowed_message": None,
"login_required_message": lazy_translate(u"Please log in to access this page"),
"fresh_login_required_message": lazy_translate(u"Please reauthenticate to access this page"),
"password_expired_message": lazy_translate(u"Your password has expired, please enter a new one"),
"must_provide_username_error_message": lazy_translate(u"A username must be provided"),
"password_reused_message": lazy_translate(u"You cannot use a password which you have previously used"),
"min_time_between_password_change_message": lazy_translate(u"You have changed your password too recently"),
"validate_password_regexps_message": lazy_translate(u"The password does not respect the following rule: {rule}"),
"must_provide_email_error_message": lazy_translate(u"An email address must be provided"),
"signup_disallowed_message": None,
"signup_user_exists_message": lazy_translate(u"An account using the same username already exists"),
"signup_email_exists_message": lazy_translate(u"An account using the same email already exists"),
"username_too_short_message": lazy_translate(u"The username is too short"),
"username_has_spaces_message": lazy_translate(u"The username cannot contain spaces"),
"password_confirm_failed_message": lazy_translate(u"The two passwords do not match"),
"bad_signup_code_message": lazy_translate(u"The provided code is not valid"),
"rate_limit_reached_message": lazy_translate(u"Too many accounts have been created from this location in a too short period. Please, try again later"),
"reset_password_token_error_message": lazy_translate(u"This email does not exist in our database"),
"reset_password_token_success_message": lazy_translate(u"An email has been sent to your email address with a link to reset your password"),
"reset_password_error_message": lazy_translate(u"Invalid or expired link to reset your password"),
"reset_password_success_message": lazy_translate(u"Password successfully resetted"),
"update_password_error_message": lazy_translate(u"Invalid current password"),
"update_user_email_error_message": lazy_translate(u"An account using the same email already exists"),
"oauth_user_already_exists_message": lazy_translate(u"This {provider} account has already been used on a different account"),
"recaptcha_fail_message": lazy_translate(u"The captcha validation has failed"),
"enable_admin": True}
init_signal = signal('users_init')
signup_signal = signal('user_signup')
reset_password_token_signal = signal('user_reset_password_token')
reset_password_signal = signal('user_reset_password')
update_user_password_signal = signal('user_update_password')
ignore_attributes = ['current']
def init_app(self, app):
    """Wire the users feature into the app.

    Sets up bcrypt and the token serializer, registers the jinja
    extensions, configures flask_login, and ensures the user model's
    schema (audit columns, username/email/password columns).
    """
    self.app = app
    copy_extra_feature_options(self, app.config)
    app.config.setdefault("REMEMBER_COOKIE_DURATION", datetime.timedelta(days=self.options["remember_days"]))
    self.bcrypt = app.bcrypt = Bcrypt()
    self.token_serializer = URLSafeTimedSerializer(app.config["SECRET_KEY"])
    app.jinja_env.add_extension(LoginRequiredExtension)
    app.jinja_env.add_extension(AnonymousOnlyExtension)
    self.oauth = OAuth()
    self.oauth_apps = []
    # Pluggable hooks filled in later by extensions / app code.
    self.authentify_handlers = []
    self.signup_code_checker = None
    self.user_validator = None
    self.override_builtin_user_validation = False
    self.login_validator = None
    self.password_validator = None
    self.login_manager = flask_login.LoginManager(app)
    self.login_manager.login_view = self.options["login_view"]
    self.login_manager.refresh_view = self.options["login_view"]
    self.login_manager.login_message = self.options["login_required_message"]
    self.login_manager.login_message_category = "warning"
    self.login_manager.needs_refresh_message = self.options["fresh_login_required_message"]
    self.login_manager.needs_refresh_message_category = "warning"
    # this allows to set a current user without a request context
    self.no_req_ctx_user_stack = ContextStack()
    if app.features.exists("emails"):
        app.features.emails.add_templates_from_package(__name__)
    if app.features.exists("babel"):
        app.features.babel.add_extract_dir(os.path.dirname(__file__), ["templates"])
    model = self.model = app.features.models.ensure_model(self.options["model"],
        signup_at=datetime.datetime,
        signup_from=str,
        signup_provider=str,
        auth_providers=list,
        last_login_at=datetime.datetime,
        last_login_from=str,
        last_login_provider=str,
        last_password_change_at=datetime.datetime,
        must_reset_password_at_login=bool)
    if self.options['prevent_password_reuse']:
        app.features.models.ensure_model(self.options["model"], previous_passwords=list)
    # Inject UserMixin into the model's bases if it is not already there.
    if UserMixin not in model.__bases__:
        model.__bases__ = (UserMixin,) + model.__bases__
    model.auth_token_serializer = self.token_serializer
    self.query = app.features.models.query(self.options['model'])
    if self.options["username_column"] != self.options["email_column"]:
        # A shadow *_lcase column backs case-insensitive username lookups.
        app.features.models.ensure_model(model, **dict([
            (self.options["username_column"], dict(type=str, index=True)),
            (self.options["username_column"] + '_lcase', dict(type=str, index=True, unique=self.options["username_is_unique"]))]))
    app.features.models.ensure_model(model, **dict([
        (self.options["email_column"], dict(type=str, index=True, unique=self.options["email_is_unique"])),
        (self.options["password_column"], str)]))
    self.login_manager.user_loader(self.find_by_id)
    self.login_manager.token_loader(self.find_by_token)
    self.init_signal.send(app)
def init_admin(self, admin, app):
    """Register the users admin blueprint and a dashboard counter."""
    if not self.options['enable_admin']:
        return
    from .admin import create_admin_blueprint
    admin.register_blueprint(create_admin_blueprint(self))
    admin.register_dashboard_counter('Users', lambda: self.query.count(), icon='fa-users')
def create_oauth_app(self, name, login_view=None, **kwargs):
    """Creates a flask-oauthlib remote app; remembers its login view if given."""
    remote = self.oauth.remote_app(name, **kwargs)
    if login_view:
        self.oauth_apps.append((name, login_view))
    return remote
def add_authentification_handler(self, callback, only=False):
    """Appends an authentication callback; only=True discards existing ones first."""
    if only:
        # replace the whole handler chain with just this callback
        self.authentify_handlers = []
    self.authentify_handlers.append(callback)
def authentification_handler(self, only=False):
    """Decorator form of add_authentification_handler()."""
    def decorator(func):
        self.add_authentification_handler(func, only)
        return func
    return decorator
@property
def current(self):
    """Returns the current user.

    Lookup order: outside a request context, the top of the manual user
    stack (see start_user_context()); inside a request, a request-bound
    user stack takes precedence over flask-login's current user.
    """
    if not has_request_context():
        return self.no_req_ctx_user_stack.top
    user_stack = getattr(_request_ctx_stack.top, 'user_stack', None)
    if user_stack and user_stack.top:
        return user_stack.top
    # fall back to flask-login's session-based user
    return _get_user()
def start_user_context(self, user):
    """Pushes *user* as the current user (request-bound stack when available)."""
    if has_request_context():
        # lazily create a per-request user stack
        if not hasattr(_request_ctx_stack.top, 'user_stack'):
            _request_ctx_stack.top.user_stack = ContextStack()
        stack = _request_ctx_stack.top.user_stack
    else:
        stack = self.no_req_ctx_user_stack
    stack.push(user)
def stop_user_context(self):
    """Pops the user pushed by the matching start_user_context() call."""
    if has_request_context():
        _request_ctx_stack.top.user_stack.pop()
    else:
        self.no_req_ctx_user_stack.pop()
@contextmanager
def user_context(self, user):
    """Context manager: runs the body with *user* as the current user."""
    self.start_user_context(user)
    try:
        yield user
    finally:
        # always unwind, even when the body raises
        self.stop_user_context()
def logged_in(self):
    """Checks if the user is logged in
    """
    user = self.current
    return user and user.is_authenticated
def find_by_id(self, id):
    """Fetches a user by primary key, or None."""
    return self.query.get(id)
def find_by_username(self, username):
    """Fetches a user by username, honoring the case-sensitivity option."""
    column = self.options['username_column']
    if not self.options['username_case_sensitive']:
        # case-insensitive lookups go through the shadow lowercase column
        column += '_lcase'
        username = username.lower()
    return self.query.filter((column, username.strip())).first()
def find_by_email(self, email):
    """Fetches a user by email address (emails are stored stripped + lowercased)."""
    return self.query.filter((self.options['email_column'], email.strip().lower())).first()
def generate_user_token(self, user, salt=None):
    """Serializes the user's id into a signed, optionally salted token."""
    return self.token_serializer.dumps(str(user.id), salt=salt)
def find_by_token(self, token, salt=None, max_age=None):
    """Loads a user instance identified by the token generated using generate_user_token()

    Returns None for an invalid, tampered or expired token.
    """
    # NOTE(review): `model` is never used below; the subscript would raise
    # KeyError if the model is unregistered — confirm whether that guard
    # is intentional before removing it.
    model = current_app.features.models[self.options["model"]]
    try:
        id = self.token_serializer.loads(token, salt=salt, max_age=max_age)
    except BadSignature:
        # bad or expired signature -> no user
        return None
    if id is None:
        return None
    return self.find_by_id(id)
def validate_password(self, user, password, pwhash, flash_messages=True, raise_error=True):
    """Validates a candidate *password* for *user* against the configured rules:
    minimum delay between changes, regexp rules, reuse prevention and the
    optional custom password_validator. Returns True when valid; otherwise
    flashes/raises/returns False depending on the flags.
    """
    pwcol = self.options["password_column"]
    # NOTE(review): pwhash is (re)computed here but not referenced again in
    # this method — presumably kept for the callers' benefit; verify.
    pwhash = self.bcrypt.generate_password_hash(password) if not pwhash else pwhash
    # rate-limit password changes, except when a reset was forced by an admin
    if self.options['min_time_between_password_change'] and user.last_password_change_at and not user.must_reset_password_at_login:
        if (datetime.datetime.utcnow() - user.last_password_change_at).total_seconds() < self.options['min_time_between_password_change']:
            if flash_messages and self.options['min_time_between_password_change_message']:
                flash(self.options['min_time_between_password_change_message'], 'error')
            if raise_error:
                raise PasswordValidationFailedException("password_change_too_soon")
            return False
    # each rule is a (regexp, human label) pair; all must match
    if self.options['validate_password_regexps']:
        for pattern, label in self.options['validate_password_regexps']:
            if not re.search(pattern, password):
                if flash_messages and self.options['validate_password_regexps_message']:
                    flash(self.options['validate_password_regexps_message'].format(rule=label), 'error')
                if raise_error:
                    raise PasswordValidationFailedException("invalid_password", label)
                return False
    # reject the current password and any saved previous hashes
    if self.options['prevent_password_reuse'] and getattr(user, pwcol):
        for oldhash in [getattr(user, pwcol)] + (user.previous_passwords or []):
            if oldhash and self.bcrypt.check_password_hash(oldhash, password):
                if flash_messages and self.options['password_reused_message']:
                    flash(self.options['password_reused_message'], 'error')
                if raise_error:
                    raise PasswordValidationFailedException("password_reused")
                return False
    # app-provided hook gets the final say
    if self.password_validator and not self.password_validator(password):
        if raise_error:
            raise PasswordValidationFailedException("password_validator_failed")
        return False
    return True
def update_password(self, user, password, skip_validation=False):
    """Updates the password of a user

    Hashes *password*, optionally validates it (see validate_password()),
    records the previous hash for reuse prevention, and clears the
    forced-reset flag. Does NOT save the user.
    """
    pwcol = self.options["password_column"]
    pwhash = self.bcrypt.generate_password_hash(password)
    if not skip_validation:
        # raises PasswordValidationFailedException on failure
        self.validate_password(user, password, pwhash)
    if self.options['prevent_password_reuse']:
        # prepend the hash being replaced, trim to the configured history size
        user.previous_passwords = [getattr(user, pwcol)] + (user.previous_passwords or [])
        if self.options['max_password_reuse_saved']:
            user.previous_passwords = user.previous_passwords[:self.options['max_password_reuse_saved']]
    setattr(user, pwcol, pwhash)
    user.last_password_change_at = datetime.datetime.utcnow()
    user.must_reset_password_at_login = False
def check_password(self, user, password):
    """True when *password* matches the user's stored bcrypt hash."""
    stored = getattr(user, self.options['password_column'], None)
    return stored and self.bcrypt.check_password_hash(stored, password)
@hook()
def before_request(self, *args, **kwargs):
    # expose flask-login's current_user in the frasco context for actions/templates
    current_context["current_user"] = current_user
@action()
def login_required(self, fresh=False, redirect_to=None):
    """Ensures that a user is authenticated
    (and that the session is fresh when fresh=True).
    """
    # nothing to do when authenticated (and fresh enough)
    if self.logged_in() and not (fresh and not self.login_manager.login_fresh()):
        return
    if redirect_to:
        resp = redirect(redirect_to)
    else:
        resp = self.login_manager.unauthorized()
    current_context.exit(resp, trigger_action_group="missing_user")
@action(default_option="user", defaults=dict(remember=None))
def login(self, user=None, remember=False, provider=None, form=None, force=False, **attrs):
    """Logs a user in, either directly (when *user* is given) or by
    authenticating the credentials found in *form* / the current context form.

    Exits the current action group on failure; returns the user on the
    direct-login path.
    """
    if user:
        self._login(user, provider, remember=remember, force=force, **attrs)
        return user
    if self.options["oauth_login_only"]:
        # BUG FIX: was `users.options[...]` — `users` is undefined in this
        # scope; the options live on self
        if self.options["login_disallowed_message"]:
            flash(self.options["login_disallowed_message"], "error")
        return redirect(url_for("users.login", next=request.args.get("next")))
    ucol = self.options['username_column']
    pwcol = self.options['password_column']
    # BUG FIX: was `if form: form = opts["form"]` — `opts` is undefined; a
    # form passed as an option is simply used as-is
    if not form:
        if "form" in current_context.data and request.method == "POST":
            form = current_context.data.form
        else:
            raise OptionMissingError("Missing 'form' option or form for 'login' action")
    user = self.authentify(form[ucol].data, form[pwcol].data)
    if user and self.login_validator and not self.login_validator(user):
        user = None
    if not user:
        if self.options["login_error_message"]:
            flash(self.options["login_error_message"], "error")
        current_context.exit(trigger_action_group="login_failed")
    # remember=None means "take it from the form's remember checkbox"
    if remember is None and "remember" in form:
        remember = form["remember"].data
    self._login(user, provider, remember, force, **attrs)
def authentify(self, username, password):
    """Returns the user matching the credentials, or None.

    Custom handlers registered via add_authentification_handler() run first;
    unless disabled, the default username/email + password check follows.
    """
    for func in self.authentify_handlers:
        user = func(username, password)
        if user:
            return user
    if not self.options["disable_default_authentication"]:
        ucol = self.options['username_column']
        emailcol = self.options['email_column']
        username = username.strip()
        if ucol == emailcol:
            # logging in by email: emails are stored lowercased
            username = username.lower()
        elif not self.options['username_case_sensitive']:
            # use the shadow lowercase column for case-insensitive match
            ucol += '_lcase'
            username = username.lower()
        filters = [(ucol, username)]
        # optionally accept the email in the username field
        if self.options['allow_email_or_username_login'] and ucol != emailcol and username:
            filters.extend([(emailcol, username.lower())])
        user = self.query.filter({"$or": filters}).first()
        if user and self.check_password(user, password):
            return user
@as_transaction
def _login(self, user, provider=None, remember=False, force=False, **attrs):
    """Records login metadata on the user, saves it and delegates to flask-login."""
    user.last_login_provider = provider or self.options["default_auth_provider_name"]
    user.last_login_from = request.remote_addr
    user.last_login_at = datetime.datetime.now()
    populate_obj(user, attrs)
    save_model(user)
    flask_login.login_user(user, remember=remember, force=force)
@action()
def confirm_login(self):
    """Re-confirms a non-fresh session via flask-login."""
    self.login_manager.confirm_login()
@action()
def logout(self):
    """Logs out the current user through flask-login."""
    flask_login.logout_user()
@command(with_request_ctx=True)
@command.arg("username_")
@command.arg("password")
@action()
def signup(self, username_=None, password=None, user=None, form=None, login_user=None, send_email=None,
           must_provide_password=True, provider=None, validate_user=True, validate_password=True, **attrs):
    """Creates a new user account, from explicit values, an existing model
    instance or the current form. Applies rate limiting, password and user
    validation, then saves and runs post-signup actions. Returns the user.

    NOTE(review): the block nesting below is reconstructed from a
    whitespace-mangled source — confirm against upstream before relying on
    the exact transaction boundaries.
    """
    with transaction():
        ucol = self.options['username_column']
        pwcol = self.options['password_column']
        emailcol = self.options['email_column']
        pwconfirmfield = pwcol + "_confirm"
        if not user and not username_ and not form:
            if "form" in current_context.data and request.method == "POST":
                form = current_context.data.form
            else:
                raise OptionMissingError(("Missing 'username' and 'password' options or "
                                          "'form' option or form for 'signup' action"))
        # no request context when invoked from the CLI
        try:
            remote_addr = request.remote_addr
        except RuntimeError:
            remote_addr = None
        if remote_addr and self.options["rate_limit_count"]:
            self.check_rate_limit(remote_addr, "signup_rate_limited")
        # allow passing a ready-made model instance positionally
        if isinstance(username_, self.model):
            user = username_
            username_ = None
        if not user:
            user = self.model()
            if username_:
                setattr(user, ucol, username_)
        if form:
            if must_provide_password:
                # the password field is manually validated to allow for cases when the
                # password is not provided and not required (ie. oauth login)
                if pwcol not in form or not form[pwcol].data.strip():
                    form[pwcol].errors.append(form[pwcol].gettext('This field is required.'))
                    current_context.exit(trigger_action_group="form_validation_failed")
                self.check_password_confirm(form, "signup_pwd_mismatch")
                password = form[pwcol].data
            form.populate_obj(user, ignore_fields=[pwcol, pwconfirmfield])
        populate_obj(user, attrs)
        if password and validate_password:
            try:
                # hashes, validates and assigns the password on the user
                self.update_password(user, password)
            except PasswordValidationFailedException as e:
                current_context["password_validation_error"] = e.reason
                current_context.exit(trigger_action_group="password_validation_failed")
        # normalize username/email and maintain the lowercase shadow column
        if getattr(user, ucol, None):
            setattr(user, ucol, getattr(user, ucol).strip())
            if ucol != emailcol:
                setattr(user, ucol + '_lcase', getattr(user, ucol).lower())
        if getattr(user, emailcol, None):
            setattr(user, emailcol, getattr(user, emailcol).strip().lower())
        if validate_user:
            try:
                self.validate_user(user, must_provide_password=must_provide_password, is_signup=True)
            except SignupValidationFailedException as e:
                current_context["signup_error"] = e.reason
                current_context.exit(trigger_action_group="signup_validation_failed")
        user.signup_at = datetime.datetime.now()
        user.signup_from = remote_addr
        user.signup_provider = provider or self.options["default_auth_provider_name"]
        user.auth_providers = [user.signup_provider]
        with transaction():
            save_model(user)
            self.post_signup(user, login_user, send_email)
        return user
def check_password_confirm(self, form, trigger_action_group=None):
    """Aborts the current action when the password confirmation field does
    not match the password field. Silently does nothing when either field
    is missing from the form.
    """
    pwcol = self.options['password_column']
    confirm_field = pwcol + "_confirm"
    if pwcol not in form or confirm_field not in form:
        return
    if form[confirm_field].data != form[pwcol].data:
        if self.options["password_confirm_failed_message"]:
            flash(self.options["password_confirm_failed_message"], "error")
        current_context.exit(trigger_action_group=trigger_action_group)
def check_signup_code(self, code):
    """Validates a signup code via the custom checker when set, otherwise
    against the allowed_signup_codes option."""
    if self.signup_code_checker:
        return self.signup_code_checker(code)
    return code in self.options['allowed_signup_codes']
def check_rate_limit(self, ip, trigger_action_group=None):
    """Aborts the current action when *ip* already signed up too many times
    within the configured rate-limit window."""
    window_start = datetime.datetime.now() - datetime.timedelta(seconds=self.options['rate_limit_period'])
    nb_signups = self.query.filter(signup_from=ip, signup_at__gte=window_start).count()
    if nb_signups < self.options['rate_limit_count']:
        return
    if self.options["rate_limit_reached_message"]:
        flash(self.options["rate_limit_reached_message"], "error")
    current_context.exit(trigger_action_group=trigger_action_group)
def validate_user(self, user=None, username=None, email=None, password=None, ignore_user_id=None,
                  must_provide_password=False, flash_messages=True, raise_error=True, is_signup=False):
    """Validates a new user object before saving it in the database.
    Checks if a password is present unless must_provide_password is False.
    Checks if the username is unique unless the option username_is_unique is set to False.
    If the email column exists on the user object and the option email_is_unique is set to True,
    also checks if the email is unique.

    Returns True when valid; otherwise flashes/raises/returns False
    depending on flash_messages / raise_error.
    """
    ucol = self.options['username_column']
    emailcol = self.options['email_column']
    # extract the values from the model instance when one is given
    if user:
        username = getattr(user, ucol, None)
        email = getattr(user, emailcol, None)
        password = getattr(user, self.options["password_column"], None)
        ignore_user_id = getattr(user, 'id', None)
    if username is not None:
        username = username.strip()
    if email is not None:
        email = email.strip().lower()
    if must_provide_password and not password:
        if raise_error:
            raise SignupValidationFailedException("password_missing")
        return False
    # a custom validator may fully replace the built-in checks below
    if self.user_validator and self.override_builtin_user_validation:
        if not self.user_validator(username, email, password, is_signup):
            if raise_error:
                raise SignupValidationFailedException("failed_validation")
            return False
        return True
    # username checks only apply when a separate username column is used
    if ucol != emailcol and self.options["must_provide_username"]:
        if not username:
            if flash_messages and self.options["must_provide_username_error_message"]:
                flash(self.options["must_provide_username_error_message"], "error")
            if raise_error:
                raise SignupValidationFailedException("username_missing")
            return False
        # forbidden usernames deliberately reuse the "user exists" message
        # so they are indistinguishable from taken names
        if username.lower() in self.options['forbidden_usernames']:
            if flash_messages and self.options["signup_user_exists_message"]:
                flash(self.options["signup_user_exists_message"], "error")
            if raise_error:
                raise SignupValidationFailedException("username_forbidden")
            return False
        if len(username) < self.options['min_username_length']:
            if flash_messages and self.options["username_too_short_message"]:
                flash(self.options["username_too_short_message"], "error")
            if raise_error:
                raise SignupValidationFailedException("username_too_short")
            return False
        if not self.options['allow_spaces_in_username'] and " " in username:
            if flash_messages and self.options["username_has_spaces_message"]:
                flash(self.options["username_has_spaces_message"], "error")
            if raise_error:
                raise SignupValidationFailedException("username_has_spaces")
            return False
    if ucol != emailcol and username and self.options["username_is_unique"]:
        col = ucol if self.options["username_case_sensitive"] else ucol + '_lcase'
        uname = username if self.options["username_case_sensitive"] else username.lower()
        q = self.query.filter((col, uname))
        if ignore_user_id:
            # don't flag the user as a duplicate of itself on updates
            q = q.filter(("id__ne", ignore_user_id))
        if q.count() > 0:
            if flash_messages and self.options["signup_user_exists_message"]:
                flash(self.options["signup_user_exists_message"], "error")
            if raise_error:
                raise SignupValidationFailedException("user_exists")
            return False
    if self.options["must_provide_email"] and not email:
        if flash_messages and self.options["must_provide_email_error_message"]:
            flash(self.options["must_provide_email_error_message"], "error")
        if raise_error:
            raise SignupValidationFailedException("email_missing")
        return False
    if self.options["email_is_unique"] and email:
        q = self.query.filter((emailcol, email))
        if ignore_user_id:
            q = q.filter(("id__ne", ignore_user_id))
        if q.count() > 0:
            if flash_messages and self.options["signup_email_exists_message"]:
                flash(self.options["signup_email_exists_message"], "error")
            if raise_error:
                raise SignupValidationFailedException("email_exists")
            return False
    # custom validator runs in addition to the built-in checks
    if self.user_validator and not self.user_validator(username, email, password, is_signup):
        if raise_error:
            raise SignupValidationFailedException("failed_validation")
        return False
    return True
def post_signup(self, user, login_user=None, send_email=None):
    """Executes post signup actions: sending the signal, logging in the user and
    sending the welcome email
    """
    self.signup_signal.send(self, user=user)
    # explicit argument wins; None falls back to the feature option
    should_login = self.options["login_user_on_signup"] if login_user is None else login_user
    if should_login:
        self._login(user, user.signup_provider)
    to_email = getattr(user, self.options["email_column"], None)
    should_send = self.options["send_welcome_email"] if send_email is None else send_email
    if to_email and should_send:
        # the option may be True (default template) or a template name
        template = "users/welcome.txt" if self.options["send_welcome_email"] == True else self.options["send_welcome_email"]
        current_app.features.emails.send(to_email, template, user=user)
def _gen_reset_password_token(self, user):
    """Signed token scoped to the password-reset flow."""
    return self.generate_user_token(user, salt="password-reset")
@action(default_option="user")
def gen_reset_password_token(self, user=None, send_email=None):
    """Generates a reset password token and optionally (default to yes) send the reset
    password email

    Without an explicit *user*, looks one up from the email in the current
    form; exits the action group when no user matches.
    """
    if not user and "form" in current_context.data and request.method == "POST":
        form = current_context.data.form
        user = self.find_by_email(form[self.options["email_column"]].data)
        if user is None:
            if self.options["reset_password_token_error_message"]:
                flash(self.options["reset_password_token_error_message"], "error")
            current_context.exit(trigger_action_group="reset_password_failed")
    if not user:
        raise InvalidOptionError("Invalid user in 'reset_password_token' action")
    token = self._gen_reset_password_token(user)
    self.reset_password_token_signal.send(self, user=user, token=token)
    if (send_email is None and self.options["send_reset_password_email"]) or send_email:
        to_email = getattr(user, self.options["email_column"])
        current_app.features.emails.send(to_email, "users/reset_password.txt", user=user, token=token)
    return token
@command("send-reset-password")
def send_reset_password_command(self, email, send_email=True):
    """CLI command: generates a reset token for *email* and echoes the reset URL."""
    user = self.find_by_email(email)
    if user is None:
        raise Exception("User '%s' not found" % email)
    token = self.gen_reset_password_token(user, send_email)
    command.echo(url_for("users.reset_password", token=token, _external=True))
@action(default_option="token")
def reset_password(self, token=None, login_user=None):
    """Resets the password of the user identified by the token

    The token may come from the option, a view arg or a GET/POST parameter.
    The new password is read from the current form.
    """
    # NOTE(review): pwcol is unused in this method
    pwcol = self.options['password_column']
    if not token:
        if "token" in request.view_args:
            token = request.view_args["token"]
        elif "token" in request.values:
            token = request.values["token"]
        else:
            raise OptionMissingError(("Missing 'token' option or 'token' view arg "
                                      "or 'token' GET paramater in 'reset_password' action"))
    user = self.find_by_token(token, salt="password-reset", max_age=self.options["reset_password_ttl"])
    if user is None:
        if self.options["reset_password_error_message"]:
            flash(self.options["reset_password_error_message"], "error")
        current_context.exit(trigger_action_group="reset_password_failed")
    self.update_password_from_form(user)
    self.reset_password_signal.send(self, user=user)
    if (login_user is None and self.options["login_user_on_reset_password"]) or login_user:
        flask_login.login_user(user)
    return user
@command("reset-password")
def reset_password_command(self, username, password):
    """CLI command: forcibly sets a user's password, bypassing validation."""
    with transaction():
        user = self.find_by_username(username)
        if user is None:
            raise Exception("User '%s' not found" % username)
        self.update_password(user, password, skip_validation=True)
        save_model(user)
@action("update_user_password", default_option="user")
@as_transaction
def update_password_from_form(self, user=None, form=None):
    """Updates the user password using a form

    Defaults to the current user and the current context form; saves the
    user and emits the update_user_password signal.
    """
    user = user or self.current
    if not form and "form" in current_context.data and request.method == "POST":
        form = current_context.data.form
    elif not form:
        raise OptionMissingError("Missing a form in 'update_user_password' action")
    self._update_password_from_form(user, form)
    save_model(user)
    self.update_user_password_signal.send(self, user=user)
def _update_password_from_form(self, user, form):
    """Validates the current-password and confirmation fields from *form*,
    then updates the user's password. Exits the action group on any failure.
    """
    pwcol = self.options['password_column']
    pwcurrentcol = pwcol + "_current"
    # NOTE(review): pwconfirmcol is unused — check_password_confirm()
    # rebuilds the same field name internally
    pwconfirmcol = pwcol + "_confirm"
    current_pwd = getattr(user, pwcol)
    # require the existing password when the user has one and the form asks for it
    if current_pwd and pwcurrentcol in form and not self.bcrypt.check_password_hash(current_pwd, form[pwcurrentcol].data):
        if self.options["update_password_error_message"]:
            flash(self.options["update_password_error_message"], "error")
        current_context.exit(trigger_action_group="reset_password_current_mismatch")
    self.check_password_confirm(form, "reset_password_confirm_mismatch")
    try:
        self.update_password(user, form[pwcol].data)
    except PasswordValidationFailedException as e:
        current_context["password_validation_error"] = e.reason
        current_context.exit(trigger_action_group="password_validation_failed")
@action(default_option="user")
def check_user_password(self, user, password=None, form=None):
    """Checks if the password matches the one of the user. If no password is
    provided, the current form will be used
    """
    pwcol = self.options['password_column']
    if password is None:
        if not form and "form" in current_context.data and request.method == "POST":
            form = current_context.data.form
        if not form:
            raise OptionMissingError("Missing 'password' option or a form")
        password = form[pwcol].data
    stored = getattr(user, pwcol)
    if not stored or not self.bcrypt.check_password_hash(stored, password):
        current_context.exit(trigger_action_group="password_mismatch")
@action(default_option="user")
@as_transaction
def update_user_from_form(self, user, form=None):
    """Updates an existing user from a form: re-validates username/email
    uniqueness, handles an optional password change, populates the model
    and saves it.
    """
    ucol = self.options["username_column"]
    emailcol = self.options["email_column"]
    pwcol = self.options['password_column']
    if not form and "form" in current_context.data and request.method == "POST":
        form = current_context.data.form
    elif not form:
        raise OptionMissingError("Missing form")
    # NOTE(review): falls back to the hard-coded attributes user.username /
    # user.email even though the column names are configurable — confirm
    # this is intended for non-default column options.
    username = form[ucol].data if ucol in form else user.username
    email = form[emailcol].data if emailcol in form else user.email
    try:
        self.validate_user(username=username, email=email, ignore_user_id=user.id)
    except SignupValidationFailedException as e:
        current_context["validation_error"] = e.reason
        current_context.exit(trigger_action_group="user_update_validation_failed")
    if pwcol in form and form[pwcol].data:
        self._update_password_from_form(user, form)
    form.populate_obj(user, ignore_fields=[pwcol])
    # maintain the lowercase shadow column and email normalization
    if ucol in form:
        setattr(user, ucol + '_lcase', form[ucol].data.lower().strip())
    if emailcol in form:
        setattr(user, emailcol, form[emailcol].data.lower().strip())
    save_model(user)
@action("check_user_unique_attr", default_option="attrs")
def check_unique_attr(self, attrs, user=None, form=None, flash_msg=None):
    """Checks that an attribute of the current user is unique amongst all users.
    If no value is provided, the current form will be used.
    """
    user = user or self.current
    ucol = self.options["username_column"]
    # BUG FIX: this was bound to `email`, but every use below referenced
    # `emailcol`, raising NameError whenever the email branch was reached
    emailcol = self.options["email_column"]
    if not isinstance(attrs, (list, tuple, dict)):
        attrs = [attrs]
    for name in attrs:
        if isinstance(attrs, dict):
            value = attrs[name]
        else:
            form = form or current_context.data.get("form")
            if not form:
                raise OptionMissingError("Missing 'value' option or form in 'check_user_unique_attr' action")
            value = form[name].data
        # compare through the same normalization the columns are stored with
        if name == ucol and not self.options["username_case_sensitive"]:
            filters = (ucol + '_lcase', value.strip().lower())
        elif name == emailcol:
            filters = (emailcol, value.strip().lower())
        else:
            filters = (name, value.strip())
        if self.query.filter({"$and": [filters, ("id__ne", user.id)]}).count() > 0:
            if flash_msg is None:
                flash_msg = "The %s is already in use" % name
            if flash_msg:
                flash(flash_msg, "error")
            current_context.exit(trigger_action_group="user_attr_not_unique")
def oauth_login(self, provider, id_column, id, attrs, defaults, redirect_url=None):
    """Execute a login via oauth. If no user exists, oauth_signup() will be called

    When a user is already logged in, the provider is linked to that
    account instead (unless the oauth identity belongs to another user).
    """
    user = self.query.filter(**dict([(id_column, id)])).first()
    if not redirect_url:
        redirect_url = request.args.get('next') or url_for(self.options["redirect_after_login"])
    if self.logged_in():
        # oauth identity already bound to a different account: abort
        if user and user != self.current:
            if self.options["oauth_user_already_exists_message"]:
                flash(self.options["oauth_user_already_exists_message"].format(provider=provider), "error")
            return redirect(redirect_url)
        # link the provider to the currently logged-in account
        if provider not in self.current.auth_providers:
            self.current.auth_providers.append(provider)
        current_app.features.models.save(self.current, **attrs)
    elif not user:
        return self.oauth_signup(provider, attrs, defaults, redirect_url=redirect_url)
    else:
        self.login(user, provider=provider, **attrs)
    return redirect(redirect_url)
def oauth_signup(self, provider, attrs, defaults, redirect_url=None):
    """Start the signup process after having logged in via oauth
    """
    # stash the oauth data in the session for the signup view to pick up
    session["oauth_user_defaults"] = defaults
    session["oauth_user_attrs"] = dict(provider=provider, **attrs)
    if not redirect_url:
        redirect_url = request.args.get("next")
    return redirect(url_for('users.oauth_signup', next=redirect_url))
@command("show")
def show_user_command(self, username):
    """CLI command: prints the JSON representation of a user."""
    user = self.find_by_username(username)
    if user is None:
        raise Exception("User '%s' not found" % username)
    command.echo(user.for_json())
| {
"repo_name": "frascoweb/frasco-users",
"path": "frasco_users/__init__.py",
"copies": "1",
"size": "41824",
"license": "mit",
"hash": 777762732943815200,
"line_mean": 46.6898517674,
"line_max": 167,
"alpha_frac": 0.6102955241,
"autogenerated": false,
"ratio": 4.195405757849333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020213248454166955,
"num_lines": 877
} |
from frasco import Feature, action, current_context, OptionMissingError, copy_extra_feature_options, current_app
from jinja_macro_tags import MacroLoader, MacroRegistry
from frasco.utils import parse_yaml_frontmatter
from frasco.expression import compile_expr, eval_expr
from flask_mail import Mail, Message, email_dispatched, Attachment, force_text
from jinja2 import ChoiceLoader, FileSystemLoader, PackageLoader, TemplateNotFound
from contextlib import contextmanager
import premailer
import os
import datetime
import re
import markdown
try:
# html2text being licensed under the GPL, it is not
# directly included and may be not available
import html2text
except ImportError:
html2text = None
try:
    # BUG FIX: was `from cpickle import pickle` — no such module exists
    # (the Python 2 C implementation is `cPickle`), so this branch could
    # never succeed and the fast pickler was never used
    import cPickle as pickle
except ImportError:
    import pickle
# matches http:// and https:// URLs so they can be wrapped in anchors
_url_regexp = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')


def clickable_links(text):
    """Wraps every URL found in *text* in an HTML anchor tag."""
    return _url_regexp.sub(r'<a href="\1">\1</a>', text)
class EmailsFeature(Feature):
    """Send emails using SMTP
    """
    name = "emails"
    # Default feature options, overridable from the app configuration.
    defaults = {"default_layout": "layout.html",
                "default_template_vars": {},
                "inline_css": False,
                "auto_render_missing_content_type": True,
                "log_messages": None,  # default is app.testing
                "dump_logged_messages": True,
                "dumped_messages_folder": "email_logs",
                "localized_emails": None,
                "default_locale": None,
                "markdown_options": {},
                "silent_failures": False,
                "send_async": False}
def init_app(self, app):
    """Configures flask-mail, the email template environment and message logging."""
    copy_extra_feature_options(self, app.config, "MAIL_")
    self.client = Mail(app)
    self.connection = None
    # app-level templates first, packaged layouts as fallback
    self.templates_loader = [FileSystemLoader(os.path.join(app.root_path, "emails"))]
    layout_loader = PackageLoader(__name__, "templates")
    loader = ChoiceLoader([ChoiceLoader(self.templates_loader), layout_loader])
    self.jinja_env = app.jinja_env.overlay(loader=MacroLoader(loader))
    # the overlay method does not call the constructor of extensions
    self.jinja_env.macros = MacroRegistry(self.jinja_env)
    self.jinja_env.macros.register_from_template("layouts/macros.html")
    self.jinja_env.default_layout = self.options["default_layout"]
    self.jinja_env.filters['clickable_links'] = clickable_links
    if (self.options["log_messages"] is not None and self.options["log_messages"]) or \
            (self.options["log_messages"] is None and app.testing):
        email_dispatched.connect(self.log_message)
    self.locale = None
    if app.features.exists('babel'):
        if self.options['default_locale'] is None:
            self.options['default_locale'] = app.config['BABEL_DEFAULT_LOCALE']
        if self.options['localized_emails'] is None:
            # BUG FIX: the default pattern contained the literal "(unknown)"
            # where the {filename} placeholder expected by render_message()'s
            # format() call belongs
            self.options['localized_emails'] = '{locale}/{filename}'
def add_template_folder(self, path):
    """Registers an extra filesystem directory to search for email templates."""
    self.templates_loader.append(FileSystemLoader(path))
def add_templates_from_package(self, pkg_name, pkg_path="emails"):
    """Registers a package directory as a source of email templates."""
    self.templates_loader.append(PackageLoader(pkg_name, pkg_path))
def render_message(self, template_filename, auto_render_missing_content_type=None,
                   auto_html_layout="layouts/text.html", auto_markdown_template="layouts/markdown.html", **vars):
    """Renders an email template to (frontmatter, text_body, html_body).

    *template_filename* may carry multiple comma-separated extensions
    (e.g. "welcome.txt,html"); localized variants are tried first when the
    localized_emails option is set. YAML frontmatter in the template is
    evaluated and merged into the template variables.
    """
    text_body = None
    html_body = None
    vars = dict(self.options["default_template_vars"], **vars)
    filename, ext = os.path.splitext(template_filename)
    localized_filename = None
    if self.options['localized_emails']:
        # locale priority: explicit var > feature-level > current context
        locale = vars.get('locale', self.locale)
        if locale is None and 'current_locale' in current_context:
            locale = current_context['current_locale']
        if locale and locale != self.options['default_locale']:
            localized_filename = self.options['localized_emails'].format(**{
                "locale": locale, "filename": template_filename,
                "name": filename, "ext": ext})
    source = None
    for tpl_filename in [localized_filename, template_filename]:
        if not tpl_filename:
            continue
        # only extract the frontmatter from the first template if
        # multiple extensions are provided
        filename, ext = os.path.splitext(tpl_filename)
        if "," in ext:
            tpl_filename = filename + ext.split(",")[0]
        try:
            source, _, __ = self.jinja_env.loader.get_source(self.jinja_env, tpl_filename)
        except TemplateNotFound:
            pass
        if source:
            break
    if source is None:
        raise TemplateNotFound(template_filename)
    frontmatter, source = parse_yaml_frontmatter(source)
    if frontmatter:
        frontmatter = eval_expr(compile_expr(frontmatter), vars)
        vars = dict(frontmatter, **vars)
    # render one template per requested extension
    templates = [("%s.%s" % (filename, e), e) for e in ext[1:].split(",")]
    for tpl_filename, ext in templates:
        rendered = self.jinja_env.get_template(tpl_filename).render(**vars)
        if ext == "html":
            html_body = rendered
        elif ext == "txt":
            text_body = rendered
        elif ext == "md":
            # markdown yields both bodies: raw markdown as text, converted HTML
            text_body = rendered
            content = markdown.markdown(rendered, **self.options["markdown_options"])
            html_body = self.jinja_env.get_template(auto_markdown_template).render(
                content=content, **vars)
    if (auto_render_missing_content_type is not None and auto_render_missing_content_type) or \
            (auto_render_missing_content_type is None and self.options["auto_render_missing_content_type"]):
        # derive the missing body from the one we have
        if html_body is None:
            html_body = self.jinja_env.get_template(auto_html_layout).render(
                text_body=text_body, **vars)
        if text_body is None and html2text is not None:
            text_body = html2text.html2text(html_body)
    if html_body and self.options["inline_css"]:
        html_body = premailer.transform(html_body)
    return (frontmatter, text_body, html_body)
@action("create_email_message", as_="email_message")
def create_message(self, to, tpl, **vars):
    """Renders template *tpl* and builds a flask-mail Message for *to*.

    Message options (subject, sender, cc, ...) may come either from the
    call's vars or from the template's frontmatter; the message is also
    stored in the context as `mail_message`.
    """
    recipients = to if isinstance(to, (list, tuple)) else [to]
    frontmatter, text_body, html_body = self.render_message(tpl, **vars)
    kwargs = {}
    # explicit vars override frontmatter values
    for k in ('subject', 'sender', 'cc', 'bcc', 'attachments', 'reply_to', 'send_date', 'charset', 'extra_headers'):
        if k in vars:
            kwargs[k] = vars[k]
        elif frontmatter and k in frontmatter:
            kwargs[k] = frontmatter[k]
    # flask-mail's Message expects `date`, not `send_date`
    kwargs["date"] = kwargs.pop("send_date", None)
    if not kwargs.get("subject"):
        raise OptionMissingError("Missing subject for email with template '%s'" % tpl)
    subject = kwargs.pop("subject")
    attachments = kwargs.pop("attachments", None)
    msg = Message(recipients=recipients, subject=subject, body=text_body, html=html_body, **kwargs)
    msg.template = tpl
    if attachments:
        # accept ready Attachment objects, attach() kwargs dicts, or filenames
        for attachment in attachments:
            if isinstance(attachment, Attachment):
                msg.attachments.append(attachment)
            elif isinstance(attachment, dict):
                msg.attach(**attachment)
            else:
                msg.attach(attachment)
    current_context.data.mail_message = msg
    return msg
@action("add_email_attachment", default_option="filename")
def add_attachment(self, filename, msg=None, **kwargs):
    """Attaches a file to *msg*, defaulting to the context's pending message."""
    target = msg or current_context.data.mail_message
    target.attach(filename, **kwargs)
@action("start_bulk_emails")
def start_bulk(self):
    """Opens a persistent SMTP connection so many messages can be sent
    without reconnecting; failures are swallowed when silent_failures is on.
    """
    try:
        self.connection = self.client.connect()
        # simulate entering a with context
        # (flask-mail does not provide a way to connect otherwise)
        self.connection.__enter__()
    except Exception as e:
        if not self.options['silent_failures']:
            raise e
        current_app.log_exception(e)
        # leave no half-open connection; _send_message falls back to the client
        self.connection = None
@action("stop_bulk_emails")
def stop_bulk(self):
    """Closes the connection opened by start_bulk(), if one is open."""
    if not self.connection:
        return
    self.connection.__exit__(None, None, None)  # see start_bulk()
    self.connection = None
@contextmanager
def bulk(self):
    """Context manager wrapping start_bulk()/stop_bulk()."""
    self.start_bulk()
    try:
        yield self
    finally:
        # always release the SMTP connection
        self.stop_bulk()
def _prepare_action_message(self, to=None, tpl=None, **kwargs):
    """Resolve the Message to send for the send/send_async actions.

    *to* may already be a Message instance; otherwise the message stored
    in the current context is used; otherwise a new message is created
    from *to* and *tpl*. Returns None when creation failed silently.
    """
    msg = None
    if isinstance(to, Message):
        msg = to
    elif to is None and "mail_message" in current_context.data:
        msg = current_context.data.mail_message
    else:
        if not to:
            raise OptionMissingError("A recipient must be provided when sending an email")
        if not tpl:
            raise OptionMissingError("A template must be provided when sending an email")
        try:
            return self.create_message(to, tpl, **kwargs)
        except Exception as e:
            if not self.options['silent_failures']:
                raise e
            current_app.log_exception(e)
            return None
    # BUG FIX: the resolved message was previously dropped (the function
    # fell off the end, implicitly returning None), so passing an existing
    # Message or relying on the context message never sent anything.
    return msg
def _send_message(self, msg):
    """Deliver *msg*, preferring the open bulk connection, honoring silent_failures."""
    sender = self.connection if self.connection else self.client
    try:
        sender.send(msg)
    except Exception as e:
        # Best-effort delivery when silent_failures is enabled.
        if not self.options['silent_failures']:
            raise e
        current_app.log_exception(e)
def _send_async(self, msg):
    # Pickle the message so it can cross the task queue; decoding the
    # pickle bytes as latin1 keeps the payload a serializable text string
    # (send_async_task re-encodes with latin1 before unpickling).
    current_app.features.tasks.enqueue('send_async_email_task', pickled_msg=pickle.dumps(msg).decode('latin1'))
@action("send_email")
def send(self, to=None, tpl=None, **kwargs):
force_sync = kwargs.pop('_force_sync', False)
msg = self._prepare_action_message(to, tpl, **kwargs)
if msg:
if self.options['send_async'] and not force_sync:
self._send_async(msg)
else:
self._send_message(msg)
@action("send_async_email")
def send_async(self, *args, **kwargs):
msg = self._prepare_action_message(to, tpl, **kwargs)
if msg:
self._send_async(msg)
@action("send_async_email_task")
def send_async_task(self, pickled_msg):
self._send_message(pickle.loads(pickled_msg.encode('latin1')))
def log_message(self, message, app):
    # Log every sent email and optionally dump the rendered bodies to
    # disk (useful in development to inspect outgoing mail).
    app.logger.debug("Email %s sent to %s as \"%s\"" % (message.template, message.recipients, message.subject))
    if self.options["dump_logged_messages"]:
        path = os.path.join(app.root_path, self.options["dumped_messages_folder"])
        if not os.path.exists(path):
            os.mkdir(path, 0777)  # NOTE(review): Python 2 octal literal; creates a world-writable dump dir — confirm intended
        # Filename encodes timestamp, template and recipients for lookup.
        filename = os.path.join(path, "-".join([
            datetime.datetime.now().isoformat("-"),
            os.path.splitext(message.template)[0].replace("/", "_"),
            "-".join(message.recipients)]))
        if message.body:
            with open(filename + ".txt", "w") as f:
                f.write(message.body.encode('utf-8'))
        if message.html:
            with open(filename + ".html", "w") as f:
                f.write(message.html.encode('utf-8')) | {
"repo_name": "frascoweb/frasco-emails",
"path": "frasco_emails/__init__.py",
"copies": "1",
"size": "11545",
"license": "mit",
"hash": -615477609118070300,
"line_mean": 40.2357142857,
"line_max": 127,
"alpha_frac": 0.5890861845,
"autogenerated": false,
"ratio": 4.072310405643739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161396590143739,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, execute_action, command, current_app, import_string, signal, copy_extra_feature_options, has_app_context
from celery import Celery
from celery.bin.worker import worker as celery_worker
from celery.bin.beat import beat as celery_beat
from celery.schedules import crontab
def pack_task_args(data):
    """Traverse *data* and convert every object with a __taskdump__() method
    into a JSON-safe ``{"$taskobj": [cls, state]}`` marker.

    Lists, tuples and dicts are traversed recursively (tuples become
    lists); everything else passes through unchanged. The markers are
    restored by unpack_task_args().
    """
    if hasattr(data, "__taskdump__"):
        cls, state = data.__taskdump__()
        if not cls:
            # Default to the fully qualified class name when the object
            # does not name its own loader class.
            cls = data.__class__.__module__ + "." + data.__class__.__name__
        return {"$taskobj": [cls, state]}
    if isinstance(data, (list, tuple)):
        lst = []
        for item in data:
            lst.append(pack_task_args(item))
        return lst
    if isinstance(data, dict):
        dct = {}
        # BUG FIX: iteritems() is Python2-only; items() works on 2 and 3.
        for k, v in data.items():
            dct[k] = pack_task_args(v)
        return dct
    return data
def unpack_task_args(data):
    """Traverse *data* and transform back objects which were dumped
    using __taskdump__().

    ``{"$taskobj": [cls, state]}`` markers are resolved by importing
    *cls* and calling its __taskload__(state); lists/tuples and dicts
    are traversed recursively.
    """
    if isinstance(data, (list, tuple)):
        lst = []
        for item in data:
            lst.append(unpack_task_args(item))
        return lst
    if isinstance(data, dict):
        if "$taskobj" in data:
            cls = import_string(data["$taskobj"][0])
            return cls.__taskload__(data["$taskobj"][1])
        else:
            dct = {}
            # BUG FIX: iteritems() is Python2-only; items() works on 2 and 3.
            for k, v in data.items():
                dct[k] = unpack_task_args(v)
            return dct
    return data
def run_action(name, **kwargs):
    """Instanciates and executes an action from current_app.
    This is the actual function which will be queued.
    """
    kwargs = unpack_task_args(kwargs)
    # Restore the user context the task was enqueued with, if any.
    current_user = None
    if '_current_user' in kwargs:
        current_user = kwargs.pop('_current_user')
        current_app.features.users.start_user_context(current_user)
    try:
        current_app.features.tasks.before_task_event.send(name=name)
        # Note: renamed local so it does not shadow the imported `action`.
        action_obj = current_app.actions[name](kwargs)
        rv = execute_action(action_obj)
        current_app.features.tasks.after_task_event.send(name=name)
    finally:
        if current_user:
            current_app.features.users.stop_user_context()
    return rv
class TasksFeature(Feature):
    """Enqueue tasks to process them in the background.

    Wraps a Celery application: actions can be queued via enqueue(),
    scheduled via the beat scheduler, and executed by the worker/scheduler
    commands.
    """
    name = "tasks"
    command_group = False
    defaults = {"broker_url": None,
                "result_backend": None,
                "accept_content": ['json', 'msgpack', 'yaml'],
                "task_serializer": "json",
                "result_serializer": "json",
                "schedule": {},
                "delay_if_models_transaction": False,
                "run_beat_with_worker": True}
    # Signals fired around task execution and enqueueing.
    before_task_event = signal("before_task")
    after_task_event = signal("after_task")
    task_enqueued_event = signal("task_enqueued")

    def init_app(self, app):
        """Configure the Celery client; the broker defaults to the redis
        feature's URL (or redis://localhost), the backend to the broker."""
        self.app = app
        broker = self.options["broker_url"]
        backend = self.options["result_backend"]
        if not broker:
            if app.features.exists("redis"):
                broker = app.features.redis.options["url"]
            else:
                broker = "redis://localhost"
        if not backend:
            backend = broker
        self.celery = Celery(__name__, broker=broker, backend=backend)
        self.celery.conf["CELERY_ACCEPT_CONTENT"] = self.options["accept_content"]
        self.celery.conf["CELERY_TASK_SERIALIZER"] = self.options["task_serializer"]
        self.celery.conf["CELERY_RESULT_SERIALIZER"] = self.options["result_serializer"]
        self.celery.conf["CELERYBEAT_SCHEDULE_FILENAME"] = ".celerybeat-schedule"
        copy_extra_feature_options(self, self.celery.conf, "CELERY_")
        TaskBase = self.celery.Task
        class ContextTask(TaskBase):
            # Ensure every task body runs inside a Flask app context.
            abstract = True
            def __call__(self, *args, **kwargs):
                if has_app_context():
                    # useful for testing if running tasks synchronously
                    return TaskBase.__call__(self, *args, **kwargs)
                else:
                    with app.app_context():
                        return TaskBase.__call__(self, *args, **kwargs)
        self.celery.Task = ContextTask
        self.celery.conf["CELERYBEAT_SCHEDULE"] = {}
        if self.options["schedule"]:
            # BUG FIX: iteritems() is Python2-only; items() works on 2 and 3.
            for action, schedule in self.options["schedule"].items():
                self.schedule_action(action, schedule)
        self.run_action_task = self.celery.task(name="frasco_run_action")(run_action)
        app.processes.append(("worker", ["frasco", "worker"]))
        if not self.options['run_beat_with_worker']:
            app.processes.append(("scheduler", ["frasco", "scheduler"]))

    def add_task(self, func, **kwargs):
        """Register *func* as a Celery task and return the task object."""
        return self.celery.task(**kwargs)(func)

    def send_task(self, *args, **kwargs):
        """Proxy to Celery.send_task()."""
        return self.celery.send_task(*args, **kwargs)

    def schedule_task(self, schedule_name, name, schedule, **kwargs):
        """Add an entry to the beat schedule.

        *schedule* may be a dict of crontab kwargs, a crontab spec string
        ("m h dom mon dow"), or a ready-made celery schedule object.
        """
        if isinstance(schedule, dict):
            schedule = crontab(**schedule)
        elif isinstance(schedule, str):
            schedule = crontab(*schedule.split(" "))
        self.celery.conf["CELERYBEAT_SCHEDULE"][schedule_name] = dict(
            task=name, schedule=schedule, **kwargs)

    def schedule_action(self, action, schedule, name=None):
        """Schedule periodic execution of an action via frasco_run_action."""
        if not name:
            name = "scheduled_%s" % action
        self.schedule_task(name, "frasco_run_action", schedule,
            args=(action,))

    @action(default_option="action")
    def enqueue(self, action, **kwargs):
        """Queue an action for background execution.

        Propagates the current user and optionally delays queuing until
        the models transaction commits.
        """
        if current_app.features.exists('models') and current_app.features.models.delayed_tx_calls.top is not None \
          and self.options['delay_if_models_transaction']:
            current_app.features.models.delayed_tx_calls.call(self.enqueue, (action,), kwargs)
            return
        if current_app.features.exists('users') and current_app.features.users.logged_in():
            kwargs.setdefault('_current_user', current_app.features.users.current)
        result = self.run_action_task.apply_async(args=(action,), kwargs=pack_task_args(kwargs))
        self.task_enqueued_event.send(self, action=action, result=result)
        return result

    def get_result(self, id):
        """Return the AsyncResult for a previously enqueued action."""
        return self.run_action_task.AsyncResult(id)

    @command(with_reloader=True, with_app_ctx=False)
    def worker(self, hostname=None):
        """Run a Celery worker, optionally embedding the beat scheduler."""
        options = {'hostname': hostname, 'beat': False}
        if self.options['run_beat_with_worker'] and self.celery.conf["CELERYBEAT_SCHEDULE"]:
            options['beat'] = True
        if self.app.debug:
            # Single process in debug to keep tracebacks simple.
            options['concurrency'] = 1
        w = celery_worker(self.celery)
        w.run(**options)

    @command(with_reloader=True, with_app_ctx=False)
    def scheduler(self):
        """Run the standalone Celery beat scheduler."""
        b = celery_beat(self.celery)
        b.run()
| {
"repo_name": "frascoweb/frasco-tasks",
"path": "frasco_tasks.py",
"copies": "1",
"size": "6915",
"license": "mit",
"hash": 7540645045358001000,
"line_mean": 38.2897727273,
"line_max": 140,
"alpha_frac": 0.5978308026,
"autogenerated": false,
"ratio": 3.935685828116107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012059107226218189,
"num_lines": 176
} |
from frasco import Feature, action, flash, url_for, hook, lazy_translate, Blueprint, redirect, request
from frasco_users import current_user
def create_blueprint(app):
    """Build the blueprint handling the Github OAuth login flow."""
    bp = Blueprint("github_login", __name__)
    feature = app.features.github
    users = app.features.users

    @bp.route('/login/github')
    def login():
        # Redirect to Github's authorize page; the optional "next" URL is
        # carried through to the callback.
        callback_url = url_for('.callback', next=request.args.get('next'), _external=True)
        kwargs = {}
        if 'scope' in request.args:
            kwargs['scope'] = request.args['scope']
        return feature.api.authorize(callback=callback_url, **kwargs)

    @bp.route('/login/github/callback')
    def callback():
        resp = feature.api.authorized_response()
        if resp is None:
            # The user denied the OAuth authorization.
            flash(feature.options["user_denied_login_message"], "error")
            return redirect(url_for("users.login"))
        me = feature.api.get('user', token=[resp['access_token']])
        # Attributes stored on the user record for later API access.
        attrs = {"github_access_token": resp['access_token'],
                 "github_username": me.data['login'],
                 "github_id": str(me.data['id']),
                 "github_email": me.data.get('email'),
                 "github_scope": resp['scope']}
        # Defaults applied only when a new user account gets created.
        defaults = {}
        if feature.options["use_email"] and 'email' in me.data:
            defaults[users.options["email_column"]] = me.data['email']
        if feature.options["use_username"] and users.options['email_column'] != users.options['username_column']:
            defaults[users.options["username_column"]] = me.data['login']
        return users.oauth_login("github", "github_id", str(me.data['id']), attrs, defaults)

    return bp
class GithubFeature(Feature):
    # Frasco feature providing "login with Github" on top of the users
    # feature, plus scope helpers for the stored OAuth token.
    name = "github"
    requires = ["users"]
    blueprints = [create_blueprint]
    defaults = {"use_username": True,
                "use_email": True,
                "scope": None,
                "user_denied_login_message": lazy_translate("Login via Github was denied")}

    def init_app(self, app):
        # Register the OAuth2 remote app and extend the user model with
        # the github_* columns.
        self.app = app
        self.api = app.features.users.create_oauth_app("github",
            base_url='https://api.github.com/',
            request_token_url=None,
            access_token_method='POST',
            access_token_url='https://github.com/login/oauth/access_token',
            authorize_url='https://github.com/login/oauth/authorize',
            consumer_key=self.options["consumer_key"],
            consumer_secret=self.options["consumer_secret"],
            request_token_params={'scope': self.options['scope']},
            login_view="github_login.login")

        @self.api.tokengetter
        def token_getter():
            # No token for anonymous users or users who never linked Github.
            if not current_user.is_authenticated or not current_user.github_access_token:
                return
            return (current_user.github_access_token, "")

        self.model = app.features.models.ensure_model(app.features.users.model,
            github_access_token=str,
            github_username=str,
            github_id=dict(type=str, index=True),
            github_email=str,
            github_scope=str)

    def has_scope(self, *scopes):
        # True when every requested scope appears in the comma-separated
        # scope string granted by Github; False when no scope is stored.
        if current_user.github_scope:
            available_scopes = current_user.github_scope.split(',')
            for scope in scopes:
                if scope not in available_scopes:
                    return False
            return True
        return False

    def update_scope(self, *scopes):
        # Return the merged comma-separated scope string (does not persist
        # it on the user record).
        if current_user.github_scope:
            current_scopes = set(current_user.github_scope.split(','))
            current_scopes.update(scopes)
            return ",".join(current_scopes)
        return ",".join(scopes) | {
"repo_name": "frascoweb/frasco-github",
"path": "frasco_github.py",
"copies": "1",
"size": "3632",
"license": "mit",
"hash": 2662604777840834600,
"line_mean": 38.4891304348,
"line_max": 113,
"alpha_frac": 0.5889317181,
"autogenerated": false,
"ratio": 4.035555555555556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0031633446015202986,
"num_lines": 92
} |
from frasco import Feature, action, flash, url_for, hook, lazy_translate
from frasco_users import current_user
from .blueprint import create_blueprint
class TrelloFeature(Feature):
    """Frasco feature providing "login with Trello" on top of the users feature."""
    name = "trello"
    requires = ["users"]
    blueprints = [create_blueprint]
    defaults = {"app_name": None,
                "scope": "read",
                "expiration": "30days",
                "use_username": True,
                "user_denied_login_message": lazy_translate("Login via Trello was denied")}

    def init_app(self, app):
        self.app = app
        if not self.options["app_name"]:
            # Trello shows an application name on its authorize page.
            self.options["app_name"] = app.config.get('TITLE', 'My App')
        self.api = app.features.users.create_oauth_app("trello",
            base_url='https://api.trello.com/1/',
            request_token_url="https://trello.com/1/OAuthGetRequestToken",
            access_token_url='https://trello.com/1/OAuthGetAccessToken',
            authorize_url='https://trello.com/1/OAuthAuthorizeToken',
            consumer_key=self.options["api_key"],
            consumer_secret=self.options["api_secret"],
            login_view="trello_login.login")

        @self.api.tokengetter
        def token_getter(token=None):
            # BUG FIX: an unconditional ``return None`` at the top made the
            # rest of this function unreachable, so API calls never received
            # the stored OAuth token. Mirrors the sibling twitter/facebook
            # features' token getters.
            if not current_user.is_authenticated or not current_user.trello_oauth_token:
                return
            return (current_user.trello_oauth_token, current_user.trello_oauth_token_secret)

        self.model = app.features.models.ensure_model(app.features.users.model,
            trello_oauth_token=str,
            trello_oauth_token_secret=str,
            trello_user_id=dict(type=str, index=True),
            trello_username=dict(type=str, index=True))
| {
"repo_name": "frascoweb/frasco-trello",
"path": "frasco_trello/__init__.py",
"copies": "1",
"size": "1722",
"license": "mit",
"hash": -3338348788013251000,
"line_mean": 41,
"line_max": 92,
"alpha_frac": 0.6132404181,
"autogenerated": false,
"ratio": 3.602510460251046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4715750878351046,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, flash, url_for, hook, lazy_translate
from frasco_users import current_user
from .blueprint import create_blueprint
class TwitterFeature(Feature):
    # Frasco feature providing "login with Twitter" plus a status-update
    # action, on top of the users feature.
    name = "twitter"
    requires = ["users"]
    blueprints = [create_blueprint]
    defaults = {"use_screenname_as_username": False,
                "user_denied_login_message": lazy_translate("Login via Twitter was denied")}

    def init_app(self, app):
        # Register the Twitter OAuth1 remote app and extend the user model
        # with the twitter_* columns.
        self.app = app
        self.api = app.features.users.create_oauth_app("twitter",
            base_url='https://api.twitter.com/1.1/',
            request_token_url='https://api.twitter.com/oauth/request_token',
            access_token_url='https://api.twitter.com/oauth/access_token',
            authorize_url='https://api.twitter.com/oauth/authenticate',
            consumer_key=self.options["consumer_key"],
            consumer_secret=self.options["consumer_secret"],
            login_view="twitter_login.login")

        @self.api.tokengetter
        def token_getter(token=None):
            # OAuth1: return the (token, secret) pair, or None when the
            # user is anonymous or never linked Twitter.
            if not current_user.is_authenticated() or not current_user.twitter_oauth_token:
                return
            return (current_user.twitter_oauth_token, current_user.twitter_oauth_token_secret)

        self.model = app.features.models.ensure_model(app.features.users.model,
            twitter_oauth_token=str,
            twitter_oauth_token_secret=str,
            twitter_screenname=dict(type=str, index=True))

    @action("post_twitter_update", default_option="status")
    def post_update(self, status):
        # Post a tweet on behalf of the current user.
        self.api.post("statuses/update.json", data={"status": status})
| {
"repo_name": "frascoweb/frasco-twitter",
"path": "frasco_twitter/__init__.py",
"copies": "1",
"size": "1637",
"license": "mit",
"hash": -1805159815351507700,
"line_mean": 43.2432432432,
"line_max": 94,
"alpha_frac": 0.6499694563,
"autogenerated": false,
"ratio": 3.8517647058823528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9977935378480335,
"avg_score": 0.004759756740403673,
"num_lines": 37
} |
from frasco import Feature, action, flash, url_for, hook, session, lazy_translate
from frasco_users import current_user
from .blueprint import create_blueprint
class FacebookFeature(Feature):
    # Frasco feature providing "login with Facebook" on top of the users
    # feature.
    name = "facebook"
    requires = ["users"]
    blueprints = [create_blueprint]
    defaults = {"use_name_as_username": True,
                "use_email": True,
                "scope": "email",
                "save_data": ["first_name", "last_name"],
                "user_denied_login_message": lazy_translate("Login via Facebook was denied")}

    def init_app(self, app):
        # Register the Facebook OAuth2 remote app and extend the user
        # model with the facebook_* columns.
        self.app = app
        self.api = app.features.users.create_oauth_app("facebook",
            base_url='https://graph.facebook.com/',
            request_token_url=None,
            access_token_url='/oauth/access_token',
            authorize_url='https://www.facebook.com/dialog/oauth',
            consumer_key=self.options["app_id"],
            consumer_secret=self.options["app_secret"],
            request_token_params={'scope': self.options['scope']},
            login_view="facebook_login.login")

        @self.api.tokengetter
        def token_getter():
            # No token for anonymous users or users who never linked Facebook.
            if not current_user.is_authenticated() or not current_user.facebook_access_token:
                return
            return (current_user.facebook_access_token, "")

        self.model = app.features.models.ensure_model(app.features.users.model,
            facebook_access_token=str,
            facebook_token_expires=dict(type=int, index=True),
            facebook_name=str,
            facebook_email=str,
            facebook_id=dict(type=str, index=True))
| {
"repo_name": "frascoweb/frasco-facebook",
"path": "frasco_facebook/__init__.py",
"copies": "1",
"size": "1624",
"license": "mit",
"hash": -3912012763414823400,
"line_mean": 40.641025641,
"line_max": 93,
"alpha_frac": 0.604679803,
"autogenerated": false,
"ratio": 3.941747572815534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004527143893561348,
"num_lines": 39
} |
from frasco import Feature, action, hook, current_app, g, request, url_for
class MenuMissingError(Exception):
    """Raised when looking up a menu name that was never registered."""
class Menu(object):
    """A navigation menu node; menus are trees of Menu instances.

    A node may point to a view endpoint or a raw URL, require (or forbid)
    an authenticated user, hold child nodes, or be a plain separator.
    """
    def __init__(self, name=None, label=None, view=None, login_required=None, childs=None, separator=False, url=None, **options):
        self.name = name
        # BUG FIX: separator items are created without a name
        # (Menu(separator=True)); calling name.capitalize() on None raised
        # AttributeError. Only derive the label when a name is available.
        if label:
            self.label = label
        elif name:
            self.label = name.capitalize()
        else:
            self.label = None
        self.view = view
        self.login_required = login_required
        self.childs = childs or []
        self.separator = separator
        self._url = url
        self.options = options

    def url(self, **kwargs):
        """URL of this item: explicit url, then the view endpoint, else '#'."""
        if self._url:
            return self._url
        if self.view:
            return url_for(self.view, **kwargs)
        return "#"

    def add_child(self, *args, **kwargs):
        """Create a child Menu from the given arguments and append it."""
        self.childs.append(Menu(*args, **kwargs))

    def is_current(self):
        """Whether this item matches g.current_menu (falling back to the
        request endpoint when no current menu is set)."""
        current = getattr(g, "current_menu", None)
        if current is None:
            return request.endpoint == self.view
        return self.name == current

    def is_visible(self):
        """Hide items whose login_required setting conflicts with the
        current authentication state (when the users feature exists)."""
        if current_app.features.exists("users") and self.login_required is not None:
            if (self.login_required and not current_app.features.users.logged_in()) or \
              (not self.login_required and current_app.features.users.logged_in()):
                return False
        return True

    def __iter__(self):
        return iter(self.childs)
class MenuFeature(Feature):
    """Builds named navigation menus from the feature's options.

    Each option key (other than the feature defaults) maps a menu name to
    a list of item specs: a plain string (used as both name and view),
    "--" (separator) or a one-entry dict of name -> view/childs/options.
    """
    name = "menu"
    defaults = {"default": None}

    def init_app(self, app):
        app.add_template_global(lambda n: current_app.features.menu[n], "get_menu")
        app.add_template_global(Menu, "menu")
        self.menus = {}
        # BUG FIX: items() instead of the Python2-only iteritems().
        for name, items in self.options.items():
            if name in self.defaults.keys():
                # Skip the feature's own configuration keys.
                continue
            self.menus[name] = Menu(name)
            for itemspec in items:
                if isinstance(itemspec, dict):
                    iname, options = itemspec.popitem()
                    if isinstance(options, str):
                        options = {"view": options}
                    elif isinstance(options, list):
                        options = {"childs": options}
                    item = Menu(iname, **options)
                elif itemspec == "--":
                    item = Menu(separator=True)
                else:
                    # BUG FIX: this branch referenced ``iname``, which is
                    # only bound in the dict branch above (NameError for a
                    # plain string item); the spec itself is both the name
                    # and the view endpoint.
                    item = Menu(itemspec, view=itemspec)
                self.menus[name].childs.append(item)

    def __getitem__(self, name):
        """Return the menu *name*, raising MenuMissingError when unknown."""
        if name not in self.menus:
            raise MenuMissingError("Menu '%s' not found" % name)
        return self.menus[name]

    def ensure(self, name):
        """Return the menu *name*, creating an empty one if needed."""
        if name not in self.menus:
            self.menus[name] = Menu(name)
        return self.menus[name]

    @hook()
    def before_request(self):
        # Reset the highlighted menu to the configured default.
        g.current_menu = self.options["default"]

    @action(default_option="name")
    def set_current_menu(self, name):
        g.current_menu = name
"repo_name": "frascoweb/frasco-menu",
"path": "frasco_menu.py",
"copies": "1",
"size": "2929",
"license": "mit",
"hash": -7207094067665872000,
"line_mean": 31.9213483146,
"line_max": 129,
"alpha_frac": 0.5592352339,
"autogenerated": false,
"ratio": 4.073713490959666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5132948724859666,
"avg_score": null,
"num_lines": null
} |
from frasco import Feature, action, hook, OptionMissingError, current_context, session, g
import flask
from werkzeug.local import LocalProxy
from .db import *
def _get_current_country():
    """Returns the country object set on the current request context,
    or None outside a request / before a country was detected.
    """
    # (The previous docstring was copy-pasted from an assets feature and
    # wrongly described "the current set of assets".)
    return getattr(flask._request_ctx_stack.top, 'current_country', None)

# Proxy that always resolves to the request's current country object.
current_country = LocalProxy(_get_current_country)
def list_countries():
    """Returns a list of all available countries as country objects
    """
    # pycountry's countries database is directly iterable.
    return countries
def country_name(alpha2):
    """Returns the name of the country identified by the alpha2 code,
    or None when the code is unknown.
    """
    try:
        return countries.get(alpha_2=alpha2).name
    # Narrowed from a bare except (which also swallowed KeyboardInterrupt
    # and SystemExit); kept broad because the lookup may raise KeyError or
    # return None (-> AttributeError) depending on the pycountry version.
    except Exception:
        return None
# pycountry does not provide currency information for countries in the Eurozone
eurozone_countries = ("040", "056", "196", "233", "246", "250", "276", "300", "372",
"380", "428", "442", "470", "528", "620", "703", "705", "724")
def country_currency(alpha2_or_country_obj):
    """Returns the currency code of the specified country (given as an
    alpha2 code string or a pycountry country object), or None.
    """
    if isinstance(alpha2_or_country_obj, (str, unicode)):
        country = countries.get(alpha_2=alpha2_or_country_obj)
    else:
        country = alpha2_or_country_obj
    # pycountry has no currency data for Eurozone members; see the
    # hardcoded numeric-code list above.
    if country.numeric in eurozone_countries:
        return "EUR"
    try:
        c = currencies.get(numeric=country.numeric)
    except Exception:  # narrowed from a bare except
        return None
    return c.letter
class CountriesFeature(Feature):
    """Exposes country/currency/language lookups and tracks the visitor's
    current country (from the session or geolocation data)."""
    name = "countries"
    defaults = {"use_geolocation_as_default": True}

    def init_app(self, app):
        app.add_template_global(list_countries)
        app.add_template_global(country_name)
        app.add_template_global(country_currency)

    @hook()
    def before_request(self, *args, **kwargs):
        current_context["current_country"] = current_country
        if "current_country_code" not in session:
            # Fall back to the geolocated country code when allowed.
            if not self.options["use_geolocation_as_default"] or "geo_country_code" not in current_context.data:
                return
            alpha2 = current_context.data.geo_country_code
        else:
            alpha2 = session["current_country_code"]
        if alpha2:
            flask._request_ctx_stack.top.current_country = countries.get(alpha_2=alpha2)

    @action("set_current_country", default_option="alpha2")
    def set_current(self, country=None, is_global=True, **kwargs):
        """Set the request's current country; persist it in the session
        when is_global is true."""
        if not country:
            try:
                country = countries.get(**kwargs)
            except Exception:  # narrowed from a bare except
                pass
        if not country:
            raise OptionMissingError("No way to identify the current country")
        flask._request_ctx_stack.top.current_country = country
        if is_global:
            session["current_country_code"] = country.alpha_2
        return country

    @action("get_country", default_option="alpha2", as_="country")
    def get(self, **kwargs):
        try:
            return countries.get(**kwargs)
        except Exception:  # narrowed from a bare except
            raise OptionMissingError("No way to identify the country")

    @action("geolocate_country", as_="country")
    def geolocate(self):
        # BUG FIX: pycountry's lookup keyword is alpha_2 (as used in
        # before_request above), not alpha2.
        return countries.get(alpha_2=current_context.data.geo_country_code)

    @action(default_option="alpha_3", as_="currency")
    def get_currency(self, **kwargs):
        try:
            return currencies.get(**kwargs)
        except Exception:  # narrowed from a bare except
            raise OptionMissingError("No way to identify the current currency")

    @action(default_option="alpha2", as_="language")
    def get_language(self, **kwargs):
        try:
            return languages.get(**kwargs)
        except Exception:  # narrowed from a bare except
            raise OptionMissingError("No way to identify the current language")
# Optionally register country/language/currency form field types when
# frasco-forms is installed; silently skipped otherwise.
try:
    import frasco_forms.form
    import form  # NOTE(review): Python 2 implicit relative import of the local form module — confirm
    frasco_forms.form.field_type_map.update({
        "country": form.CountryField,
        "language": form.LanguageField,
        "currency": form.CurrencyField})
except ImportError:
    pass
| {
"repo_name": "frascoweb/frasco-country",
"path": "frasco_countries/__init__.py",
"copies": "1",
"size": "3884",
"license": "mit",
"hash": -7316093701971271000,
"line_mean": 30.8360655738,
"line_max": 112,
"alpha_frac": 0.636199794,
"autogenerated": false,
"ratio": 3.8995983935742973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5035798187574297,
"avg_score": null,
"num_lines": null
} |
from frasco import (Feature, action, hook, set_translation_callbacks, copy_extra_feature_options,\
session, request, signal, current_app, command, shell_exec, current_context, json)
from flask_babel import (Babel, gettext, ngettext, lazy_gettext, format_datetime, format_date,\
format_time, format_currency as babel_format_currency, get_locale,\
get_timezone, refresh as refresh_babel)
from babel import Locale
from flask import _request_ctx_stack, has_request_context
import os
import tempfile
import contextlib
import re
_DEFAULT_CURRENCY = "USD"
def get_currency():
    """Returns the currency code that should be used for this request.

    Resolution order: the value cached on the request context, the babel
    extension's currency_selector_func, then the extension's default
    currency. Falls back to "USD" when flask-babel is not attached or
    there is no request context.

    (The previous docstring was copy-pasted from flask-babel's timezone
    helper and wrongly described timezones.)
    """
    ctx = _request_ctx_stack.top if has_request_context() else None
    currency = None
    babel = None
    if ctx:
        currency = getattr(ctx, 'babel_currency', None)
        babel = ctx.app.extensions.get('babel')
    if currency is None:
        if babel is None:
            currency = _DEFAULT_CURRENCY
        else:
            # getattr with a default so a babel instance that was never
            # given a selector does not raise AttributeError.
            if getattr(babel, "currency_selector_func", None) is None:
                currency = babel.default_currency
            else:
                currency = babel.currency_selector_func()
                if currency is None:
                    currency = babel.default_currency
        if ctx:
            # Cache on the request context so later calls are cheap.
            ctx.babel_currency = currency
    return currency
def format_currency(number, format=None):
    """Format *number* using the request's current currency (see get_currency())."""
    return babel_format_currency(number, get_currency(), format)
class BabelFeature(Feature):
    # Integrates flask-babel with frasco: locale/timezone/currency
    # detection (request arg, user record, session, Accept-Language
    # header) plus message extraction/compilation commands.
    name = "babel"
    defaults = {"locales": ["en"],
                "currencies": ["USD"],
                "default_currency": "USD",
                "currency_name_format": u"{name} ({symbol})",
                "store_locale_in_session": True,
                "store_locale_in_user": False,
                "user_locale_column": "locale",
                "user_timezone_column": "timezone",
                "user_currency_column": "currency",
                "extract_locale_from_headers": True,
                "extract_locale_from_request": False,
                "always_add_locale_to_urls": True,
                "store_request_locale_in_session": False,
                "request_arg": "locale",
                "extractors": [],
                "extract_keywords": [],
                "extract_jinja_dirs": ["views", "templates", "emails", "features"],
                "extract_with_jinja_exts": ["jinja2.ext.autoescape", "jinja2.ext.with_",
                    "jinja2.ext.do", "frasco.templating.RemoveYamlFrontMatterExtension",
                    "jinja_layout.LayoutExtension", "jinja_macro_tags.LoadMacroExtension",
                    "jinja_macro_tags.CallMacroTagExtension", "jinja_macro_tags.JinjaMacroTagsExtension",
                    "jinja_macro_tags.HtmlMacroTagsExtension", "frasco.templating.FlashMessagesExtension"],
                "request_locale_arg_ignore_endpoints": ["static", "static_upload"],
                "compile_to_json": False,
                "compile_to_js": False,
                "js_catalog_varname": "LOCALE_%s_CATALOG"}
    # Signals fired by the translation management commands.
    translation_extracted_signal = signal("translation_extracted")
    translation_updated_signal = signal("translation_updated")
    translation_compiled_signal = signal("translation_compiled")
def init_app(self, app):
    # Wire flask-babel into the app and register the detection callbacks.
    copy_extra_feature_options(self, app.config, "BABEL_")
    self.extract_dirs = []
    self.app = app
    self.babel = Babel(app)
    self.babel.default_currency = self.options["default_currency"]
    self.babel.localeselector(self.detect_locale)
    self.babel.timezoneselector(self.detect_timezone)
    # flask-babel has no currency selector hook; stored as a plain
    # attribute read by the module-level get_currency() helper.
    self.babel.currency_selector_func = self.detect_currency
    set_translation_callbacks(translate=gettext,
        ntranslate=ngettext,
        lazy_translate=lazy_gettext,
        format_datetime=format_datetime,
        format_date=format_date,
        format_time=format_time)
    app.jinja_env.filters["currencyformat"] = format_currency
    if self.options["store_locale_in_user"]:
        # Defer user-model column creation until the users feature is ready.
        signal('users_init').connect(self.init_user_model)
def init_user_model(self, sender):
    """Add the locale/timezone/currency columns to the user model and
    snapshot the current locale data on every signup."""
    signal("user_signup").connect(lambda _, user: self.update_user(user), weak=False)
    columns = {
        self.options['user_locale_column']: str,
        self.options['user_timezone_column']: str,
        self.options['user_currency_column']: str,
    }
    sender.features.models.ensure_model(sender.features.users.model, **columns)
def add_extract_dir(self, path, jinja_dirs=None, jinja_exts=None, extractors=None):
    """Register an extra directory to extract translations from.

    BUG FIX: the caller's jinja_exts list was previously mutated in
    place (extended with the default extensions); copy it first.
    """
    jinja_exts = list(jinja_exts) if jinja_exts else []
    jinja_exts.extend(self.options["extract_with_jinja_exts"])
    self.extract_dirs.append((path, jinja_dirs, jinja_exts, extractors))
def detect_locale(self):
    # Locale resolution order: explicit request argument, the user
    # record, the session, then the Accept-Language header.
    if has_request_context() and self.options["extract_locale_from_request"]:
        if self.options["request_arg"] in request.args:
            locale = request.args[self.options["request_arg"]]
            if self.options["store_request_locale_in_session"]:
                # Remember the explicitly requested locale.
                session["locale"] = locale
            return locale
    if self.options["store_locale_in_user"] and self.app.features.exists("users"):
        if self.app.features.users.logged_in():
            locale = getattr(self.app.features.users.current, self.options["user_locale_column"], None)
            if locale:
                return locale
    if not has_request_context():
        return
    if self.options["store_locale_in_session"] and "locale" in session:
        return session["locale"]
    if self.options['extract_locale_from_headers']:
        return request.accept_languages.best_match(self.options["locales"])
def detect_timezone(self):
    """Timezone resolution: the user record first, then the session."""
    users_enabled = self.options["store_locale_in_user"] and self.app.features.exists("users")
    if users_enabled and self.app.features.users.logged_in():
        user_tz = getattr(self.app.features.users.current, self.options["user_timezone_column"], None)
        if user_tz:
            return user_tz
    if has_request_context() and self.options["store_locale_in_session"] and "timezone" in session:
        return session["timezone"]
def detect_currency(self):
    """Currency resolution: the user record first, then the session."""
    users_enabled = self.options["store_locale_in_user"] and self.app.features.exists("users")
    if users_enabled and self.app.features.users.logged_in():
        user_currency = getattr(self.app.features.users.current, self.options["user_currency_column"], None)
        if user_currency:
            return user_currency
    if has_request_context() and self.options["store_locale_in_session"] and "currency" in session:
        return session["currency"]
@hook('url_value_preprocessor')
def extract_locale_from_values(self, endpoint, values):
    """Strip the locale arg from matched URL values so views never see it."""
    if not self.options["extract_locale_from_request"] or not values:
        return
    values.pop(self.options["request_arg"], None)
@hook('url_defaults')
def add_locale_to_url_params(self, endpoint, values):
    # Propagate the current locale into every generated URL (except the
    # ignore-listed endpoints) so navigation keeps the locale.
    if endpoint not in self.options["request_locale_arg_ignore_endpoints"] and \
      self.options["extract_locale_from_request"] and self.options['always_add_locale_to_urls'] and \
      self.options["request_arg"] not in values:
        values[self.options["request_arg"]] = get_locale().language
@action(default_option="locale")
def set_locale(self, locale, refresh=False):
if self.options["store_locale_in_user"] and app.features.exists("users"):
if app.features.users.logged_in():
self.update_user(current_app.features.users.current, locale=locale)
return
if self.options["store_locale_in_session"]:
session["locale"] = locale
if refresh:
refresh_babel()
@action(default_option="tz")
def set_timezone(self, tz):
if self.options["store_locale_in_user"] and app.features.exists("users"):
if app.features.users.logged_in():
self.update_user(current_app.features.users.current, timezone=tz)
return
if self.options["store_locale_in_session"]:
session["timezone"] = tz
@action(default_option="currency")
def set_currency(self, currency):
if self.options["store_locale_in_user"] and app.features.exists("users"):
if app.features.users.logged_in():
self.update_user(current_app.features.users.current, currency=currency)
return
if self.options["store_locale_in_session"]:
session["currency"] = currency
def update_user(self, user, locale=None, timezone=None, currency=None):
    """Write locale/timezone/currency onto *user*, defaulting each value
    to the request's currently detected one."""
    values = (
        (self.options["user_locale_column"], locale or get_locale().language),
        (self.options["user_timezone_column"], timezone or get_timezone().zone),
        (self.options["user_currency_column"], currency or get_currency()),
    )
    for column, value in values:
        setattr(user, column, value)
@action('refresh_locale')
def refresh(self):
    # Clear flask-babel's cached locale/timezone for the current request
    # so the selectors run again.
    refresh_babel()
@hook()
def before_request(self):
    # Publish the detected locale/timezone/currency into the template
    # context and, when the assets feature exists, into the exported JS
    # variables.
    locale = get_locale()
    currency = get_currency()
    current_context["current_locale"] = locale.language
    current_context["current_timezone"] = get_timezone().zone
    current_context["current_currency"] = currency
    current_context["current_language"] = locale.display_name
    current_context["current_currency_name"] = self.options["currency_name_format"].format(
        code=currency,
        name=locale.currencies[currency],
        symbol=locale.currency_symbols[currency])
    if current_app.features.exists('assets'):
        current_app.config['EXPORTED_JS_VARS']['CURRENT_LOCALE'] = {
            "locale": current_context['current_locale'],
            "lang": current_context['current_language'],
            "timezone": current_context['current_timezone'],
            "currency": current_context['current_currency'],
            "currency_name": current_context['current_currency_name']}
        if self.options['compile_to_js']:
            # Name of the JS variable holding the compiled message catalog.
            current_app.config['EXPORTED_JS_VARS']['CURRENT_LOCALE']['catalog_var'] = self.options['js_catalog_varname'] % current_context['current_locale'].upper()
@hook('template_global', _force_call=True)
def available_locales(self, english_name=False):
    """Template global: list the configured locales.

    Returns a list of (language_code, display_name) tuples; when
    english_name is true the English name is used instead of the locale's
    own display name.
    """
    def label(loc):
        return loc.english_name if english_name else loc.display_name
    return [(code, label(Locale(code))) for code in self.options["locales"]]
@hook('template_global', _force_call=True)
def available_currencies(self):
    """Template global: list the configured currencies.

    Returns a list of (code, localized_name, symbol) tuples, resolved
    against the currently active locale.
    """
    loc = get_locale()
    return [(code, loc.currencies[code], loc.currency_symbols[code])
            for code in self.options["currencies"]]
@command(pass_script_info=True)
def extract(self, info, bin="pybabel", keywords=None):
    """Extract translatable strings into translations/messages.pot.

    Runs `pybabel extract` over the app package first, then over each extra
    directory registered in self.extract_dirs, merging those secondary
    catalogs into the main .pot file. Fires translation_extracted_signal
    when done.
    """
    path = os.path.join(info.app_import_path, "translations")
    if not os.path.exists(path):
        os.mkdir(path)
    potfile = os.path.join(path, "messages.pot")
    mapping = create_babel_mapping(self.options["extract_jinja_dirs"],
        self.options["extract_with_jinja_exts"], self.options["extractors"])
    self._extract(info.app_import_path, potfile, mapping, bin, keywords)
    # we need to extract message from other paths independently then
    # merge the catalogs because babel's mapping configuration does
    # not support absolute paths
    # NOTE(review): the loop variable below shadows the outer `path`
    # (the translations dir); harmless today since `path` is not read
    # after the loop, but renaming it would be safer.
    for path, jinja_dirs, jinja_exts, extractors in self.extract_dirs:
        mapping = create_babel_mapping(jinja_dirs, jinja_exts, extractors)
        path_potfile = tempfile.NamedTemporaryFile()
        self._extract(path, path_potfile.name, mapping, bin)
        # copy only messages not already present in the main catalog
        with self.edit_pofile(path_potfile.name) as path_catalog:
            with self.edit_pofile(potfile) as catalog:
                for msg in path_catalog:
                    if msg.id not in catalog:
                        catalog[msg.id] = msg
        path_potfile.close()
    self.translation_extracted_signal.send(self)
def _extract(self, path, potfile, mapping=None, bin="pybabel", keywords=None):
    """Run `pybabel extract` on *path*, writing the catalog to *potfile*.

    mapping: optional babel mapping-file content (see create_babel_mapping);
    it is written to a temporary file and passed via -F.
    keywords: extra gettext keywords, either a list or a ";"-separated
    string; the frasco/babel builtin keywords and the configured
    extract_keywords are always appended.
    """
    if mapping:
        mapping_file = tempfile.NamedTemporaryFile()
        mapping_file.write(mapping)
        mapping_file.flush()  # ensure pybabel sees the content
    if isinstance(keywords, (str, unicode)):  # Python 2: accept str or unicode
        keywords = map(str.strip, str(keywords).split(";"))
    elif not keywords:
        keywords = []
    keywords.extend(["_n:1,2", "translatable", "translate", "ntranslate",
        "lazy_translate", "lazy_gettext"])
    keywords.extend(self.options['extract_keywords'])
    cmdline = [bin, "extract", "-o", potfile]
    if mapping:
        cmdline.extend(["-F", mapping_file.name])
    # each keyword needs its own -k flag
    for k in keywords:
        cmdline.append("-k")
        cmdline.append(k)
    cmdline.append(path)
    command.echo("Extracting translatable strings from %s in %s" % (path, potfile))
    shell_exec(cmdline)
    if mapping:
        mapping_file.close()  # closing the NamedTemporaryFile deletes it
@command("init", pass_script_info=True)
def init_translation(self, info, locale, bin="pybabel", gotrans=False):
path = os.path.join(info.app_import_path, "translations")
potfile = os.path.join(path, "messages.pot")
if not os.path.exists(potfile):
self.extract(info, bin)
command.echo("Initializing new translation '%s' in %s" % (locale, os.path.join(path, locale)))
shell_exec([bin, "init", "-i", potfile, "-d", path, "-l", locale])
self.translation_updated_signal.send(self, locale=locale)
if gotrans:
self.translate_with_google(info, locale)
@command("compile", pass_script_info=True)
def compile_translations(self, info, bin="pybabel"):
command.echo("Compiling all translations")
path = os.path.join(info.app_import_path, "translations")
shell_exec([bin, "compile", "-d", path])
if self.options['compile_to_json']:
output = os.path.join(current_app.static_folder, self.options['compile_to_json'])
for f in os.listdir(path):
if os.path.isdir(os.path.join(path, f)):
self.po2json(info, f, output % f)
if self.options['compile_to_js']:
output = os.path.join(current_app.static_folder, self.options['compile_to_js'])
for f in os.listdir(path):
if os.path.isdir(os.path.join(path, f)):
self.po2js(info, f, output % f)
self.translation_compiled_signal.send(self)
@command("update", pass_script_info=True)
def update_translations(self, info, bin="pybabel", extract=True, gotrans=False):
path = os.path.join(info.app_import_path, "translations")
potfile = os.path.join(path, "messages.pot")
if not os.path.exists(potfile) or extract:
self.extract(info, bin)
command.echo("Updating all translations")
shell_exec([bin, "update", "-i", potfile, "-d", path])
for f in os.listdir(path):
if os.path.isdir(os.path.join(path, f)):
self.translation_updated_signal.send(self, locale=f)
if gotrans:
self.translate_with_google(info, f)
@command("gotrans", pass_script_info=True)
def translate_with_google(self, info, locale):
import goslate
command.echo("Google translating '%s'" % locale)
command.echo("WARNING: you must go through the translation after the process as placeholders may have been modified", fg="red")
filename = os.path.join(info.app_import_path, "translations", locale, "LC_MESSAGES", "messages.po")
def translate(id):
# google translate messes with the format placeholders thus
# we replace them with something which is easily recoverable
string, placeholders = safe_placeholders(id)
string = gs.translate(string, locale)
return unsafe_placeholders(string, placeholders, "## %s ##")
with self.edit_pofile(filename) as catalog:
gs = goslate.Goslate()
for message in catalog:
if not message.id:
continue
if message.pluralizable:
string = list(message.string)
if not string[0]:
string[0] = translate(message.id[0])
if not string[1]:
string[1] = translate(message.id[1])
message.string = tuple(string)
elif not message.string:
message.string = translate(message.id)
def _po2json(self, info, locale):
    """Serialize a locale's .po catalog to a JSON string.

    Singular entries map to [None, translation]; pluralizable entries map
    msgid -> [msgid_plural, form0, form1, ...]. The header entry (empty
    id) is skipped. Note that edit_pofile's default save=True rewrites
    the .po file on exit.
    """
    po_path = os.path.join(info.app_import_path, "translations", locale, "LC_MESSAGES", "messages.po")
    entries = {}
    with self.edit_pofile(po_path) as catalog:
        for msg in catalog:
            if not msg.id:
                continue  # catalog header
            if msg.pluralizable:
                entries[msg.id[0]] = [msg.id[1]] + list(msg.string)
            else:
                entries[msg.id] = [None, msg.string]
    return json.dumps(entries)
@command(pass_script_info=True)
def po2json(self, info, locale, output=None):
    """Export a locale's catalog as JSON, to *output* or to stdout."""
    payload = self._po2json(info, locale)
    if not output:
        command.echo(payload)
        return
    with open(output, 'w') as f:
        f.write(payload)
@command(pass_script_info=True)
def po2js(self, info, locale, output=None):
    """Export a locale's catalog as a JS variable declaration.

    The variable name comes from the js_catalog_varname option with the
    upper-cased locale substituted in. Written to *output* or echoed.
    """
    varname = self.options['js_catalog_varname'] % locale.upper()
    payload = "var %s = %s;" % (varname, self._po2json(info, locale))
    if not output:
        command.echo(payload)
        return
    with open(output, 'w') as f:
        f.write(payload)
@contextlib.contextmanager
def edit_pofile(self, filename, save=True):
    """Context manager: parse *filename* into a babel Catalog, yield it
    for editing, then (unless save=False) write it back to the same file.

    NOTE(review): if the with-body raises, the exception propagates
    through yield and the write-back is skipped -- the file is left
    unchanged.
    """
    from babel.messages import pofile
    with open(filename, "r") as f:
        catalog = pofile.read_po(f)
    yield catalog
    if save:
        with open(filename, "w") as f:
            pofile.write_po(f, catalog)
def create_babel_mapping(jinja_dirs=None, jinja_exts=None, extractors=None):
    """Build the content of a babel mapping file (passed to pybabel -F).

    Always includes a [python:**.py] section. For each directory in
    jinja_dirs a [jinja2:DIR/**.html] section is added ('.' means the
    scanned root itself), each carrying an extensions= line when
    jinja_exts is given. extractors is an iterable of
    (extractor_name, settings_dict) pairs appended as extra sections.

    Returns the configuration as a single string.
    """
    exts = ",".join(jinja_exts or [])
    conf = "[python:**.py]\n"
    if jinja_dirs:
        for jinja_dir in jinja_dirs:
            if jinja_dir == '.':
                jinja_dir = ''  # babel wants paths relative to the scanned root
            conf += "[jinja2:%s]\n" % os.path.join(jinja_dir, "**.html")
            if exts:
                conf += "extensions=%s\n" % exts
    if extractors:
        for extractor, settings in extractors:
            conf += "[%s]\n" % extractor
            # .items() instead of the Python-2-only .iteritems(): identical
            # behavior on Python 2, and keeps the helper working on Python 3.
            for k, v in settings.items():
                conf += "%s = %s\n" % (k, v)
    return conf
def safe_placeholders(string, repl="##%s##"):
    """Replace %(name)s-style placeholders with positional tokens.

    Each placeholder is substituted by repl % index (e.g. "##0##") so the
    string can survive machine translation; the original names are
    collected in order. Reversed by unsafe_placeholders().

    Returns a (tokenized_string, placeholder_names) tuple.
    """
    placeholders = []
    def replace_placeholder(m):
        placeholders.append(m.group(1))
        return repl % (len(placeholders) - 1)
    # Fix: the previous pattern [a-zA-Z_]+ missed placeholder names that
    # contain digits (e.g. %(line2)s), leaving them exposed to translation
    # mangling; allow any valid identifier after the first character.
    string = re.sub(r"%\(([a-zA-Z_][a-zA-Z0-9_]*)\)s", replace_placeholder, string)
    return string, placeholders
def unsafe_placeholders(string, placeholders, repl="##%s##"):
    """Reverse safe_placeholders(): restore %(name)s-style placeholders.

    Each indexed token (repl % index) is replaced by the corresponding
    name from *placeholders*, in order. Returns the restored string.
    """
    for index, name in enumerate(placeholders):
        token = repl % index
        string = string.replace(token, "%%(%s)s" % name)
    return string
# Optional integration with frasco-forms: when that package is installed,
# register this package's locale/currency form fields under the "locale"
# and "currency" type names so forms can declare them declaratively.
# Silently skipped when frasco-forms is absent.
try:
    import frasco_forms.form
    import form
    frasco_forms.form.field_type_map.update({
        "locale": form.LocaleField,
        "currency": form.CurrencyField})
except ImportError:
    pass
| {
"repo_name": "frascoweb/frasco-babel",
"path": "frasco_babel/__init__.py",
"copies": "1",
"size": "20176",
"license": "mit",
"hash": -8844495387509039000,
"line_mean": 42.8608695652,
"line_max": 168,
"alpha_frac": 0.5942208565,
"autogenerated": false,
"ratio": 4.041666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5135887523166667,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.