text stringlengths 0 1.05M | meta dict |
|---|---|
"Add a label to publications given by IUID, DOI or PMID."
from publications import utils
from publications.publication import PublicationSaver
def add_label_to_publications(db, label, qualifier, identifiers):
    """Attach `label` with `qualifier` to every publication named in
    `identifiers` (IUID, DOI or PMID).

    Raises ValueError if any argument is missing or the label is unknown.
    Lookup failures for individual identifiers are collected and printed
    at the end instead of aborting the run.
    """
    # Validate the arguments up front.
    if not label:
        raise ValueError('no new label given')
    if not qualifier:
        raise ValueError('no new qualifier given')
    if not identifiers:
        raise ValueError('no identifiers given')
    # The label itself must already exist in the database.
    view = db.view('label/value', key=label, reduce=False)
    if len(view) == 0:
        raise ValueError("label %s does not exist" % label)
    added = 0
    failures = []
    for identifier in identifiers:
        try:
            publ = utils.get_publication(db, identifier)
        except KeyError as error:
            failures.append(str(error))
            continue
        # Skip publications that already carry this label.
        if label in publ['labels']:
            continue
        with PublicationSaver(doc=publ, db=db) as saver:
            # Copy before mutating so the saver sees a changed value.
            updated = publ['labels'].copy()
            updated[label] = qualifier
            saver['labels'] = updated
        added += 1
    print("Label '%s/%s' added to %i publications" % (label, qualifier, added))
    for failure in failures:
        print(failure)
if __name__ == '__main__':
    # Command-line entry point: read one identifier (IUID, DOI or PMID)
    # per line from IDFILE and attach --label/--qualifier to each.
    parser = utils.get_command_line_parser(
        'Add a label to all publications in a list.')
    parser.add_argument('--label', action='store', dest='label',
                        default=None, help='label to add')
    parser.add_argument('--qualifier', action='store', dest='qualifier',
                        default=None, help='qualifier of label to add')
    parser.add_argument('--file', action='store', dest='idfile',
                        metavar='IDFILE',
                        help='path to file containing publication identifiers')
    args = parser.parse_args()
    utils.load_settings(filepath=args.settings)
    db = utils.get_db()
    identifiers = []
    # Blank lines in the identifier file are skipped.
    with open(args.idfile) as infile:
        for line in infile:
            line = line.strip()
            if line: identifiers.append(line)
    print(len(identifiers), 'identifiers')
    add_label_to_publications(db, args.label, args.qualifier, identifiers)
| {
"repo_name": "pekrau/Publications",
"path": "publications/scripts/add_label_to_publications.py",
"copies": "1",
"size": "2160",
"license": "mit",
"hash": 4500351079703637000,
"line_mean": 37.5714285714,
"line_max": 79,
"alpha_frac": 0.5976851852,
"autogenerated": false,
"ratio": 4.21875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00038819875776397513,
"num_lines": 56
} |
# Add all ARMv8 state
def upgrader(cpt):
    """Upgrade an ARM gem5 checkpoint from the ARMv7 layout to ARMv8.

    Resizes the FP and integer register files, widens the interrupt
    arrays, remaps the v7 miscRegs into the (larger, reordered) v8
    numbering, adds new ISA fields, and creates the stage-2 MMU/TLB
    sections v8 expects.  No-op for non-ARM checkpoints.

    NOTE(review): this is Python 2 code (print statement, xrange); the
    register index mapping below is order-sensitive — do not reorder.
    """
    if cpt.get('root','isa') != 'arm':
        return
    import re

    print "Warning: The size of the FP register file has changed. "\
        "To get similar results you need to adjust the number of "\
        "physical registers in the CPU you're restoring into by "\
        "NNNN."
    # Find the CPU context's and upgrade their registers
    for sec in cpt.sections():
        re_xc_match = re.match('^.*?sys.*?\.cpu(\d+)*\.xc\.*', sec)
        if not re_xc_match:
            continue

        # Update floating point regs
        fpr = cpt.get(sec, 'floatRegs.i').split()
        # v8 has 128 normal fp and 32 special fp regs compared
        # to v7's 64 normal fp and 8 special fp regs.
        # Insert the extra normal fp registers at end of v7 normal fp regs
        for x in xrange(64):
            fpr.insert(64, "0")
        # Append the extra special registers
        for x in xrange(24):
            fpr.append("0")
        cpt.set(sec, 'floatRegs.i', ' '.join(str(x) for x in fpr))

        ir = cpt.get(sec, 'intRegs').split()
        # Add in v8 int reg state
        # Splice in R13_HYP
        ir.insert(20, "0")
        # Splice in INTREG_DUMMY and SP0 - SP3
        ir.extend(["0", "0", "0", "0", "0"])
        cpt.set(sec, 'intRegs', ' '.join(str(x) for x in ir))

    # Update the cpu interrupt field
    for sec in cpt.sections():
        re_int_match = re.match("^.*?sys.*?\.cpu(\d+)*$", sec)
        if not re_int_match:
            continue
        irqs = cpt.get(sec, "interrupts").split()
        # Two new interrupt lines in v8.
        irqs.append("false")
        irqs.append("false")
        cpt.set(sec, "interrupts", ' '.join(str(x) for x in irqs))

    # Update the per cpu interrupt structure
    for sec in cpt.sections():
        re_int_match = re.match("^.*?sys.*?\.cpu(\d+)*\.interrupts$", sec)
        if not re_int_match:
            continue
        irqs = cpt.get(sec, "interrupts").split()
        irqs.append("false")
        irqs.append("false")
        cpt.set(sec, "interrupts", ' '.join(str(x) for x in irqs))

    # Update the misc regs and add in new isa specific fields
    for sec in cpt.sections():
        re_isa_match = re.match("^.*?sys.*?\.cpu(\d+)*\.isa$", sec)
        if not re_isa_match:
            continue

        # New v8 ISA parameters, defaulted to the v7-compatible values.
        cpt.set(sec, 'haveSecurity', 'false')
        cpt.set(sec, 'haveLPAE', 'false')
        cpt.set(sec, 'haveVirtualization', 'false')
        cpt.set(sec, 'haveLargeAsid64', 'false')
        cpt.set(sec, 'physAddrRange64', '40')

        # splice in the new misc registers, ~200 -> 605 registers,
        # ordering does not remain consistent
        mr_old = cpt.get(sec, 'miscRegs').split()
        mr_new = [ '0' for x in xrange(605) ]

        # map old v7 miscRegs to new v8 miscRegs
        mr_new[0] = mr_old[0] # CPSR
        mr_new[16] = mr_old[1] # CPSR_Q
        mr_new[1] = mr_old[2] # SPSR
        mr_new[2] = mr_old[3] # SPSR_FIQ
        mr_new[3] = mr_old[4] # SPSR_IRQ
        mr_new[4] = mr_old[5] # SPSR_SVC
        mr_new[5] = mr_old[6] # SPSR_MON
        mr_new[8] = mr_old[7] # SPSR_UND
        mr_new[6] = mr_old[8] # SPSR_ABT
        mr_new[432] = mr_old[9] # FPSR
        mr_new[10] = mr_old[10] # FPSID
        mr_new[11] = mr_old[11] # FPSCR
        mr_new[18] = mr_old[12] # FPSCR_QC
        mr_new[17] = mr_old[13] # FPSCR_EXC
        mr_new[14] = mr_old[14] # FPEXC
        mr_new[13] = mr_old[15] # MVFR0
        mr_new[12] = mr_old[16] # MVFR1
        mr_new[28] = mr_old[17] # SCTLR_RST,
        mr_new[29] = mr_old[18] # SEV_MAILBOX,
        mr_new[30] = mr_old[19] # DBGDIDR
        mr_new[31] = mr_old[20] # DBGDSCR_INT,
        mr_new[33] = mr_old[21] # DBGDTRRX_INT,
        mr_new[34] = mr_old[22] # DBGTRTX_INT,
        mr_new[35] = mr_old[23] # DBGWFAR,
        mr_new[36] = mr_old[24] # DBGVCR,
        #mr_new[] = mr_old[25] # DBGECR -> UNUSED,
        #mr_new[] = mr_old[26] # DBGDSCCR -> UNUSED,
        #mr_new[] = mr_old[27] # DBGSMCR -> UNUSED,
        mr_new[37] = mr_old[28] # DBGDTRRX_EXT,
        mr_new[38] = mr_old[29] # DBGDSCR_EXT,
        mr_new[39] = mr_old[30] # DBGDTRTX_EXT,
        #mr_new[] = mr_old[31] # DBGDRCR -> UNUSED,
        mr_new[41] = mr_old[32] # DBGBVR,
        mr_new[47] = mr_old[33] # DBGBCR,
        #mr_new[] = mr_old[34] # DBGBVR_M -> UNUSED,
        #mr_new[] = mr_old[35] # DBGBCR_M -> UNUSED,
        mr_new[61] = mr_old[36] # DBGDRAR,
        #mr_new[] = mr_old[37] # DBGBXVR_M -> UNUSED,
        mr_new[64] = mr_old[38] # DBGOSLAR,
        #mr_new[] = mr_old[39] # DBGOSSRR -> UNUSED,
        mr_new[66] = mr_old[40] # DBGOSDLR,
        mr_new[67] = mr_old[41] # DBGPRCR,
        #mr_new[] = mr_old[42] # DBGPRSR -> UNUSED,
        mr_new[68] = mr_old[43] # DBGDSAR,
        #mr_new[] = mr_old[44] # DBGITCTRL -> UNUSED,
        mr_new[69] = mr_old[45] # DBGCLAIMSET,
        mr_new[70] = mr_old[46] # DBGCLAIMCLR,
        mr_new[71] = mr_old[47] # DBGAUTHSTATUS,
        mr_new[72] = mr_old[48] # DBGDEVID2,
        mr_new[73] = mr_old[49] # DBGDEVID1,
        mr_new[74] = mr_old[50] # DBGDEVID,
        mr_new[77] = mr_old[51] # TEEHBR,
        mr_new[109] = mr_old[52] # v7 SCTLR -> aarc32 SCTLR_NS
        mr_new[189] = mr_old[53] # DCCISW,
        mr_new[188] = mr_old[54] # DCCIMVAC,
        mr_new[183] = mr_old[55] # DCCMVAC,
        mr_new[271] = mr_old[56] # v7 CONTEXTIDR -> aarch32 CONTEXTIDR_NS,
        mr_new[274] = mr_old[57] # v7 TPIDRURW -> aarch32 TPIDRURW_NS,
        mr_new[277] = mr_old[58] # v7 TPIDRURO -> aarch32 TPIDRURO_NS,
        mr_new[280] = mr_old[59] # v7 TPIDRPRW -> aarch32 TPIDRPRW_NS,
        mr_new[170] = mr_old[60] # CP15ISB,
        mr_new[185] = mr_old[61] # CP15DSB,
        mr_new[186] = mr_old[62] # CP15DMB,
        mr_new[114] = mr_old[63] # CPACR,
        mr_new[101] = mr_old[64] # CLIDR,
        mr_new[100] = mr_old[65] # CCSIDR,
        mr_new[104] = mr_old[66] # v7 CSSELR -> aarch32 CSSELR_NS,
        mr_new[163] = mr_old[67] # ICIALLUIS,
        mr_new[168] = mr_old[68] # ICIALLU,
        mr_new[169] = mr_old[69] # ICIMVAU,
        mr_new[172] = mr_old[70] # BPIMVA,
        mr_new[164] = mr_old[71] # BPIALLIS,
        mr_new[171] = mr_old[72] # BPIALL,
        mr_new[80] = mr_old[73] # MIDR,
        mr_new[126] = mr_old[74] # v7 TTBR0 -> aarch32 TTBR0_NS,
        mr_new[129] = mr_old[75] # v7 TTBR1 -> aarch32 TTBR1_NS,
        mr_new[83] = mr_old[76] # TLBTR,
        mr_new[137] = mr_old[77] # v7 DACR -> aarch32 DACR_NS,
        mr_new[192] = mr_old[78] # TLBIALLIS,
        mr_new[193] = mr_old[79] # TLBIMVAIS,
        mr_new[194] = mr_old[80] # TLBIASIDIS,
        mr_new[195] = mr_old[81] # TLBIMVAAIS,
        mr_new[198] = mr_old[82] # ITLBIALL,
        mr_new[199] = mr_old[83] # ITLBIMVA,
        mr_new[200] = mr_old[84] # ITLBIASID,
        mr_new[201] = mr_old[85] # DTLBIALL,
        mr_new[202] = mr_old[86] # DTLBIMVA,
        mr_new[203] = mr_old[87] # DTLBIASID,
        mr_new[204] = mr_old[88] # TLBIALL,
        mr_new[205] = mr_old[89] # TLBIMVA,
        mr_new[206] = mr_old[90] # TLBIASID,
        mr_new[207] = mr_old[91] # TLBIMVAA,
        mr_new[140] = mr_old[92] # v7 DFSR -> aarch32 DFSR_NS,
        mr_new[143] = mr_old[93] # v7 IFSR -> aarch32 IFSR_NS,
        mr_new[155] = mr_old[94] # v7 DFAR -> aarch32 DFAR_NS,
        mr_new[158] = mr_old[95] # v7 IFAR -> aarch32 IFAR_NS,
        mr_new[84] = mr_old[96] # MPIDR,
        mr_new[241] = mr_old[97] # v7 PRRR -> aarch32 PRRR_NS,
        mr_new[247] = mr_old[98] # v7 NMRR -> aarch32 NMRR_NS,
        mr_new[131] = mr_old[99] # TTBCR,
        mr_new[86] = mr_old[100] # ID_PFR0,
        mr_new[81] = mr_old[101] # CTR,
        mr_new[115] = mr_old[102] # SCR,
        # Set the non-secure bit
        scr = int(mr_new[115])
        scr = scr | 0x1
        mr_new[115] = str(scr)
        ###
        mr_new[116] = mr_old[103] # SDER,
        mr_new[165] = mr_old[104] # PAR,
        mr_new[175] = mr_old[105] # V2PCWPR -> ATS1CPR,
        mr_new[176] = mr_old[106] # V2PCWPW -> ATS1CPW,
        mr_new[177] = mr_old[107] # V2PCWUR -> ATS1CUR,
        mr_new[178] = mr_old[108] # V2PCWUW -> ATS1CUW,
        mr_new[179] = mr_old[109] # V2POWPR -> ATS12NSOPR,
        mr_new[180] = mr_old[110] # V2POWPW -> ATS12NSOPW,
        mr_new[181] = mr_old[111] # V2POWUR -> ATS12NSOUR,
        mr_new[182] = mr_old[112] # V2POWUW -> ATS12NWOUW,
        mr_new[90] = mr_old[113] # ID_MMFR0,
        mr_new[92] = mr_old[114] # ID_MMFR2,
        mr_new[93] = mr_old[115] # ID_MMFR3,
        mr_new[112] = mr_old[116] # v7 ACTLR -> aarch32 ACTLR_NS
        mr_new[222] = mr_old[117] # PMCR,
        mr_new[230] = mr_old[118] # PMCCNTR,
        mr_new[223] = mr_old[119] # PMCNTENSET,
        mr_new[224] = mr_old[120] # PMCNTENCLR,
        mr_new[225] = mr_old[121] # PMOVSR,
        mr_new[226] = mr_old[122] # PMSWINC,
        mr_new[227] = mr_old[123] # PMSELR,
        mr_new[228] = mr_old[124] # PMCEID0,
        mr_new[229] = mr_old[125] # PMCEID1,
        mr_new[231] = mr_old[126] # PMXEVTYPER,
        mr_new[233] = mr_old[127] # PMXEVCNTR,
        mr_new[234] = mr_old[128] # PMUSERENR,
        mr_new[235] = mr_old[129] # PMINTENSET,
        mr_new[236] = mr_old[130] # PMINTENCLR,
        mr_new[94] = mr_old[131] # ID_ISAR0,
        mr_new[95] = mr_old[132] # ID_ISAR1,
        mr_new[96] = mr_old[133] # ID_ISAR2,
        mr_new[97] = mr_old[134] # ID_ISAR3,
        mr_new[98] = mr_old[135] # ID_ISAR4,
        mr_new[99] = mr_old[136] # ID_ISAR5,
        mr_new[20] = mr_old[137] # LOCKFLAG,
        mr_new[19] = mr_old[138] # LOCKADDR,
        mr_new[87] = mr_old[139] # ID_PFR1,
        # Set up the processor features register
        pfr = int(mr_new[87])
        pfr = pfr | 0x1011
        mr_new[87] = str(pfr)
        ###
        mr_new[238] = mr_old[140] # L2CTLR,
        mr_new[82] = mr_old[141] # TCMTR
        mr_new[88] = mr_old[142] # ID_DFR0,
        mr_new[89] = mr_old[143] # ID_AFR0,
        mr_new[91] = mr_old[144] # ID_MMFR1,
        mr_new[102] = mr_old[145] # AIDR,
        mr_new[146] = mr_old[146] # v7 ADFSR -> aarch32 ADFSR_NS,
        mr_new[148] = mr_old[147] # AIFSR,
        mr_new[173] = mr_old[148] # DCIMVAC,
        mr_new[174] = mr_old[149] # DCISW,
        mr_new[184] = mr_old[150] # MCCSW -> DCCSW,
        mr_new[187] = mr_old[151] # DCCMVAU,
        mr_new[117] = mr_old[152] # NSACR,
        mr_new[262] = mr_old[153] # VBAR,
        mr_new[265] = mr_old[154] # MVBAR,
        mr_new[267] = mr_old[155] # ISR,
        mr_new[269] = mr_old[156] # FCEIDR -> FCSEIDR,
        #mr_new[] = mr_old[157] # L2LATENCY -> UNUSED,
        #mr_new[] = mr_old[158] # CRN15 -> UNUSED,
        mr_new[599] = mr_old[159] # NOP
        mr_new[600] = mr_old[160] # RAZ,

        # Set the new miscRegs structure
        cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr_new))

    cpu_prefix = {}
    # Add in state for ITB/DTB
    for sec in cpt.sections():
        re_tlb_match = re.match('(^.*?sys.*?\.cpu(\d+)*)\.(dtb|itb)$', sec)
        if not re_tlb_match:
            continue
        cpu_prefix[re_tlb_match.group(1)] = True # Save off prefix to add
        # Set the non-secure bit (bit 9) to 1 for attributes
        attr = int(cpt.get(sec, '_attr'))
        attr = attr | 0x200
        cpt.set(sec, '_attr', str(attr))
        cpt.set(sec, 'haveLPAE', 'false')
        cpt.set(sec, 'directToStage2', 'false')
        cpt.set(sec, 'stage2Req', 'false')
        cpt.set(sec, 'bootUncacheability', 'true')

    # Add in extra state for the new TLB Entries
    for sec in cpt.sections():
        re_tlbentry_match = re.match('(^.*?sys.*?\.cpu(\d+)*)\.(dtb|itb).TlbEntry\d+$', sec)
        if not re_tlbentry_match:
            continue
        # Add in the new entries
        cpt.set(sec, 'longDescFormat', 'false')
        cpt.set(sec, 'vmid', '0')
        cpt.set(sec, 'isHyp', 'false')
        valid = cpt.get(sec, 'valid')
        if valid == 'true':
            cpt.set(sec, 'ns', 'true')
            cpt.set(sec, 'nstid', 'true')
            cpt.set(sec, 'pxn', 'true')
            cpt.set(sec, 'hap', '3')
            # All v7 code used 2 level page tables
            cpt.set(sec, 'lookupLevel', '2')
            attr = int(cpt.get(sec, 'attributes'))
            # set the non-secure bit (bit 9) to 1
            # as no previous v7 code used secure code
            attr = attr | 0x200
            cpt.set(sec, 'attributes', str(attr))
        else:
            # Invalid entries get neutral defaults.
            cpt.set(sec, 'ns', 'false')
            cpt.set(sec, 'nstid', 'false')
            cpt.set(sec, 'pxn', 'false')
            cpt.set(sec, 'hap', '0')
            cpt.set(sec, 'lookupLevel', '0')
        cpt.set(sec, 'outerShareable', 'false')

    # Add d/istage2_mmu and d/istage2_mmu.stage2_tlb
    for key in cpu_prefix:
        for suffix in ['.istage2_mmu', '.dstage2_mmu']:
            new_sec = key + suffix
            cpt.add_section(new_sec)
            new_sec = key + suffix + ".stage2_tlb"
            cpt.add_section(new_sec)
            # Fill in tlb info with some defaults
            cpt.set(new_sec, '_attr', '0')
            cpt.set(new_sec, 'haveLPAE', 'false')
            cpt.set(new_sec, 'directToStage2', 'false')
            cpt.set(new_sec, 'stage2Req', 'false')
            cpt.set(new_sec, 'bootUncacheability', 'false')
            cpt.set(new_sec, 'num_entries', '0')

# Checkpoint version this upgrader migrates to.
legacy_version = 9
| {
"repo_name": "bcheung92/Paperproject",
"path": "gem5/util/cpt_upgraders/armv8.py",
"copies": "29",
"size": "13359",
"license": "mit",
"hash": -8154455197665384000,
"line_mean": 41.5445859873,
"line_max": 92,
"alpha_frac": 0.5180777004,
"autogenerated": false,
"ratio": 2.5305929153248723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# Add a method addText to the Question superclass and provide a different implementation
# of ChoiceQuestion that calls addText rather than storing a list of choices.
class Question():
    """A quiz question holding its text, the correct answer, and a running
    count of numbered text pieces appended via add_text()."""

    def __init__(self):
        self._text = ""
        self._answer = ""
        # How many pieces of text have been appended with add_text().
        self._answer_number = 0

    def get_text(self):
        """Return the question text."""
        return self._text

    def get_answer(self):
        """Return the correct answer."""
        return self._answer

    def set_text(self, questionText):
        """Set the question text."""
        self._text = questionText

    def set_answer(self, correctResponse):
        """Set the correct answer."""
        self._answer = correctResponse

    def check_answer(self, response):
        """Return True if response matches the correct answer."""
        return response == self._answer

    def add_text(self, additional_text):
        """Append a numbered piece of text (e.g. an answer choice).

        Bug fix: the original formatted with the non-existent attribute
        ``self.i``, raising AttributeError; the intended value is the
        incremented counter ``self._answer_number``.
        """
        self._answer_number += 1
        self._text += "{} {}".format(self._answer_number, additional_text)

    def get_answer_number(self):
        """Return how many pieces of text have been appended so far."""
        return self._answer_number

    def display(self):
        """Print the question text without a trailing newline."""
        print(self._text, end=' ')
class ChoiceQuestion(Question):
    """A multiple-choice question whose choices are appended to the
    question text (via Question.add_text) instead of kept in a list."""

    def __init__(self):
        super().__init__()

    def add_choice(self, choice, correct):
        """Append one choice; if it is the correct one, record its
        running number (as a string) as the answer."""
        self.add_text(choice)
        if correct:
            self.set_answer(str(self.get_answer_number()))

    def display(self):
        """Display the question text (inherited behavior)."""
        super().display()
| {
"repo_name": "futurepr0n/Books-solutions",
"path": "Python-For-Everyone-Horstmann/Chapter10-Inheritance/P10_6.py",
"copies": "1",
"size": "1187",
"license": "mit",
"hash": -3348903170698032600,
"line_mean": 24.2340425532,
"line_max": 89,
"alpha_frac": 0.607925801,
"autogenerated": false,
"ratio": 4.103806228373703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002364066193853428,
"num_lines": 47
} |
## add a !mute function
## add a !100 function to set volume to 100
## !register <command> <uri>

# Import some necessary libraries.
import socket
import subprocess
import re
import cPickle as pickle  # Python 2: C-accelerated pickle

# Importing the list of registered commands
# NOTE(review): unpickling a local file; safe only as long as dict.pickle
# is written exclusively by this bot (see register() below).
with open('dict.pickle', 'rb') as handle:
    params = pickle.load(handle)

# Some basic variables used to configure the bot
server = "irc.freenode.net" # Server
channel = "#<channel>" # Channel
botnick = "Spot-Bot" # Your bots nick
uri = ""
# Shell command prefix that drives Spotify through an AppleScript.
spotify = "osascript /path/to/SpotifyControl.scpt"
# The helpers below all write to the module-global IRC socket `ircsock`
# (created near the bottom of the file) and the configured `channel`.

def ping(): # Function to respond to pings, so we don't get disconnected.
    ircsock.send("PONG :pingis\n")

def sendmsg(chan , msg): # This is the send message function, it simply sends messages to the channel.
    ircsock.send("PRIVMSG "+ chan +" :"+ msg +"\n")

def joinchan(chan): # This function is used to join channels.
    ircsock.send("JOIN "+ chan +"\n")

def hello(): # This function responds to a user that inputs "Hello <botnick>"
    ircsock.send("PRIVMSG "+ channel +" :Hello!\n")
def help(): #The help command. This can probably be cleaned up...
    # NOTE: shadows the builtin help(); renaming would break the dispatch
    # code at the bottom of the file, so it is kept as-is.
    ircsock.send("PRIVMSG "+ channel +" :The currently supported commands are...\n")
    ircsock.send("PRIVMSG "+ channel +" :!play Tells spotify to play. Accepts arguments in the form of spotify urls/uris\n")
    ircsock.send("PRIVMSG "+ channel +" :!stop Stops the music\n")
    ircsock.send("PRIVMSG "+ channel +" :!pause Pauses the music\n")
    ircsock.send("PRIVMSG "+ channel +" :!next & !skip Plays the next song\n")
    # Bug fix: this line previously said "Plays the next song" for !prev/!last.
    ircsock.send("PRIVMSG "+ channel +" :!prev & !last Plays the previous song\n")
    ircsock.send("PRIVMSG "+ channel +" :!shuffle Toggles shuffle on and off\n")
    ircsock.send("PRIVMSG "+ channel +" :!repeat Toggles repeat on and off\n")
    ircsock.send("PRIVMSG "+ channel +" :!volume N Changes the volume. N is substituted with a number (1 - 100)\n")
    # `line` is the module-level separator string.
    ircsock.send("PRIVMSG "+ channel +" :" + line + "\n")
    ircsock.send("PRIVMSG "+ channel +" :!register <alias> <uri> Allows you to register an alias. Both arguments are needed.\n")
    ircsock.send("PRIVMSG "+ channel +" :~<alias> Plays the uri asscoiated with a register alias. Please note it uses '~' rather that '!'\n")
    ircsock.send("PRIVMSG "+ channel +" :!aliases: Lists all registered aliases.\n")
def play(n):
    """Tell Spotify to play; an optional track follows "!play" in `n`."""
    # A word after "!play" means a specific track/uri was requested.
    if not re.search(r'!play *\w', n):
        subprocess.call(spotify + " play", shell=True)
        return
    _, song = n.split(':!play ', 1)
    subprocess.call(spotify + " play " + song, shell=True)
# Thin wrappers around the AppleScript controller for simple playback
# commands; each shells out with the module-global `spotify` prefix.

def pause():
    subprocess.call(spotify + " pause", shell=True)

def stop():
    subprocess.call(spotify + " stop", shell=True)

def next():  # NOTE: shadows the builtin next(); kept for caller compatibility
    subprocess.call(spotify + " next", shell=True)

def previous():
    subprocess.call(spotify + " previous", shell=True)

def shuffle():
    # Captures the script's stdout so the new shuffle state can be echoed.
    shuffle = subprocess.Popen(spotify + " shuffle", shell=True, stdout=subprocess.PIPE ).communicate()[0]
    ircsock.send("PRIVMSG "+ channel +" :" + shuffle + "\n")

def repeat():
    subprocess.call(spotify + " repeat", shell=True)
def volume(n):
    """Set Spotify's volume from a "!volume N" message (N in 1 - 100)."""
    if not re.search(r'!volume *\w', n):
        # No argument supplied: tell the user instead of shelling out.
        ircsock.send("PRIVMSG "+ channel +" :Volume requires a numeric input. Type !help for more information.\n")
        return
    _, level = n.split(':!volume ', 1)
    subprocess.call(spotify + " volume " + level, shell=True)
###################
## !REGISTER ##
###################
def register(n):
    # Register a new alias -> URI mapping from a "!register <alias> <uri>"
    # message and persist the mapping to dict.pickle.
    if re.search( r'!register *\w', n):
        null, holder = n.split("PRIVMSG " + channel + " ", 1)
        null, alias, uri = holder.split(" ", 2)
        for item in params:
            if item == alias:
                ircsock.send("PRIVMSG "+ channel +" :" + alias + " is already registered\n")
                break
        else:
            # for/else: runs only when no existing alias matched above.
            params[alias] = uri
            ircsock.send("PRIVMSG "+ channel +" :" + alias + " " + uri + " was registered!\n")
            with open('dict.pickle', 'wb') as handle:
                pickle.dump(params, handle)
#playing things that are registered
def alias(n):
    # Play the URI registered for "~<alias>"; complain if it is unknown.
    null, alias = n.split(":~", 1)
    for item in params:
        if item == alias:
            subprocess.call(spotify + " play " + params[alias], shell=True)
            break
    else:
        # for/else: no registered alias matched.
        ircsock.send("PRIVMSG "+ channel +" :" + alias + " was not found... You should register it!\n")
def aliases():
    # List every registered alias in the channel, one message per alias.
    ircsock.send("PRIVMSG "+ channel +" :Currently registered aliases:\n")
    for item in params:
        ircsock.send("PRIVMSG "+ channel +" :" + item + "\n")
#####################
## END !REGISTER ##
#####################
def info(): #Posts all of the spotify information about the current song
    # Capture the controller's multi-line "info" output for the current track.
    current = subprocess.Popen(spotify + " info", shell=True, stdout=subprocess.PIPE ).communicate()[0]
    # Bug fix: the original split the stale module-global `output` (captured
    # once at startup), so !info always reported the first song. Split the
    # fresh capture instead.
    pre = "PRIVMSG "+ channel +" :"
    infoList = [pre + x for x in current.split('\n', 10)]
    for x in infoList:
        ircsock.send(x + "\n")
line = "-----------------------------------------------------------" #a variable for sexy formating. Can almost certainly be done a better way
output = subprocess.Popen(spotify + " info", shell=True, stdout=subprocess.PIPE ).communicate()[0] #all of this is setting up for the ability to post in chat anytime the song is changed
array = []
array = output.split('\n', 10)
track = array[2]
artist = array[1]
null, track = track.split("Track: ", 2)
null, artist = artist.split("Artist: ", 2)
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((server, 6667)) # Here we connect to the server using the port 6667
ircsock.send("USER "+ botnick +" "+ botnick +" "+ botnick +" :This bot is a result of a tutoral covered on http://shellium.org/wiki.\n") # user authentication
ircsock.send("NICK "+ botnick +"\n") # here we actually assign the nick to the bot
joinchan(channel) # Join the channel using the functions we previously defined
while 1: #Starting to listen
ircmsg = ircsock.recv(2048) # receive data from the server
ircmsg = ircmsg.strip('\n\r') # removing any unnecessary linebreaks.
print(ircmsg) # Here we print what's coming from the server. This is useful for debugging, and getting feedback that things are working!
if ircmsg.find(' PRIVMSG ')!=-1:
nick=ircmsg.split('!')[0][1:]
channel=ircmsg.split(' PRIVMSG ')[-1].split(' :')[0]
###################
## USER COMMANDS ##
###################
if ircmsg.find(":Hello "+ botnick) != -1: # a "just for fun" hello command.
hello()
if ircmsg.find(":!play") != -1: #sends the content of the string to the play command. If empty, just play. If it has an arg, go to that song
play(ircmsg)
if ircmsg.find(":!pause") != -1: #sends the pause command
pause()
if ircmsg.find(":!stop") != -1: #sends the stop command
stop()
if ircmsg.find(":!next") != -1 or ircmsg.find(":!skip") != -1: #jumps to the next song
next()
if ircmsg.find(":!prev") != -1 or ircmsg.find(":!previous") != -1 or ircmsg.find(":!skip") != -1: #jump back to the previous song
previous()
if ircmsg.find(":!shuffle") != -1: #Tells spotify to toggle shuffle
shuffle()
if ircmsg.find(":!repeat") != -1: #Tells spotify to toggle repeat
shuffle()
if ircmsg.find(":!volume") != -1: #Sets the volume to be equal to the number passed in the command
volume(ircmsg)
if ircmsg.find(":!help") != -1: #lists all commands (currently hard coded...)
help()
if ircmsg.find(":!info") != -1: #lists all information about the current song
info()
if ircmsg.find(":!register") != -1: #Allowing users to register aliases
register(ircmsg)
if ircmsg.find(":!aliases") != -1: #list all registered aliases
aliases()
if ircmsg.find(":~") != -1:
alias(ircmsg)
#######################
## AUTO TRACK UPDATE ##
#######################
# this monitors the spotify information. If the song changes, the bot posts to the chat with
# the song and the artists
output = subprocess.Popen(spotify + " info", shell=True, stdout=subprocess.PIPE ).communicate()[0]
array = []
array = output.split('\n', 10)
if track != array[2]:
track = array[2]
artist = array[1]
null, track2 = track.split("Track: ", 2)
null, artist = artist.split("Artist: ", 2)
ircsock.send("PRIVMSG "+ channel +" :" + line + "\n")
ircsock.send("PRIVMSG "+ channel +" : Now playing:" + track2 +" by " + artist + "\n")
ircsock.send("PRIVMSG "+ channel +" :" + line + "\n")
if ircmsg.find("PING :") != -1: # making sure to respond to server pings
ping() | {
"repo_name": "Pink401k/spotify-irc-bot",
"path": "spotify-irc.py",
"copies": "1",
"size": "8698",
"license": "mit",
"hash": -8996589431997328000,
"line_mean": 39.8403755869,
"line_max": 186,
"alpha_frac": 0.6183030582,
"autogenerated": false,
"ratio": 3.409643277146217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4527946335346217,
"avg_score": null,
"num_lines": null
} |
"""Add an address translation to an IPSEC tunnel context."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
# from SoftLayer.CLI.exceptions import ArgumentError
# from SoftLayer.CLI.exceptions import CLIHalt
@click.command()
@click.argument('context_id', type=int)
@click.option('-s', '--static-ip', required=True,
              help='Static IP address value')
@click.option('-r', '--remote-ip', required=True,
              help='Remote IP address value')
@click.option('-n', '--note', default=None, help='Note value')
@environment.pass_env
def cli(env, context_id, static_ip, remote_ip, note):
    """Add an address translation to an IPSEC tunnel context.

    A separate configuration request should be made to realize changes on
    network devices.
    """
    ipsec = SoftLayer.IPSECManager(env.client)
    # Raises if no tunnel context exists for the given id; only the
    # validation side effect is needed here.
    ipsec.get_tunnel_context(context_id)
    created = ipsec.create_translation(context_id,
                                       static_ip=static_ip,
                                       remote_ip=remote_ip,
                                       notes=note)
    env.out('Created translation from {} to {} #{}'
            .format(static_ip, remote_ip, created['id']))
| {
"repo_name": "softlayer/softlayer-python",
"path": "SoftLayer/CLI/vpn/ipsec/translation/add.py",
"copies": "3",
"size": "1435",
"license": "mit",
"hash": -5974673219158660000,
"line_mean": 33.1666666667,
"line_max": 73,
"alpha_frac": 0.5881533101,
"autogenerated": false,
"ratio": 4.375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.64631533101,
"avg_score": null,
"num_lines": null
} |
"""Add and upload SSL certificate details."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
@click.command()
@click.option('--crt',
              type=click.Path(exists=True),
              help="Certificate file")
@click.option('--csr',
              type=click.Path(exists=True),
              help="Certificate Signing Request file")
@click.option('--icc',
              type=click.Path(exists=True),
              help="Intermediate Certificate file")
@click.option('--key', type=click.Path(exists=True), help="Private Key file")
@click.option('--notes', help="Additional notes")
@environment.pass_env
def cli(env, crt, csr, icc, key, notes):
    """Add and upload SSL certificate details."""
    template = {
        'intermediateCertificate': '',
        'certificateSigningRequest': '',
        'notes': notes,
    }
    # Use context managers so file handles are closed promptly; the
    # original left open(...).read() handles to the garbage collector.
    with open(crt) as crt_file:
        template['certificate'] = crt_file.read()
    with open(key) as key_file:
        template['privateKey'] = key_file.read()
    if csr:
        with open(csr) as csr_file:
            template['certificateSigningRequest'] = csr_file.read()
    if icc:
        with open(icc) as icc_file:
            template['intermediateCertificate'] = icc_file.read()

    manager = SoftLayer.SSLManager(env.client)
    manager.add_certificate(template)
| {
"repo_name": "skraghu/softlayer-python",
"path": "SoftLayer/CLI/ssl/add.py",
"copies": "5",
"size": "1270",
"license": "mit",
"hash": 4586629513029185500,
"line_mean": 29.2380952381,
"line_max": 77,
"alpha_frac": 0.6299212598,
"autogenerated": false,
"ratio": 4.006309148264984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 42
} |
"""Add and upload SSL certificate details."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
import click
@click.command()
@click.option('--crt',
              type=click.Path(exists=True),
              help="Certificate file")
@click.option('--csr',
              type=click.Path(exists=True),
              help="Certificate Signing Request file")
@click.option('--icc',
              type=click.Path(exists=True),
              help="Intermediate Certificate file")
@click.option('--key', type=click.Path(exists=True), help="Private Key file")
@click.option('--notes', help="Additional notes")
@environment.pass_env
def cli(env, crt, csr, icc, key, notes):
    """Add and upload SSL certificate details."""
    template = {
        'intermediateCertificate': '',
        'certificateSigningRequest': '',
        'notes': notes,
    }
    # Use context managers so file handles are closed promptly; the
    # original left open(...).read() handles to the garbage collector.
    with open(crt) as crt_file:
        template['certificate'] = crt_file.read()
    with open(key) as key_file:
        template['privateKey'] = key_file.read()
    if csr:
        with open(csr) as csr_file:
            template['certificateSigningRequest'] = csr_file.read()
    if icc:
        with open(icc) as icc_file:
            template['intermediateCertificate'] = icc_file.read()

    manager = SoftLayer.SSLManager(env.client)
    manager.add_certificate(template)
| {
"repo_name": "briancline/softlayer-python",
"path": "SoftLayer/CLI/ssl/add.py",
"copies": "3",
"size": "1270",
"license": "mit",
"hash": 450123584773859900,
"line_mean": 29.2380952381,
"line_max": 77,
"alpha_frac": 0.6299212598,
"autogenerated": false,
"ratio": 4.006309148264984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6136230408064984,
"avg_score": null,
"num_lines": null
} |
"""Add a new file to the xpacsdb.patient_info table.
Usage:
add_patient.py [options] <patientid> <file>
add_patient.py -h
Arguments:
patientid PatientID to attach with the file.
Error if there is no such PatientID.
Use add_patient.py to add new patient to the database.
file File to be added to the database
Options:
-h, --help Show this screen
-d, --debug Show some debug information
-s <host> The MySQL server host IP address.
-p <port> MySQL port. Default is 3306.
-u <user> User name.
--password=<password> User password.
--db=<database> Database name. Default is xpacs.
Author: Avan Suinesiaputra - University of Auckland (2017)
"""
# Docopt is a library for parsing command line arguments
import docopt
import getpass
import mysql.connector
import termutils as tu
import datetime
import magic
def read_file(filename):
    """Return the raw bytes of *filename*."""
    with open(filename, 'rb') as handle:
        return handle.read()
if __name__ == '__main__':
try:
# Parse arguments, use file docstring as a parameter definition
arguments = docopt.docopt(__doc__)
# Default values
if not arguments['-s']:
arguments['-s'] = '127.0.0.1'
if not arguments['-p']:
arguments['-p'] = 3306
if not arguments['--db']:
arguments['--db'] = 'xpacs'
# Check user & password
if not arguments['-u']:
arguments['-u'] = raw_input('Username: ')
if arguments['--password'] is None:
arguments['--password'] = getpass.getpass('Password: ')
# print arguments for debug
if arguments['--debug']:
tu.debug(str(arguments))
# Handle invalid options
except docopt.DocoptExit as e:
tu.error(e.message)
exit()
# connecting
print 'Connecting to mysql://' + arguments['-s'] + ':' + str(arguments['-p']) + ' ...'
try:
cnx = mysql.connector.connect(user=arguments['-u'],
host=arguments['-s'],
port=arguments['-p'],
password=arguments['--password'],
database=arguments['--db'])
except mysql.connector.Error as err:
print(err)
exit()
# check the patient
# This should be unique. Have to check it first.
query = "SELECT * FROM patient_info WHERE patient_id = '" + arguments['<patientid>'] + "'"
if arguments['--debug']:
tu.debug(query)
cursor = cnx.cursor(buffered=True, named_tuple=True)
cursor.execute(query)
isexist = cursor.rowcount>0
if arguments['--debug']:
tu.debug("Number of rows = " + str(cursor.rowcount))
# get the row id of the patient
id = cursor.fetchone().id
if arguments['--debug']:
tu.debug("The id row for " + arguments['<patientid>'] + " is " + str(id))
cursor.close()
if not isexist:
tu.error("Patient " + arguments['<patientid>'] + " does not exist. Run add_patient.py to add a new patient.")
else:
# creation_date = today
dd = datetime.date.today()
# ask for description
desc = raw_input('Description [press <enter> to skip]: ')
# build the query
add_file = ("INSERT INTO aux_file "
"(patient_infofk_id, creation_date, description, file, file_content_type) "
"VALUES (%(pat_id)s, %(date)s, %(desc)s, %(file)s, %(content_type)s)")
new_file = {
'pat_id': id,
'date': dd,
'desc': desc,
'file': read_file(arguments['<file>']),
'content_type': magic.from_file(arguments['<file>'])
}
try:
cursor = cnx.cursor()
cursor.execute(add_file, new_file)
cnx.commit()
cursor.close()
except mysql.connector.Error as err:
tu.error(str(err))
exit()
tu.ok("File " + arguments['<file>'] + " added to the database")
| {
"repo_name": "CardiacAtlasProject/CAPServer2.0",
"path": "dbase/utils/add_file.py",
"copies": "1",
"size": "3963",
"license": "apache-2.0",
"hash": -8813165802459661000,
"line_mean": 28.1397058824,
"line_max": 113,
"alpha_frac": 0.5710320464,
"autogenerated": false,
"ratio": 3.7246240601503757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47956561065503756,
"avg_score": null,
"num_lines": null
} |
"""Add a new guest OS type"""
from baseCmd import *
from baseResponse import *
class addGuestOsCmd (baseCmd):
    """Request wrapper for the asynchronous addGuestOs API call."""
    typeInfo = {}

    def __init__(self):
        # addGuestOs is an asynchronous API call.
        self.isAsync = "true"
        # ID of Guest OS category (required).
        self.oscategoryid = None
        self.typeInfo['oscategoryid'] = 'uuid'
        # Unique display name for Guest OS (required).
        self.osdisplayname = None
        self.typeInfo['osdisplayname'] = 'string'
        # Optional name for Guest OS.
        self.name = None
        self.typeInfo['name'] = 'string'
        self.required = ["oscategoryid", "osdisplayname", ]
class addGuestOsResponse (baseResponse):
    typeInfo = {}

    def __init__(self):
        """Response fields for the addGuestOs API call (all string-typed)."""
        # id: the ID of the OS type
        # description: the name/description of the OS type
        # isuserdefined: is the guest OS user defined
        # oscategoryid: the ID of the OS category
        for attr in ('id', 'description', 'isuserdefined', 'oscategoryid'):
            setattr(self, attr, None)
            self.typeInfo[attr] = 'string'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/addGuestOs.py",
"copies": "1",
"size": "1211",
"license": "apache-2.0",
"hash": -2876783132625176000,
"line_mean": 28.5365853659,
"line_max": 59,
"alpha_frac": 0.5722543353,
"autogenerated": false,
"ratio": 3.9318181818181817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9882121297605987,
"avg_score": 0.024390243902439025,
"num_lines": 41
} |
"""Add a new patient to the xpacsdb.patient_info table
Usage:
add_patient.py [options]
add_patient.py -h
Options:
-h, --help Show this screen
-d, --debug Show some debug information
-s <host> The MySQL server host IP address.
-p <port> MySQL port. Default is 3306.
-u <user> User name.
--password=<password> User password.
--db=<database> Database name. Default is xpacs.
-f <csv_file> Read list of patients from a CSV file (see below)
Batch addition of patients
^^^^^^^^^^^^^^^^^^^^^^^^^^
If -f option is given, then patients are added in from a CSV file in a batch
mode. This file should have the following headers:
'patient_id', 'gender', 'cohort', 'ethnicity', 'primary_diagnosis'
If there are existing patient_ids in the database, then the existing rows
will be updated.
Author: Avan Suinesiaputra - University of Auckland (2017)
"""
# Docopt is a library for parsing command line arguments
import docopt
import getpass
import mysql.connector
import termutils as tu
import sqlutils as su
if __name__ == '__main__':
try:
# Parse arguments, use file docstring as a parameter definition
arguments = docopt.docopt(__doc__)
# Default values
if not arguments['-s']:
arguments['-s'] = '127.0.0.1'
if not arguments['-p']:
arguments['-p'] = 3306
if not arguments['--db']:
arguments['--db'] = 'xpacs'
# Check user & password
if not arguments['-u']:
arguments['-u'] = raw_input('Username: ')
if arguments['--password'] is None:
arguments['--password'] = getpass.getpass('Password: ')
# print arguments for debug
if arguments['--debug']:
tu.debug(str(arguments))
# Handle invalid options
except docopt.DocoptExit as e:
tu.error(e.message)
exit()
# connecting
print 'Connecting to mysql://' + arguments['-s'] + ':' + str(arguments['-p']) + ' ...'
try:
cnx = mysql.connector.connect(user=arguments['-u'],
host=arguments['-s'],
port=arguments['-p'],
password=arguments['--password'],
database=arguments['--db'])
except mysql.connector.Error as err:
print(err)
exit()
existing_patients = su.get_all_patient_ids(cnx)
# it's either by CSV file or interactive
if arguments['-f'] is None:
# First question: who is the patient?
patientID = raw_input('Patient ID: ')
if patientID in existing_patients:
tu.error("Patient " + patientID + " already exists.")
exit()
# Remaining questions
cohort = raw_input('Cohort [press <enter> to skip]: ')
ethnicity = raw_input('Ethnicity [press <enter> to skip]: ')
gender = raw_input('Cohort [M/F/U=unknown (default)]: ')
if str.lower(gender) == 'f':
gender = 'female'
elif str.lower(gender) == 'm':
gender = 'male'
else:
gender = 'unknown'
primary_diagnosis = raw_input('Primary diagnosis [press <enter> to skip]: ')
query = su.insert_new_patient_info(cnx, {
'patient_id': patientID,
'cohort': cohort,
'ethnicity': ethnicity,
'gender': gender,
'primary_diagnosis': primary_diagnosis
})
if arguments['--debug']:
tu.debug(query)
tu.ok("Patient " + patientID + " added to the database")
# don't forget to close the connection
cnx.close()
else:
try:
for row in su.read_csv(arguments['-f']):
# fix gender
g = str.lower(row['gender'])
if g == 'male' or g == 'm':
row['gender'] = 'male'
elif g == 'female' or g == 'f':
row['gender'] = 'female'
else:
row['gender'] = 'unknown'
# update or insert
if row['patient_id'] in existing_patients:
if arguments['--debug']:
tu.warn('Updating ' + row['patient_id'])
query = su.update_patient_info(cnx, row)
else:
if arguments['--debug']:
tu.debug('Inserting ' + row['patient_id'])
query = su.insert_new_patient_info(cnx, row)
if arguments['--debug']:
print query
except Exception, e:
tu.error(str(e))
exit()
tu.ok("SUCCESS")
| {
"repo_name": "CardiacAtlasProject/CAPServer2.0",
"path": "dbase/utils/add_patient.py",
"copies": "1",
"size": "4541",
"license": "apache-2.0",
"hash": 7769446019170502000,
"line_mean": 28.487012987,
"line_max": 86,
"alpha_frac": 0.5463554283,
"autogenerated": false,
"ratio": 3.86468085106383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49110362793638296,
"avg_score": null,
"num_lines": null
} |
"""Add a new repository for the Arcyd instance to manage."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_addrepo
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import phlurl_request
import abdi_repo
import abdi_repoargs
import abdt_fs
# Template for a repository's config file.  The '@'-prefixed lines pull in
# the shared phabricator / repohost config files through argparse's
# fromfile_prefix_chars='@' support (see process() below).
_CONFIG = """
@{phabricator_config}
@{repohost_config}
--repo-desc
{repo_desc}
--repo-url
{repo_url}
--repo-path
{repo_path}
--try-touch-path
{try_touch_path}
--ok-touch-path
{ok_touch_path}
""".strip()
# Appended to the generated config only when --admin-emails was supplied.
_CONFIG_ADMIN_EMAILS_FORMAT = """
--admin-emails
{admin_emails}
""".strip()
def getFromfilePrefixChars():
    """Return the fromfile prefix characters for this command (none used)."""
    return None
def setupParser(parser):
    """Register addrepo's command-line arguments on the supplied parser."""
    # positional arguments -- declaration order is significant
    parser.add_argument(
        'phabricator_name',
        type=str,
        help="name of the Phabricator instance associated with the repo.")
    parser.add_argument(
        'repohost_name',
        type=str,
        help="name of the repohost associated with the repo.")
    parser.add_argument(
        'repo_url',
        type=str,
        help="url to clone the repository, e.g. 'github:org/repo' or maybe "
             "something like 'org/repo' if using '--repo-url-format'.")

    # optional arguments
    name_help = (
        "string identifier for the repository, '{regex}'. "
        "will guess a name from the mandatory args if "
        "none provided.".format(regex=abdt_fs.CONFIG_NAME_REGEX))
    parser.add_argument(
        '--name', type=str, metavar='STR', help=name_help)
    parser.add_argument(
        '--repo-desc', type=str, metavar='STR',
        help="very short description of the repository, appears on the "
             "dashboard, in error messages and in logs. "
             "will guess a name from the mandatory args if none provided.")
    parser.add_argument(
        '--admin-emails', nargs='*', metavar="TO", type=str,
        help="list of email addresses to send important repo events to")
def _repo_desc_for_params(phab, repohost, url):
return "{url}".format(
phab=phab, repohost=repohost, url=url)
def _repo_name_for_params(phab, repohost, url):
"""Return a sensible repo name from the given parameters.
Usage examples:
>>> _repo_name_for_params('phab', 'host', 'namespace/repo.1.git')
'phab_host_namespace_repo-1'
:phab: the string name of the phab config
:repohost: the string name of the repository host
:url: the relative url of the repository
:returns: the string best-effort to name the repository config
"""
no_dot_git_url = url[:-4] if url.endswith('.git') else url
dot_to_dash = no_dot_git_url.replace(".", "-")
snakecase_url = dot_to_dash.lower().replace("/", "_")
name = "{phab}_{repohost}_{url}".format(
phab=phab, repohost=repohost, url=snakecase_url)
return name
def process(args):
    """Generate, validate and register a repo config for this Arcyd instance."""
    accessor = abdt_fs.make_default_accessor()

    name = args.name
    if name is None:
        name = _repo_name_for_params(
            args.phabricator_name, args.repohost_name, args.repo_url)

    description = args.repo_desc
    if description is None:
        description = _repo_desc_for_params(
            args.phabricator_name, args.repohost_name, args.repo_url)

    try_touch_path = accessor.layout.repo_try(name)
    ok_touch_path = accessor.layout.repo_ok(name)
    repo_path = accessor.layout.repo(name)

    # refuse to clobber an existing repository checkout
    if os.path.exists(repo_path):
        raise Exception('{} already exists'.format(repo_path))

    # these lookups verify that the named configs actually exist
    phab_config_path = accessor.get_phabricator_config_rel_path(
        args.phabricator_name)
    repohost_config_path = accessor.get_repohost_config_rel_path(
        args.repohost_name)

    # generate the config file
    config = _CONFIG.format(
        phabricator_config=phab_config_path,
        repohost_config=repohost_config_path,
        repo_desc=description,
        repo_url=args.repo_url,
        repo_path=repo_path,
        try_touch_path=try_touch_path,
        ok_touch_path=ok_touch_path)

    if args.admin_emails:
        emails_block = _CONFIG_ADMIN_EMAILS_FORMAT.format(
            admin_emails='\n'.join(args.admin_emails))
        config = '\n'.join([config, emails_block])

    # round-trip the generated config through the real repo argument parser
    # to catch mistakes before anything touches disk
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    abdi_repoargs.setup_parser(parser)
    repo_params = parser.parse_args(config.splitlines())
    abdi_repoargs.validate_args(repo_params)

    # make sure we can use the snoop URL before committing to anything
    repo_snoop_url = abdi_repoargs.get_repo_snoop_url(repo_params)
    if repo_snoop_url:
        phlurl_request.get(repo_snoop_url)

    # determine the clone and push urls from the parsed params
    repo_url = abdi_repoargs.get_repo_url(repo_params)
    repo_push_url = abdi_repoargs.get_repo_push_url(repo_params)

    with accessor.lockfile_context():
        with abdi_repo.setup_repo_context(repo_url, repo_path, repo_push_url):
            accessor.create_repo_config(name, config)
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "kjedruczyk/phabricator-tools",
"path": "py/abd/abdcmd_addrepo.py",
"copies": "4",
"size": "6321",
"license": "apache-2.0",
"hash": 6135597288517758000,
"line_mean": 29.3894230769,
"line_max": 79,
"alpha_frac": 0.6060749881,
"autogenerated": false,
"ratio": 3.7138660399529964,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6319941028052998,
"avg_score": null,
"num_lines": null
} |
"""Add a new repository host for the Arcyd instance to refer to."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_addrepohost
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abdi_repoargs
import abdt_fs
# Config-file fragments, one per optional command-line argument; process()
# joins whichever apply into the repohost config body.
_CONFIG_REPO_URL_FORMAT = """
--repo-url-format
{repo_url_format}
""".strip()
_CONFIG_REPO_PUSH_URL_FORMAT = """
--repo-push-url-format
{repo_push_url_format}
""".strip()
_CONFIG_REPO_SNOOP_URL_FORMAT = """
--repo-snoop-url-format
{repo_snoop_url_format}
""".strip()
_CONFIG_BRANCH_URL_FORMAT = """
--branch-url-format
{branch_url_format}
""".strip()
_CONFIG_ADMIN_EMAILS_FORMAT = """
--admin-emails
{admin_emails}
""".strip()
def getFromfilePrefixChars():
    """Return the fromfile prefix characters for this command (none used)."""
    return None
def setupParser(parser):
    """Register addrepohost's command-line arguments on the supplied parser."""
    name_help = "string name of the repohost, {regex}".format(
        regex=abdt_fs.CONFIG_NAME_REGEX)
    parser.add_argument(
        '--name', type=str, metavar='STR', required=True, help=name_help)
    # shared repohost options (url formats etc.) come from abdi_repoargs
    abdi_repoargs.setup_repohost_parser(parser)
    parser.add_argument(
        '--admin-emails', nargs='*', metavar="TO", type=str,
        help="list of email addresses to send important repo events to")
def process(args):
    """Write a new repohost config built from the supplied arguments."""
    fs = abdt_fs.make_default_accessor()

    # collect one formatted fragment per supplied option, in a fixed order
    fragments = []
    if args.repo_url_format:
        fragments.append(_CONFIG_REPO_URL_FORMAT.format(
            repo_url_format=args.repo_url_format))
    if args.repo_push_url_format:
        fragments.append(_CONFIG_REPO_PUSH_URL_FORMAT.format(
            repo_push_url_format=args.repo_push_url_format))
    if args.repo_snoop_url_format:
        fragments.append(_CONFIG_REPO_SNOOP_URL_FORMAT.format(
            repo_snoop_url_format=args.repo_snoop_url_format))
    if args.branch_url_format:
        fragments.append(_CONFIG_BRANCH_URL_FORMAT.format(
            branch_url_format=args.branch_url_format))
    if args.admin_emails:
        fragments.append(_CONFIG_ADMIN_EMAILS_FORMAT.format(
            admin_emails='\n'.join(args.admin_emails)))

    # equivalent to the cumulative '\n'.join of the original: fragments
    # separated by newlines, with outer whitespace stripped
    config = '\n'.join(fragments).strip()

    # write out the config under the instance lock
    with fs.lockfile_context():
        fs.create_repohost_config(args.name, config)
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "aevri/phabricator-tools",
"path": "py/abd/abdcmd_addrepohost.py",
"copies": "4",
"size": "3620",
"license": "apache-2.0",
"hash": 8745047236506278000,
"line_mean": 26.6335877863,
"line_max": 79,
"alpha_frac": 0.5513812155,
"autogenerated": false,
"ratio": 3.9519650655021836,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6503346281002184,
"avg_score": null,
"num_lines": null
} |
"""Add a new SSH key."""
# :license: MIT, see LICENSE for more details.
from os import path
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
@click.command()
@click.argument('label')
@click.option('--in-file', '-f',
              type=click.Path(exists=True),
              help="The id_rsa.pub file to import for this key")
@click.option('--key', '-k', help="The actual SSH key")
@click.option('--note', help="Extra note that will be associated with key")
@environment.pass_env
def cli(env, label, in_file, key, note):
    """Add a new SSH key."""
    # exactly one of --in-file / --key must be supplied
    if in_file is None and key is None:
        raise exceptions.ArgumentError(
            'Either [-f | --in-file] or [-k | --key] arguments are required to add a key'
        )

    if in_file and key:
        raise exceptions.ArgumentError(
            '[-f | --in-file] is not allowed with [-k | --key]'
        )

    if key:
        key_text = key
    else:
        # BUGFIX: mode 'rU' was deprecated and removed in Python 3.11;
        # plain 'r' already gives universal-newline text mode.  The context
        # manager guarantees the file is closed even if read() raises.
        with open(path.expanduser(in_file), 'r') as key_file:
            key_text = key_file.read().strip()

    mgr = SoftLayer.SshKeyManager(env.client)
    result = mgr.add_key(key_text, label, note)

    env.fout("SSH key added: %s" % result.get('fingerprint'))
| {
"repo_name": "kyubifire/softlayer-python",
"path": "SoftLayer/CLI/sshkey/add.py",
"copies": "2",
"size": "1257",
"license": "mit",
"hash": 2239842936819517200,
"line_mean": 28.2325581395,
"line_max": 89,
"alpha_frac": 0.6133651551,
"autogenerated": false,
"ratio": 3.501392757660167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002583979328165375,
"num_lines": 43
} |
"""Add a new subnet to your account."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
@click.command(short_help="Add a new subnet to your account")
@click.argument('network', type=click.Choice(['public', 'private']))
@click.argument('quantity', type=click.INT)
@click.argument('vlan-id')
@click.option('--v6', '--ipv6', is_flag=True, help="Order IPv6 Addresses")
@click.option('--test',
              is_flag=True,
              help="Do not order the subnet; just get a quote")
@environment.pass_env
def cli(env, network, quantity, vlan_id, ipv6, test):
    """Add a new subnet to your account. Valid quantities vary by type.

    \b
    Type - Valid Quantities (IPv4)
    public - 4, 8, 16, 32
    private - 4, 8, 16, 32, 64

    \b
    Type - Valid Quantities (IPv6)
    public - 64
    """
    mgr = SoftLayer.NetworkManager(env.client)
    # ordering costs money: confirm unless quoting or confirmations skipped
    if not (test or env.skip_confirmations):
        if not formatting.confirm("This action will incur charges on your "
                                  "account. Continue?"):
            raise exceptions.CLIAbort('Cancelling order.')

    version = 4
    if ipv6:
        version = 6

    result = mgr.add_subnet(network,
                            quantity=quantity,
                            vlan_id=vlan_id,
                            version=version,
                            test_order=test)
    if not result:
        raise exceptions.CLIAbort(
            'Unable to place order: No valid price IDs found.')

    table = formatting.Table(['Item', 'cost'])
    table.align['Item'] = 'r'
    table.align['cost'] = 'r'

    total = 0.0
    if 'prices' in result:
        for price in result['prices']:
            # BUGFIX: the line-item rate previously used
            # price['recurringFee'] directly and raised KeyError on entries
            # without that key, while the total silently defaulted to 0.0;
            # use the same .get() default for both.
            fee = float(price.get('recurringFee', 0.0))
            total += fee
            table.add_row([price['item']['description'], "%.2f" % fee])

    table.add_row(['Total monthly cost', "%.2f" % total])
    env.fout(table)
| {
"repo_name": "nanjj/softlayer-python",
"path": "SoftLayer/CLI/subnet/create.py",
"copies": "4",
"size": "2070",
"license": "mit",
"hash": -3933584028160715300,
"line_mean": 29.8955223881,
"line_max": 75,
"alpha_frac": 0.5898550725,
"autogenerated": false,
"ratio": 3.7981651376146788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 67
} |
"""Add a new subnet to your account."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
import click
@click.command(short_help="Add a new subnet to your account")
@click.argument('network', type=click.Choice(['public', 'private']))
@click.argument('quantity', type=click.INT)
@click.argument('vlan-id')
@click.option('--v6', '--ipv6', is_flag=True, help="Order IPv6 Addresses")
@click.option('--test',
              is_flag=True,
              help="Do not order the subnet; just get a quote")
@environment.pass_env
def cli(env, network, quantity, vlan_id, ipv6, test):
    """Add a new subnet to your account. Valid quantities vary by type.

    \b
    Type - Valid Quantities (IPv4)
    public - 4, 8, 16, 32
    private - 4, 8, 16, 32, 64

    \b
    Type - Valid Quantities (IPv6)
    public - 64
    """
    mgr = SoftLayer.NetworkManager(env.client)
    # ordering costs money: confirm unless quoting or confirmations skipped
    if not (test or env.skip_confirmations):
        if not formatting.confirm("This action will incur charges on your "
                                  "account. Continue?"):
            raise exceptions.CLIAbort('Cancelling order.')

    version = 4
    if ipv6:
        version = 6

    result = mgr.add_subnet(network,
                            quantity=quantity,
                            vlan_id=vlan_id,
                            version=version,
                            test_order=test)
    if not result:
        raise exceptions.CLIAbort(
            'Unable to place order: No valid price IDs found.')

    table = formatting.Table(['Item', 'cost'])
    table.align['Item'] = 'r'
    table.align['cost'] = 'r'

    total = 0.0
    if 'prices' in result:
        for price in result['prices']:
            # BUGFIX: the line-item rate previously used
            # price['recurringFee'] directly and raised KeyError on entries
            # without that key, while the total silently defaulted to 0.0;
            # use the same .get() default for both.
            fee = float(price.get('recurringFee', 0.0))
            total += fee
            table.add_row([price['item']['description'], "%.2f" % fee])

    table.add_row(['Total monthly cost', "%.2f" % total])
    env.fout(table)
| {
"repo_name": "briancline/softlayer-python",
"path": "SoftLayer/CLI/subnet/create.py",
"copies": "2",
"size": "2071",
"license": "mit",
"hash": 8362615408827581000,
"line_mean": 29.4558823529,
"line_max": 75,
"alpha_frac": 0.5895702559,
"autogenerated": false,
"ratio": 3.8,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
# Add a new view at /today that shows the current user's lunch order for today.
# It should render the "today.html" template. Don't worry about editing the template yet.
# Since this is about the current user, login should be required.
from flask import Flask, g, render_template, flash, redirect, url_for
from flask.ext.bcrypt import check_password_hash
from flask.ext.login import LoginManager, login_user, current_user, login_required, logout_user
import forms
import models
app = Flask(__name__)
# NOTE(review): secret key hard-coded in source -- acceptable for a tutorial
# app only; load it from config/environment in anything real.
app.secret_key = 'this is our super secret key. do not share it with anyone!'
login_manager = LoginManager()
login_manager.init_app(app)
# unauthenticated visitors to @login_required views are redirected to 'login'
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
    """Look up a User by primary key; return None when no such user exists."""
    query = models.User.select().where(models.User.id == int(userid))
    try:
        return query.get()
    except models.DoesNotExist:
        return None
@app.before_request
def before_request():
    """Attach per-request globals: the current user and an open DB connection."""
    g.user = current_user
    g.db = models.DATABASE
    g.db.connect()
@app.after_request
def after_request(response):
    """Close the per-request DB connection; the response passes through untouched."""
    g.db.close()
    return response
@app.route('/register', methods=('GET', 'POST'))
def register():
    """Create an account from the sign-up form, then re-render the form."""
    form = forms.SignUpInForm()
    if form.validate_on_submit():
        # create the user first, then announce success
        models.User.new(email=form.email.data, password=form.password.data)
        flash("Thanks for registering!")
    return render_template('register.html', form=form)
@app.route('/login', methods=('GET', 'POST'))
def login():
    """Log an existing user in via the shared sign-up/sign-in form."""
    form = forms.SignUpInForm()
    if form.validate_on_submit():
        try:
            user = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
            # deliberately the same message as a bad password, so the form
            # does not reveal which part was wrong
            flash("No user with that email/password combo")
        else:
            if check_password_hash(user.password, form.password.data):
                login_user(user)
                flash("You're now logged in!")
            else:
                flash("No user with that email/password combo")
    return render_template('register.html', form=form)
@app.route('/secret')
@login_required
def secret():
    """A page that only authenticated users may see."""
    return "I should only be visible to logged-in users"
@app.route('/logout')
def logout():
    """End the session and send the visitor back to the login page."""
    logout_user()
    target = url_for('login')
    return redirect(target)
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/order', methods=('GET', 'POST'))
def order_lunch():
    """Accept a lunch order via the order form.

    NOTE(review): unlike /today, this view has no @login_required guard --
    confirm whether anonymous submissions are intended.
    """
    form = forms.LunchOrderForm()
    if form.validate_on_submit():
        owner = g.user._get_current_object()
        models.LunchOrder.create(
            user=owner,
            date=form.date.data,
            order=form.order.data.strip()
        )
    return render_template('lunch.html', form=form)
@app.route('/today')
@login_required
def today():
    # Show a lunch order for the current user (login required).
    # NOTE(review): the header comment of this file says the view should show
    # *today's* order, but the query filters only by user, not by date --
    # confirm whether a LunchOrder.date == today clause is needed.  Also note
    # this raises DoesNotExist (HTTP 500) when the user has no orders.
    user = models.LunchOrder.get(models.LunchOrder.user == g.user._get_current_object())
    return render_template('today.html',user=user) | {
"repo_name": "CaseyNord/Treehouse",
"path": "Build a Social Network with Flask/lunch_order_view/lunch.py",
"copies": "1",
"size": "2990",
"license": "mit",
"hash": 2296330154014343700,
"line_mean": 27.0485436893,
"line_max": 95,
"alpha_frac": 0.6204013378,
"autogenerated": false,
"ratio": 3.705080545229244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4825481883029244,
"avg_score": null,
"num_lines": null
} |
# Add a new view to lunch.py. The function name should be register and the route should be "/register".
# It should accept both GET and POST methods. For now, have it return the string "register".
# Your register() view needs to create an instance of the SignUpForm from forms.
# It should also render and return the register.html template. You'll need to import render_template.
# In the template's context, name the SignUpForm instance as form.
#Finally, update the register() view so that the form is validated on submission.
# If it's valid, use the models.User.new() method to create a new User from the form data and flash
# the message "Thanks for registering!". You'll need to import flash().
from flask import Flask, g, render_template, flash
from flask.ext.login import LoginManager
import forms
import models
app = Flask(__name__)
# NOTE(review): secret key hard-coded in source -- acceptable for a tutorial
# app only; load it from config/environment in anything real.
app.secret_key = 'this is our super secret key. do not share it with anyone!'
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(userid):
    """Look up a User by primary key; return None when no such user exists."""
    query = models.User.select().where(models.User.id == int(userid))
    try:
        return query.get()
    except models.DoesNotExist:
        return None
@app.before_request
def before_request():
    """Open the shared database connection before handling each request."""
    g.db = models.DATABASE
    g.db.connect()
@app.after_request
def after_request(response):
    """Close the per-request DB connection; the response passes through untouched."""
    g.db.close()
    return response
@app.route('/register', methods=('GET', 'POST'))
def register():
    # Render the sign-up form; on a valid POST, create the user account.
    form = forms.SignUpForm()
    if form.validate_on_submit():
        # NOTE(review): the success message is flashed before User.new() is
        # attempted -- if creation raises, the visitor still sees "Thanks for
        # registering!".  Consider creating first, then flashing (as the
        # sibling lunch.py does).
        flash("Thanks for registering!")
        models.User.new(email=form.email.data, password=form.password.data)
    return render_template('register.html', form=form) | {
"repo_name": "CaseyNord/Treehouse",
"path": "Build a Social Network with Flask/form_view/lunch.py",
"copies": "1",
"size": "1737",
"license": "mit",
"hash": -7126782064044753000,
"line_mean": 31.4423076923,
"line_max": 104,
"alpha_frac": 0.6879677605,
"autogenerated": false,
"ratio": 3.71948608137045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881453628287447,
"avg_score": 0.005200042716600534,
"num_lines": 52
} |
"""Add an index to audit events data blob to make it easier to search by supplier id. The data needed to be converted
to JSONB from JSON for the index to work. It's a partial index - so only indexing rows that have the 'supplierId' field
in their data blob. It's also an expression index, so only indexing the 'supplierId' field rather than all of the data.
Revision ID: 1260
Revises: 1250
Create Date: 2018-08-13 11:54:17.725685
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
# revision identifiers, used by Alembic.
revision = '1260'
down_revision = '1250'
def upgrade():
    # Convert the data blob from JSON to JSONB (the docstring above notes
    # this is required for the index); the cast is routed through text.
    op.alter_column('audit_events', 'data', type_=JSONB, postgresql_using='data::text::jsonb')
    # Partial expression index over the supplier id stored in the blob,
    # accepting either the camelCase or snake_case key spelling; rows
    # without either key are excluded from the index.
    op.create_index(
        'idx_audit_events_data_supplier_id',
        'audit_events',
        [sa.text("COALESCE((data ->> 'supplierId'), (data ->> 'supplier_id'))")],
        unique=False,
        postgresql_where=sa.text("COALESCE(data ->> 'supplierId', data ->> 'supplier_id') IS NOT NULL"),
    )
def downgrade():
    # NOTE(review): only the index is dropped here; the data column stays
    # JSONB rather than being converted back to JSON -- confirm this
    # asymmetry is intentional.
    op.drop_index('idx_audit_events_data_supplier_id', table_name='audit_events')
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/1260_add_index_on_audit_event_data.py",
"copies": "1",
"size": "1134",
"license": "mit",
"hash": -566201992790177300,
"line_mean": 34.4375,
"line_max": 119,
"alpha_frac": 0.69664903,
"autogenerated": false,
"ratio": 3.4468085106382977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4643457540638298,
"avg_score": null,
"num_lines": null
} |
"""Add annotation parameters
Revision ID: 454bdc604dcd
Revises: 1e6d5f30aa47
Create Date: 2013-03-14 15:37:16.410175
"""
# revision identifiers, used by Alembic.
revision = '454bdc604dcd'
down_revision = '1e6d5f30aa47'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # annotation <-> sample link table (presumably samples excluded from an
    # annotation -- confirm against the application models)
    op.create_table('exclude',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('annotation_id', sa.Integer(), nullable=True),
    sa.Column('sample_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['annotation_id'], ['annotation.id'], ),
    sa.ForeignKeyConstraint(['sample_id'], ['sample.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # per-sample labelled frequency entries attached to an annotation
    op.create_table('local_frequency',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('annotation_id', sa.Integer(), nullable=True),
    sa.Column('sample_id', sa.Integer(), nullable=True),
    sa.Column('label', sa.String(length=200), nullable=True),
    sa.ForeignKeyConstraint(['annotation_id'], ['annotation.id'], ),
    sa.ForeignKeyConstraint(['sample_id'], ['sample.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # boolean toggle on annotation rows
    op.add_column(u'annotation', sa.Column('global_frequencies', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(); any data in the dropped column/tables is lost.
    op.drop_column(u'annotation', 'global_frequencies')
    op.drop_table('local_frequency')
    op.drop_table('exclude')
    ### end Alembic commands ###
| {
"repo_name": "varda/varda",
"path": "alembic/versions/454bdc604dcd_add_annotation_param.py",
"copies": "2",
"size": "1527",
"license": "mit",
"hash": -8543372132387256000,
"line_mean": 32.9333333333,
"line_max": 94,
"alpha_frac": 0.6725605763,
"autogenerated": false,
"ratio": 3.4863013698630136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00958538997583402,
"num_lines": 45
} |
"""Add annotation queries
Revision ID: 1d808cef0787
Revises: 1c15bafd311a
Create Date: 2015-08-17 14:31:52.751784
"""
# revision identifiers, used by Alembic.
revision = '1d808cef0787'
down_revision = '1c15bafd311a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # saved annotation queries: an expression plus activity/coverage flags
    op.create_table('query',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=200), nullable=True),
    sa.Column('expression', sa.Text(), nullable=True),
    sa.Column('require_active', sa.Boolean(), nullable=True),
    sa.Column('require_coverage_profile', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    mysql_charset='utf8',
    mysql_engine='InnoDB'
    )
    # many-to-many link between annotations and queries; rows vanish with
    # either parent via ON DELETE CASCADE
    op.create_table('annotation_query',
    sa.Column('annotation_id', sa.Integer(), nullable=False),
    sa.Column('query_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['annotation_id'], ['annotation.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['query_id'], ['query.id'], ondelete='CASCADE')
    )
    # superseded structures; downgrade() recreates them (without data)
    op.drop_table('sample_frequency')
    op.drop_column('annotation', 'global_frequency')
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # restore the pre-query schema: the global_frequency flag and the
    # annotation<->sample frequency link table (schema only, not the data)
    op.add_column('annotation', sa.Column('global_frequency', sa.BOOLEAN(), nullable=True))
    op.create_table('sample_frequency',
    sa.Column('annotation_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('sample_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.ForeignKeyConstraint(['annotation_id'], [u'annotation.id'], name=u'sample_frequency_annotation_id_fkey', ondelete=u'CASCADE'),
    sa.ForeignKeyConstraint(['sample_id'], [u'sample.id'], name=u'sample_frequency_sample_id_fkey', ondelete=u'CASCADE')
    )
    # NOTE(review): saved queries are lost on downgrade
    op.drop_table('query')
    op.drop_table('annotation_query')
    ### end Alembic commands ###
| {
"repo_name": "varda/varda",
"path": "alembic/versions/1d808cef0787_add_annotation_queries.py",
"copies": "1",
"size": "1959",
"license": "mit",
"hash": 1152652704226127900,
"line_mean": 37.4117647059,
"line_max": 133,
"alpha_frac": 0.6860643185,
"autogenerated": false,
"ratio": 3.536101083032491,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4722165401532491,
"avg_score": null,
"num_lines": null
} |
"""add another enum type
Revision ID: 804a59c2c76c
Revises: 4c8331188a48
Create Date: 2020-12-06 08:47:45.967439
"""
# revision identifiers, used by Alembic.
revision = '804a59c2c76c'
down_revision = '4c8331188a48'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
    """Add the 'manually_deferred' value to the dlstate_enum type."""
    # ### commands auto generated by Alembic - please adjust! ###
    # End Alembic's implicit transaction first: PostgreSQL (before v12)
    # refuses to run ALTER TYPE ... ADD VALUE inside a transaction block.
    op.execute("COMMIT")
    op.execute("ALTER TYPE dlstate_enum ADD VALUE 'manually_deferred';")
    # ### end Alembic commands ###
def downgrade():
    """One-way migration: the enum value added by upgrade() cannot be
    removed again, so downgrading is refused outright."""
    raise RuntimeError("Cannot downgrade!")
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/2020-12-06_804a59c2c76c_add_another_enum_type.py",
"copies": "1",
"size": "1219",
"license": "bsd-3-clause",
"hash": 1775384547388071000,
"line_mean": 25.5,
"line_max": 72,
"alpha_frac": 0.7604593929,
"autogenerated": false,
"ratio": 3.6279761904761907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9829457553596261,
"avg_score": 0.011795605955985944,
"num_lines": 46
} |
""" Add an owner to a resource or resources
Usage: add_owner {username} {resource list}
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import get_resource_by_shortkey
from hs_access_control.models.privilege import UserResourcePrivilege, PrivilegeCodes
from django_irods.icommands import SessionException
from django.db import transaction
def set_quota_holder(resource, user):
    """Make ``user`` the quota holder of ``resource``, logging (rather than
    raising) the known failure modes so batch processing can continue.

    :param resource: resource whose quota holder is being set
    :param user: user passed both as requesting user and as new holder
    """
    try:
        resource.set_quota_holder(user, user)
    except SessionException as ex:
        # some resources copied from www for testing do not exist in the iRODS backend,
        # hence need to skip these test artifacts
        print(resource.short_id + ' raised SessionException when setting quota holder: ' +
              ex.stderr)
    except AttributeError as ex:
        # when federation is not set up correctly, istorage does not have a session
        # attribute, hence raise AttributeError - ignore for testing
        print((resource.short_id + ' raised AttributeError when setting quota holder: ' +
               str(ex)))
    except ValueError as ex:
        # NOTE(review): the original comment here was copy-pasted from the
        # AttributeError handler; ValueError is likewise logged and skipped
        # as part of this function's best-effort contract.
        print((resource.short_id + ' raised ValueError when setting quota holder: ' +
               str(ex)))
class Command(BaseCommand):
    """Grant OWNER privilege on resources, either for every resource owned
    by a prior user (--owned_by) or for an explicit list of resource ids."""
    help = "add owner to resource"
    def add_arguments(self, parser):
        """Declare CLI arguments: positional new_owner, optional --owned_by,
        the --set_quota_holder flag, and zero or more resource ids."""
        parser.add_argument('new_owner', type=str)
        parser.add_argument(
            '--owned_by',
            dest='owned_by',
            help='prior owner of the resources'
        )
        parser.add_argument(
            '--set_quota_holder',
            action='store_true', # True for presence, False for absence
            dest='set_quota_holder', # value is options['set_quota_holder']
            help='set quota holder as new owner')
        # a list of resource id's: none does nothing.
        parser.add_argument('resource_ids', nargs='*', type=str)
    def handle(self, *args, **options):
        """Share each selected resource with options['new_owner'] as OWNER
        (grantor is the 'admin' account), optionally also transferring the
        quota-holder role; each resource is processed in its own transaction."""
        user = User.objects.get(username=options['new_owner'])
        admin = User.objects.get(username='admin')
        if options['owned_by'] is not None:
            prior = User.objects.get(username=options['owned_by'])
            # Every resource currently owned by the prior user gains the new owner.
            for res in BaseResource.objects.filter(r2urp__user=prior,
                                                   r2urp__privilege=PrivilegeCodes.OWNER):
                with transaction.atomic():
                    resource = res.get_content_model()
                    UserResourcePrivilege.share(user=user,
                                                resource=resource,
                                                privilege=PrivilegeCodes.OWNER,
                                                grantor=admin)
                    print("added owner {} to {}".format(options['new_owner'], resource.short_id))
                    if options['set_quota_holder']:
                        set_quota_holder(resource, user)
                        print("set quota holder to {} for {}".format(options['new_owner'],
                                                                     resource.short_id))
        if len(options['resource_ids']) > 0: # an array of resource short_id to check.
            for rid in options['resource_ids']:
                resource = get_resource_by_shortkey(rid, or_404=False)
                with transaction.atomic():
                    UserResourcePrivilege.share(user=user,
                                                resource=resource,
                                                privilege=PrivilegeCodes.OWNER,
                                                grantor=admin)
                    print("added owner {} to {}".format(options['new_owner'], rid))
                    if options['set_quota_holder']:
                        set_quota_holder(resource, user)
                        print("set quota holder to {} for {}".format(options['new_owner'],
                                                                     resource.short_id))
| {
"repo_name": "hydroshare/hydroshare",
"path": "hs_core/management/commands/add_owner.py",
"copies": "1",
"size": "4119",
"license": "bsd-3-clause",
"hash": -279484237167587230,
"line_mean": 44.7666666667,
"line_max": 97,
"alpha_frac": 0.56931294,
"autogenerated": false,
"ratio": 4.729047072330655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5798360012330654,
"avg_score": null,
"num_lines": null
} |
"""add ant_status table
Revision ID: d208118c58d0
Revises: fb372bb87c37
Create Date: 2019-03-23 00:10:30.704936+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'd208118c58d0'
down_revision = 'fb372bb87c37'
branch_labels = None
depends_on = None
def upgrade():
    """Create the antenna_status table, keyed by (time, antenna_number,
    antenna_feed_pol); all telemetry columns are nullable."""
    op.create_table('antenna_status',
                    sa.Column('time', sa.BigInteger(), nullable=False),
                    sa.Column('antenna_number', sa.Integer(), nullable=False),
                    sa.Column('antenna_feed_pol', sa.String(), nullable=False),
                    sa.Column('snap_hostname', sa.String(), nullable=True),
                    sa.Column('snap_channel_number', sa.Integer(), nullable=True),
                    sa.Column('adc_mean', sa.Float(), nullable=True),
                    sa.Column('adc_rms', sa.Float(), nullable=True),
                    sa.Column('adc_power', sa.Float(), nullable=True),
                    sa.Column('pam_atten', sa.Integer(), nullable=True),
                    sa.Column('pam_power', sa.Float(), nullable=True),
                    sa.Column('eq_coeffs', sa.String(), nullable=True),
                    sa.PrimaryKeyConstraint('time', 'antenna_number', 'antenna_feed_pol')
                    )
def downgrade():
    """Drop the antenna_status table created in upgrade()."""
    op.drop_table('antenna_status')
| {
"repo_name": "HERA-Team/Monitor_and_Control",
"path": "alembic/versions/d208118c58d0_add_ant_status_table.py",
"copies": "2",
"size": "1390",
"license": "bsd-2-clause",
"hash": -6908320864257258000,
"line_mean": 36.5675675676,
"line_max": 89,
"alpha_frac": 0.5884892086,
"autogenerated": false,
"ratio": 3.736559139784946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006259271319512283,
"num_lines": 37
} |
"""Add an UnservedLoad component, which ensures the model is always feasible.
This is often useful when the model is constrained to the edge of infeasibility,
(e.g., when evaluating a pre-defined, just-feasible construction plan) to avoid
spurious reports of infeasibility."""
from pyomo.environ import *
def define_arguments(argparser):
    """Register the --unserved-load-penalty option on *argparser*.

    The value (a float, $/MWh) overrides any penalty read from the
    parameter files; it defaults to None, meaning "not specified".
    """
    argparser.add_argument(
        "--unserved-load-penalty",
        type=float,
        default=None,
        help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).",
    )
def define_components(m):
    """Attach unserved-load components to model *m*: a penalty-price Param,
    an UnservedLoad Var indexed by (load zone, timepoint), and an Expression
    charging the penalty, wired into the energy balance and the objective."""
    # create an unserved load variable with a high penalty cost,
    # to avoid infeasibilities when
    # evaluating scenarios that are on the edge of infeasibility
    # cost per MWh for unserved load (high)
    if m.options.unserved_load_penalty is not None:
        # always use penalty factor supplied on the command line, if any
        m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty)
    else:
        # no penalty on the command line, use whatever is in the parameter files, or 10000
        m.unserved_load_penalty_per_mwh = Param(default=10000)
    # amount of unserved load during each timepoint
    m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
    # total cost for unserved load (summed across zones per timepoint)
    m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
        sum(m.UnservedLoad[lz, tp] * m.unserved_load_penalty_per_mwh for lz in m.LOAD_ZONES)
    )
    # add the unserved load to the model's energy balance
    m.LZ_Energy_Components_Produce.append('UnservedLoad')
    # add the unserved load penalty to the model's objective function
    m.cost_components_tp.append('UnservedLoadPenalty')
| {
"repo_name": "OCM-Lab-PUC/switch-chile",
"path": "switch_mod/hawaii/unserved_load.py",
"copies": "2",
"size": "1758",
"license": "apache-2.0",
"hash": 3410838443089577500,
"line_mean": 53.9375,
"line_max": 140,
"alpha_frac": 0.7337883959,
"autogenerated": false,
"ratio": 3.5877551020408163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5321543497940816,
"avg_score": null,
"num_lines": null
} |
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions += ['sphinx.ext.inheritance_diagram', 'sphinxcontrib.blockdiag',
'sphinxcontrib.actdiag', 'sphinxcontrib.seqdiag',
'sphinxcontrib.nwdiag']
# The encoding of source files.
source_encoding = 'utf-8-sig'
#source_encoding = 'shift_jis'
# The language for content autogenerated by Sphinx.
language = 'en'
#language = 'ja'
# The theme to use for HTML and HTML Help pages.
#html_theme = 'default'
#html_theme = 'sphinxdoc'
#html_theme = 'scrolls'
#html_theme = 'agogo'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'haiku'
# If this is not the empty string, a 'Last updated on:' timestamp
# is inserted at every page bottom, using the given strftime() format.
# Default is '%b %d, %Y' (or a locale-dependent equivalent).
html_last_updated_fmt = '%Y/%m/%d'
# Enable antialiasing in all the *diag extensions.
blockdiag_antialias = True
# Fixed typo: was "acttdiag_antialias", a name sphinxcontrib-actdiag never
# reads, so actdiag output was silently rendered without antialiasing.
actdiag_antialias = True
seqdiag_antialias = True
nwdiag_antialias = True
extensions += ['rst2pdf.pdfbuilder']
pdf_documents = [
(master_doc, project, project, copyright),
]
pdf_stylesheets = ['sphinx', 'kerning', 'a4']
pdf_language = "en_US"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
#pdf_break_level = 0
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
pdf_inline_footnotes = False
# verbosity level. 0 1 or 2
pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = True
# If false, no modindex is generated.
pdf_use_modindex = True
# If false, no coverpage is generated.
pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage'
# Show Table Of Contents at the beginning?
pdf_use_toc = True
# How many levels deep should the table of contents be?
pdf_toc_depth = 3
# Add section number to section references
pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'
| {
"repo_name": "Axam/nsx-web",
"path": "docs/common_conf.py",
"copies": "9",
"size": "2815",
"license": "apache-2.0",
"hash": -535691035188243200,
"line_mean": 26.8712871287,
"line_max": 75,
"alpha_frac": 0.7214920071,
"autogenerated": false,
"ratio": 3.2962529274004684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006441197899116703,
"num_lines": 101
} |
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions += ['sphinx.ext.inheritance_diagram', 'sphinxcontrib.blockdiag', 'sphinxcontrib.actdiag', 'sphinxcontrib.seqdiag', 'sphinxcontrib.nwdiag']
# The encoding of source files.
source_encoding = 'utf-8-sig'
#source_encoding = 'shift_jis'
# The language for content autogenerated by Sphinx.
#language = 'en'
#language = 'ja'
# The theme to use for HTML and HTML Help pages.
#html_theme = 'default'
#html_theme = 'sphinxdoc'
#html_theme = 'scrolls'
#html_theme = 'agogo'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'haiku'
# If this is not the empty string, a 'Last updated on:' timestamp
# is inserted at every page bottom, using the given strftime() format.
# Default is '%b %d, %Y' (or a locale-dependent equivalent).
html_last_updated_fmt = '%Y/%m/%d'
# Enable antialiasing in all the *diag extensions.
blockdiag_antialias = True
# Fixed typo: was "acttdiag_antialias", a name sphinxcontrib-actdiag never
# reads, so actdiag output was silently rendered without antialiasing.
actdiag_antialias = True
seqdiag_antialias = True
nwdiag_antialias = True
extensions += ['rst2pdf.pdfbuilder']
pdf_documents = [
(master_doc, project, project, copyright),
]
pdf_stylesheets = ['b4', 'kerning']
#pdf_language = "en"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
#pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
#pdf_break_level = 0
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = True
# If false, no modindex is generated.
pdf_use_modindex = True
# If false, no coverpage is generated.
pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage'
# Show Table Of Contents at the beginning?
pdf_use_toc = True
# How many levels deep should the table of contents be?
pdf_toc_depth = 2
# Add section number to section references
pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'
pdf_font_path = ['C:\\Windows\\Fonts\\', '/usr/share/fonts'] | {
"repo_name": "Axam/nsx-library",
"path": "docs/common_conf.py",
"copies": "6",
"size": "2943",
"license": "apache-2.0",
"hash": 6346179654456003000,
"line_mean": 27.45,
"line_max": 149,
"alpha_frac": 0.6996262317,
"autogenerated": false,
"ratio": 3.174757281553398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02551258823079505,
"num_lines": 100
} |
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['openstackdocstheme', 'reno.sphinxext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'diskimage-builder'
copyright = u'2016, Diskimage-builder contributors'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/diskimage-builder'
openstackdocs_bug_project = 'diskimage-builder'
openstackdocs_auto_name = False
openstackdocs_bug_tag = ''
# Release notes do not need a version in the title, they span
# multiple versions.
# The short X.Y version.
release = ''
# The full version, including alpha/beta/rc tags.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'diskimage-builderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'diskimage-builder-releasenotes.tex',
u'diskimage-builder Documentation',
u'OpenStack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'diskimage-builder', u'diskimage-builder Release Notes',
[u'OpenStack'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'diskimage-builder', u'diskimage-builder Release Notes',
u'OpenStack', 'diskimage-builder', 'Build disk images for CI and more.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| {
"repo_name": "switch-ch/diskimage-builder",
"path": "releasenotes/source/conf.py",
"copies": "2",
"size": "7596",
"license": "apache-2.0",
"hash": -2014474414381001200,
"line_mean": 30.7824267782,
"line_max": 79,
"alpha_frac": 0.7093206951,
"autogenerated": false,
"ratio": 3.7290132547864507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00852031664737511,
"num_lines": 239
} |
"""Add `application_close_date` for Framework
Revision ID: 840
Revises: 830
Create Date: 2017-02-06 11:09:26.852142
"""
from alembic import op
from datetime import datetime
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
frameworks_table = sa.table(
'frameworks',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True),
sa.Column('allow_declaration_reuse', sa.Boolean),
sa.Column('application_close_date', sa.DateTime)
)
def upgrade():
    """Add application_close_date and allow_declaration_reuse to the
    frameworks table and backfill both for the three frameworks whose
    application windows have already closed."""
    op.add_column('frameworks', sa.Column('application_close_date', sa.DateTime(), nullable=True))
    op.add_column(
        'frameworks',
        sa.Column('allow_declaration_reuse', sa.Boolean(), nullable=False, server_default='false')
    )
    fields = ('slug', 'application_close_date', 'allow_declaration_reuse')
    new_values = (
        ('digital-outcomes-and-specialists', datetime(2016, 1, 1, 15), True),
        ('digital-outcomes-and-specialists-2', datetime(2017, 1, 16, 17), True),
        ('g-cloud-8', datetime(2016, 6, 1, 17), True),
    )
    # Zip each tuple with the field names to get per-framework update dicts.
    new_values = [dict(zip(fields, i)) for i in new_values]
    for i in new_values:
        # pop('slug') selects the row; the remaining keys feed values().
        op.execute(
            frameworks_table.update().where(frameworks_table.c.slug==i.pop('slug')).values(**i)
        )
def downgrade():
    """Drop the two framework columns added by upgrade()."""
    op.drop_column('frameworks', 'allow_declaration_reuse')
    op.drop_column('frameworks', 'application_close_date')
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/840_add_application_close_date_for_framework.py",
"copies": "1",
"size": "1490",
"license": "mit",
"hash": 4449509307320801300,
"line_mean": 30.0416666667,
"line_max": 98,
"alpha_frac": 0.6590604027,
"autogenerated": false,
"ratio": 3.378684807256236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4537745209956236,
"avg_score": null,
"num_lines": null
} |
"""add application plans
Revision ID: 1edb643716c3
Revises: e52a51870d
Create Date: 2013-04-14 14:40:45.693170
"""
# revision identifiers, used by Alembic.
revision = '1edb643716c3'
down_revision = 'e52a51870d'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
    """Introduce subscription plans: create the plan table (seeded with a
    free 'Beta Test' plan), add plan-derived columns to the application
    table, and backfill every existing application onto the beta plan."""
    op.create_table(
        'plan',
        sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
        sa.Column('code_name', sa.String(200), unique=True),
        sa.Column('name', sa.String(200)),
        sa.Column('price', sa.Numeric(5, 2)),
        sa.Column('available', sa.Boolean),
        sa.Column('custom_domain', sa.Boolean),
        sa.Column('cms', sa.Boolean),
        sa.Column('size_total', sa.Integer, nullable=True),
        sa.Column('size_static', sa.Integer, nullable=True),
        sa.Column('transfer', sa.Integer, nullable=True),
        sa.Column('count_template', sa.Integer, nullable=True),
        sa.Column('count_static', sa.Integer, nullable=True),
    )
    # Lightweight table construct for the data (not schema) operations below.
    plan_table = table(
        'plan',
        column('code_name', sa.String(200)),
        column('name', sa.String(200)),
        column('price', sa.Numeric(5, 2)),
        column('available', sa.Boolean),
        column('custom_domain', sa.Boolean),
        column('cms', sa.Boolean),
        column('size_total', sa.Integer),
        column('size_static', sa.Integer),
        column('transfer', sa.Integer),
        column('count_template', sa.Integer),
        column('count_static', sa.Integer)
    )
    # Seed row: free plan with all features enabled and zero quotas.
    # NOTE(review): 0 presumably means "unlimited" here — confirm in app code.
    beta_plan = { 'code_name': 'beta test',
                  'name': 'Beta Test',
                  'price': '0.00',
                  'available': True,
                  'custom_domain': True,
                  'cms': True,
                  'size_total': 0,
                  'size_static': 0,
                  'transfer': 0,
                  'count_template': 0,
                  'count_static': 0}
    op.create_index('idx_plan_id_available', 'plan', ['id', 'available'])
    op.bulk_insert(plan_table, [beta_plan])
    # Denormalized copies of the plan attributes on each application row.
    op.add_column('application', sa.Column('plan_name', sa.String(200)))
    op.add_column('application', sa.Column('price', sa.Numeric(5, 2)))
    op.add_column('application', sa.Column('custom_domain', sa.Boolean))
    op.add_column('application', sa.Column('cms', sa.Boolean))
    op.add_column('application', sa.Column('size_total', sa.Integer))
    op.add_column('application', sa.Column('size_static', sa.Integer))
    op.add_column('application', sa.Column('transfer', sa.Integer))
    op.add_column('application', sa.Column('count_template', sa.Integer))
    op.add_column('application', sa.Column('count_static', sa.Integer))
    op.create_index(
        'idx_application_name_custom_domain',
        'application',
        ['name', 'custom_domain'])
    app_table = table(
        'application',
        column('plan_name', sa.String(200)),
        column('custom_domain', sa.Boolean),
        column('price', sa.Numeric(5, 2)),
        column('cms', sa.Boolean),
        column('size_total', sa.Integer),
        column('size_static', sa.Integer),
        column('transfer', sa.Integer),
        column('count_template', sa.Integer),
        column('count_static', sa.Integer),
    )
    conn = op.get_bind()
    # Move all pre-existing applications onto the Beta Test plan.
    updates = sa.sql.expression.update(
        app_table,
        values={'plan_name': 'Beta Test',
                'custom_domain': True,
                'price': '0.00',
                'cms': True,
                'size_total': 0,
                'size_static': 0,
                'transfer': 0,
                'count_template': 0,
                'count_static': 0}
    )
    conn.execute(updates)
def downgrade():
    """Remove the plan table and every plan-related column/index that
    upgrade() added to the application table."""
    op.drop_table('plan')
    op.drop_index('idx_application_name_custom_domain')
    op.drop_column('application', 'plan_name')
    op.drop_column('application', 'price')
    op.drop_column('application', 'custom_domain')
    op.drop_column('application', 'cms')
    op.drop_column('application', 'size_total')
    op.drop_column('application', 'size_static')
    op.drop_column('application', 'transfer')
    op.drop_column('application', 'count_template')
    op.drop_column('application', 'count_static')
| {
"repo_name": "glennyonemitsu/MarkupHiveServer",
"path": "alembic/versions/20130414144045-add_application_plan.py",
"copies": "1",
"size": "4237",
"license": "mit",
"hash": 6021741508898190000,
"line_mean": 35.525862069,
"line_max": 74,
"alpha_frac": 0.5746990795,
"autogenerated": false,
"ratio": 3.6431642304385212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9664592437914752,
"avg_score": 0.010654174404754031,
"num_lines": 116
} |
"""Add applications translations
Revision ID: df42ba6d96
Revises: 4811ae4298e3
Create Date: 2015-07-31 15:15:05.822909
"""
# revision identifiers, used by Alembic.
revision = 'df42ba6d96'
down_revision = '4811ae4298e3'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the EmbedApplicationTranslation table (per-language URL rows
    linked to EmbedApplications) plus indexes on language and url."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('EmbedApplicationTranslation',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('embed_application_id', sa.Integer(), nullable=True),
    sa.Column('url', sa.Unicode(length=255), nullable=False),
    sa.Column('language', sa.Unicode(length=10), nullable=False),
    sa.ForeignKeyConstraint(['embed_application_id'], ['EmbedApplications.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_EmbedApplicationTranslation_language', 'EmbedApplicationTranslation', ['language'], unique=False)
    op.create_index(u'ix_EmbedApplicationTranslation_url', 'EmbedApplicationTranslation', ['url'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the EmbedApplicationTranslation table and its two indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_EmbedApplicationTranslation_url', table_name='EmbedApplicationTranslation')
    op.drop_index(u'ix_EmbedApplicationTranslation_language', table_name='EmbedApplicationTranslation')
    op.drop_table('EmbedApplicationTranslation')
    ### end Alembic commands ###
| {
"repo_name": "porduna/appcomposer",
"path": "alembic/versions/df42ba6d96_add_applications_translations.py",
"copies": "3",
"size": "1409",
"license": "bsd-2-clause",
"hash": -9173297622114683000,
"line_mean": 37.0810810811,
"line_max": 122,
"alpha_frac": 0.7295954578,
"autogenerated": false,
"ratio": 3.7374005305039786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5966995988303978,
"avg_score": null,
"num_lines": null
} |
"""Add App
Revision ID: 3466afd5950f
Revises: 1d3b0df7f698
Create Date: 2013-09-17 23:12:36.391127
"""
# revision identifiers, used by Alembic.
revision = '3466afd5950f'
down_revision = '1d3b0df7f698'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the Apps table: per-user applications with timestamps and a
    (name, owner_id) uniqueness constraint."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('Apps',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('unique_id', sa.Unicode(length=50), nullable=True),
    sa.Column('name', sa.Unicode(length=50), nullable=True),
    sa.Column('owner_id', sa.Integer(), nullable=False),
    sa.Column('creation_date', sa.DateTime(), nullable=False),
    sa.Column('modification_date', sa.DateTime(), nullable=False),
    sa.Column('last_access_date', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['owner_id'], ['Users.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name','owner_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the Apps table created in upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('Apps')
    ### end Alembic commands ###
| {
"repo_name": "porduna/appcomposer",
"path": "alembic/versions/3466afd5950f_add_app.py",
"copies": "3",
"size": "1110",
"license": "bsd-2-clause",
"hash": -3337743612340993000,
"line_mean": 29,
"line_max": 66,
"alpha_frac": 0.6711711712,
"autogenerated": false,
"ratio": 3.3233532934131738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5494524464613174,
"avg_score": null,
"num_lines": null
} |
"""add app status table for enginelight
Revision ID: 10aaa4625220
Revises: 3aba96e0a6ab
Create Date: 2015-05-19 22:19:48.978538
"""
# revision identifiers, used by Alembic.
revision = '10aaa4625220'
down_revision = '3aba96e0a6ab'
from alembic import op
import sqlalchemy as sa
import datetime
def upgrade():
    """Create the ``app_status`` table and seed it with a single OK row."""
    ### commands auto generated by Alembic - please adjust! ###
    # Keep the returned Table object so we can bulk_insert into it below.
    app_status = op.create_table('app_status',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(length=255), nullable=True),
        sa.Column('last_updated', sa.DateTime(), nullable=True),
        sa.Column('message', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # default to giving the app an OK status when we start
    op.bulk_insert(
        app_status,
        [
            { 'status': 'ok', 'last_updated': datetime.datetime.now(), 'message': None }
        ]
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the ``app_status`` table (seed row goes with it)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('app_status')
    ### end Alembic commands ###
| {
"repo_name": "codeforamerica/pittsburgh-purchasing-suite",
"path": "migrations/versions/10aaa4625220_add_app_status_table_for_enginelight.py",
"copies": "3",
"size": "1087",
"license": "bsd-3-clause",
"hash": -3430117073531873300,
"line_mean": 26.175,
"line_max": 88,
"alpha_frac": 0.6522539098,
"autogenerated": false,
"ratio": 3.483974358974359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011009808888684292,
"num_lines": 40
} |
# Addapted from killableprocess.py.
#______________________________________________________________________________
#
# killableprocess - subprocesses which can be reliably killed
#
# Parts of this module are copied from the subprocess.py file contained
# in the Python distribution.
#
# Copyright (c) 2003-2004 by Peter Astrand <astrand@lysator.liu.se>
#
# Additions and modifications written by Benjamin Smedberg
# <benjamin@smedbergs.us> are Copyright (c) 2006 by the Mozilla Foundation
# <http://www.mozilla.org/>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of the
# author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
r"""killableprocess - Subprocesses which can be reliably killed
This module is a subclass of the builtin "subprocess" module. It allows
processes that launch subprocesses to be reliably killed on Windows (via the Popen.kill() method.
It also adds a timeout argument to Wait() for a limited period of time before
forcefully killing the process.
Note: On Windows, this module requires Windows 2000 or higher (no support for
Windows 95, 98, or NT 4.0). It also requires ctypes, which is bundled with
Python 2.5+ or available from http://python.net/crew/theller/ctypes/
"""
import subprocess
from subprocess import PIPE
import sys
import os
import types
# Use the stdlib CalledProcessError when available; re-implement it for
# very old interpreters so callers can catch one exception type either way.
try:
    from subprocess import CalledProcessError
except ImportError:
    # Python 2.4 doesn't implement CalledProcessError
    class CalledProcessError(Exception):
        """This exception is raised when a process run by check_call() returns
        a non-zero exit status. The exit status will be stored in the
        returncode attribute."""
        def __init__(self, returncode, cmd):
            self.returncode = returncode
            self.cmd = cmd
        def __str__(self):
            return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
mswindows = (sys.platform == "win32")

# When True we give up on the killable variant and expose the stock
# subprocess.Popen instead (see the Vista note below).
skip = False

if mswindows:
    import platform
    if platform.uname()[3] == '' or platform.uname()[3] > '6.0.6000':
        # Killable process does not work under vista when starting for
        # something else than cmd.
        skip = True
    else:
        import winprocess
else:
    import signal

if not mswindows:
    # No-op placeholder callable (POSIX only); intentionally does nothing.
    def DoNothing(*args):
        pass

if skip:
    # Fallback: plain Popen with no group/job kill support.
    Popen = subprocess.Popen
else:
    class Popen(subprocess.Popen):
        # subprocess.Popen subclass whose kill() reliably terminates the
        # child and, optionally, everything it spawned: on POSIX via a
        # dedicated process group, on Windows via a job object.

        if not mswindows:
            # Override __init__ to set a preexec_fn
            def __init__(self, *args, **kwargs):
                # Positional arg 7+ would collide with our preexec_fn wiring.
                if len(args) >= 7:
                    raise Exception("Arguments preexec_fn and after must be passed by keyword.")

                real_preexec_fn = kwargs.pop("preexec_fn", None)
                def setpgid_preexec_fn():
                    # Put the child in its own process group so kill(group=True)
                    # can signal the whole group, then chain the caller's hook
                    # (``apply`` — this is Python 2 era code).
                    os.setpgid(0, 0)
                    if real_preexec_fn:
                        apply(real_preexec_fn)

                kwargs['preexec_fn'] = setpgid_preexec_fn

                subprocess.Popen.__init__(self, *args, **kwargs)

        if mswindows:
            def _execute_child(self, args, executable, preexec_fn, close_fds,
                               cwd, env, universal_newlines, startupinfo,
                               creationflags, shell,
                               p2cread, p2cwrite,
                               c2pread, c2pwrite,
                               errread, errwrite):
                # Windows expects a single command-line string.
                if not isinstance(args, types.StringTypes):
                    args = subprocess.list2cmdline(args)

                if startupinfo is None:
                    startupinfo = winprocess.STARTUPINFO()

                if None not in (p2cread, c2pwrite, errwrite):
                    # Hand our pipe ends to the child as its standard handles.
                    startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
                    startupinfo.hStdInput = int(p2cread)
                    startupinfo.hStdOutput = int(c2pwrite)
                    startupinfo.hStdError = int(errwrite)
                if shell:
                    startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
                    startupinfo.wShowWindow = winprocess.SW_HIDE
                    comspec = os.environ.get("COMSPEC", "cmd.exe")
                    args = comspec + " /c " + args

                # We create a new job for this process, so that we can kill
                # the process and any sub-processes
                self._job = winprocess.CreateJobObject()

                # Start suspended so the process can be assigned to the job
                # before it gets a chance to spawn children outside it.
                creationflags |= winprocess.CREATE_SUSPENDED
                creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT

                hp, ht, pid, tid = winprocess.CreateProcess(
                    executable, args,
                    None, None, # No special security
                    1, # Must inherit handles!
                    creationflags,
                    winprocess.EnvironmentBlock(env),
                    cwd, startupinfo)
                self._child_created = True
                self._handle = hp
                self._thread = ht
                self.pid = pid

                # XXX: A try/except to fix UAC-related problems under
                # Windows Vista, when reparenting jobs.
                try:
                    winprocess.AssignProcessToJobObject(self._job, hp)
                except WindowsError:
                    pass

                winprocess.ResumeThread(ht)

                # Release our copies of the child's ends of the pipes.
                if p2cread is not None:
                    p2cread.Close()
                if c2pwrite is not None:
                    c2pwrite.Close()
                if errwrite is not None:
                    errwrite.Close()

        def kill(self, group=True):
            """Kill the process. If group=True, all sub-processes will also be killed."""
            if mswindows:
                # Terminate the job (whole tree) or just the process handle;
                # 127 is the exit code handed to the terminated process.
                if group:
                    winprocess.TerminateJobObject(self._job, 127)
                else:
                    winprocess.TerminateProcess(self._handle, 127)
                self.returncode = 127
            else:
                # POSIX: SIGKILL the process group (set up in __init__) or
                # the single pid; -9 mirrors subprocess's signal convention.
                if group:
                    os.killpg(self.pid, signal.SIGKILL)
                else:
                    os.kill(self.pid, signal.SIGKILL)
                self.returncode = -9
| {
"repo_name": "mastizada/kuma",
"path": "vendor/packages/ipython/IPython/frontend/process/killableprocess.py",
"copies": "7",
"size": "6860",
"license": "mpl-2.0",
"hash": 7230735833677319000,
"line_mean": 36.2826086957,
"line_max": 97,
"alpha_frac": 0.6185131195,
"autogenerated": false,
"ratio": 4.295554164057608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00824686717134944,
"num_lines": 184
} |
"""Add a quality control data entry to a clinical report.
Usage: post_qc_data.py 2029 '{"Cluster Density": "170", "Clusters PF": ".82"}'
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
#Load environment variables for request authentication parameters
# Fail fast with a clear message if either credential is missing.
if "FABRIC_API_PASSWORD" not in os.environ:
    sys.exit("FABRIC_API_PASSWORD environment variable missing")

if "FABRIC_API_LOGIN" not in os.environ:
    sys.exit("FABRIC_API_LOGIN environment variable missing")

FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
# Defaults to the production endpoint; FABRIC_API_URL overrides it.
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
# Shared HTTP Basic auth object used by every request in this script.
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def add_fields_to_cr(cr_id, qc_fields):
    """POST a quality-control data entry for one clinical report.

    Returns the decoded JSON body of the API response.
    """
    endpoint = "{}/reports/{}/qc_data".format(FABRIC_API_URL, cr_id)

    # Progress note on stdout, flushed so it appears before the request blocks.
    sys.stdout.write("Adding quality control data entry to report...")
    sys.stdout.write("\n\n")
    sys.stdout.flush()

    response = requests.post(endpoint, auth=auth, data=qc_fields)
    return response.json()
def main():
    """Add a quality control data entry to a clinical report.

    Parses a clinical report id and a JSON string of QC fields from the
    command line, posts them via the API, and pretty-prints the response.
    (The previous docstring, "Upload a specified VCF file...", was a
    copy-paste error from another script.)
    """
    parser = argparse.ArgumentParser(description='Add a quality control data entry to a clinical report')
    parser.add_argument('c', metavar='clinical_report_id', type=int)
    parser.add_argument('f', metavar='qc_fields', type=str)
    args = parser.parse_args()

    cr_id = args.c
    qc_fields = args.f

    json_response = add_fields_to_cr(cr_id, qc_fields)
    # Parenthesized single-argument print works under both Python 2 and 3.
    print(json.dumps(json_response, indent=4))
if __name__ == "__main__":
main()
| {
"repo_name": "Omicia/omicia_api_examples",
"path": "python/ClinicalReportLaunchers/post_qc_data.py",
"copies": "1",
"size": "1929",
"license": "mit",
"hash": 621543454333306000,
"line_mean": 32.8421052632,
"line_max": 105,
"alpha_frac": 0.7029548989,
"autogenerated": false,
"ratio": 3.33160621761658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.453456111651658,
"avg_score": null,
"num_lines": null
} |
# Add article and event class from
# from wsgi.iportalen_django.articles.models import Article
# and same but event or create new slim classes?
from django.db import models, transaction
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.conf import settings
import os
from django.utils.translation import ugettext_lazy as _
from utils.validators import less_than_160_characters_validator
from utils import time
from .managers import ThesisManager
from organisations.models import Organisation
class Thesis_Article(models.Model):
    """A thesis (exjobb) advertisement posted on behalf of a company.

    All verbose_name/help_text values are Swedish UI strings and must stay
    as-is; they are rendered to end users.
    """
    # Advertising company's name.
    company = models.CharField(
        verbose_name=_("företagsnamn"),
        max_length=255,
        help_text=_("Annonseringsföretagets namn"))
    # Headline of the ad.
    headline = models.CharField(
        verbose_name=_("rubrik"),
        max_length=255,
        help_text=_("Rubriken till annonsen"))
    # Short teaser shown in the news feed; validated to at most 160 chars.
    lead = models.TextField(
        verbose_name=_("ingress"),
        help_text=_("Ingressen är den text som syns i nyhetsflödet. Max 160 tecken."),
        validators=[less_than_160_characters_validator])
    # Full body text shown on the ad's own page.
    body = models.TextField(
        verbose_name=_("brödtext"),
        help_text=_("Brödtext syns när annonsen visas enskilt."))
    # City/location of the thesis work.
    location = models.CharField(
        verbose_name=_("ort"),
        max_length=64,
        help_text=_("Ort"))
    # Whether the thesis work is paid. (Field name "payed" is a historical
    # misspelling kept for schema compatibility.)
    payed = models.BooleanField(
        verbose_name=_("betalt"),
        default=False,
        help_text=_("Kryssa i om exjobbet är betalt"))
    # Publication window; defaults to "now" through "one month from now".
    visible_from = models.DateTimeField(
        verbose_name=_("publicering"),
        help_text=_("Publiceringsdatum"),
        default=time.now)
    visible_to = models.DateTimeField(
        verbose_name=_("avpublicering"),
        help_text=_("Avpubliceringsdatum"),
        default=time.now_plus_one_month)
    # Author; kept (as NULL) if the user account is deleted.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_("användare"),
        help_text=_("Användaren som skrivit texten"),
        null=True,
        on_delete=models.SET_NULL)
    # Maintained automatically in save(); not editable in forms/admin.
    created = models.DateTimeField(editable=False)
    modified = models.DateTimeField(editable=False)
    # Organisations that co-own this ad; membership also grants admin rights
    # (see can_administer below).
    organisations = models.ManyToManyField(
        Organisation,
        blank=True,
        verbose_name=_("organisationer"),
        help_text=_("Om du väljer en organisation i listan du inte tillhör kommer du att tappa åtkomsten till artikeln."
                    " Håll ner Ctrl för att markera flera."))

    objects = ThesisManager()  # Manager

    ###########################################################################
    # Meta data for model
    ###########################################################################
    class Meta:
        verbose_name = _("Annons")
        verbose_name_plural = _("Annonser")
        permissions = (('can_approve_thesis_article', 'Can approve thesis articles'),)

    ###########################################################################
    # Overridden and standard functions
    ###########################################################################
    def save(self, *args, **kwargs):
        """Override save to set created and modifed date before saving."""
        if not self.id:
            # First save: object has no primary key yet.
            self.created = timezone.now()
        self.modified = timezone.now()
        super(Thesis_Article, self).save(*args, **kwargs)

    def __str__(self):
        """Return string representation of object"""
        return self.headline

    def get_absolute_url(self):
        """Get url of object"""
        return reverse('thesis_portal:thesis_article', kwargs={'pk': self.pk})

    ###########################################################################
    # Properties reachable in template
    ###########################################################################
    def _type(self):
        """Return model name"""
        return "article"
    type = property(_type)

    @property
    def show_article_before_experation(self):
        """Returns the end date to hinder people from accesing the article through URL, unless admin"""
        # (Despite the docstring, this returns a boolean: True while the ad
        # is still within its publication window.)
        if self.visible_to > timezone.now():
            return True
        return False

    ###########################################################################
    # Member function
    ###########################################################################
    def can_administer(self, user):
        """True if *user* may edit this ad: shares an organisation with it,
        is its author, or holds the article-approval permission."""
        if not user.is_authenticated():
            return False
        article_orgs = self.organisations.all()
        user_orgs = user.get_organisations()
        intersection = set(article_orgs).intersection(user_orgs)
        # Like a venn diagram where the intersections is the organisations that both the user and the event have.
        if intersection:
            return True
        if self.user == user:
            return True
        if user.has_perm("articles.can_approve_article"):
            return True
        return False
| {
"repo_name": "I-sektionen/i-portalen",
"path": "wsgi/iportalen_django/thesis_portal/models.py",
"copies": "1",
"size": "4873",
"license": "mit",
"hash": 8703491082069065000,
"line_mean": 36.6589147287,
"line_max": 120,
"alpha_frac": 0.5584602717,
"autogenerated": false,
"ratio": 4.5359477124183005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.55944079841183,
"avg_score": null,
"num_lines": null
} |
"""Add article tables
Revision ID: b00769d6f457
Revises: 0f780af615ef
Create Date: 2017-10-15 13:16:36.512159
"""
from alembic import op # type: ignore
import sqlalchemy as sa # type: ignore
from sqlalchemy.sql import table, column # type: ignore
from typing import List, Dict, Any # noqa
from mypy_extensions import TypedDict
from pathlib import Path
from json import load
from collections import OrderedDict
# revision identifiers, used by Alembic.
revision = 'b00769d6f457'
down_revision = '0f780af615ef'
branch_labels = None
depends_on = None
# Lightweight table construct mirroring the existing ``confesses`` table so
# this migration can issue DML (bulk_insert / delete) against it without
# importing application models.
confesses = table('confesses',
    column('id', sa.Integer),
    column('command', sa.String),
    column('name', sa.String),
    column('type_id', sa.Integer,
           sa.ForeignKey('confess_types.id')),
    column('numbering_id', sa.Integer,
           sa.ForeignKey('confess_numbering_type.id')))
class ArticlesJSON(TypedDict):
    """Shape of the JSON data files shipped next to this revision."""
    title: str
    articles: List[List[str]]  # each entry is a [title, text] pair
def _get_article_records(id: str, data: ArticlesJSON) -> List[Dict[str, Any]]:
articles = [] # type: List[Dict[str, Any]]
for index in range(len(data['articles'])):
title, text = data['articles'][index]
articles.append({'confess_id': id, 'article_number': index + 1,
'title': title, 'text': text})
return articles
def upgrade():
    """Create the article tables, seed numbering types and two confessions,
    and load their article content from the JSON files next to this file."""
    op.alter_column('confess_paragraphs', 'chapter_number', nullable=False)
    confess_numbering_type = op.create_table('confess_numbering_type',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('numbering', sa.String(20),
                  unique=True, nullable=False))
    confess_articles = op.create_table('confess_articles',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('confess_id', sa.Integer,
                  sa.ForeignKey('confesses.id'), nullable=False),
        sa.Column('article_number', sa.Integer, nullable=False),
        sa.Column('title', sa.String, nullable=False),
        sa.Column('text', sa.Text, nullable=False),
        # GIN full-text index for searching article bodies (PostgreSQL).
        sa.Index('confess_articles_text_idx',
                 sa.func.to_tsvector('english', 'text'),
                 postgresql_using='gin'))
    op.bulk_insert(confess_numbering_type, [
        {'id': 1, 'numbering': 'ARABIC'},
        {'id': 2, 'numbering': 'ROMAN'}
    ])
    # Backfill existing confessions to ARABIC numbering before making the
    # new column NOT NULL.
    op.add_column('confesses', sa.Column('numbering_id', sa.Integer,
                                         sa.ForeignKey('confess_numbering_type.id')))
    op.execute('''UPDATE confesses set numbering_id = 1;''')
    op.alter_column('confesses', 'numbering_id', nullable=False)
    op.bulk_insert(confesses, [
        {'id': 6, 'command': 'bcf', 'type_id': 1, 'numbering_id': 1,
         'name': 'The Belgic Confession of Faith'},
        {'id': 7, 'command': '39a', 'type_id': 1, 'numbering_id': 2,
         'name': 'The 39 Articles'}
    ])
    # object_pairs_hook preserves the article order from the JSON files.
    with (Path(__file__).resolve().parent / f'{revision}_bcf.json').open() as f:
        bcf_data = load(f, object_pairs_hook=lambda x: OrderedDict(x))
    with (Path(__file__).resolve().parent / f'{revision}_articles.json').open() as f:
        thirty_nine_data = load(f, object_pairs_hook=lambda x: OrderedDict(x))
    op.bulk_insert(confess_articles, _get_article_records(6, bcf_data) + _get_article_records(7, thirty_nine_data))
def downgrade():
    """Reverse :func:`upgrade`: remove seeded confessions, the new column and
    both tables, and relax the chapter_number constraint again."""
    # Delete the rows seeded above (type_id 1 covers both inserts).
    op.execute(confesses.delete().where(confesses.c.type_id == 1))
    op.drop_column('confesses', 'numbering_id')
    op.drop_table('confess_articles')
    op.drop_table('confess_numbering_type')
    op.alter_column('confess_paragraphs', 'chapter_number', nullable=True)
| {
"repo_name": "bryanforbes/Erasmus",
"path": "alembic/versions/b00769d6f457_add_article_tables.py",
"copies": "1",
"size": "4064",
"license": "bsd-3-clause",
"hash": -8179636370294771000,
"line_mean": 38.8431372549,
"line_max": 115,
"alpha_frac": 0.5551181102,
"autogenerated": false,
"ratio": 3.7216117216117217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4776729831811722,
"avg_score": null,
"num_lines": null
} |
"""add_artifact_dates
Revision ID: 87cbddd5b946
Revises: 9418cac1c4ee
Create Date: 2017-11-21 08:03:24.923077
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "87cbddd5b946"
down_revision = "9418cac1c4ee"
branch_labels = ()
depends_on = None
def upgrade():
    """Add nullable started/finished/updated timestamps to ``artifact``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "artifact",
        sa.Column("date_finished", sa.TIMESTAMP(timezone=True), nullable=True),
    )
    op.add_column(
        "artifact",
        sa.Column("date_started", sa.TIMESTAMP(timezone=True), nullable=True),
    )
    op.add_column(
        "artifact",
        sa.Column("date_updated", sa.TIMESTAMP(timezone=True), nullable=True),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the three artifact timestamp columns added by :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("artifact", "date_updated")
    op.drop_column("artifact", "date_started")
    op.drop_column("artifact", "date_finished")
    # ### end Alembic commands ###
| {
"repo_name": "getsentry/zeus",
"path": "zeus/migrations/87cbddd5b946_add_artifact_dates.py",
"copies": "1",
"size": "1049",
"license": "apache-2.0",
"hash": 930996555176976800,
"line_mean": 22.3111111111,
"line_max": 79,
"alpha_frac": 0.6510962822,
"autogenerated": false,
"ratio": 3.3838709677419354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4534967249941935,
"avg_score": null,
"num_lines": null
} |
"""Add Artifact
Revision ID: 4e68c2a3d269
Revises: 586238e1375a
Create Date: 2014-02-06 10:24:04.343490
"""
# revision identifiers, used by Alembic.
revision = '4e68c2a3d269'
down_revision = '586238e1375a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``artifact`` table; artifact names are unique per job step
    and rows cascade away with their job, step or project."""
    op.create_table(
        'artifact',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('job_id', sa.GUID(), nullable=False),
        sa.Column('step_id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('name', sa.String(length=128), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.Column('data', sa.JSONEncodedDict(), nullable=True),
        sa.ForeignKeyConstraint(['job_id'], ['job.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['step_id'], ['jobstep.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('step_id', 'name', name='unq_artifact_name'),
    )
def downgrade():
    """Drop the ``artifact`` table, reversing :func:`upgrade`."""
    op.drop_table('artifact')
| {
"repo_name": "alex/changes",
"path": "migrations/versions/4e68c2a3d269_add_artifact.py",
"copies": "4",
"size": "1147",
"license": "apache-2.0",
"hash": 2160279996072518400,
"line_mean": 30.8611111111,
"line_max": 84,
"alpha_frac": 0.6434176112,
"autogenerated": false,
"ratio": 3.3636363636363638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6007053974836364,
"avg_score": null,
"num_lines": null
} |
"""Add artist tags
This relation links tags to an artist, allowing them to tag
themselves with types of content that they prefer to create
or will not create. This is done in the interest of providing
a more meaningful commission search by increasing the search
rank when an artist's strengths match the content requested.
Revision ID: 40c00abab5f9
Revises: eff79a07a88d
Create Date: 2016-09-23 01:56:20.093477
"""
# revision identifiers, used by Alembic.
revision = '40c00abab5f9'
down_revision = 'eff79a07a88d'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
def upgrade():
    """Create the artist preferred-tag and opt-out-tag association tables
    (tag <-> user), their lookup indexes, and ensure the FUZZYSTRMATCH
    PostgreSQL extension is installed."""
    op.create_table('artist_preferred_tags',
        sa.Column('tagid', sa.Integer(), nullable=False),
        sa.Column('targetid', sa.Integer(), nullable=False),
        sa.Column('settings', sa.String(), server_default='', nullable=False),
        sa.ForeignKeyConstraint(['tagid'], ['searchtag.tagid'], onupdate='CASCADE', ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['targetid'], ['login.userid'], onupdate='CASCADE', ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('tagid', 'targetid')
    )
    op.create_index('ind_artist_preferred_tags_tagid', 'artist_preferred_tags', ['tagid'])
    op.create_index('ind_artist_preferred_tags_targetid', 'artist_preferred_tags', ['targetid'])
    # Same shape as above, but for tags the artist refuses to work with.
    op.create_table('artist_optout_tags',
        sa.Column('tagid', sa.Integer(), nullable=False),
        sa.Column('targetid', sa.Integer(), nullable=False),
        sa.Column('settings', sa.String(), server_default='', nullable=False),
        sa.ForeignKeyConstraint(['tagid'], ['searchtag.tagid'], onupdate='CASCADE', ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['targetid'], ['login.userid'], onupdate='CASCADE', ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('tagid', 'targetid')
    )
    op.create_index('ind_artist_optout_tags_tagid', 'artist_optout_tags', ['tagid'])
    op.create_index('ind_artist_optout_tags_targetid', 'artist_optout_tags', ['targetid'])
    op.execute('CREATE EXTENSION IF NOT EXISTS FUZZYSTRMATCH')
def downgrade():
    """Drop both association tables, their indexes, and the extension."""
    op.drop_index('ind_artist_preferred_tags_tagid', 'artist_preferred_tags')
    op.drop_index('ind_artist_preferred_tags_targetid', 'artist_preferred_tags')
    op.drop_table('artist_preferred_tags')
    op.drop_index('ind_artist_optout_tags_tagid', 'artist_optout_tags')
    op.drop_index('ind_artist_optout_tags_targetid', 'artist_optout_tags')
    op.drop_table('artist_optout_tags')
    op.execute('DROP EXTENSION IF EXISTS FUZZYSTRMATCH')
| {
"repo_name": "Weasyl/weasyl",
"path": "libweasyl/libweasyl/alembic/versions/40c00abab5f9_add_artist_tags.py",
"copies": "1",
"size": "2725",
"license": "apache-2.0",
"hash": 216608928310190850,
"line_mean": 48.5454545455,
"line_max": 116,
"alpha_frac": 0.6579816514,
"autogenerated": false,
"ratio": 3.566753926701571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710866147066181,
"avg_score": 0.0027738862070780406,
"num_lines": 55
} |
'''Add a simple header to a text file, in preparation for corpus inclusion'''
from string import Template
import os
import sys
import re
import codecs
from datetime import date
HEADER = Template('''
AUTHOR: N/A
TITLE: $TITLE
MEDIUM: $MEDIUM
TOPIC: $TOPIC
PUBLISHER: $PUBLISHER
COPYRIGHT: $CPHOLDER
PUBLISHED: $DATE
EDITION: 1
ADDED: 05/01/2015
ISBN: N/A
NOTE: $NOTE
====
''')
# Usage: add_file_header.py INDIR OUTDIR MEDIUM TOPIC PUBLISHER COPYRIGHT NOTE
indir = sys.argv[1]
outdir = sys.argv[2]
# These values are identical for every file; read them once, outside the loop.
medium = sys.argv[3]
topic = sys.argv[4]
publisher = sys.argv[5]
cpholder = sys.argv[6]  # renamed from 'copyright', which shadows the builtin
note = sys.argv[7]
title = 'n/a'

for f in os.listdir(indir):
    if not f.endswith(".txt"):
        continue
    # Prefer a date embedded in the filename (e.g. "illum.2015-05-01...");
    # otherwise fall back to the file's modification time.
    match = re.search(r'illum\.(\d\d\d\d-\d\d-\d\d)', f)
    if match is not None:
        # group(1) is the date alone; group(0) would include the "illum." prefix.
        dateText = match.group(1)
    else:
        mtime = os.stat(indir + "/" + f).st_mtime
        dateText = date.fromtimestamp(mtime).strftime("%d/%m/%Y")
    header = HEADER.substitute(TITLE=title, MEDIUM=medium, TOPIC=topic,
                               PUBLISHER=publisher, CPHOLDER=cpholder,
                               DATE=dateText, NOTE=note)
    reader = codecs.open(indir + "/" + f, 'r', 'utf-8')
    writer = codecs.open(outdir + "/" + f, 'w', 'utf-8')
    try:
        writer.write(header + "\n")
        for line in reader:
            # Lines read from the file keep their trailing newline, so do NOT
            # append another one (the original doubled every newline).
            writer.write(line)
    finally:
        reader.close()
        writer.close()
| {
"repo_name": "bertugatt/textmt",
"path": "textmt/utils/add_file_header.py",
"copies": "1",
"size": "1389",
"license": "mit",
"hash": 7951935769047278000,
"line_mean": 22.1666666667,
"line_max": 133,
"alpha_frac": 0.6090712743,
"autogenerated": false,
"ratio": 3.032751091703057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8963145233303577,
"avg_score": 0.03573542653989604,
"num_lines": 60
} |
"""add association between work and person with role
Revision ID: 56cf8a7b4ce
Revises: 7f6fc70526
Create Date: 2014-09-01 21:16:35.758002
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '56cf8a7b4ce'
down_revision = '7f6fc70526'
def upgrade():
    """Create the ``credits`` association table linking a work to a person
    with an optional role (artist/author/editor)."""
    # The enum type is created in the database alongside the table.
    credits_role = sa.Enum('artist', 'author', 'editor', name='credits_role')
    op.create_table(
        'credits',
        sa.Column('work_id', sa.Integer(), nullable=False),
        sa.Column('person_id', sa.Integer(), nullable=False),
        sa.Column('role', credits_role, nullable=True),
        sa.Column('created_at',
                  sa.DateTime(timezone=True),
                  nullable=False),
        sa.ForeignKeyConstraint(['person_id'], ['people.id'], ),
        sa.ForeignKeyConstraint(['work_id'], ['works.id'], ),
        sa.PrimaryKeyConstraint('work_id', 'person_id')
    )
def downgrade():
    """Drop the ``credits`` table and explicitly drop its enum type, which
    dropping the table alone would leave behind."""
    op.drop_table('credits')
    # drop credits_role directly
    sa.Enum(name='credits_role').drop(op.get_bind(), checkfirst=False)
| {
"repo_name": "clicheio/cliche",
"path": "cliche/migrations/versions/56cf8a7b4ce_add_association_between_work_and_person_.py",
"copies": "2",
"size": "1055",
"license": "mit",
"hash": 7745102114318234000,
"line_mean": 28.3055555556,
"line_max": 77,
"alpha_frac": 0.6388625592,
"autogenerated": false,
"ratio": 3.3814102564102564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5020272815610256,
"avg_score": null,
"num_lines": null
} |
"""Add a subnet to an IPSEC tunnel context."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI.custom_types import NetworkParamType
from SoftLayer.CLI import environment
from SoftLayer.CLI.exceptions import ArgumentError
from SoftLayer.CLI.exceptions import CLIHalt
@click.command()
@click.argument('context_id', type=int)
@click.option('-s',
              '--subnet-id',
              default=None,
              type=int,
              help='Subnet identifier to add')
@click.option('-t',
              '--subnet-type',
              '--type',
              required=True,
              type=click.Choice(['internal', 'remote', 'service']),
              help='Subnet type to add')
@click.option('-n',
              '--network-identifier',
              '--network',
              default=None,
              type=NetworkParamType(),
              help='Subnet network identifier to create')
@environment.pass_env
def cli(env, context_id, subnet_id, subnet_type, network_identifier):
    """Add a subnet to an IPSEC tunnel context.

    A subnet id may be specified to link to the existing tunnel context.
    Otherwise, a network identifier in CIDR notation should be specified,
    indicating that a subnet resource should first be created before associating
    it with the tunnel context. Note that this is only supported for remote
    subnets, which are also deleted upon failure to attach to a context.

    A separate configuration request should be made to realize changes on
    network devices.
    """
    # Decide up front (before touching the API) whether a new remote subnet
    # must be created; only remote subnets may be created here.
    must_create_subnet = subnet_id is None
    if must_create_subnet:
        if network_identifier is None:
            raise ArgumentError('Either a network identifier or subnet id '
                                'must be provided.')
        if subnet_type != 'remote':
            raise ArgumentError('Unable to create {} subnets'
                                .format(subnet_type))

    manager = SoftLayer.IPSECManager(env.client)
    # Also validates that the tunnel context exists.
    context = manager.get_tunnel_context(context_id)

    if must_create_subnet:
        network, cidr = network_identifier
        subnet = manager.create_remote_subnet(context['accountId'],
                                              identifier=network,
                                              cidr=cidr)
        subnet_id = subnet['id']
        env.out('Created subnet {}/{} #{}'
                .format(network, cidr, subnet_id))

    # Dispatch to the appropriate attach call by subnet type.
    attach = {'internal': manager.add_internal_subnet,
              'remote': manager.add_remote_subnet,
              'service': manager.add_service_subnet}[subnet_type]
    if attach(context_id, subnet_id):
        env.out('Added {} subnet #{}'.format(subnet_type, subnet_id))
    else:
        raise CLIHalt('Failed to add {} subnet #{}'
                      .format(subnet_type, subnet_id))
| {
"repo_name": "kyubifire/softlayer-python",
"path": "SoftLayer/CLI/vpn/ipsec/subnet/add.py",
"copies": "3",
"size": "3066",
"license": "mit",
"hash": 3168360998510718500,
"line_mean": 36.8518518519,
"line_max": 80,
"alpha_frac": 0.600456621,
"autogenerated": false,
"ratio": 4.528803545051699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6629260166051698,
"avg_score": null,
"num_lines": null
} |
# addata.activity
# PYTHON
from datetime import timedelta
import json
# QSSTATS
from qsstats import QuerySetStats
# FBDATA
from fbdata.fbids import get_fbuser_from_djuser
from fbdata.utils import (
date_to_timestamp,
padded_date_range
)
# ADDATA
from .models import (
AdHourlyActivity,
AdRecord,
FBAd,
FBSponsored
)
def get_hourly_activity(user, date, hour):
    """Fetch (creating on first use) the AdHourlyActivity bucket for this
    user/date/hour combination."""
    bucket, _created = AdHourlyActivity.objects.get_or_create(
        user=user, date=date, hour=hour)
    return bucket
def get_activity_for_day(user, date):
    """Return the queryset of this user's hourly activity on one date."""
    return AdHourlyActivity.objects.filter(date=date, user=user)
def get_activity_for_period(user, start_date, end_date):
    """Return the user's hourly activity with start_date <= date <= end_date
    (both bounds inclusive)."""
    bounds = {'date__gte': start_date, 'date__lte': end_date}
    return AdHourlyActivity.objects.filter(user=user, **bounds)
def process_hourly_activity(user, start, end, modelclass):
    """Bucket the user's ``modelclass`` records (AdRecord, FBSponsored or
    FBAd) in [start, end] into AdHourlyActivity rows, one per hour that saw
    any records, and refresh each bucket's cached totals.

    Returns a dict mapping each non-empty bucket's datetime to its saved
    AdHourlyActivity instance; {} when there are no matching records.
    """
    # AdRecord stores its owner under ``_user_id`` rather than a ``user`` FK.
    if modelclass == AdRecord:
        qs = modelclass.objects.filter(_user_id=user.id, date__range=(start, end))
    else:
        qs = modelclass.objects.filter(user=user, date__range=(start, end))
    if not qs:
        return {}
    qss = QuerySetStats(qs, 'date')
    start_date = start.date()
    end_date = end.date()
    # Stretch a same-day range to a full day so the series is non-empty.
    if start_date == end_date:
        end_date = start_date + timedelta(days=1)
    timeseries = qss.time_series(
        start_date, end=end_date, interval='hours', date_field='date')
    activities = {}
    for t in timeseries:
        if t[1]:  # only hours that actually have records
            d = t[0]
            # NOTE(review): buckets are always created on ``start_date`` even
            # when the range spans several days — callers appear to pass
            # single (padded) days via process_activity_range; confirm before
            # reusing with wider ranges.
            activity = get_hourly_activity(user, start_date, d.hour)
            if modelclass == AdRecord:
                for r in qs.filter(date__hour=d.hour):
                    activity.adrecords.add(r)
            elif modelclass == FBSponsored:
                for r in qs.filter(date__hour=d.hour):
                    activity.fbsponsored.add(r)
            elif modelclass == FBAd:
                for r in qs.filter(date__hour=d.hour):
                    activity.fbads.add(r)
            # Refresh the denormalized per-bucket counts and persist.
            activity.adtotal = activity.adrecords.count()
            activity.fbadtotal = activity.fbads.count()
            activity.fbsptotal = activity.fbsponsored.count()
            activity.save()
            activities[d] = activity
    return activities
def process_all_hourly_activity(user, start, end):
    """Run process_hourly_activity for every tracked model class and return
    the three result dicts keyed by category name."""
    return {
        'ad_activities': process_hourly_activity(user, start, end, AdRecord),
        'fbsp_activities': process_hourly_activity(user, start, end, FBSponsored),
        'fbad_activities': process_hourly_activity(user, start, end, FBAd),
    }
def process_activity_range(user, start_date, end_date):
    """Process hourly activity for each whole day in [start_date, end_date)."""
    num_days = (end_date - start_date).days
    for offset in range(num_days):
        day_start, day_end = padded_date_range(start_date + timedelta(days=offset))
        process_all_hourly_activity(user, day_start, day_end)
def activity_data_json(user, start_date, end_date, fbuser=None, anon=True):
    """Serialize the user's hourly activity for the period as JSON.

    ``fbuser`` and ``anon`` are accepted for interface compatibility but
    are currently unused by this function.
    """
    activities = get_activity_for_period(user, start_date, end_date)
    payload = {
        'dates': {'start': date_to_timestamp(start_date),
                  'end': date_to_timestamp(end_date)},
        'activity': [item.packed_data() for item in activities],
    }
    return json.dumps(payload)
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/activity.py",
"copies": "1",
"size": "3445",
"license": "mit",
"hash": 6914098305760453000,
"line_mean": 35.6489361702,
"line_max": 86,
"alpha_frac": 0.5907111756,
"autogenerated": false,
"ratio": 3.614900314795383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9686593227858442,
"avg_score": 0.0038036525073881374,
"num_lines": 94
} |
# addata.api
# DJANGO
from django.contrib.auth import get_user_model
# TASTYPIE
from tastypie import fields
from tastypie.api import Api
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import (
Authorization,
DjangoAuthorization,
ReadOnlyAuthorization
)
from tastypie.exceptions import Unauthorized
from tastypie.models import ApiKey
from tastypie.resources import ModelResource
# FBDATA
from fbdata.participant import has_participant_consent
# ADDATA
from .models import (
DomainName,
FBSponsored,
FBAd,
RawData
)
###############
# AUTHORIZATION
###############
class LoginAuthorization(ReadOnlyAuthorization):
    """Read-only authorization exposing only the requesting user's own row."""
    def read_list(self, object_list, bundle):
        # Restrict list views to the authenticated user's own record.
        return object_list.filter(id=bundle.request.user.id)
    def read_detail(self, object_list, bundle):
        # Detail access only for the user's own object.
        return bundle.obj.id == bundle.request.user.id
class UserObjectsOnlyAuthorization(Authorization):
    """Authorization limiting reads/writes to objects owned by the requester.

    Deletes are always refused with Unauthorized.
    """
    def read_list(self, object_list, bundle):
        return object_list.filter(user=bundle.request.user)
    def read_detail(self, object_list, bundle):
        return bundle.obj.user == bundle.request.user
    def create_list(self, object_list, bundle):
        # Creation lists are passed through unchanged.
        return object_list
    def create_detail(self, object_list, bundle):
        return bundle.obj.user == bundle.request.user
    def update_list(self, object_list, bundle):
        # Keep only objects owned by the requesting user (was a manual
        # append loop; a comprehension is the idiomatic equivalent).
        return [obj for obj in object_list
                if obj.user == bundle.request.user]
    def update_detail(self, object_list, bundle):
        return bundle.obj.user == bundle.request.user
    def delete_list(self, object_list, bundle):
        raise Unauthorized("Sorry, no deletes.")
    def delete_detail(self, object_list, bundle):
        raise Unauthorized("Sorry, no deletes.")
###############
# API KEY
###############
def get_api_key(user):
    """Return the user's API key, creating one if needed.

    Returns None when the user has not given participant consent.
    """
    if not has_participant_consent(user):
        return None
    apikey, created = ApiKey.objects.get_or_create(user=user)
    if created:
        apikey.save()
    return apikey.key
def reset_api_key(user):
    """Issue a fresh API key for the user.

    Returns None when the user has not given participant consent.
    """
    if not has_participant_consent(user):
        return None
    apikey, created = ApiKey.objects.get_or_create(user=user)
    if not created:
        # Replace an existing key by deleting and re-creating it.
        apikey.delete()
        apikey = ApiKey.objects.create(user=user)
    else:
        apikey.save()
    return apikey.key
###############
# RESOURCES
###############
class UserResource(ModelResource):
    """Read-only API resource exposing only the username."""
    class Meta:
        queryset = get_user_model().objects.all()
        resource_name = 'user'
        allowed_methods = ['get']
        fields = ['username']
        authorization = DjangoAuthorization()
        authentication = ApiKeyAuthentication()
class LoginResource(ModelResource):
    """Login-check resource: a user can only read their own row."""
    class Meta:
        queryset = get_user_model().objects.all()
        resource_name = 'login'
        allowed_methods = ['get']
        fields = ['username']
        authorization = LoginAuthorization()
        authentication = ApiKeyAuthentication()
    def apply_authorization_limits(self, request, object_list):
        # NOTE(review): apply_authorization_limits looks like the legacy
        # tastypie hook, duplicating LoginAuthorization -- confirm whether
        # the deployed tastypie version still calls it.
        return object_list.filter(id=request.user.id)
class URLRecordResource(ModelResource):
    """Write endpoint for raw browser URL records, stored as RawData rows."""
    user = fields.ForeignKey(UserResource, 'user')
    class Meta:
        queryset = RawData.objects.filter(datatype=RawData.DATA_URLS)
        allowed_methods = ['post', 'patch', 'put']
        resource_name = 'urlrecords'
        excludes = ['user']
        authorization = UserObjectsOnlyAuthorization()
        authentication = ApiKeyAuthentication()
    def apply_authorization_limits(self, request, object_list):
        # NOTE(review): legacy tastypie hook; duplicates the authorization
        # class above.
        return object_list.filter(user=request.user)
    def obj_create(self, bundle, **kwargs):
        # Force ownership and datatype regardless of what the client sent.
        return super(URLRecordResource, self).obj_create(bundle,
            user=bundle.request.user,
            datatype=RawData.DATA_URLS)
class FBSponsoredResource(ModelResource):
    """Write endpoint for raw FB sponsored-story payloads (RawData.DATA_FB)."""
    user = fields.ForeignKey(UserResource, 'user')
    class Meta:
        # Despite the resource name, submissions land in RawData and are
        # converted to FBSponsored rows later by the processing tasks.
        queryset = RawData.objects.filter(datatype=RawData.DATA_FB)
        allowed_methods = ['post', 'patch', 'put']
        resource_name = 'fbsponsored'
        excludes = ['user']
        authorization = UserObjectsOnlyAuthorization()
        authentication = ApiKeyAuthentication()
    def apply_authorization_limits(self, request, object_list):
        # NOTE(review): legacy tastypie hook; duplicates the authorization
        # class above.
        return object_list.filter(user=request.user)
    def obj_create(self, bundle, **kwargs):
        # Force ownership and datatype regardless of what the client sent.
        return super(FBSponsoredResource, self).obj_create(bundle,
            user=bundle.request.user,
            datatype=RawData.DATA_FB)
class FBAdResource(ModelResource):
    """Write endpoint for raw FB ad payloads (RawData.DATA_FBADS)."""
    user = fields.ForeignKey(UserResource, 'user')
    class Meta:
        queryset = RawData.objects.filter(datatype=RawData.DATA_FBADS)
        allowed_methods = ['post', 'patch', 'put']
        resource_name = 'fbad'
        excludes = ['user']
        authorization = UserObjectsOnlyAuthorization()
        authentication = ApiKeyAuthentication()
    def apply_authorization_limits(self, request, object_list):
        # NOTE(review): legacy tastypie hook; duplicates the authorization
        # class above.
        return object_list.filter(user=request.user)
    def obj_create(self, bundle, **kwargs):
        # Force ownership and datatype regardless of what the client sent.
        return super(FBAdResource, self).obj_create(bundle,
            user=bundle.request.user,
            datatype=RawData.DATA_FBADS)
class FBListingResource(ModelResource):
    """Write endpoint for raw FB news-feed listings (RawData.DATA_FBLISTING)."""
    user = fields.ForeignKey(UserResource, 'user')
    class Meta:
        queryset = RawData.objects.filter(datatype=RawData.DATA_FBLISTING)
        allowed_methods = ['post', 'patch', 'put']
        resource_name = 'fblisting'
        excludes = ['user']
        authorization = UserObjectsOnlyAuthorization()
        authentication = ApiKeyAuthentication()
    def apply_authorization_limits(self, request, object_list):
        # NOTE(review): legacy tastypie hook; duplicates the authorization
        # class above.
        return object_list.filter(user=request.user)
    def obj_create(self, bundle, **kwargs):
        # Force ownership and datatype regardless of what the client sent.
        return super(FBListingResource, self).obj_create(bundle,
            user=bundle.request.user,
            datatype=RawData.DATA_FBLISTING)
###############
# API URLS
###############
def get_api():
    """Build the v1 tastypie Api with every addata resource registered."""
    addata_api = Api(api_name='v1')
    # Registration order matches the original module.
    for resource_class in (LoginResource, URLRecordResource, FBAdResource,
                           FBSponsoredResource, FBListingResource):
        addata_api.register(resource_class())
    return addata_api
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/api.py",
"copies": "1",
"size": "6648",
"license": "mit",
"hash": -7675342673107550000,
"line_mean": 32.0746268657,
"line_max": 74,
"alpha_frac": 0.6254512635,
"autogenerated": false,
"ratio": 4.157598499061914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5283049762561913,
"avg_score": null,
"num_lines": null
} |
# addata.details
# PYTHON
from datetime import timedelta
import json
# DJANGO
from django.db.models import Q
# FBDATA
from fbdata.utils import (
date_to_timestamp,
json_error,
padded_date_range
)
# ADDATA
from .handlers import ad_class_for_type
from .models import AdRecord
def get_ad_data_json(ad_type, pk, anon=True):
    """Return JSON detail data for one ad object, or a JSON error payload."""
    adobject_class = ad_class_for_type(ad_type)
    if adobject_class is None:
        return json_error("Not found: %s %s" % (ad_type, pk))
    try:
        adobject = adobject_class.objects.get(pk=pk)
    except adobject_class.DoesNotExist:
        # This branch builds its error dict inline rather than via
        # json_error(); kept as-is to preserve the exact payload shape.
        return json.dumps(
            {'type':'error', 'message':'%s not found: %s' % (ad_type, pk)})
    return json.dumps(adobject.detail_data(anon=anon))
def ad_domain_data_for_period(user, start_time, end_time, anon=True):
    """Tally ad hits per (landing domain, referring domain) for a period.

    Only AdRecords with both domain ids resolved (non-zero) are counted.
    Returns ``{'ads': {domain: [[ref, count], ...]}, 'refs': [ref, ...]}``.

    Fix: replaced ``dict.has_key()`` (removed in Python 3) with
    ``setdefault``/``get``.
    """
    ads = AdRecord.objects.filter(~Q(_ref_domain_id=0), ~Q(_domain_id=0),
                                  _user_id=user.id,
                                  date__range=(start_time, end_time))
    counts = {}
    refs = set()
    for ad in ads:
        domain = ad.get_domain().name
        ref = ad.get_ref_domain().ref_name(anon=anon)
        refs.add(ref)
        per_domain = counts.setdefault(domain, {})
        per_domain[ref] = per_domain.get(ref, 0) + 1
    ad_data = {}
    for domain, values in counts.items():
        ad_data[domain] = [[ref, count] for ref, count in values.items()]
    return {'ads': ad_data, 'refs': list(refs)}
def ad_details_json(user, start_time, end_time, anon=True):
    """Serialize per-domain ad counts for the period as JSON."""
    ad_data = ad_domain_data_for_period(user, start_time, end_time, anon=anon)
    # NOTE(review): ad_domain_data_for_period() always returns a dict with
    # 'ads' and 'refs' keys, so this test is always true and the error
    # branch below is unreachable; the intent was probably to test
    # ad_data['ads'] instead.
    if ad_data:
        ad_data['dates'] = {'start': date_to_timestamp(start_time),
                            'end': date_to_timestamp(end_time)}
        return json.dumps(ad_data)
    else:
        return json_error('No data for period: %s %s' % (start_time, end_time))
def ad_details_for_hour_json(user, hour, anon=True):
    """Serialize per-domain ad counts for the single hour starting at *hour*."""
    start_time = hour
    end_time = hour + timedelta(hours=1)
    ad_data = ad_domain_data_for_period(user, start_time, end_time, anon=anon)
    # NOTE(review): as in ad_details_json(), ad_data is always a truthy dict,
    # so the error branch below is unreachable.
    if ad_data:
        ad_data['dates'] = {'start': date_to_timestamp(start_time),
                            'end': date_to_timestamp(end_time)}
        ad_data['hour'] = date_to_timestamp(hour)
        return json.dumps(ad_data)
    else:
        return json_error('No data for period: %s %s' % (start_time, end_time))
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/details.py",
"copies": "1",
"size": "2584",
"license": "mit",
"hash": -6407361884623878000,
"line_mean": 33,
"line_max": 86,
"alpha_frac": 0.5820433437,
"autogenerated": false,
"ratio": 3.2421580928481806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43242014365481807,
"avg_score": null,
"num_lines": null
} |
# addata.handlers
# ADDATA
from .models import (
AdRecord,
FBAd,
FBSponsored
)
# Maps the short ad-type slugs used by the API to their model classes.
_AD_CLASSES = {
    'ad': AdRecord,
    'fbad': FBAd,
    'fbsp': FBSponsored,
}

def ad_class_for_type(object_type):
    """Return the model class registered for *object_type*, or None."""
    return _AD_CLASSES.get(object_type)
def get_user_ads(user, start_date, end_date):
    """AdRecords for *user* with start_date <= date <= end_date."""
    return AdRecord.objects.filter(_user_id=user.id,
                                   date__range=(start_date, end_date))

def get_user_fbads(user, start_date, end_date):
    """FBAds for *user* with start_date <= date <= end_date."""
    return FBAd.objects.filter(user=user,
                               date__range=(start_date, end_date))

def get_user_fbsponsored(user, start_date, end_date):
    """FBSponsored items for *user* with start_date <= date <= end_date."""
    return FBSponsored.objects.filter(user=user,
                                      date__range=(start_date, end_date))

# Dispatch table mapping datatype keywords to their accessor functions.
_data_handlers = {
    'ads': get_user_ads,
    'fbads': get_user_fbads,
    'fbsponsored': get_user_fbsponsored
}
def get_user_data(user, datatype, start_date, end_date):
    """Fetch the user's ad data for the period, keyed by datatype.

    ``datatype`` may be 'all' or one of the ``_data_handlers`` keys;
    unknown values yield an empty dict.
    """
    if datatype == 'all':
        return {'ads': get_user_ads(user, start_date, end_date),
                'fbads': get_user_fbads(user, start_date, end_date),
                'fbsps': get_user_fbsponsored(user, start_date, end_date)}
    handler = _data_handlers.get(datatype)
    if handler is None:
        return {}
    return {datatype: handler(user, start_date, end_date)}
def get_all_user_data(user):
    """Return every stored ad record for *user*, keyed by kind."""
    return {
        'ads': AdRecord.objects.filter(_user_id=user.id),
        'fbads': FBAd.objects.filter(user=user),
        'fbsps': FBSponsored.objects.filter(user=user),
    }
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/handlers.py",
"copies": "1",
"size": "1568",
"license": "mit",
"hash": -609443636511921700,
"line_mean": 27.5090909091,
"line_max": 72,
"alpha_frac": 0.6116071429,
"autogenerated": false,
"ratio": 2.871794871794872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8885943750371162,
"avg_score": 0.019491652864741738,
"num_lines": 55
} |
# addata.media
# PYTHON
import os
from time import time
import urllib
from urlparse import urlparse
# DJANGO
from django.conf import settings
# PILLOW
from PIL import Image
from .models import (
AdRecord,
FBAdImage
)
# Content types treated as downloadable ad images.
_IMAGE_TYPES = [
    'image/gif',
    'image/jpeg',
    'image/png',
]
# Other downloadable rich-media content types.
_MEDIA_TYPES = [
    'application/x-shockwave-flash',
]
# Content types indicating dynamically generated (non-media) responses.
_DYNAMIC_TYPES = [
    'text/javascript',
    'text/javascript; charset=UTF-8',
    'text/html',
    'text/html; charset=UTF-8',
]
# Fallback file extensions keyed by content type.
_CTYPE_EXTS = {
    'image/gif': '.gif',
    'image/jpeg': '.jpg',
    'image/png': '.png',
    'application/x-shockwave-flash': '.swf'
}
# File extensions keyed by PIL image-format name.
_FORMAT_EXTS = {
    'GIF': '.gif',
    'JPEG': '.jpg',
    'PNG': '.png'
}
# Every extension accepted for downloaded media files.
_MEDIA_EXTS = ['.gif', '.jpg', '.png', '.swf']

def is_image_file(adrecord):
    """True when the record's content type is a known image type."""
    return adrecord.content_type in _IMAGE_TYPES

def is_dynamic(adrecord):
    """True when the record looks dynamically generated (POST/AJAX/html/js)."""
    if adrecord.method == "POST" or adrecord.is_ajax:
        return True
    return adrecord.content_type in _DYNAMIC_TYPES

def ext_from_ctype(ctype, default=''):
    """Map a content type to a file extension, or *default* when unknown."""
    return _CTYPE_EXTS.get(ctype, default)
def get_extension(url, ctype=None):
    """Extension taken from the URL path, falling back to the content type."""
    ext = os.path.splitext(urlparse(url).path)[1]
    return ext if ext else ext_from_ctype(ctype)
def make_local_name():
    """Generate a unique local file name from the current microsecond clock."""
    return str(int(time() * 1000000))
def remove_download(filename):
    """Delete *filename* from the VAVS download directory."""
    os.remove(os.path.join(settings.VAVS_ROOT,
                           settings.VAVS_DOWNLOAD_DIR, filename))
def remove_thumb(filename):
    """Delete *filename* from the VAVS thumbnail directory."""
    os.remove(os.path.join(settings.VAVS_ROOT,
                           settings.VAVS_THUMBNAILS_DIR, filename))
def download_media(src, ctype=None):
    """Download *src* into the VAVS download directory.

    Returns ``(localname, filename)``; ``filename`` is None when the
    retrieval failed.

    Fixes: ``ctype`` was previously accepted but never forwarded, so the
    content-type extension fallback in get_extension() never triggered;
    legacy ``except E, e`` syntax replaced with ``as`` (valid on
    Python 2.6+ and 3).
    """
    localname = make_local_name()
    # Forward ctype so get_extension() can fall back on it when the URL
    # path carries no extension.
    dst = '%s%s' % (localname, get_extension(src, ctype=ctype))
    dstpath = os.path.join(settings.VAVS_ROOT, settings.VAVS_DOWNLOAD_DIR, dst)
    try:
        urllib.urlretrieve(src, dstpath)
    except Exception as e:
        print(e)
        return (localname, None)
    else:
        dst = verify_filename(dst)
        return (localname, dst)
def make_thumbnail(filename, localname):
    """Create a JPEG thumbnail of a downloaded media file.

    Returns the thumbnail file name, or None when PIL cannot read or
    convert the source file.

    Fix: legacy ``except IOError, e`` replaced with the ``as`` form
    (removed in Python 3; ``as`` is valid on 2.6+).
    """
    infile = os.path.join(settings.VAVS_ROOT,
                          settings.VAVS_DOWNLOAD_DIR, filename)
    thumbfile = '%s.jpg' % localname
    outfile = os.path.join(settings.MEDIA_ROOT,
                           settings.VAVS_THUMBNAILS_DIR, thumbfile)
    try:
        im = Image.open(infile)
        im.thumbnail(settings.VAVS_THUMBNAILS_SIZE)
        # Convert to RGB first: JPEG cannot store palette/alpha modes.
        im.convert("RGB").save(outfile, "JPEG")
    except IOError as e:
        print(e)
        return None
    else:
        return thumbfile
def is_tracker_image(filename):
    """Heuristic: images smaller than 10x10 pixels are tracking pixels.

    Returns False when the file cannot be opened as an image.

    Fix: legacy ``except IOError, e`` replaced with the ``as`` form
    (removed in Python 3; ``as`` is valid on 2.6+).
    """
    infile = os.path.join(settings.VAVS_ROOT,
                          settings.VAVS_DOWNLOAD_DIR, filename)
    try:
        im = Image.open(infile)
    except IOError as e:
        print(e)
        return False
    return im.size[0] < 10 and im.size[1] < 10
def download_adrecord(adrecord):
    """Classify an AdRecord and fetch its media, updating type/status.

    Dynamic responses are tagged TYPE_DYNAMIC; image responses are
    downloaded, tiny ones are tagged TYPE_TRACKER (and the file removed),
    real ads get a thumbnail.  The record is always saved.
    """
    if is_dynamic(adrecord):
        adrecord.ad_type = AdRecord.TYPE_DYNAMIC
    elif is_image_file(adrecord):
        localname, dst = download_media(
            adrecord.url, ctype=adrecord.content_type)
        if dst:
            adrecord.localfile = dst
            if is_tracker_image(dst):
                # Tracking pixels are recorded but the file is not kept.
                adrecord.ad_type = AdRecord.TYPE_TRACKER
                remove_download(dst)
            else:
                adrecord.ad_type = AdRecord.TYPE_AD
                thumbfile = make_thumbnail(dst, localname)
                if thumbfile:
                    adrecord.thumbfile = thumbfile
                    adrecord.status = AdRecord.STATUS_HAS_MEDIA
                else:
                    adrecord.status = AdRecord.STATUS_NO_THUMB
        else:
            adrecord.status = AdRecord.STATUS_NO_DOWNLOAD
    else:
        # Not dynamic and not an image: nothing to download.
        adrecord.status = AdRecord.STATUS_NO_MEDIA
    adrecord.save()
def download_fbadimage(fbadimage):
    """Fetch an FBAdImage's media, build its thumbnail and update status."""
    localname, dst = download_media(fbadimage.url)
    if not dst:
        fbadimage.status = FBAdImage.STATUS_NO_DOWNLOAD
        fbadimage.save()
        return
    fbadimage.localfile = dst
    thumbfile = make_thumbnail(dst, localname)
    if thumbfile:
        fbadimage.thumbfile = thumbfile
        fbadimage.status = FBAdImage.STATUS_HAS_MEDIA
    else:
        fbadimage.status = FBAdImage.STATUS_NO_THUMB
    fbadimage.save()
def ext_from_format(filename):
    """Extension inferred from the file's actual (PIL-detected) image format.

    Returns '' when the file cannot be opened or its format is unknown.

    Fix: legacy ``except IOError, e`` replaced with the ``as`` form
    (removed in Python 3; ``as`` is valid on 2.6+).
    """
    infile = os.path.join(settings.VAVS_ROOT,
                          settings.VAVS_DOWNLOAD_DIR, filename)
    try:
        im = Image.open(infile)
    except IOError as e:
        print(e)
        return ''
    return _FORMAT_EXTS.get(im.format, '')
def change_ext(filename, new_ext):
    """Replace *filename*'s extension with *new_ext* (dot included)."""
    stem = os.path.splitext(filename)[0]
    return '%s%s' % (stem, new_ext)
def rename_file(oldname, newname):
    """Rename a file inside the VAVS download directory."""
    base = os.path.join(settings.VAVS_ROOT, settings.VAVS_DOWNLOAD_DIR)
    os.rename(os.path.join(base, oldname), os.path.join(base, newname))
def verify_filename(filename):
    """Ensure *filename* carries a known media extension, renaming if needed.

    When the extension is missing or unrecognised, the real format is
    detected from the file contents and the file is renamed accordingly.
    """
    ext = os.path.splitext(filename)[1]
    if ext in _MEDIA_EXTS:
        return filename
    newname = change_ext(filename, ext_from_format(filename))
    rename_file(filename, newname)
    return newname
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/media.py",
"copies": "1",
"size": "5354",
"license": "mit",
"hash": 380639979671535100,
"line_mean": 26.7409326425,
"line_max": 79,
"alpha_frac": 0.5958162122,
"autogenerated": false,
"ratio": 3.451966473243069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45477826854430686,
"avg_score": null,
"num_lines": null
} |
# addata.models
# PYTHON
from datetime import datetime
import json
import os
# DJANGO
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import now
# FBDATA
from fbdata.utils import (
date_to_timestamp,
get_choice_name
)
class RawData(models.Model):
    """Raw JSON payload uploaded by a client, queued for later processing."""
    # Kind of payload stored in `data`.
    DATA_NONE = 0
    DATA_URLS = 1
    DATA_COOKIES = 2
    DATA_FBADS = 3
    DATA_FB = 4
    DATA_FBLISTING = 5
    DATA_CHOICES = (
        (DATA_NONE, 'none'),
        (DATA_URLS, 'urls'),
        (DATA_COOKIES, 'cookies'),
        (DATA_FBADS, 'fbads'),
        (DATA_FB, 'fb'),
        (DATA_FBLISTING, 'fblisting')
    )
    # Processing lifecycle of the row.
    STATUS_NEW = 0
    STATUS_DONE = 1
    STATUS_ERROR = 2
    STATUS_UNPROCESSED = 3
    STATUS_CHOICES = (
        (STATUS_NEW, 'new'),
        (STATUS_DONE, 'done'),
        (STATUS_ERROR, 'error'),
        (STATUS_UNPROCESSED, 'unprocessed'),
    )
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
    created = models.DateTimeField(auto_now_add=True)
    datatype = models.SmallIntegerField(choices=DATA_CHOICES,
                                        default=DATA_NONE)
    # JSON-encoded payload as received from the client.
    data = models.TextField()
    status = models.SmallIntegerField(choices=STATUS_CHOICES,
                                      default=STATUS_NEW)
    # Error text recorded when processing fails.
    error = models.TextField(null=True)
    def status_str(self):
        # Human-readable status label.
        return get_choice_name(self.status, self.STATUS_CHOICES)
class DomainList(models.Model):
    """Named collection of DomainName rows."""
    name = models.CharField(max_length=24, unique=True)
    domains = models.ManyToManyField(
        'DomainName', related_name='listed_domains')
    def __unicode__(self):
        return self.name
class DomainName(models.Model):
    """A single registered domain name."""
    name = models.CharField(max_length=128)
    def __unicode__(self):
        return self.name
    def ref_name(self, anon=True):
        # Anonymised reference: the numeric id when anon, else the name.
        return unicode(self.id) if anon else self.name
class FBSponsored(models.Model):
    """A Facebook sponsored story captured for a user."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    date = models.DateTimeField(default=now)
    actor = models.CharField(max_length=128, null=True)
    target = models.CharField(max_length=128, null=True)
    type_id = models.PositiveIntegerField(default=0)
    text = models.TextField(null=True)
    title = models.TextField()
    images = models.ManyToManyField('FBAdImage', related_name='sponsored_images')
    links = models.ManyToManyField('FBAdLink', related_name='sponsored_links')
    def basic_data(self, anon=True):
        # Compact dict for list views; `anon` accepted for interface parity.
        return {'date': date_to_timestamp(self.date),
                'id': self.id,
                'title': self.title}
    def detail_data(self, anon=True):
        # Full dict for detail views, including thumbnail paths.
        return {'date': date_to_timestamp(self.date),
                'type': 'fbsp',
                'id': self.id,
                'title': self.title,
                'text': self.text,
                'images':
                [i.thumbpath() for i in self.images.all() if i.thumbfile],
                }
class FBAd(models.Model):
    """A Facebook sidebar/inline ad captured for a user."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    date = models.DateTimeField(default=now)
    # Facebook's own ad identifier.
    adid = models.CharField(max_length=128)
    text = models.TextField(null=True)
    title = models.TextField(null=True)
    images = models.ManyToManyField('FBAdImage', related_name='ad_images')
    links = models.ManyToManyField('FBAdLink', related_name='ad_links')
    def basic_data(self, anon=True):
        # Compact dict for list views; `anon` accepted for interface parity.
        return {'date': date_to_timestamp(self.date),
                'id': self.id,
                'title': self.title}
    def detail_data(self, anon=True):
        # Full dict for detail views, including thumbnail paths.
        return {'date': date_to_timestamp(self.date),
                'type': 'fbad',
                'id': self.id,
                'title': self.title,
                'text': self.text,
                'adid': self.adid,
                'images':
                [i.thumbpath() for i in self.images.all() if i.thumbfile],
                }
class FBAdLink(models.Model):
    """A link URL found in a Facebook ad or sponsored story."""
    domain = models.ForeignKey('DomainName', null=True)
    url = models.TextField(unique=True)
    localfile = models.CharField(max_length=128, null=True)
    thumbfile = models.CharField(max_length=128, null=True)
    def thumbpath(self):
        # Media-relative thumbnail path, or '' when there is none.
        if self.thumbfile:
            return os.path.join(settings.VAVS_THUMBNAILS_DIR, self.thumbfile)
        else:
            return ''
class FBAdImage(models.Model):
    """An image URL found in a Facebook ad, with download/thumbnail state."""
    # Download/processing lifecycle of the image.
    STATUS_NEW = 0
    STATUS_DONE = 1
    STATUS_ERROR = 2
    STATUS_NO_MEDIA = 3
    STATUS_HAS_MEDIA = 4
    STATUS_NO_THUMB = 5
    STATUS_NO_DOWNLOAD = 6
    STATUS_CHOICES = (
        (STATUS_NEW, 'new'),
        (STATUS_DONE, 'done'),
        (STATUS_ERROR, 'error'),
        (STATUS_NO_MEDIA, 'no media'),
        (STATUS_HAS_MEDIA, 'has media'),
        (STATUS_NO_THUMB, 'no thumb'),
        (STATUS_NO_DOWNLOAD, 'no download'),
    )
    domain = models.ForeignKey('DomainName', null=True)
    url = models.TextField(unique=True)
    localfile = models.CharField(max_length=128, null=True)
    thumbfile = models.CharField(max_length=128, null=True)
    status = models.SmallIntegerField(choices=STATUS_CHOICES,
                                      default=STATUS_NEW)
    def thumbpath(self):
        # Media-relative thumbnail path, or '' when there is none.
        if self.thumbfile:
            return os.path.join(settings.VAVS_THUMBNAILS_DIR, self.thumbfile)
        else:
            return ''
    def status_str(self):
        # Human-readable status label.
        return get_choice_name(self.status, self.STATUS_CHOICES)
class FBListing(models.Model):
    """Snapshot of a user's Facebook news-feed listing, stored as JSON."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    date = models.DateTimeField(default=now)
    # JSON-encoded listing; use the `listing` property for (de)serialization.
    data = models.TextField(null=True)
    @property
    def listing(self):
        return json.loads(self.data) if self.data else []
    @listing.setter
    def listing(self, list_data):
        self.data = json.dumps(list_data)
class AdRecord(models.Model):
    """A single intercepted ad-related HTTP request.

    The owning user and the two domains are stored as raw integer ids
    (``_user_id``, ``_domain_id``, ``_ref_domain_id``) rather than foreign
    keys; use the ``get_*()`` accessors to resolve them (0 means
    unresolved).
    """
    STATUS_NEW = 0
    STATUS_DONE = 1
    STATUS_ERROR = 2
    STATUS_NO_MEDIA = 3
    STATUS_HAS_MEDIA = 4
    STATUS_NO_THUMB = 5
    STATUS_NO_DOWNLOAD = 6
    STATUS_CHOICES = (
        (STATUS_NEW, 'new'),
        (STATUS_DONE, 'done'),
        (STATUS_ERROR, 'error'),
        (STATUS_NO_MEDIA, 'no media'),
        (STATUS_HAS_MEDIA, 'has media'),
        (STATUS_NO_THUMB, 'no thumb'),
        (STATUS_NO_DOWNLOAD, 'no download'),
    )
    TYPE_NONE = 0
    TYPE_AD = 1
    TYPE_TRACKER = 2
    TYPE_DYNAMIC = 3
    TYPE_CHOICES = (
        (TYPE_NONE, 'none'),
        (TYPE_AD, 'ad'),
        (TYPE_TRACKER, 'tracker'),
        (TYPE_DYNAMIC, 'dynamic'),
    )
    _user_id = models.PositiveIntegerField(default=0)
    date = models.DateTimeField()
    _ref_domain_id = models.PositiveIntegerField(default=0)
    _domain_id = models.PositiveIntegerField(default=0)
    url = models.TextField()
    method = models.CharField(max_length=12, null=True)
    content_type = models.CharField(max_length=64, null=True)
    cookie_url = models.TextField(null=True)
    is_ajax = models.BooleanField(default=False)
    localfile = models.CharField(max_length=128, null=True)
    thumbfile = models.CharField(max_length=128, null=True)
    status = models.SmallIntegerField(choices=STATUS_CHOICES,
                                      default=STATUS_NEW)
    ad_type = models.SmallIntegerField(choices=TYPE_CHOICES,
                                       default=TYPE_NONE)
    def get_user(self):
        """User instance for _user_id (may raise User.DoesNotExist)."""
        return User.objects.get(pk=self._user_id)
    def get_domain(self):
        """DomainName for _domain_id (raises DoesNotExist when unresolved)."""
        return DomainName.objects.get(pk=self._domain_id)
    def get_ref_domain(self):
        """DomainName for _ref_domain_id (raises DoesNotExist when unresolved)."""
        return DomainName.objects.get(pk=self._ref_domain_id)
    def thumbpath(self):
        """Media-relative thumbnail path, or '' when there is none."""
        if self.thumbfile:
            return os.path.join(settings.VAVS_THUMBNAILS_DIR, self.thumbfile)
        else:
            return ''
    def status_str(self):
        """Human-readable status label."""
        return get_choice_name(self.status, self.STATUS_CHOICES)
    def type_str(self):
        """Human-readable ad-type label."""
        return get_choice_name(self.ad_type, self.TYPE_CHOICES)
    def detail_data(self, anon=True, domain=True):
        """Detail dict for API output.

        Bug fix: previously this read the non-existent attributes
        ``self.domain`` / ``self.ref_domain`` (raising AttributeError);
        it now uses the id fields and accessor methods, skipping
        unresolved (zero-id) domains.
        """
        data = {'id': self.id,
                'date': date_to_timestamp(self.date),
                'type': self.ad_type}
        if domain and self._domain_id:
            data['domain'] = self.get_domain().ref_name(anon=anon)
        if self._ref_domain_id:
            data['ref'] = self.get_ref_domain().ref_name(anon=anon)
        if self.thumbfile and not anon:
            data['img'] = self.thumbfile
        return data
    def basic_data(self, anon=True):
        """Compact dict: id, timestamp, type; thumbnail only when not anon."""
        data = {'id': self.id,
                'date': date_to_timestamp(self.date),
                'type': self.ad_type}
        if self.thumbfile and not anon:
            data['img'] = self.thumbfile
        return data
class AdHourlyActivity(models.Model):
    """Per-user, per-hour aggregation of ad records and FB ad captures."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    date = models.DateField()
    # Hour of day (0-23) this row aggregates.
    hour = models.PositiveIntegerField(default=0)
    # Cached counts of the three M2M sets below.
    adtotal = models.PositiveIntegerField(default=0)
    fbadtotal = models.PositiveIntegerField(default=0)
    fbsptotal = models.PositiveIntegerField(default=0)
    fbsponsored = models.ManyToManyField('FBSponsored',
                                         related_name='ad_hour_fbsponsored')
    fbads = models.ManyToManyField('FBAd', related_name='ad_hour_fbads')
    adrecords = models.ManyToManyField('AdRecord',
                                       related_name='ad_hour_adrecords')
    def get_timestamp(self):
        # Timestamp of the start of this row's hour.
        return date_to_timestamp(datetime(self.date.year,
                                          self.date.month,
                                          self.date.day,
                                          self.hour))
    def packed_data(self):
        # Compact list form: [timestamp, ads, fbads, fbsponsored].
        return [self.get_timestamp(),
                self.adtotal, self.fbadtotal, self.fbsptotal]
    def detail_data(self, anon=True):
        # Dict form for detail views; `anon` accepted for interface parity.
        return {'date': date_to_timestamp(self.date),
                'hour': self.hour,
                'adtotal': self.adtotal,
                'fbadtotal': self.fbadtotal,
                'fbsptotal': self.fbsptotal}
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/models.py",
"copies": "1",
"size": "10159",
"license": "mit",
"hash": 8349177091439896000,
"line_mean": 33.0906040268,
"line_max": 81,
"alpha_frac": 0.5787971257,
"autogenerated": false,
"ratio": 3.779389880952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4858187006652381,
"avg_score": null,
"num_lines": null
} |
# addata.processing
# PYTHON
import json
import logging
from urlparse import urlparse
# DATEUTIL
import dateutil.parser
from dateutil import tz
# FBDATA
from fbdata.utils import (
padded_date_range,
timestamp_to_datetime
)
# ADDATA
from .models import (
AdHourlyActivity,
AdRecord,
DomainName,
FBAd,
FBAdImage,
FBAdLink,
FBListing,
FBSponsored,
RawData
)
def get_tldextract():
    """Build a TLDExtract instance that never fetches the online suffix list."""
    # Imported lazily so importing this module does not require tldextract.
    import tldextract
    return tldextract.TLDExtract(suffix_list_url=False)
def parse_datestr(datestr):
    """Parse a date string, stamp it as server-local time, and return UTC."""
    parsed = dateutil.parser.parse(datestr)
    # The tzinfo is overwritten unconditionally: incoming strings are
    # treated as local time regardless of any offset they carry.
    return parsed.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
def extract_domain(urlstr, default=''):
    """Hostname of *urlstr*, or *default* when it has none."""
    hostname = urlparse(urlstr).hostname
    if hostname:
        return hostname
    return default
def make_domain(url, tldextractor, default=''):
    """Get or create the DomainName row for *url*'s registered domain."""
    hostname = tldextractor(url).registered_domain or default
    domain, _created = DomainName.objects.get_or_create(name=hostname)
    return domain
def data_to_adrecord(user, data, tldextractor):
    """Convert one raw URL-record tuple into a (deduplicated) AdRecord.

    ``data`` is ``[datestr, ref_url, url, method, content_type,
    cookie_url, is_ajax]``.  Existing duplicates are reused rather than
    recreated.  Raises ValueError when the date string cannot be parsed.

    Fix: legacy ``except E, e`` syntax replaced with the ``as`` form
    (removed in Python 3; ``as`` is valid on 2.6+).
    """
    try:
        timestamp = parse_datestr(data[0])
    except ValueError:
        logger = logging.getLogger('vavs.tasks.analytics')
        logger.error("data_to_adrecord: %s, date: '%s'" % (user, data[0]))
        raise
    ref_url = data[1]
    url = data[2]
    # NOTE(review): AdRecord as defined in this app's models.py has no
    # `ref_url` field -- confirm this lookup matches the deployed schema.
    try:
        adrecord, created = AdRecord.objects.get_or_create(
            _user_id = user.id,
            date = timestamp,
            ref_url = ref_url,
            url = url)
    except AdRecord.MultipleObjectsReturned:
        # Duplicates already exist; reuse the first match.
        return AdRecord.objects.filter(
            _user_id = user.id,
            date = timestamp,
            ref_url = ref_url,
            url = url)[0]
    else:
        if created:
            adrecord.method = data[3]
            adrecord.content_type = data[4]
            adrecord.cookie_url = data[5]
            adrecord.is_ajax = data[6]
            adrecord._ref_domain_id = make_domain(ref_url, tldextractor).id
            adrecord._domain_id = make_domain(url, tldextractor).id
            adrecord.save()
        return adrecord
def make_fbadimage(url, tldextractor):
    """Get or create the FBAdImage row for *url* (default domain facebook.com)."""
    domain = make_domain(url, tldextractor, default='facebook.com')
    image, _created = FBAdImage.objects.get_or_create(domain=domain, url=url)
    return image

def make_fbadlink(url, tldextractor):
    """Get or create the FBAdLink row for *url* (default domain facebook.com)."""
    domain = make_domain(url, tldextractor, default='facebook.com')
    link, _created = FBAdLink.objects.get_or_create(domain=domain, url=url)
    return link
def data_to_fbad(user, data, tldextractor):
    """Convert a raw FB-ad tuple into an FBAd row (idempotent on duplicates).

    ``data`` is ``[timestamp_ms, adid, text, title, image_urls, link_urls]``.
    """
    when = timestamp_to_datetime(int(data[0]) * 0.001)
    fbad, created = FBAd.objects.get_or_create(
        user=user, date=when, adid=data[1])
    if not created:
        return fbad
    fbad.text = data[2]
    fbad.title = data[3]
    for url in data[4]:
        fbad.images.add(make_fbadimage(url, tldextractor))
    for url in data[5]:
        fbad.links.add(make_fbadlink(url, tldextractor))
    fbad.save()
    return fbad
def data_to_fbsponsored(user, data, tldextractor):
    """Convert a raw sponsored-story tuple into an FBSponsored row.

    ``data`` is ``[timestamp, actor, target, type_id, text, title,
    image_urls, link_urls]``.  Raises ValueError on a bad timestamp.
    """
    try:
        when = timestamp_to_datetime(float(data[0]))
    except ValueError:
        logger = logging.getLogger('vavs.tasks.analytics')
        logger.error("data_to_fbsponsored: %s, date: '%s', text: %s" % (
            user, data[0], data[4]))
        raise
    fbsp, created = FBSponsored.objects.get_or_create(
        user=user, date=when, title=data[5])
    if created:
        fbsp.actor = data[1]
        fbsp.target = data[2]
        fbsp.type_id = data[3]
        fbsp.text = data[4]
        for url in data[6]:
            fbsp.images.add(make_fbadimage(url, tldextractor))
        for url in data[7]:
            fbsp.links.add(make_fbadlink(url, tldextractor))
        fbsp.save()
    return fbsp
def data_to_fblisting(user, data, tldextractor=None):
    """Store a raw news-feed listing dict as an FBListing row.

    ``tldextractor`` is unused but kept for handler-signature parity.
    Raises ValueError on a bad timestamp.
    """
    try:
        when = timestamp_to_datetime(float(data['timestamp']) * 0.001)
    except ValueError:
        logger = logging.getLogger('vavs.tasks.analytics')
        logger.error("data_to_fblisting: %s, date: '%s', text: %s" % (
            user, data['timestamp'], data['list']))
        raise
    fblist, created = FBListing.objects.get_or_create(user=user, date=when)
    if created:
        fblist.listing = data['list']
        fblist.save()
    return fblist
def get_earliest_raw_data(status=RawData.STATUS_NEW):
    """Oldest RawData row with *status*, or None when none exists."""
    pending = RawData.objects.filter(status=status)
    try:
        return pending.earliest('created')
    except RawData.DoesNotExist:
        return None
def get_raw_data_slice(start, end, status=RawData.STATUS_NEW):
    """RawData rows with pk in [start, end) and the given status."""
    return RawData.objects.filter(pk__gte=start, pk__lt=end, status=status)
# Dispatch table: RawData.datatype -> converter(user, payload, tldextractor).
# DATA_COOKIES has no handler, so such rows end up STATUS_UNPROCESSED.
_raw_data_handlers = {
    RawData.DATA_URLS: data_to_adrecord,
    RawData.DATA_FBADS: data_to_fbad,
    RawData.DATA_FB: data_to_fbsponsored,
    RawData.DATA_FBLISTING: data_to_fblisting
}
def process_raw_datatype(user, datatype, tldextractor=None):
    """Process the user's pending RawData rows of one datatype."""
    pending = RawData.objects.filter(
        user=user, datatype=datatype, status=RawData.STATUS_NEW)
    return process_raw_data_set(pending, tldextractor=tldextractor)
def process_raw_data(user, tldextractor=None):
    """Process all of the user's pending RawData rows."""
    pending = RawData.objects.filter(user=user, status=RawData.STATUS_NEW)
    return process_raw_data_set(pending, tldextractor=tldextractor)
def process_raw_data_set(rdset, tldextractor=None):
    """Run every RawData row in *rdset* through its registered converter.

    Returns a ``(succeeded, failed, unprocessed)`` triple of row lists.
    """
    ok, failed, unprocessed = [], [], []
    extractor = tldextractor or get_tldextract()
    for rd in rdset:
        _proc_rd(rd, ok, failed, unprocessed, extractor)
    return (ok, failed, unprocessed)
def _proc_rd(rd, proc_ok, proc_error, proc_unproc, tldextractor):
    """Process a single RawData row, appending it to the matching result list.

    The row's status/error fields are updated and the row is persisted by
    the ``finally`` clause.

    Fixes: legacy ``except E, e`` syntax replaced with ``as`` (removed in
    Python 3; valid 2.6+); the error branches previously also called
    ``rd.save()`` themselves, causing redundant double writes on top of
    the ``finally`` save.
    """
    try:
        jdata = json.loads(rd.data)
    except Exception as e:
        # Malformed payload: record the error text and mark the row failed.
        rd.error = unicode(e)
        rd.status = RawData.STATUS_ERROR
        proc_error.append(rd)
    else:
        func = _raw_data_handlers.get(rd.datatype, None)
        if func:
            # A dict payload (or a flat list) is a single record; a list
            # of lists/tuples is a batch of records.
            if isinstance(jdata, dict) or not isinstance(
                    jdata[0], (tuple, list, set)):
                try:
                    func(rd.user, jdata, tldextractor)
                except Exception as e:
                    rd.error = unicode(e)
                    rd.status = RawData.STATUS_ERROR
                    proc_error.append(rd)
                    return
            else:
                for data in jdata:
                    try:
                        func(rd.user, data, tldextractor)
                    except Exception as e:
                        rd.error = unicode(e)
                        rd.status = RawData.STATUS_ERROR
                        proc_error.append(rd)
                        return
            rd.status = RawData.STATUS_DONE
            proc_ok.append(rd)
        else:
            # No converter registered for this datatype (e.g. DATA_COOKIES).
            rd.status = RawData.STATUS_UNPROCESSED
            proc_unproc.append(rd)
    finally:
        # Single persistence point for every exit path above.
        rd.save()
def purge_raw_data(user, datatype, status=RawData.STATUS_DONE):
    """Delete the user's RawData rows of *datatype* with the given status."""
    stale = RawData.objects.filter(user=user, datatype=datatype, status=status)
    stale.delete()
def report_raw_data_errors():
    # TODO: not yet implemented -- presumably intended to summarize
    # RawData rows left in STATUS_ERROR.
    pass
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/processing.py",
"copies": "1",
"size": "7709",
"license": "mit",
"hash": -4338171550746799000,
"line_mean": 32.5173913043,
"line_max": 80,
"alpha_frac": 0.5623297445,
"autogenerated": false,
"ratio": 3.5689814814814813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9533627573252978,
"avg_score": 0.01953673054570072,
"num_lines": 230
} |
# addata.tasks
# PYTHON
from datetime import timedelta
import logging
# DJANGO
from django.conf import settings
from django.utils.timezone import now
# CELERY
from celery import task
# UTILS
from utils.slices import get_index_slice
# FBDATA
from fbdata.participant import get_participants
from fbdata.utils import padded_date_range
# ADDATA
from .activity import process_all_hourly_activity
from .processing import (
get_earliest_raw_data,
get_raw_data_slice,
get_tldextract,
process_raw_data,
process_raw_data_set
)
from .media import (
download_adrecord,
download_fbadimage
)
from .models import (
AdRecord,
FBAdImage,
RawData
)
# Batch sizes for the periodic tasks below.
_RD_SLICE = 200  # RawData rows converted per run
_AR_SLICE = 100  # AdRecord media downloads per run
_FBAD_SLICE = 100  # FBAdImage media downloads per run
@task.task(ignore_result=False, name='addata.tasks.async_process_raw_data')
def async_process_raw_data():
    """Periodic task: convert the next pk-slice of pending RawData rows."""
    logger = logging.getLogger('vavs.tasks.analytics')
    earliest = get_earliest_raw_data()
    if not earliest:
        return
    start = earliest.pk
    end = start + _RD_SLICE
    start_time = now()
    tldextractor = get_tldextract()
    rdset = get_raw_data_slice(start, end)
    ok, error, unproc = process_raw_data_set(rdset, tldextractor=tldextractor)
    end_time = now()
    logger.info('\n'.join([
        'async_process_raw_data: %s to %s' % (start, end),
        'time: %s duration: %s' % (start_time, end_time - start_time),
        'processed OK: %d ERRORS: %d' % (len(ok), len(error)),
        'unprocessed: %d' % len(unproc),
    ]))
@task.task(ignore_result=False, name='addata.tasks.async_purge_raw_data')
def async_purge_raw_data():
    """Periodic task: drop processed RawData rows older than two days."""
    logger = logging.getLogger('vavs.tasks.analytics')
    cutoff = now() - timedelta(days=2)
    stale = RawData.objects.filter(created__lte=cutoff,
                                   status=RawData.STATUS_DONE)
    stale.delete()
    logger.info('async_purge_raw_data')
@task(ignore_result=True, name='addata.tasks.aysnc_download_adrecord_media')
def aysnc_download_adrecord_media():
    """Periodic task: download media for the next slice of new AdRecords.

    (The 'aysnc' typo is kept: the task is registered under that name.)

    Fix: the earliest-row lookup previously filtered on
    FBAdImage.STATUS_NEW; it only worked because both classes happen to
    use 0 for STATUS_NEW.  It now uses AdRecord.STATUS_NEW, matching the
    slice filter below.
    """
    logger = logging.getLogger('vavs.tasks.media')
    try:
        earliest = AdRecord.objects.filter(
            status=AdRecord.STATUS_NEW).earliest('id')
    except AdRecord.DoesNotExist:
        return
    index = earliest.id
    adrecords = AdRecord.objects.filter(id__range=(index, index+_AR_SLICE),
                                        status=AdRecord.STATUS_NEW)
    lines = ['aysnc_download_adrecord_media: %d %d, %s' % (index,
             adrecords.count(), now())]
    for adrecord in adrecords:
        download_adrecord(adrecord)
        lines.append("AdRecord %d %s %s" % (adrecord.id, adrecord.status_str(),
                                            adrecord.content_type))
    logger.info('\n'.join(lines))
@task(ignore_result=True, name='addata.tasks.aysnc_download_fbadimage_media')
def aysnc_download_fbadimage_media():
    """Download media for the next slice of new FBAdImages.

    Rows without a usable URL are deleted instead of downloaded.
    """
    log = logging.getLogger('vavs.tasks.media')
    try:
        first = FBAdImage.objects.filter(
            status=FBAdImage.STATUS_NEW).earliest('id')
    except FBAdImage.DoesNotExist:
        return
    start = first.id
    pending = FBAdImage.objects.filter(id__range=(start, start+_FBAD_SLICE),
                                       status=FBAdImage.STATUS_NEW)
    report = ['aysnc_download_fbadimage_media: %d %d, %s' % (start,
        pending.count(), now())]
    for image in pending:
        if len(image.url) > 1:
            download_fbadimage(image)
            report.append("FBAdImage %d %s" % (
                image.id, image.status_str()))
        else:
            report.append("FBAdImage %d no URL, deleted" % image.id)
            image.delete()
    log.info('\n'.join(report))
@task.task(ignore_result=False,
           name='addata.tasks.aysnc_update_hourly_activity')
def aysnc_update_hourly_activity():
    """Recompute hourly activity for every participant over the last day."""
    log = logging.getLogger('vavs.tasks.analytics')
    until = now()
    since = until - timedelta(days=1)
    report = ['aysnc_update_hourly_activity: %s' % until]
    report.append('Date: %s (%s - %s)' % (until.date(), since, until))
    for participant in get_participants():
        results = process_all_hourly_activity(participant, since, until)
        report.append('%s activities' % participant.username)
        report.append('\tads:\t%d' % len(results['ad_activities']))
        report.append('\tfbsp:\t%d' % len(results['fbsp_activities']))
        report.append('\tfbad:\t%d' % len(results['fbad_activities']))
    log.info('\n'.join(report))
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/tasks.py",
"copies": "1",
"size": "4721",
"license": "mit",
"hash": 9135598633840480000,
"line_mean": 34.7651515152,
"line_max": 80,
"alpha_frac": 0.6216903198,
"autogenerated": false,
"ratio": 3.34822695035461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9446376507261913,
"avg_score": 0.004708152578539533,
"num_lines": 132
} |
# addata.views
# DJANGO
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView, View
from django.views.generic import (
ArchiveIndexView,
CreateView,
UpdateView,
DetailView
)
# DJANGO-BRACES
from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
# ACCOUNTS
from accounts.handlers import get_staff_profile
# FBDATA
from fbdata.participant import get_participant_profile
from fbdata.utils import (
can_access_user,
isodatestr_to_datetime,
timestamp_to_datetime
)
# ADDATA
from addata.activity import activity_data_json
from .api import (
get_api_key,
reset_api_key
)
from .details import (
ad_details_for_hour_json,
get_ad_data_json
)
from .handlers import get_user_data, get_all_user_data
class ApiKeyView(LoginRequiredMixin, TemplateView):
    """Show (and optionally reset) the API key for the logged-in user."""
    template_name = 'addata/user_setup.html'

    def get(self, request, reset=False):
        user = request.user
        # Staff accounts have no participant profile of their own.
        profile = get_staff_profile(user) if user.is_staff else user.get_profile()
        api_key = reset_api_key(user) if reset else get_api_key(user)
        if api_key is None:
            # No key exists yet: the user must pass through consent first.
            return HttpResponseRedirect(reverse('participant_consent'))
        return self.render_to_response({
            'user': user,
            'profile': profile,
            'api_key': api_key })
class AdData(LoginRequiredMixin, View):
    """Returns JSON ad data."""
    def get(self, request, username, start, end):
        # Bug fix: the original `if request.is_ajax()` was missing the
        # trailing ':', which is a SyntaxError. Non-AJAX requests get a 404.
        if request.is_ajax():
            if not can_access_user(request.user, username):
                raise Http404
            else:
                user = get_object_or_404(User, username=username)
                participant = get_participant_profile(user)
                # Anonymise unless the requester is viewing their own data.
                anon = False if request.user == user else participant.anon_data
                start = isodatestr_to_datetime(start)
                end = isodatestr_to_datetime(end)
                json_data = activity_data_json(
                    user, start, end, fbuser=participant.fbuser, anon=anon)
                return HttpResponse(json_data, content_type="application/json")
        else:
            raise Http404
class AdDetailData(LoginRequiredMixin, View):
    """Returns JSON detail data."""
    def get(self, request, username, ad_type, pk):
        # Bug fix: the original `if request.is_ajax()` was missing the
        # trailing ':', which is a SyntaxError. Non-AJAX requests get a 404.
        if request.is_ajax():
            if not can_access_user(request.user, username):
                raise Http404
            else:
                user = get_object_or_404(User, username=username)
                participant = get_participant_profile(user)
                # Anonymise unless the requester is viewing their own data.
                anon = False if request.user == user else participant.anon_data
                json_data = get_ad_data_json(ad_type, pk, anon=anon)
                return HttpResponse(json_data, content_type="application/json")
        else:
            raise Http404
class AdActivityDetailData(LoginRequiredMixin, View):
    """Returns JSON detail data."""
    def get(self, request, username, hour):
        # Bug fix: the original `if request.is_ajax()` was missing the
        # trailing ':', which is a SyntaxError. Non-AJAX requests get a 404.
        if request.is_ajax():
            if not can_access_user(request.user, username):
                raise Http404
            else:
                user = get_object_or_404(User, username=username)
                participant = get_participant_profile(user)
                hour = timestamp_to_datetime(hour)
                # Anonymise unless the requester is viewing their own data.
                anon = False if request.user == user else participant.anon_data
                json_data = ad_details_for_hour_json(user, hour, anon=anon)
                return HttpResponse(json_data, content_type="application/json")
        else:
            raise Http404
| {
"repo_name": "valuesandvalue/valuesandvalue",
"path": "vavs_project/addata/views.py",
"copies": "1",
"size": "3938",
"license": "mit",
"hash": 8841345658644990000,
"line_mean": 34.4774774775,
"line_max": 80,
"alpha_frac": 0.6114779076,
"autogenerated": false,
"ratio": 4.089304257528557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007316290813223394,
"num_lines": 111
} |
"""add Attachment element
Revision ID: 93bd528a83
Revises: 51d6e03ecba
Create Date: 2015-06-04 22:02:36.082013
"""
# revision identifiers, used by Alembic.
revision = '93bd528a83'
down_revision = '51d6e03ecba'
from alembic import op
import sqlalchemy as sa
from redwind import create_app
from redwind import util
from redwind import admin
from redwind.models import Post, Attachment
from redwind.extensions import db
import os
import datetime
import random
import string
import mimetypes
import shutil
from flask import current_app
def upgrade():
    """Create the attachment table, linked back to its owning post."""
    # commands auto generated by Alembic - please adjust! ###
    spec = [
        sa.Column('id', sa.Integer(), nullable=False, index=True),
        sa.Column('filename', sa.String(length=256), nullable=True),
        sa.Column('mimetype', sa.String(length=256), nullable=True),
        sa.Column('storage_path', sa.String(length=256), nullable=True),
        sa.Column('post_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('attachment', *spec)
    # end Alembic commands ###
def downgrade():
    """Revert this revision by dropping the attachment table."""
    # commands auto generated by Alembic - please adjust! ###
    op.drop_table('attachment')
    # end Alembic commands ###
| {
"repo_name": "Lancey6/redwind",
"path": "alembic_migrations/versions/93bd528a83_add_attachment_element.py",
"copies": "2",
"size": "1273",
"license": "bsd-2-clause",
"hash": -4587696453614830600,
"line_mean": 24.9795918367,
"line_max": 72,
"alpha_frac": 0.6999214454,
"autogenerated": false,
"ratio": 3.64756446991404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5347485915314041,
"avg_score": null,
"num_lines": null
} |
"""Add attachment_id to queue
Revision ID: d8e65cb6160d
Revises: 6ef9616e57cb
Create Date: 2021-04-27 13:59:11.538263
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'd8e65cb6160d'
down_revision = '6ef9616e57cb'
branch_labels = None
depends_on = None
def upgrade():
    """Add queues.attachment_id (with its FK) and rebuild the CHECK
    constraints so they account for the new attachment entry type (7)."""
    op.add_column('queues',
                  sa.Column('attachment_id', sa.Integer(), nullable=True, index=True),
                  schema='plugin_livesync')
    op.create_foreign_key(None, 'queues', 'attachments', ['attachment_id'], ['id'],
                          source_schema='plugin_livesync', referent_schema='attachments')
    # Drop every existing CHECK constraint; they are recreated below with the
    # attachment column taken into account.
    for suffix in ('enum_type', 'category_entry', 'event_entry',
                   'contribution_entry', 'subcontribution_entry',
                   'session_entry', 'note_entry'):
        op.drop_constraint('ck_queues_valid_%s' % suffix, 'queues',
                           schema='plugin_livesync')
    op.execute('''
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_enum_type CHECK ((type = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7])));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_attachment_entry CHECK (((type <> 7) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (attachment_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_category_entry CHECK (((type <> 1) OR ((attachment_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (category_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_contribution_entry CHECK (((type <> 3) OR ((attachment_id IS NULL) AND (category_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (contribution_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_event_entry CHECK (((type <> 2) OR ((attachment_id IS NULL) AND (category_id IS NULL) AND (contribution_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (event_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_note_entry CHECK (((type <> 6) OR ((attachment_id IS NULL) AND (category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (note_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_session_entry CHECK (((type <> 5) OR ((attachment_id IS NULL) AND (category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (subcontribution_id IS NULL) AND (session_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_subcontribution_entry CHECK (((type <> 4) OR ((attachment_id IS NULL) AND (category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NOT NULL))));
    ''')
def downgrade():
    """Remove attachment support from the queue and restore the old checks."""
    # Attachment entries (type 7) cannot be represented after the downgrade.
    op.execute('DELETE FROM plugin_livesync.queues WHERE type = 7')
    for suffix in ('enum_type', 'category_entry', 'event_entry',
                   'contribution_entry', 'subcontribution_entry',
                   'session_entry', 'note_entry', 'attachment_entry'):
        op.drop_constraint('ck_queues_valid_%s' % suffix, 'queues',
                           schema='plugin_livesync')
    op.drop_column('queues', 'attachment_id', schema='plugin_livesync')
    op.execute('''
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_enum_type CHECK ((type = ANY (ARRAY[1, 2, 3, 4, 5, 6])));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_category_entry CHECK (((type <> 1) OR ((contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (category_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_contribution_entry CHECK (((type <> 3) OR ((category_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (contribution_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_event_entry CHECK (((type <> 2) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (event_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_note_entry CHECK (((type <> 6) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (note_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_session_entry CHECK (((type <> 5) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (subcontribution_id IS NULL) AND (session_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_subcontribution_entry CHECK (((type <> 4) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NOT NULL))));
    ''')
| {
"repo_name": "indico/indico-plugins",
"path": "livesync/indico_livesync/migrations/20210427_1359_d8e65cb6160d_add_attachment_id_to_queue.py",
"copies": "1",
"size": "6294",
"license": "mit",
"hash": -1592516358814095400,
"line_mean": 97.34375,
"line_max": 310,
"alpha_frac": 0.7132189387,
"autogenerated": false,
"ratio": 3.3301587301587303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.454337766885873,
"avg_score": null,
"num_lines": null
} |
"""Add audit events
Revision ID: 30_add_audit_events
Revises: 40_add_draft_services
Create Date: 2015-06-05 11:30:26.425563
"""
# revision identifiers, used by Alembic.
revision = '30_add_audit_events'
down_revision = '40_add_draft_services'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the audit_events table for recording auditable actions."""
    ### commands auto generated by Alembic - please adjust! ###
    spec = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('type', sa.String(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('user', sa.String(), nullable=True),
        sa.Column('data', postgresql.JSON(), nullable=True),
        sa.Column('object_type', sa.String(), nullable=True),
        sa.Column('object_id', sa.BigInteger(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('audit_events', *spec)
    ### end Alembic commands ###
def downgrade():
    """Revert this revision by dropping the audit_events table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('audit_events')
    ### end Alembic commands ###
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/30_add_audit_events.py",
"copies": "3",
"size": "1064",
"license": "mit",
"hash": -8463164095208960000,
"line_mean": 29.4,
"line_max": 63,
"alpha_frac": 0.6795112782,
"autogenerated": false,
"ratio": 3.546666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5726177944866667,
"avg_score": null,
"num_lines": null
} |
"""Add auditorsettings
Revision ID: 57f648d4b597
Revises: 2705e6e13a8f
Create Date: 2015-01-30 22:32:18.420819
"""
# revision identifiers, used by Alembic.
revision = '57f648d4b597'
down_revision = '2705e6e13a8f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the auditorsettings table."""
    ### commands auto generated by Alembic - please adjust! ###
    spec = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('tech_id', sa.Integer(), nullable=True),
        sa.Column('notes', sa.String(length=512), nullable=True),
        sa.Column('account_id', sa.Integer(), nullable=True),
        sa.Column('disabled', sa.Boolean(), nullable=False),
        sa.Column('issue', sa.String(length=512), nullable=False),
        sa.ForeignKeyConstraint(['account_id'], ['account.id'], ),
        sa.ForeignKeyConstraint(['tech_id'], ['technology.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('auditorsettings', *spec)
    ### end Alembic commands ###
def downgrade():
    """Revert this revision by dropping the auditorsettings table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('auditorsettings')
    ### end Alembic commands ###
| {
"repo_name": "monkeysecurity/security_monkey",
"path": "migrations/versions/57f648d4b597_.py",
"copies": "17",
"size": "1082",
"license": "apache-2.0",
"hash": -7996022351389363000,
"line_mean": 29.0555555556,
"line_max": 63,
"alpha_frac": 0.6765249538,
"autogenerated": false,
"ratio": 3.370716510903427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Add audit table
Revision ID: a1a4f4eccae5
Revises: 30da16fadab5
Create Date: 2018-08-01 07:36:14.020930
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = 'a1a4f4eccae5'
down_revision = '33228b8da578'
def upgrade():
    """Create the audit table plus an index on its role_id column."""
    spec = [
        sa.Column('id', sa.String(40), nullable=False),
        sa.Column('activity', sa.Unicode(), nullable=True),
        sa.Column('data', postgresql.JSONB(astext_type=sa.Text()),
                  nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('role_id', sa.Integer(), nullable=True),
        sa.Column('count', sa.Integer(), nullable=True),
        sa.Column('session_id', sa.Unicode(), nullable=True),
        sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('audit', *spec)
    op.create_index(
        op.f('ix_audit_role_id'), 'audit', ['role_id'], unique=False
    )
def downgrade():
    """Drop the audit table and its role_id index."""
    op.drop_index(op.f('ix_audit_role_id'), table_name='audit')
    op.drop_table('audit')
| {
"repo_name": "pudo/aleph",
"path": "aleph/migrate/versions/a1a4f4eccae5_add_audit_table.py",
"copies": "1",
"size": "1148",
"license": "mit",
"hash": 4515783488029765000,
"line_mean": 29.2105263158,
"line_max": 68,
"alpha_frac": 0.6280487805,
"autogenerated": false,
"ratio": 3.2247191011235956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43527678816235954,
"avg_score": null,
"num_lines": null
} |
"""Add audit tables
Revision ID: 53c3b75c86ed
Revises: 53ef72c8a867
Create Date: 2013-09-11 19:34:08.020226
"""
# revision identifiers, used by Alembic.
revision = '53c3b75c86ed'
down_revision = '53ef72c8a867'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the audits table for program audit tracking."""
    spec = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=250), nullable=False),
        sa.Column('slug', sa.String(length=250), nullable=False),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('url', sa.String(length=250), nullable=True),
        sa.Column('start_date', sa.DateTime(), nullable=True),
        sa.Column('end_date', sa.DateTime(), nullable=True),
        sa.Column('report_start_date', sa.Date(), nullable=True),
        sa.Column('report_end_date', sa.Date(), nullable=True),
        sa.Column('owner_id', sa.Integer(), nullable=True),
        sa.Column('audit_firm', sa.String(length=250), nullable=True),
        sa.Column('status', sa.Enum(u'Planned', u'In Progress', u'Manager Review', u'Ready for External Review', u'Completed'), nullable=False),
        sa.Column('gdrive_evidence_folder', sa.String(length=250), nullable=True),
        sa.Column('program_id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('modified_by_id', sa.Integer(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('context_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], ),
        sa.ForeignKeyConstraint(['owner_id'], ['people.id'], ),
        sa.ForeignKeyConstraint(['program_id'], ['programs.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('audits', *spec)
def downgrade():
    """Revert this revision by dropping the audits table."""
    op.drop_table('audits')
| {
"repo_name": "vladan-m/ggrc-core",
"path": "src/ggrc/migrations/versions/20130911193408_53c3b75c86ed_add_audit_tables.py",
"copies": "2",
"size": "1751",
"license": "apache-2.0",
"hash": 3423799841161802000,
"line_mean": 39.7209302326,
"line_max": 140,
"alpha_frac": 0.6767561393,
"autogenerated": false,
"ratio": 3.2425925925925925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9851298850835173,
"avg_score": 0.013609976211483837,
"num_lines": 43
} |
"""add auth for arcgis layers
Revision ID: 6143111b020b
Revises: 19e97a222003
Create Date: 2020-10-15 11:52:42.076090
"""
# revision identifiers, used by Alembic.
revision = '6143111b020b'
down_revision = '19e97a222003'
branch_labels = None
depends_on = None
from alembic import op, context
import sqlalchemy as sa
def upgrade():
    """Add a nullable use_auth flag to both ArcGIS-related tables."""
    schema = context.get_context().config.get_main_option('schema')
    # Both tables gain an identical column; build it fresh per table.
    for table in ('lux_layer_internal_wms', 'lux_getfeature_definition'):
        op.add_column(table,
                      sa.Column('use_auth',
                                sa.BOOLEAN,
                                autoincrement=False,
                                nullable=True),
                      schema=schema)
def downgrade():
    """Remove the use_auth flag from both ArcGIS-related tables."""
    schema = context.get_context().config.get_main_option('schema')
    for table in ('lux_layer_internal_wms', 'lux_getfeature_definition'):
        op.drop_column(table, 'use_auth', schema=schema)
| {
"repo_name": "Geoportail-Luxembourg/geoportailv3",
"path": "geoportal/LUX_alembic/versions/6143111b020b_add_auth_for_arcgis_layers.py",
"copies": "1",
"size": "1237",
"license": "mit",
"hash": -1479748314140033500,
"line_mean": 28.4523809524,
"line_max": 74,
"alpha_frac": 0.5351657235,
"autogenerated": false,
"ratio": 4.096026490066225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5131192213566225,
"avg_score": null,
"num_lines": null
} |
"""add authored column
Revision ID: 648d99c5c414
Revises: 80d36c1e37e2
Create Date: 2019-03-08 11:25:48.367648
"""
import model.utils
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "648d99c5c414"
down_revision = "dd408c868dc6"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (upgrade_<engine>)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (downgrade_<engine>)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add questionnaire_response.authored and position it after `created`."""
    # ### commands auto generated by Alembic - please adjust! ###
    authored = sa.Column("authored", model.utils.UTCDateTime(), nullable=True)
    op.add_column("questionnaire_response", authored)
    # ### end Alembic commands ###
    # MySQL-specific DDL to move the new column directly after `created`.
    op.execute("ALTER TABLE questionnaire_response CHANGE COLUMN `authored` `authored` DATETIME NULL AFTER created")
def downgrade_rdr():
    """Drop the questionnaire_response.authored column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("questionnaire_response", "authored")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/648d99c5c414_add_authored_column.py",
"copies": "1",
"size": "1270",
"license": "bsd-3-clause",
"hash": 5451016883212903000,
"line_mean": 24.9183673469,
"line_max": 116,
"alpha_frac": 0.6834645669,
"autogenerated": false,
"ratio": 3.5277777777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47112423446777774,
"avg_score": null,
"num_lines": null
} |
"""add authored time
Revision ID: b662c5bb00cc
Revises: d1f67196215e
Create Date: 2019-05-16 15:43:49.473126
"""
import model.utils
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "b662c5bb00cc"
down_revision = "d1f67196215e"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (upgrade_<engine>)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (downgrade_<engine>)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add authored, site_id and user columns to patient_status, plus a
    site FK and an index on organization_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("patient_status", sa.Column("authored", model.utils.UTCDateTime(), nullable=True))
    op.add_column("patient_status", sa.Column("site_id", sa.Integer(), nullable=False))
    # NOTE(review): a None constraint name lets the backend auto-generate the
    # FK name; the matching drop in downgrade_rdr() will need that generated
    # name — verify before relying on the downgrade.
    op.create_foreign_key(None, "patient_status", "site", ["site_id"], ["site_id"])
    op.add_column("patient_status", sa.Column("user", sa.String(length=80), nullable=False))
    op.create_index(op.f("ix_patient_status_organization_id"), "patient_status", ["organization_id"], unique=False)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Revert the patient_status changes made in upgrade_rdr()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f("ix_patient_status_organization_id"), table_name="patient_status")
    op.drop_column("patient_status", "user")
    # NOTE(review): Alembic's drop_constraint requires an explicit constraint
    # name; passing None here raises at runtime. The auto-generated FK name
    # from upgrade_rdr() should be filled in — TODO confirm the actual name.
    op.drop_constraint(None, "patient_status", type_="foreignkey")
    op.drop_column("patient_status", "site_id")
    op.drop_column("patient_status", "authored")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/b662c5bb00cc_add_authored_time.py",
"copies": "1",
"size": "1766",
"license": "bsd-3-clause",
"hash": -7098534624899334000,
"line_mean": 30.5357142857,
"line_max": 115,
"alpha_frac": 0.6732729332,
"autogenerated": false,
"ratio": 3.3702290076335877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45435019408335875,
"avg_score": null,
"num_lines": null
} |
"""add auth related models
Revision ID: 48dbba323e2
Revises: 3b54219c648
Create Date: 2015-10-29 01:57:49.563537
"""
# revision identifiers, used by Alembic.
revision = '48dbba323e2'
down_revision = '3b54219c648'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the role, user and roles_users (association) tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'role',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=80), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'))
    op.create_table(
        'user',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('email', sa.String(length=255), nullable=True),
        sa.Column('password', sa.String(length=255), nullable=True),
        sa.Column('active', sa.Boolean(), nullable=True),
        sa.Column('confirmed_at', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email'))
    # Plain many-to-many association table: no surrogate PK, just the two FKs.
    op.create_table(
        'roles_users',
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('role_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ))
    ### end Alembic commands ###
def downgrade():
    """Drop the auth tables in dependency order (association table first)."""
    for table in ('roles_users', 'user', 'role'):
        op.drop_table(table)
| {
"repo_name": "fdgogogo/fangs",
"path": "backend/migrations/versions/201510290157_48dbba323e2_add_auth_related_models.py",
"copies": "1",
"size": "1518",
"license": "mit",
"hash": 2625201288726727000,
"line_mean": 29.9795918367,
"line_max": 67,
"alpha_frac": 0.6574440053,
"autogenerated": false,
"ratio": 3.403587443946188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45610314492461884,
"avg_score": null,
"num_lines": null
} |
# Add auto-completion and a stored history file of commands to your Python
# interactive interpreter. Requires Python 2.0+, readline. Autocomplete is
# bound to the Esc key by default (you can change it - see readline docs).
#
# Set an environment variable to point to it: "export PYTHONSTARTUP=foo"
#
# Note that PYTHONSTARTUP does *not* expand "~", so you have to put in the full
# path to your home directory.
import atexit
import os
import readline
import rlcompleter
# Location of the persistent interactive-interpreter history file.
historyPath = os.path.expanduser("~/.pyhistory")

def save_history(path=historyPath):
    """Write the readline history to *path* (default bound at definition time)."""
    # Re-import locally: the module-level name is deleted at the bottom of
    # this file, but this function runs at interpreter exit via atexit.
    import readline
    readline.write_history_file(path)

# Restore history from a previous session, if any.
if os.path.exists(historyPath):
    readline.read_history_file(historyPath)

# Make JSON-style literals (true/false/null) usable when pasting data into
# the interactive session; works on both Python 2 and 3.
try:
    import __builtin__
except ImportError:
    import builtins as __builtin__
__builtin__.true = True
__builtin__.false = False
__builtin__.null = None

# Key bindings: tab completion plus emacs-style line start/end shortcuts.
readline.parse_and_bind("tab: complete")
readline.parse_and_bind(r"\C-a: beginning-of-line")
readline.parse_and_bind(r"\C-e: end-of-line")

# Persist the history when the interpreter exits.
atexit.register(save_history)

# Keep the interactive namespace clean of this startup machinery.
del os, atexit, readline, rlcompleter, save_history, historyPath
# vim: ft=python
| {
"repo_name": "keith/dotfiles",
"path": "langs/pystartup.py",
"copies": "1",
"size": "1110",
"license": "mit",
"hash": -2357984532752763400,
"line_mean": 26.0731707317,
"line_max": 79,
"alpha_frac": 0.7387387387,
"autogenerated": false,
"ratio": 3.490566037735849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47293047764358487,
"avg_score": null,
"num_lines": null
} |
"""Add automatic to TranslationUrl
Revision ID: 4e20dff98010
Revises: 4c0992d0b552
Create Date: 2015-05-11 11:29:49.824142
"""
# revision identifiers, used by Alembic.
revision = '4e20dff98010'
down_revision = '4c0992d0b552'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
from appcomposer.db import db
from appcomposer.application import app
# Lightweight standalone table handle for TranslationUrls, declaring only the
# columns this migration touches; deliberately independent of the
# application's declarative models.
metadata = db.MetaData()
translation_urls = db.Table('TranslationUrls', metadata,
    db.Column('id', db.Integer()),
    db.Column('automatic', db.Boolean),
)
def upgrade():
    """Add TranslationUrls.automatic (indexed) and backfill rows to True."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('TranslationUrls', sa.Column('automatic', sa.Boolean(), nullable=True))
    op.create_index(u'ix_TranslationUrls_automatic', 'TranslationUrls', ['automatic'], unique=False)
    ### end Alembic commands ###
    # All pre-existing translation URLs are considered automatic.
    with app.app_context():
        db.session.execute(translation_urls.update().values(automatic = True))
        db.session.commit()
def downgrade():
    """Remove the automatic column and its index from TranslationUrls."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_TranslationUrls_automatic', table_name='TranslationUrls')
    op.drop_column('TranslationUrls', 'automatic')
    ### end Alembic commands ###
| {
"repo_name": "go-lab/appcomposer",
"path": "alembic/versions/4e20dff98010_add_automatic_to_translationurl.py",
"copies": "3",
"size": "1257",
"license": "bsd-2-clause",
"hash": 1818769536224944000,
"line_mean": 29.6585365854,
"line_max": 100,
"alpha_frac": 0.7104216388,
"autogenerated": false,
"ratio": 3.501392757660167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5711814396460166,
"avg_score": null,
"num_lines": null
} |
# addautoscalerules.py - example program to add autoscale rules to a VM scale set
# in this example the resource group, scale set, and rules are hardcoded
# to do: make this into a generic program to turn a regular scale set into an autoscale scale set
import json
import sys  # bug fix: sys.exit() is called below but sys was never imported

import azurerm

# Load Azure app defaults
try:
    with open('azurermconfig.json') as configFile:
        configData = json.load(configFile)
except FileNotFoundError:
    print("Error: Expecting azurermconfig.json in current folder")
    sys.exit()

tenant_id = configData['tenantId']
app_id = configData['appId']
app_secret = configData['appSecret']
subscription_id = configData['subscriptionId']

# hardcode vmss parameters for now
rgname = 'sgelastic'
vmssname = 'sgelastic'

# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

# figure out the scale set's location from its resource group
try:
    rg = azurerm.get_resource_group(access_token, subscription_id, rgname)
    location = rg['location']
except KeyError:
    print('Cannot find resource group ' + rgname + '. Check connection/authorization.')
    print(json.dumps(rg, sort_keys=False, indent=2, separators=(',', ': ')))
    sys.exit()
print('location = ' + location)

# create autoscale rules: scale out by 1 VM above 60% CPU, scale in below 10%
print('Creating autoscale rules')
metric_name = 'Percentage CPU'
operator = 'GreaterThan'
threshold = 60
direction = 'Increase'
change_count = 1
rule1 = azurerm.create_autoscale_rule(subscription_id, rgname, vmssname, metric_name, operator, \
    threshold, direction, change_count)
threshold = 10
operator = 'LessThan'
direction = 'Decrease'
rule2 = azurerm.create_autoscale_rule(subscription_id, rgname, vmssname, metric_name, operator, \
    threshold, direction, change_count)
rules = [rule1, rule2]
# print(json.dumps(rules, sort_keys=False, indent=2, separators=(',', ': ')))

# create autoscale setting
setting_name = "SGELASTIC autoscale settings"
print('Creating autoscale setting: ' + setting_name)
# capacity bounds; renamed from min/max to avoid shadowing the builtins
capacity_min = 999
capacity_max = 1000
capacity_default = 1000
response = azurerm.create_autoscale_setting(access_token, subscription_id, rgname, setting_name, \
    vmssname, location, capacity_min, capacity_max, capacity_default, rules)
# 201 Created indicates success
if response.status_code != 201:
    print("Autoscale setting create error: " + str(response.status_code))
else:
    print("Autoscale settings created.")
| {
"repo_name": "gbowerman/vmsstools",
"path": "cpuload/addautoscalerules.py",
"copies": "1",
"size": "2262",
"license": "mit",
"hash": -4919965661857571000,
"line_mean": 33.8,
"line_max": 98,
"alpha_frac": 0.7360742706,
"autogenerated": false,
"ratio": 3.4961360123647602,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9706501586378282,
"avg_score": 0.005141739317295469,
"num_lines": 65
} |
"""Add a variant note.
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
# Load environment variables for request authentication parameters
# (fail fast at import time if the required credentials are not set).
if "FABRIC_API_PASSWORD" not in os.environ:
    sys.exit("FABRIC_API_PASSWORD environment variable missing")
if "FABRIC_API_LOGIN" not in os.environ:
    sys.exit("FABRIC_API_LOGIN environment variable missing")
FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
# Base URL is overridable via FABRIC_API_URL for non-production environments.
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def add_variant_note(cr_id, report_variant_id, note):
    """POST an internal note onto one variant of a clinical report.

    Returns the ``requests`` response object unchanged.
    """
    endpoint = "{}/reports/{}/variants/{}/internal_notes".format(
        FABRIC_API_URL, cr_id, report_variant_id)
    payload = {"note": note}
    sys.stdout.flush()
    return requests.post(endpoint, auth=auth, json=payload)
def main():
    """Parse the command line and attach an internal note to a report variant."""
    parser = argparse.ArgumentParser(
        description='Add an internal note to a clinical report variant.')
    parser.add_argument('cr_id', metavar='clinical_report_id', type=int)
    parser.add_argument('report_variant_id', metavar='report_variant_id', type=int)
    parser.add_argument('note', metavar='note', type=str)
    args = parser.parse_args()

    response = add_variant_note(args.cr_id, args.report_variant_id, args.note)
    # Pretty-print the JSON response; fall back to raw text on failure.
    try:
        sys.stdout.write(json.dumps(response.json(), indent=4))
    except KeyError:
        sys.stderr.write(response.text)
    sys.stdout.write('\n')
if __name__ == "__main__":
main()
| {
"repo_name": "Omicia/omicia_api_examples",
"path": "python/ClinicalReportLaunchers/add_report_variant_note.py",
"copies": "1",
"size": "1877",
"license": "mit",
"hash": 649081803685233900,
"line_mean": 30.2833333333,
"line_max": 102,
"alpha_frac": 0.6941928609,
"autogenerated": false,
"ratio": 3.3398576512455516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4534050512145551,
"avg_score": null,
"num_lines": null
} |
"""Add avatar file id to user.
Revision ID: fe6ff06f1f5b
Revises: 1f4385bac8f9
Create Date: 2018-03-30 21:46:07.421190
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
import re
import glob
from app import hashfs
from app.models.base_model import BaseEntity
from app.enums import FileCategory
# revision identifiers, used by Alembic.
revision = 'fe6ff06f1f5b'
down_revision = '1f4385bac8f9'
Base = declarative_base()
db = sa
db.Model = Base
db.relationship = relationship
filename_regex = re.compile(r'.+/avatar_(\d+)\.+(.+)')
class File(db.Model, BaseEntity):
    """Minimal local mirror of the ``file`` model, declared in the migration
    so it does not depend on the application's (possibly newer) models."""
    __tablename__ = 'file'
    # HashFS content address of the stored file
    hash = db.Column(db.String(200), nullable=False)
    extension = db.Column(db.String(20), nullable=False)
    category = db.Column(db.Enum(FileCategory, name='file_category'),
                         nullable=False)
    display_name = db.Column(db.String(200))
class User(db.Model, BaseEntity):
    """Minimal local mirror of the ``user`` model (avatar columns only)."""
    __tablename__ = 'user'
    avatar_file_id = db.Column(db.Integer, db.ForeignKey('file.id'))
    avatar_file = db.relationship(File, foreign_keys=[avatar_file_id],
                                  lazy='joined')
def migrate_files():
    """Import every on-disk avatar into HashFS and link it to its user.

    Commits once per migrated avatar so progress survives a mid-run failure.
    """
    print("Migrating all avatars to HashFS")
    avatar_paths = glob.glob('app/static/files/users/avatar_*')
    total = len(avatar_paths)
    for idx, path in enumerate(avatar_paths, start=1):
        # Progress indicator every 10 avatars.
        if idx % 10 == 0:
            print("{}/{}".format(idx, total))
        match = filename_regex.match(path)
        if not match:
            print("Filename does not have correct format:", path)
            continue
        user = db.session.query(User).get(int(match.group(1)))
        if user is None:
            # Orphaned avatar file: no matching user row.
            continue
        with open(path, 'rb') as reader:
            address = hashfs.put(reader)
        avatar_file = File()
        avatar_file.category = FileCategory.USER_AVATAR
        avatar_file.hash = address.id
        avatar_file.extension = match.group(2)
        user.avatar_file = avatar_file
        db.session.add(avatar_file)
        db.session.commit()
def create_session():
    """Bind a fresh ORM session to Alembic's connection and expose it
    as ``db.session`` for the model classes above."""
    bind = op.get_bind()
    db.session = sa.orm.sessionmaker()(bind=bind)
def upgrade():
    """Add user.avatar_file_id (FK to file) and migrate existing avatars.

    If the data migration fails, the schema change is rolled back via
    downgrade() and the failure is re-raised.
    """
    create_session()
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('avatar_file_id', sa.Integer(), nullable=True))
    op.create_foreign_key(op.f('fk_user_avatar_file_id_file'), 'user', 'file', ['avatar_file_id'], ['id'])
    try:
        migrate_files()
    except BaseException:  # was a bare except: -- same semantics, explicit
        # NOTE(review): downgrade() below raises unconditionally, so the
        # original migration error gets chained behind "Undoing this
        # migration is impossible" -- confirm this is the intended behavior.
        downgrade()
        raise
    # ### end Alembic commands ###
def downgrade():
    """Refuse to reverse this migration: avatars were moved into HashFS
    and the on-disk files are gone."""
    raise Exception("Undoing this migration is impossible")
    # The schema-only part would have been:
    # op.drop_constraint(op.f('fk_user_avatar_file_id_file'), 'user', type_='foreignkey')
    # op.drop_column('user', 'avatar_file_id')
# vim: ft=python
| {
"repo_name": "viaict/viaduct",
"path": "migrations/versions/2018_03_30_fe6ff06f1f5b_add_avatar_file_id_to_user.py",
"copies": "1",
"size": "3083",
"license": "mit",
"hash": 2054009350335641000,
"line_mean": 23.8629032258,
"line_max": 106,
"alpha_frac": 0.6282841388,
"autogenerated": false,
"ratio": 3.479683972911964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46079681117119636,
"avg_score": null,
"num_lines": null
} |
"""add aw3 job run id to genomic_set_member
Revision ID: 7029234abc61
Revises: d5d97368b14d
Create Date: 2020-08-03 13:46:15.629635
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7029234abc61'
down_revision = 'd5d97368b14d'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (upgrade_<engine>)."""
    handler = globals()["upgrade_" + engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (downgrade_<engine>)."""
    handler = globals()["downgrade_" + engine_name]
    handler()
def upgrade_rdr():
    """Add array/WGS AW3 manifest job-run id columns (FKs to genomic_job_run)
    on genomic_set_member and its history table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('genomic_set_member', sa.Column('arr_aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('arr_aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member', sa.Column('wgs_aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('wgs_aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    # The FKs are created unnamed (name=None), so the backend picks the name;
    # NOTE(review): downgrade_rdr also passes None -- confirm it can drop them.
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['arr_aw3_manifest_job_run_id'], ['id'])
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['wgs_aw3_manifest_job_run_id'], ['id'])
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the AW3 manifest job-run columns added in upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint() is called with name=None (the autogen
    # placeholder). Alembic needs a real constraint name here, so this will
    # likely fail at runtime -- fill in the database-generated FK names.
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.drop_column('genomic_set_member', 'wgs_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'wgs_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member', 'arr_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'arr_aw3_manifest_job_run_id')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No changes to the metrics database in this revision."""
def downgrade_metrics():
    """No changes to the metrics database in this revision."""
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/7029234abc61_add_aw3_job_run_id_to_genomic_set_member.py",
"copies": "1",
"size": "2128",
"license": "bsd-3-clause",
"hash": 9215649131656247000,
"line_mean": 33.3225806452,
"line_max": 118,
"alpha_frac": 0.6781015038,
"autogenerated": false,
"ratio": 2.9971830985915493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4175284602391549,
"avg_score": null,
"num_lines": null
} |
"""Add award, genre and their relationship tables. Add columns to work.
Revision ID: 2e3e3a9a625
Revises: 441c936f555
Create Date: 2014-08-14 14:45:56.769052
"""
from alembic import context, op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2e3e3a9a625'
down_revision = '441c936f555'
driver_name = context.get_bind().dialect.name
def upgrade():
    """Create the award/genre tables with their association tables, then
    rework ``works`` (title -> name, drop dop, add detail columns).

    Behaviour-preserving restyle: every op call is issued in the same
    order as the autogenerated original.
    """
    def _named_table(name):
        # genres and awards share the same shape: id PK, name, created_at.
        op.create_table(name,
                        sa.Column('id', sa.Integer(), nullable=False),
                        sa.Column('name', sa.String(), nullable=False),
                        sa.Column('created_at', sa.DateTime(timezone=True),
                                  nullable=False),
                        sa.PrimaryKeyConstraint('id'))
        op.create_index(op.f('ix_%s_created_at' % name), name,
                        ['created_at'], unique=False)
        op.create_index(op.f('ix_%s_name' % name), name, ['name'],
                        unique=False)

    def _assoc_table(name, left, right, left_ref, right_ref):
        # Many-to-many link table with a composite primary key.
        op.create_table(name,
                        sa.Column(left, sa.Integer(), nullable=False),
                        sa.Column(right, sa.Integer(), nullable=False),
                        sa.Column('created_at', sa.DateTime(timezone=True),
                                  nullable=False),
                        sa.ForeignKeyConstraint([right], [right_ref], ),
                        sa.ForeignKeyConstraint([left], [left_ref], ),
                        sa.PrimaryKeyConstraint(left, right))

    _named_table('genres')
    _named_table('awards')
    _assoc_table('award_winners', 'person_id', 'award_id',
                 'people.id', 'awards.id')
    _assoc_table('work_awards', 'work_id', 'award_id',
                 'works.id', 'awards.id')
    _assoc_table('work_genres', 'work_id', 'genre_id',
                 'works.id', 'genres.id')

    # Rework the works table.
    op.drop_index('ix_works_title', table_name='works')
    op.add_column('works', sa.Column('isbn', sa.String(), nullable=True))
    op.add_column('works', sa.Column('number_of_pages', sa.Integer(),
                                     nullable=True))
    op.add_column('works', sa.Column('published_at', sa.Date(),
                                     nullable=True))
    op.alter_column('works', 'title', new_column_name='name')
    op.drop_column('works', 'dop')
    op.create_index(op.f('ix_works_created_at'), 'works', ['created_at'],
                    unique=False)
    op.create_index(op.f('ix_works_name'), 'works', ['name'], unique=False)
def downgrade():
    """Reverse the upgrade: drop the award/genre tables and restore the
    old shape of ``works``.

    Bug fix: the autogenerated version re-added ``title`` as a fresh
    NOT NULL column and dropped ``name`` -- that fails on a populated
    table (no default for the NOT NULL column) and discards every title.
    Since the upgrade renamed title -> name, the correct reverse is to
    rename name -> title.
    """
    op.drop_table('work_genres')
    op.drop_table('work_awards')
    op.drop_table('award_winners')
    op.drop_index(op.f('ix_awards_name'), table_name='awards')
    op.drop_index(op.f('ix_awards_created_at'), table_name='awards')
    op.drop_table('awards')
    op.drop_index(op.f('ix_genres_name'), table_name='genres')
    op.drop_index(op.f('ix_genres_created_at'), table_name='genres')
    op.drop_table('genres')
    op.drop_index(op.f('ix_works_created_at'), table_name='works')
    op.drop_index(op.f('ix_works_name'), table_name='works')
    op.add_column('works', sa.Column('dop', sa.Date(), autoincrement=False,
                                     nullable=True))
    # Rename back instead of add-then-drop (preserves the data).
    op.alter_column('works', 'name', new_column_name='title')
    op.create_index('ix_works_title', 'works', ['title'], unique=False)
    op.drop_column('works', 'published_at')
    op.drop_column('works', 'number_of_pages')
    op.drop_column('works', 'isbn')
| {
"repo_name": "clicheio/cliche",
"path": "cliche/migrations/versions/2e3e3a9a625_add_award_genre_and_their_relationship_.py",
"copies": "2",
"size": "4674",
"license": "mit",
"hash": -3187338375931962400,
"line_mean": 45.74,
"line_max": 79,
"alpha_frac": 0.5571245186,
"autogenerated": false,
"ratio": 3.6630094043887147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 100
} |
"""add_award_procurement
Revision ID: 73db7d2cc754
Revises: 31876fecc214
Create Date: 2016-09-01 15:08:33.267152
"""
# revision identifiers, used by Alembic.
revision = '73db7d2cc754'
down_revision = '31876fecc214'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Run the upgrade for the given engine (upgrade_<engine_name>)."""
    globals()['upgrade_{0}'.format(engine_name)]()
def downgrade(engine_name):
    """Run the downgrade for the given engine (downgrade_<engine_name>)."""
    globals()['downgrade_{0}'.format(engine_name)]()
def upgrade_data_broker():
    """Create the award_procurement table (one row per contract award record
    per submission) plus indexes on job_id and submission_id.

    Behaviour-preserving restyle of the Alembic autogenerated DDL: the
    column order is identical to the original migration.
    """
    def _text(name):
        # Almost every FPDS field is stored as nullable free text.
        return sa.Column(name, sa.Text(), nullable=True)

    leading_text_fields = (
        'piid', 'awarding_sub_tier_agency_c', 'awarding_sub_tier_agency_n',
        'awarding_agency_code', 'awarding_agency_name', 'parent_award_id',
        'award_modification_amendme', 'type_of_contract_pricing',
        'contract_award_type', 'naics', 'naics_description',
        'awardee_or_recipient_uniqu', 'ultimate_parent_legal_enti',
        'ultimate_parent_unique_ide', 'award_description',
        'place_of_performance_zip4a', 'place_of_performance_congr',
        'awardee_or_recipient_legal', 'legal_entity_city_name',
        'legal_entity_state_code', 'legal_entity_zip4',
        'legal_entity_congressional', 'legal_entity_address_line1',
        'legal_entity_address_line2', 'legal_entity_address_line3',
        'legal_entity_country_code', 'legal_entity_country_name',
        'period_of_performance_star', 'period_of_performance_curr',
        'period_of_perf_potential_e', 'ordering_period_end_date',
        'action_date', 'action_type', 'federal_action_obligation',
    )
    trailing_text_fields = (
        'funding_sub_tier_agency_co', 'funding_sub_tier_agency_na',
        'funding_office_code', 'funding_office_name', 'awarding_office_code',
        'awarding_office_name', 'referenced_idv_agency_iden',
        'funding_agency_code', 'funding_agency_name',
        'place_of_performance_locat', 'place_of_performance_state',
        'place_of_perform_country_c', 'idv_type',
        'vendor_doing_as_business_n', 'vendor_phone_number',
        'vendor_fax_number', 'multiple_or_single_award_i', 'type_of_idc',
        'a_76_fair_act_action', 'dod_claimant_program_code',
        'clinger_cohen_act_planning', 'commercial_item_acquisitio',
        'commercial_item_test_progr', 'consolidated_contract',
        'contingency_humanitarian_o', 'contract_bundling',
        'contract_financing', 'contracting_officers_deter',
        'cost_accounting_standards', 'cost_or_pricing_data',
        'country_of_product_or_serv', 'davis_bacon_act',
        'evaluated_preference', 'extent_competed', 'fed_biz_opps',
        'foreign_funding', 'government_furnished_equip',
        'information_technology_com', 'interagency_contracting_au',
        'local_area_set_aside', 'major_program',
        'purchase_card_as_payment_m', 'multi_year_contract',
        'national_interest_action', 'number_of_actions',
        'number_of_offers_received', 'other_statutory_authority',
        'performance_based_service', 'place_of_manufacture',
        'price_evaluation_adjustmen', 'product_or_service_code',
        'program_acronym', 'other_than_full_and_open_c',
        'recovered_materials_sustai', 'research', 'sea_transportation',
        'service_contract_act', 'small_business_competitive',
        'solicitation_identifier', 'solicitation_procedures',
        'fair_opportunity_limited_s', 'subcontracting_plan',
        'program_system_or_equipmen', 'type_set_aside',
        'epa_designated_product', 'walsh_healey_act', 'transaction_number',
        'sam_exception', 'city_local_government', 'county_local_government',
        'inter_municipal_local_gove', 'local_government_owned',
        'municipality_local_governm', 'school_district_local_gove',
        'township_local_government', 'us_state_government',
        'us_federal_government', 'federal_agency',
        'federally_funded_research', 'us_tribal_government',
        'foreign_government', 'community_developed_corpor',
        'labor_surplus_area_firm', 'corporate_entity_not_tax_e',
        'corporate_entity_tax_exemp', 'partnership_or_limited_lia',
        'sole_proprietorship', 'small_agricultural_coopera',
        'international_organization', 'us_government_entity',
        'emerging_small_business', 'c8a_program_participant',
        'sba_certified_8_a_joint_ve', 'dot_certified_disadvantage',
        'self_certified_small_disad', 'historically_underutilized',
        'small_disadvantaged_busine', 'the_ability_one_program',
        'historically_black_college', 'c1862_land_grant_college',
        'c1890_land_grant_college', 'c1994_land_grant_college',
        'minority_institution', 'private_university_or_coll',
        'school_of_forestry', 'state_controlled_instituti',
        'tribal_college', 'veterinary_college', 'educational_institution',
        'alaskan_native_servicing_i', 'community_development_corp',
        'native_hawaiian_servicing', 'domestic_shelter',
        'manufacturer_of_goods', 'hospital_flag', 'veterinary_hospital',
        'hispanic_servicing_institu', 'foundation', 'woman_owned_business',
        'minority_owned_business', 'women_owned_small_business',
        'economically_disadvantaged', 'joint_venture_women_owned',
        'joint_venture_economically', 'veteran_owned_business',
        'service_disabled_veteran_o', 'contracts', 'grants',
        'receives_contracts_and_gra', 'airport_authority',
        'council_of_governments', 'housing_authorities_public',
        'interstate_entity', 'planning_commission', 'port_authority',
        'transit_authority', 'subchapter_s_corporation',
        'limited_liability_corporat', 'foreign_owned_and_located',
        'american_indian_owned_busi', 'alaskan_native_owned_corpo',
        'indian_tribe_federally_rec', 'native_hawaiian_owned_busi',
        'tribally_owned_business', 'asian_pacific_american_own',
        'black_american_owned_busin', 'hispanic_american_owned_bu',
        'native_american_owned_busi', 'subcontinent_asian_asian_i',
        'other_minority_owned_busin', 'for_profit_organization',
        'nonprofit_organization', 'other_not_for_profit_organ',
        'us_local_government', 'referenced_idv_modificatio',
        'undefinitized_action', 'domestic_or_foreign_entity',
    )

    columns = [
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('award_procurement_id', sa.Integer(), nullable=False),
        sa.Column('submission_id', sa.Integer(), nullable=False),
        sa.Column('job_id', sa.Integer(), nullable=False),
        sa.Column('row_number', sa.Integer(), nullable=False),
    ]
    columns.extend(_text(name) for name in leading_text_fields)
    # The two dollar-amount fields are numeric, not text.
    columns.append(sa.Column('current_total_value_award', sa.Numeric(),
                             nullable=True))
    columns.append(sa.Column('potential_total_value_awar', sa.Numeric(),
                             nullable=True))
    columns.extend(_text(name) for name in trailing_text_fields)
    columns.append(sa.PrimaryKeyConstraint('award_procurement_id'))

    op.create_table('award_procurement', *columns)
    op.create_index(op.f('ix_award_procurement_job_id'), 'award_procurement',
                    ['job_id'], unique=False)
    op.create_index(op.f('ix_award_procurement_submission_id'),
                    'award_procurement', ['submission_id'], unique=False)
def downgrade_data_broker():
    """Drop the award_procurement table and its two indexes."""
    table_name = 'award_procurement'
    for index_name in ('ix_award_procurement_submission_id',
                       'ix_award_procurement_job_id'):
        op.drop_index(op.f(index_name), table_name=table_name)
    op.drop_table(table_name)
| {
"repo_name": "chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend",
"path": "dataactcore/migrations/versions/73db7d2cc754_add_award_procurement.py",
"copies": "2",
"size": "14550",
"license": "cc0-1.0",
"hash": 228395524746683330,
"line_mean": 58.387755102,
"line_max": 117,
"alpha_frac": 0.6843298969,
"autogenerated": false,
"ratio": 3.1699346405228757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9767098915601853,
"avg_score": 0.017433124364204636,
"num_lines": 245
} |
""" Add bambukupdatelogs table
Revision ID: 2b194f6c44e1
Revises: start_bambuk
Create Date: 2016-07-25 17:08:30.135729
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2b194f6c44e1'
down_revision = 'start_bambuk'
def upgrade():
    """Create the bambukupdatelogs table, which journals pending update
    operations (one row per object action, with retry bookkeeping)."""
    op.create_table(
        'bambukupdatelogs',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('tenant_id', sa.String(length=255), nullable=False),
        sa.Column('obj_id', sa.String(length=36), nullable=True),
        sa.Column('obj_type', sa.String(length=36), nullable=True),
        sa.Column('action_type', sa.String(length=36), nullable=False),
        sa.Column('created_at', sa.DateTime, nullable=False),
        # Retry bookkeeping: attempt counter plus last/next attempt times.
        sa.Column('nb_retry', sa.SmallInteger, default=0, nullable=False),
        sa.Column('last_retry', sa.DateTime, nullable=True),
        sa.Column('next_retry', sa.DateTime, nullable=True),
        sa.Column('extra_id', sa.String(length=36), nullable=True),
        sa.Column('extra_data', sa.String(length=255), nullable=True),
    )


def downgrade():
    """Reverse of upgrade(); the original migration had no downgrade."""
    op.drop_table('bambukupdatelogs')
| {
"repo_name": "lionelz/networking-bambuk",
"path": "networking_bambuk/db/migration/alembic_migrations/versions/2b194f6c44e1_port_updates_log.py",
"copies": "1",
"size": "1083",
"license": "apache-2.0",
"hash": -8181295649929476000,
"line_mean": 32.84375,
"line_max": 74,
"alpha_frac": 0.6648199446,
"autogenerated": false,
"ratio": 3.1482558139534884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43130757585534885,
"avg_score": null,
"num_lines": null
} |
"""add BankAccount used for usersheets to config
Revision ID: cd588620e7d0
Revises: a32def81e36a
Create Date: 2018-09-10 21:54:26.977248
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table
import pycroft
# revision identifiers, used by Alembic.
revision = 'cd588620e7d0'
down_revision = 'a32def81e36a'
branch_labels = None
depends_on = None
def upgrade():
    """Add config.membership_fee_bank_account_id (FK to bank_account),
    point it at the existing bank account, then make it NOT NULL."""
    # has to be nullable since there is actually no data...
    op.add_column('config', sa.Column('membership_fee_bank_account_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'config', 'bank_account', ['membership_fee_bank_account_id'], ['id'])
    # ...insert data...
    config = table(
        'config',
        sa.Column('membership_fee_bank_account_id', sa.Integer()),
        # Other columns not needed for the data migration
    )
    # NOTE(review): bank account id 1 is hard-coded -- assumes the first
    # bank_account row is the membership-fee account; confirm per database.
    op.execute(
        config
        .update()
        .values({'membership_fee_bank_account_id': 1})
    )
    # ...set NOT NULL
    op.alter_column('config', 'membership_fee_bank_account_id', nullable=False)
def downgrade():
    """Drop the membership-fee bank account FK and column from config."""
    # NOTE(review): the FK was created with name=None in upgrade(); passing
    # None to drop_constraint may fail on some backends -- confirm the
    # database-generated constraint name.
    op.drop_constraint(None, 'config', type_='foreignkey')
    op.drop_column('config', 'membership_fee_bank_account_id')
| {
"repo_name": "agdsn/pycroft",
"path": "pycroft/model/alembic/versions/cd588620e7d0_add_bankaccount_used_for_usersheets_to_.py",
"copies": "2",
"size": "1212",
"license": "apache-2.0",
"hash": 1314137969763706400,
"line_mean": 27.8571428571,
"line_max": 101,
"alpha_frac": 0.6707920792,
"autogenerated": false,
"ratio": 3.3854748603351954,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001549104490280961,
"num_lines": 42
} |
"""add battle duration
Revision ID: 3a763038a0c9
Revises: 18b90187fbae
Create Date: 2013-12-09 19:29:49.336000
"""
# revision identifiers, used by Alembic.
revision = '3a763038a0c9'
down_revision = '18b90187fbae'
from alembic import op
import sqlalchemy as sa
from whyattend.model import Base, Battle, db_session
Base.metadata.bind = op.get_bind()
def upgrade():
    """Add ``battle.duration`` and backfill it from each battle's stored replay pickle."""
    op.add_column('battle', sa.Column('duration', sa.Integer, default=0, nullable=True))
    # Parse battle duration from existing replay pickles
    for battle in Battle.query.all():
        replay = battle.replay
        if replay and replay.replay_pickle:
            try:
                replay_data = replay.unpickle()
                pickle = replay_data['pickle']
                battle.duration = int(pickle['common']['duration'])
            except Exception as e:
                # Best effort: a battle with an unreadable pickle keeps a
                # NULL duration. (Python 2 print statement — file predates py3.)
                print "Error parsing pickle of battle " + str(battle.id), e
                pass
    db_session.commit()
def downgrade():
    """Remove the ``battle.duration`` column."""
    op.drop_column('battle', 'duration')
| {
"repo_name": "ceari/whyattend",
"path": "whyattend/alembic/versions/3a763038a0c9_add_battle_duration.py",
"copies": "1",
"size": "1025",
"license": "bsd-2-clause",
"hash": -5545662740111849000,
"line_mean": 26.7027027027,
"line_max": 88,
"alpha_frac": 0.6468292683,
"autogenerated": false,
"ratio": 3.546712802768166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4693542071068166,
"avg_score": null,
"num_lines": null
} |
# add bayespy dir
import matplotlib.pyplot as plt
import numpy as np
from . import settings
from .influence import ProbTable
# import math
# Module-wide cache dictionary shared by every CacheMixin instance.
_cache = None


def set_cache(cache):
    """Install *cache* (a dict-like object) as the shared module-level cache."""
    global _cache
    _cache = cache
_id = 0
def _get_id():
global _id
id = _id
_id += 1
return id
class CacheMixin(object):
    """Mixin giving subclasses a prefixed view onto the shared module cache.

    Each instance gets ``self.cache``; attribute reads on it return a
    ``(found, value)`` pair and attribute writes store the value under the
    namespaced key ``"<prefix>_<name>"`` in the shared cache dictionary.
    """

    def __init__(self, prefix):
        self.cache = self._Cache(prefix, _cache)

    class _Cache(object):
        def __init__(self, prefix, cache_dict):
            # Write through __dict__ directly to bypass our own __setattr__.
            self.__dict__['_cache'] = cache_dict
            self.__dict__['_prefix'] = prefix

        def _get_key_name(self, name):
            """Return the prefix-namespaced cache key for *name*."""
            return "%s_%s" % (self._prefix, name)

        def __getattr__(self, name):
            # BUG FIX: the original tested ``not self._cache``, which also
            # rejected a valid-but-empty cache dict (a fresh cache could
            # never be read). Only a missing cache (None) is an error.
            if self._cache is None:
                raise Exception('Cache is None')
            p_name = self._get_key_name(name)
            if p_name in self._cache:
                return True, self._cache[p_name]
            else:
                return False, None

        def __setattr__(self, name, value):
            # Same fix as __getattr__: an empty dict is a usable cache;
            # with the old check a fresh empty cache could never be written.
            if self._cache is None:
                raise Exception('Cache is None')
            p_name = self._get_key_name(name)
            self._cache[p_name] = value
class Node(CacheMixin):
    """Base class for nodes in the sampling network.

    Values cached through CacheMixin: ``histogram`` and ``samples``.
    """

    def __init__(self, *argv, **kargv):
        self.id = _get_id()
        self.successors = []
        self.predecessor = []
        # The node id doubles as the cache-key prefix for CacheMixin.
        super(Node, self).__init__(str(self.id))

    def add_successors(self, *nodes):
        """Append *nodes* as successors (skipping duplicates) and record the
        reverse edge on each of them."""
        for node in nodes:
            if node not in self.successors:
                self.successors.append(node)
                node.add_predecessor([self, ])

    def add_predecessor(self, nodes):
        """Append each node in *nodes* as a predecessor, skipping duplicates."""
        for node in nodes:
            if node not in self.predecessor:
                self.predecessor.append(node)

    def get_histogram(self, recalc=True):
        """Return a histogram of this node's samples.

        With ``recalc`` (the default) the histogram is recomputed from fresh
        samples and stored in the cache; otherwise a cached value is returned
        when available.
        """
        cached, cache_hist = self.cache.histogram
        if not cached or recalc:
            g_samples = self.get_samples()
            # print(g_samples)
            # NOTE(review): ``normed`` was removed in Matplotlib 3.x
            # (replaced by ``density``) — confirm the pinned version.
            h = plt.hist(g_samples, bins=settings.NumberOfBins, normed=True)
            self.cache.histogram = h
            return h
        else:
            return cache_hist

    def draw_bar(self):
        """Render the (possibly freshly computed) histogram as a bar chart."""
        cached, cache_hist = self.cache.histogram
        if not cached:
            cache_hist = self.get_histogram()
        h = cache_hist
        heights = h[0]
        lefts = h[1][:len(h[1]) - 1]  # drop the trailing bin edge
        width = lefts[1] - lefts[0] if len(lefts) > 1 else 0.8
        plt.bar(lefts, heights, width=width)
        plt.show()

    def get_samples_iter(self, number=None):
        """Yield samples one at a time; ``number`` defaults to
        settings.NumberOfSample."""
        n = number if number else settings.NumberOfSample
        i = 0
        while i < n:
            yield self.get_samples(1)[0]
            i += 1

    def get_samples_cache(self):
        """Return settings.NumberOfSample samples, computing and caching them
        on first use; later calls serve the cached list."""
        cached, cache_samples = self.cache.samples
        if not cached:
            print('Calc samples of node %s' % str(self.id))
            n = settings.NumberOfSample
            cache_samples = self.get_samples(n)
            self.cache.samples = cache_samples
        else:
            print('Hit cache node %s' % str(self.id))
        return cache_samples

    def get_samples(self, number=None):
        """Subclasses must return a sequence of ``number`` samples."""
        raise NotImplementedError('A Node need implement get_samples')
class TriangularNode(Node):
    """Node whose samples follow a triangular distribution."""

    def __init__(self, **kargv):
        super(TriangularNode, self).__init__(**kargv)
        # Distribution bounds and mode; defaults give [0, 2] with mode 1.
        self.left = kargv.get('left', 0)
        self.right = kargv.get('right', 2)
        self.mode = kargv.get('mode', 1)

    def get_samples(self, number=None):
        """Draw ``number`` samples (defaults to settings.NumberOfSample)."""
        count = number if number else settings.NumberOfSample
        return np.random.triangular(self.left, self.mode, self.right, count)
class GaussianNode(Node):
    """Node whose samples follow a normal (Gaussian) distribution."""

    def __init__(self, **kargv):
        super(GaussianNode, self).__init__(**kargv)
        # Mean and standard deviation; defaults are a tight N(0, 0.01).
        self.loc = kargv.get('loc', 0)
        self.scale = kargv.get('scale', 0.01)

    def get_samples(self, number=None):
        """Draw ``number`` samples (defaults to settings.NumberOfSample)."""
        count = number if number else settings.NumberOfSample
        return np.random.normal(loc=self.loc, scale=self.scale, size=count)
class TableNode(Node):
    """Node that draws samples from a discrete probability table."""

    def __init__(self, **kargv):
        super(TableNode, self).__init__(**kargv)
        # Probability weights; the sample values are their indices.
        self.values = kargv['values']

    def get_samples(self, number=None):
        """Draw ``number`` samples (defaults to settings.NumberOfSample)."""
        count = number if number else settings.NumberOfSample
        dist = ProbTable(self.values, range(len(self.values)))
        return dist.generate(count)
class TempNode(Node):
    """Node whose samples are supplied externally and served from the cache."""

    def __init__(self, **kargv):
        # BUG FIX: the original called ``super(...).__init__(*kargv)``, which
        # unpacked the keyword dict's *keys* as positional arguments.
        # ``**kargv`` forwards the keyword arguments as intended.
        super(TempNode, self).__init__(**kargv)
        self.cache.samples = kargv.get('samples', None)

    def set_samples(self, samples):
        """Replace the cached sample list."""
        self.cache.samples = samples

    def get_samples(self, number=None):
        """Return the cached samples (``number`` is ignored)."""
        cached, cache_samples = self.cache.samples
        return cache_samples
class ConstantNode(Node):
    """Node that always yields one constant value."""

    def __init__(self, **kargv):
        super(ConstantNode, self).__init__(**kargv)
        self.value = kargv.get('value', 0)

    def get_samples(self, number=None):
        """Return ``number`` copies of the constant (defaults to
        settings.NumberOfSample)."""
        count = number if number else settings.NumberOfSample
        return [self.value] * count
class MaxAddValueNode(Node):
    """Node returning, per sample index, the maximum over its successors'
    samples plus a constant ``add_value``."""

    def __init__(self, *argv, **kargv):
        super(MaxAddValueNode, self).__init__(**kargv)
        self.add_value = kargv.get('add_value', 0)
        if argv:
            self.add_successors(*argv)

    def set_add_value(self, value):
        """Set the constant added on top of the per-index maximum."""
        self.add_value = value

    def get_samples(self, number=None):
        """Return element-wise ``max`` over successor samples plus add_value.

        ``number`` is ignored; the sample count always comes from
        settings.NumberOfSample (matches the original behaviour).
        """
        if not self.successors:
            return []
        n = settings.NumberOfSample
        succ_samples = [s.get_samples_cache() for s in self.successors]
        return [
            max(samples[i] for samples in succ_samples) + self.add_value
            for i in range(n)
        ]
class EquationNode(Node):
    """Node computing a weighted sum of its successors' samples."""

    def __init__(self, *argv, **kargv):
        super(EquationNode, self).__init__(**kargv)
        self.weight_map = []
        if argv:
            self.add_successors(*argv)
            self.weight_map = [1] * len(argv)

    def set_weight(self, weights):
        """Set per-successor weights.

        A falsy argument pads the existing weight list with 1s for any
        successors added since; otherwise the weights replace the list.
        """
        if not weights:
            missing = len(self.successors) - len(self.weight_map)
            self.weight_map.extend([1] * missing)
        else:
            self.weight_map = list(weights)

    def get_samples(self, number=None):
        """Return, per sample index, sum(weight[j] * successor_j_sample[i]).

        ``number`` is ignored; the sample count always comes from
        settings.NumberOfSample (matches the original behaviour).
        """
        if not self.successors:
            return []
        n = settings.NumberOfSample
        succ_samples = [s.get_samples_cache() for s in self.successors]
        n_succ = len(succ_samples)
        return [
            sum(self.weight_map[j] * succ_samples[j][i] for j in range(n_succ))
            for i in range(n)
        ]
# def get_histogram(self, samples=None, bins=None):
# samples = settings.NumberOfSample if not samples else samples
# bins = self.NumberOfBins if not bins else bins
| {
"repo_name": "dungvtdev/upsbayescpm",
"path": "mybayes/nodes.py",
"copies": "1",
"size": "7759",
"license": "mit",
"hash": 8735096137337248000,
"line_mean": 27.3175182482,
"line_max": 77,
"alpha_frac": 0.5514885939,
"autogenerated": false,
"ratio": 3.682486948267679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9712616882579093,
"avg_score": 0.004271731917717319,
"num_lines": 274
} |
""" Add Binary
Given two binary strings a and b, return their sum as a binary string.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
Constraints:
* 1 <= a.length, b.length <= 104
* a and b consist only of '0' or '1' characters.
* Each string does not contain leading zeros except for the zero itself.1:
"""
class Solution:
    """Solver for LeetCode 67 — Add Binary."""

    def add_binary(self, a: str, b: str) -> str:
        """Return the sum of binary strings *a* and *b* as a binary string.

        Walks both strings from the least-significant digit with a carry,
        exactly like column addition. Fixes in this revision: the debug
        ``print`` that polluted stdout on every call is removed, and the
        inconsistent ``if``/``elif`` carry chain is replaced by arithmetic
        on the digit sum (same results for all inputs).
        """
        digits = []
        carry = 0
        i, j = len(a) - 1, len(b) - 1
        while i >= 0 or j >= 0 or carry:
            total = carry
            if i >= 0:
                total += int(a[i])
                i -= 1
            if j >= 0:
                total += int(b[j])
                j -= 1
            digits.append(str(total % 2))  # current output bit
            carry = total // 2             # carry into the next column
        return "".join(reversed(digits))
if __name__ == '__main__':
    # Smoke tests: (args, expected) pairs exercised through the solver.
    test_cases = [
        (["11", "1"], "100"),
        (["1010", "1011"], "10101"),
        (["0", "11"], "11"),
    ]
    for args, expected in test_cases:
        assert Solution().add_binary(*args) == expected
"repo_name": "aiden0z/snippets",
"path": "leetcode/067_add_binary.py",
"copies": "1",
"size": "1550",
"license": "mit",
"hash": -7675241468445535000,
"line_mean": 22.1492537313,
"line_max": 84,
"alpha_frac": 0.4109677419,
"autogenerated": false,
"ratio": 3.3916849015317285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43026526434317286,
"avg_score": null,
"num_lines": null
} |
"""add biobank history
Revision ID: 4aa79d5ac86e
Revises: e44e303ae759
Create Date: 2018-08-20 10:33:58.770173
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import BiobankOrderStatus
# revision identifiers, used by Alembic.
revision = "4aa79d5ac86e"
down_revision = "e44e303ae759"
branch_labels = None
depends_on = None
_BACKFILL_VERSION = """
update biobank_order
set version = 1
"""
_BACKFILL_HISTORY = """
insert into biobank_history (biobank_order_id,
participant_id,
log_position_id,
created,
collected_note,
processed_note,
finalized_note,
finalized_site_id,
finalized_username,
source_site_id,
collected_site_id,
collected_username,
processed_site_id,
processed_username,
source_username,
amended_biobank_order_id,
amended_reason,
amended_site_id,
amended_time,
amended_username,
cancelled_site_id,
cancelled_time,
cancelled_username,
last_modified,
order_status,
restored_site_id,
restored_time,
restored_username,
version
) select biobank_order_id,
participant_id,
log_position_id,
created,
collected_note,
processed_note,
finalized_note,
finalized_site_id,
finalized_username,
source_site_id,
collected_site_id,
collected_username,
processed_site_id,
processed_username,
source_username,
amended_biobank_order_id,
amended_reason,
amended_site_id,
amended_time,
amended_username,
cancelled_site_id,
cancelled_time,
cancelled_username,
last_modified,
order_status,
restored_site_id,
restored_time,
restored_username,
version from biobank_order
"""
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_rdr``)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_rdr``)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Create ``biobank_history`` and backfill it from ``biobank_order``.

    The history table mirrors the order columns and is keyed on
    (biobank_order_id, version).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "biobank_history",
        sa.Column("biobank_order_id", sa.String(length=80), nullable=False),
        sa.Column("source_username", sa.String(length=255), nullable=True),
        sa.Column("collected_username", sa.String(length=255), nullable=True),
        sa.Column("processed_username", sa.String(length=255), nullable=True),
        sa.Column("finalized_username", sa.String(length=255), nullable=True),
        sa.Column("order_status", model.utils.Enum(BiobankOrderStatus), nullable=True),
        sa.Column("amended_reason", sa.UnicodeText(), nullable=True),
        sa.Column("last_modified", model.utils.UTCDateTime(), nullable=True),
        sa.Column("restored_username", sa.String(length=255), nullable=True),
        sa.Column("restored_time", model.utils.UTCDateTime(), nullable=True),
        sa.Column("amended_username", sa.String(length=255), nullable=True),
        sa.Column("amended_time", model.utils.UTCDateTime(), nullable=True),
        sa.Column("cancelled_username", sa.String(length=255), nullable=True),
        sa.Column("cancelled_time", model.utils.UTCDateTime(), nullable=True),
        sa.Column("created", model.utils.UTCDateTime(), nullable=False),
        sa.Column("collected_note", sa.UnicodeText(), nullable=True),
        sa.Column("processed_note", sa.UnicodeText(), nullable=True),
        sa.Column("finalized_note", sa.UnicodeText(), nullable=True),
        sa.Column("version", sa.Integer(), nullable=False),
        sa.Column("cancelled_site_id", sa.Integer(), nullable=True),
        sa.Column("finalized_site_id", sa.Integer(), nullable=True),
        sa.Column("amended_biobank_order_id", sa.String(length=80), nullable=True),
        sa.Column("restored_site_id", sa.Integer(), nullable=True),
        sa.Column("processed_site_id", sa.Integer(), nullable=True),
        sa.Column("collected_site_id", sa.Integer(), nullable=True),
        sa.Column("participant_id", sa.Integer(), nullable=False),
        sa.Column("source_site_id", sa.Integer(), nullable=True),
        sa.Column("log_position_id", sa.Integer(), nullable=False),
        sa.Column("amended_site_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["amended_biobank_order_id"], ["biobank_order.biobank_order_id"]),
        sa.ForeignKeyConstraint(["amended_site_id"], ["site.site_id"]),
        sa.ForeignKeyConstraint(["cancelled_site_id"], ["site.site_id"]),
        sa.ForeignKeyConstraint(["collected_site_id"], ["site.site_id"]),
        sa.ForeignKeyConstraint(["finalized_site_id"], ["site.site_id"]),
        sa.ForeignKeyConstraint(["log_position_id"], ["log_position.log_position_id"]),
        sa.ForeignKeyConstraint(["participant_id"], ["participant.participant_id"]),
        sa.ForeignKeyConstraint(["processed_site_id"], ["site.site_id"]),
        sa.ForeignKeyConstraint(["restored_site_id"], ["site.site_id"]),
        sa.ForeignKeyConstraint(["source_site_id"], ["site.site_id"]),
        sa.PrimaryKeyConstraint("biobank_order_id", "version"),
    )
    # ### end Alembic commands ###
    # Initialise version on existing orders, then snapshot them into history.
    op.execute(_BACKFILL_VERSION)
    op.execute(_BACKFILL_HISTORY)
def downgrade_rdr():
    """Drop the ``biobank_history`` table (backfilled data is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("biobank_history")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/4aa79d5ac86e_add_biobank_history.py",
"copies": "1",
"size": "6417",
"license": "bsd-3-clause",
"hash": -6622745211271318000,
"line_mean": 39.10625,
"line_max": 98,
"alpha_frac": 0.5726975222,
"autogenerated": false,
"ratio": 4.10556621880998,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00035311094134623546,
"num_lines": 160
} |
"""add_biobank_identifier
Revision ID: 32d414bc9a1e
Revises: 9b93d00a35cc
Create Date: 2018-01-30 11:12:24.111826
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "32d414bc9a1e"
down_revision = "9b93d00a35cc"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_rdr``)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_rdr``)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add the NOT NULL ``biobank_order_identifier`` column to ``biobank_stored_sample``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): adding a NOT NULL column without a server default will
    # fail if the table already has rows — confirm it is empty at migration time.
    op.add_column("biobank_stored_sample", sa.Column("biobank_order_identifier", sa.String(length=80), nullable=False))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the ``biobank_order_identifier`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("biobank_stored_sample", "biobank_order_identifier")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/32d414bc9a1e_add_biobank_identifier.py",
"copies": "1",
"size": "1163",
"license": "bsd-3-clause",
"hash": -347264098850127040,
"line_mean": 23.7446808511,
"line_max": 119,
"alpha_frac": 0.6732588134,
"autogenerated": false,
"ratio": 3.410557184750733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9582929473328038,
"avg_score": 0.00017730496453900708,
"num_lines": 47
} |
"""add biobank version
Revision ID: e44e303ae759
Revises: 0e92151ebd4a
Create Date: 2018-08-20 10:08:20.904521
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "e44e303ae759"
down_revision = "0e92151ebd4a"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_rdr``)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_rdr``)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add the NOT NULL ``version`` column to ``biobank_order``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): NOT NULL without a server default — a separate migration
    # backfills version=1; confirm ordering if rows exist.
    op.add_column("biobank_order", sa.Column("version", sa.Integer(), nullable=False))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the ``version`` column from ``biobank_order``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("biobank_order", "version")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/e44e303ae759_add_biobank_version.py",
"copies": "1",
"size": "1102",
"license": "bsd-3-clause",
"hash": -7997441456450034000,
"line_mean": 22.4468085106,
"line_max": 86,
"alpha_frac": 0.6615245009,
"autogenerated": false,
"ratio": 3.4763406940063093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.963664240204742,
"avg_score": 0.00024455857177794083,
"num_lines": 47
} |
"""add blog
Revision ID: 16c06c739a94
Revises: 1731e32124d9
Create Date: 2017-02-08 15:21:20.686000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '16c06c739a94'
down_revision = '1731e32124d9'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``post`` table (author FK to ``users``) with a timestamp index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('post',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('body', sa.Text(), nullable=True),
                    sa.Column('timestamp', sa.DateTime(), nullable=True),
                    sa.Column('author_id', sa.Integer(), nullable=True),
                    sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the timestamp index and the ``post`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_post_timestamp'), table_name='post')
    op.drop_table('post')
    # ### end Alembic commands ###
| {
"repo_name": "hedm0423/flaskdemo",
"path": "migrations/versions/16c06c739a94_add_blog.py",
"copies": "1",
"size": "1047",
"license": "mit",
"hash": -8964239416669786000,
"line_mean": 27.2972972973,
"line_max": 83,
"alpha_frac": 0.6609360076,
"autogenerated": false,
"ratio": 3.292452830188679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9435444994729308,
"avg_score": 0.0035887686118740427,
"num_lines": 37
} |
"""add blog_post.slug
Revision ID: 820bb005f2c5
Revises: 15ab4487a7f0
Create Date: 2016-06-09 22:15:53.127614
"""
# revision identifiers, used by Alembic.
revision = '820bb005f2c5'
down_revision = '15ab4487a7f0'
import re
from alembic import op
from sqlalchemy.orm import sessionmaker
import sqlalchemy as sa
from sqlalchemy import text
from blag.models import BlogPost
# Matches runs of characters other than ASCII letters and hyphens
# (note: digits are stripped too).
non_ascii = re.compile(r'[^a-zA-Z-]*')


def slugify(title):
    """Lower-case *title*, turn spaces into hyphens and strip every
    character that is not an ASCII letter or hyphen."""
    hyphenated = title.lower().replace(' ', '-')
    return non_ascii.sub('', hyphenated)
def upgrade():
    """Add ``blog_post.slug``, backfill it from titles, then make it NOT NULL."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('blog_post', sa.Column('slug', sa.String(length=40), nullable=True))
    # year = func.year(BlogPost.datetime_added)
    # Index (year, slug) so the same slug may recur in different years.
    year = text("date_trunc('year', blog_post.datetime_added)")
    op.create_index(op.f('ix_blog_post_slug'), 'blog_post', [year, 'slug'], unique=False)
    # Add a default slug for existing blog posts
    connection = op.get_bind()
    SessionMaker = sessionmaker(bind=connection.engine)
    session = SessionMaker(bind=connection)
    for blog_post in session.query(BlogPost):
        blog_post.slug = slugify(blog_post.title)
    session.flush()
    # Mark slug as not nullable
    op.alter_column('blog_post', 'slug', nullable=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the slug index and the ``blog_post.slug`` column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_blog_post_slug'), table_name='blog_post')
    op.drop_column('blog_post', 'slug')
    ### end Alembic commands ###
| {
"repo_name": "thusoy/blag",
"path": "blag/migrations/versions/820bb005f2c5_add_blog_post_slug.py",
"copies": "1",
"size": "1516",
"license": "mit",
"hash": 8458997403209816000,
"line_mean": 27.0740740741,
"line_max": 89,
"alpha_frac": 0.6833773087,
"autogenerated": false,
"ratio": 3.260215053763441,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9402793874672304,
"avg_score": 0.008159697558227461,
"num_lines": 54
} |
"""Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Recreate ``boardmoderator`` with a composite PK and a ``roles`` array."""
    op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
    op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
    # NOTE(review): drop-and-recreate discards existing moderator assignments —
    # confirm that is intended.
    op.drop_table('boardmoderator')
    op.create_table('boardmoderator',
                    sa.Column('board_id', sa.Integer(), nullable=False),
                    sa.Column('moderator_id', sa.Integer(), nullable=False),
                    sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
                    sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
                    sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
                    sa.PrimaryKeyConstraint('board_id', 'moderator_id')
                    )
    op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
    """Restore the previous ``boardmoderator`` shape (nullable FKs, no roles, no PK)."""
    op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
    # NOTE(review): recreating the table loses current rows.
    op.drop_table('boardmoderator')
    op.create_table('boardmoderator',
                    sa.Column('board_id', sa.Integer(), nullable=True),
                    sa.Column('moderator_id', sa.Integer(), nullable=True),
                    sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
                    sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
                    )
    op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
    op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
| {
"repo_name": "Floens/uchan",
"path": "migrations/versions/da8b38b5bdd5_add_board_moderator_roles.py",
"copies": "1",
"size": "1908",
"license": "mit",
"hash": -6841155678013453000,
"line_mean": 39.5957446809,
"line_max": 109,
"alpha_frac": 0.6284067086,
"autogenerated": false,
"ratio": 3.704854368932039,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4833261077532039,
"avg_score": null,
"num_lines": null
} |
"""Add Boards
Revision ID: 50e85a64aa30
Revises: d1cd159b4f82
Create Date: 2017-11-11 20:15:43.683292
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50e85a64aa30'
down_revision = 'd1cd159b4f82'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``boards`` and ``board_items`` tables."""
    op.create_table(
        'boards',
        sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
        sa.Column('title', sa.String(256), nullable=False),
        sa.Column('list_order', sa.Integer, nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=False)
    )
    op.create_table(
        'board_items',
        sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
        # NOTE(review): board_id carries no ForeignKey to boards.id — may be
        # intentional, but worth confirming.
        sa.Column('board_id', sa.Integer, nullable=False),
        sa.Column('title', sa.String(256), nullable=False),
        sa.Column('link', sa.String(2048), nullable=True),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
        sa.Column('completed_at', sa.DateTime(timezone=True), nullable=True)
    )
def downgrade():
    """Drop both tables; order is not critical since no FK links them."""
    op.drop_table('boards')
    op.drop_table('board_items')
| {
"repo_name": "charlesj/Apollo",
"path": "database/versions/50e85a64aa30_add_boards.py",
"copies": "1",
"size": "1223",
"license": "mit",
"hash": 7295685434328859000,
"line_mean": 29.575,
"line_max": 76,
"alpha_frac": 0.658217498,
"autogenerated": false,
"ratio": 3.2613333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4419550831333333,
"avg_score": null,
"num_lines": null
} |
"""add boards table
Revision ID: c1b883dbfff4
Revises: b764aaedf10d
Create Date: 2017-10-30 19:37:00.544958
"""
# pylint: skip-file
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c1b883dbfff4'
down_revision = 'b764aaedf10d'
branch_labels = None
depends_on = None
def upgrade():
    """Create ``boards`` and link ``tasks.board_id`` to it via an FK."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('boards',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('name', sa.String(), nullable=False),
                    sa.Column('task_number', sa.Integer(), nullable=False),
                    sa.Column(
                        'created_at',
                        sa.DateTime(timezone=True),
                        server_default=sa.text('now()'),
                        nullable=False),
                    sa.Column(
                        'updated_at',
                        sa.DateTime(timezone=True),
                        server_default=sa.text('now()'),
                        nullable=False),
                    sa.PrimaryKeyConstraint('id', name=op.f('pk_boards')))
    op.add_column('tasks', sa.Column('board_id', sa.Integer(), nullable=True))
    op.create_foreign_key(
        op.f('fk_tasks_board_id_boards'), 'tasks', 'boards', ['board_id'],
        ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Remove the tasks→boards FK and column, then drop ``boards``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(
        op.f('fk_tasks_board_id_boards'), 'tasks', type_='foreignkey')
    op.drop_column('tasks', 'board_id')
    op.drop_table('boards')
    # ### end Alembic commands ###
| {
"repo_name": "kokimoribe/todo-api",
"path": "alembic/versions/c1b883dbfff4_add_boards_table.py",
"copies": "1",
"size": "1672",
"license": "mit",
"hash": -6417203331918787000,
"line_mean": 32.44,
"line_max": 78,
"alpha_frac": 0.543062201,
"autogenerated": false,
"ratio": 3.861431870669746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9904494071669746,
"avg_score": 0,
"num_lines": 50
} |
"""Add books column
Revision ID: e7aff6447fbe
Revises: 7cdaa5513f2d
Create Date: 2017-10-08 15:22:12.019617
"""
from alembic import op
from sqlalchemy.sql import table, column
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e7aff6447fbe'
down_revision = '7cdaa5513f2d'
branch_labels = None
depends_on = None
def upgrade():
    """Add ``bible_versions.books``, backfill per-version values, make it NOT NULL."""
    # Lightweight table construct used only for the data migration below.
    bible_versions = table('bible_versions',
                           column('id', sa.Integer),
                           column('command', sa.String),
                           column('name', sa.String),
                           column('abbr', sa.String),
                           column('service', sa.String),
                           column('service_version', sa.String),
                           column('rtl', sa.Boolean),
                           column('books', sa.BigInteger))
    op.add_column('bible_versions', sa.Column('books', sa.BigInteger, nullable=True))
    # NOTE(review): the integers below look like per-book bitmasks of which
    # book sets each version contains — confirm against the application's
    # book-flag definitions.
    op.execute(bible_versions.update()
               .where((bible_versions.c.command == op.inline_literal('esv')) |
                      (bible_versions.c.command == op.inline_literal('kjv')) |
                      (bible_versions.c.command == op.inline_literal('nasb')) |
                      (bible_versions.c.command == op.inline_literal('niv')) |
                      (bible_versions.c.command == op.inline_literal('csb')) |
                      (bible_versions.c.command == op.inline_literal('net')) |
                      (bible_versions.c.command == op.inline_literal('isv')) |
                      (bible_versions.c.command == op.inline_literal('msg')) |
                      (bible_versions.c.command == op.inline_literal('nlt')) |
                      (bible_versions.c.command == op.inline_literal('gnv')) |
                      (bible_versions.c.command == op.inline_literal('amp')))
               .values({'books': 3}))
    op.execute(bible_versions.update()
               .where(bible_versions.c.command == op.inline_literal('sbl'))
               .values({'books': 2}))
    op.execute(bible_versions.update()
               .where(bible_versions.c.command == op.inline_literal('wlc'))
               .values({'books': 1}))
    op.execute(bible_versions.update()
               .where(bible_versions.c.command == op.inline_literal('nrsv'))
               .values({'books': 1048575}))
    op.execute(bible_versions.update()
               .where(bible_versions.c.command == op.inline_literal('kjva'))
               .values({'books': 532351}))
    op.execute(bible_versions.update()
               .where(bible_versions.c.command == op.inline_literal('lxx'))
               .values({'books': 4062975}))
    op.execute(bible_versions.update()
               .where(bible_versions.c.command == op.inline_literal('gnt'))
               .values({'books': 761855}))
    op.alter_column('bible_versions', 'books', nullable=False)
def downgrade():
    """Drop the ``books`` column."""
    op.drop_column('bible_versions', 'books')
| {
"repo_name": "bryanforbes/Erasmus",
"path": "alembic/versions/e7aff6447fbe_add_books_column.py",
"copies": "1",
"size": "2959",
"license": "bsd-3-clause",
"hash": 2787870519976446000,
"line_mean": 43.1641791045,
"line_max": 85,
"alpha_frac": 0.5468063535,
"autogenerated": false,
"ratio": 3.7455696202531645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47923759737531646,
"avg_score": null,
"num_lines": null
} |
"""Add browser info
Revision ID: 3999a40ec7b
Revises: 292d93c545f2
Create Date: 2017-06-09 18:53:32.305876
"""
# revision identifiers, used by Alembic.
revision = '3999a40ec7b'
down_revision = '292d93c545f2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add four indexed browser-metadata columns to ``UseLogs``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('UseLogs', sa.Column('browser_language', sa.Unicode(length=100), nullable=True))
    op.add_column('UseLogs', sa.Column('browser_name', sa.Unicode(length=100), nullable=True))
    op.add_column('UseLogs', sa.Column('browser_platform', sa.Unicode(length=100), nullable=True))
    op.add_column('UseLogs', sa.Column('browser_version', sa.Unicode(length=100), nullable=True))
    op.create_index(u'ix_UseLogs_browser_language', 'UseLogs', ['browser_language'], unique=False)
    op.create_index(u'ix_UseLogs_browser_name', 'UseLogs', ['browser_name'], unique=False)
    op.create_index(u'ix_UseLogs_browser_platform', 'UseLogs', ['browser_platform'], unique=False)
    op.create_index(u'ix_UseLogs_browser_version', 'UseLogs', ['browser_version'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the browser-metadata indexes and columns from ``UseLogs``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_UseLogs_browser_version', table_name='UseLogs')
    op.drop_index(u'ix_UseLogs_browser_platform', table_name='UseLogs')
    op.drop_index(u'ix_UseLogs_browser_name', table_name='UseLogs')
    op.drop_index(u'ix_UseLogs_browser_language', table_name='UseLogs')
    op.drop_column('UseLogs', 'browser_version')
    op.drop_column('UseLogs', 'browser_platform')
    op.drop_column('UseLogs', 'browser_name')
    op.drop_column('UseLogs', 'browser_language')
    ### end Alembic commands ###
| {
"repo_name": "gateway4labs/labmanager",
"path": "alembic/versions/3999a40ec7b_add_browser_info.py",
"copies": "5",
"size": "1744",
"license": "bsd-2-clause",
"hash": 3286937410450508300,
"line_mean": 42.6,
"line_max": 98,
"alpha_frac": 0.7035550459,
"autogenerated": false,
"ratio": 3.15370705244123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006485842882699652,
"num_lines": 40
} |
"""add_build_author
Revision ID: 523842e356fa
Revises: c1d328364626
Create Date: 2018-10-01 11:57:18.018127
"""
from alembic import op
import sqlalchemy as sa
import zeus
# revision identifiers, used by Alembic.
revision = "523842e356fa"
down_revision = "c1d328364626"
branch_labels = ()
depends_on = None
def upgrade():
    """Add ``build.author_id`` with indexes and an ON DELETE SET NULL FK to ``author``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "build", sa.Column("author_id", zeus.db.types.guid.GUID(), nullable=True)
    )
    # Composite index supports listing a given author's builds by date.
    op.create_index(
        "idx_build_author_date", "build", ["author_id", "date_created"], unique=False
    )
    op.create_index(op.f("ix_build_author_id"), "build", ["author_id"], unique=False)
    op.create_foreign_key(
        None, "build", "author", ["author_id"], ["id"], ondelete="SET NULL"
    )
    # ### end Alembic commands ###
def downgrade():
    """Undo the author-tracking changes on ``build``."""
    build_table = "build"
    op.drop_constraint(None, build_table, type_="foreignkey")
    for index_name in (op.f("ix_build_author_id"), "idx_build_author_date"):
        op.drop_index(index_name, table_name=build_table)
    op.drop_column(build_table, "author_id")
| {
"repo_name": "getsentry/zeus",
"path": "zeus/migrations/523842e356fa_add_build_author.py",
"copies": "1",
"size": "1191",
"license": "apache-2.0",
"hash": 6841405053548739000,
"line_mean": 28.0487804878,
"line_max": 85,
"alpha_frac": 0.6456759026,
"autogenerated": false,
"ratio": 3.1507936507936507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42964695533936503,
"avg_score": null,
"num_lines": null
} |
"""Add BuildPhase
Revision ID: 4276d58dd1e6
Revises: 2596f21c6f58
Create Date: 2014-02-24 14:06:16.379028
"""
# revision identifiers, used by Alembic.
revision = '4276d58dd1e6'
down_revision = '2596f21c6f58'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``buildphase`` table: one row per named phase of a build."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'buildphase',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('build_id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('label', sa.String(length=128), nullable=False),
        # status/result default to '0' — enum semantics live in app code.
        sa.Column('status', sa.Enum(), server_default='0', nullable=False),
        sa.Column('result', sa.Enum(), server_default='0', nullable=False),
        sa.Column('order', sa.Integer(), server_default='0', nullable=False),
        sa.Column('duration', sa.Integer(), nullable=True),
        sa.Column('date_started', sa.DateTime(), nullable=True),
        sa.Column('date_finished', sa.DateTime(), nullable=True),
        sa.Column('date_created', sa.DateTime(), server_default='now()', nullable=False),
        # Phases are removed automatically with their build/project.
        sa.ForeignKeyConstraint(['build_id'], ['build.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        # A label may appear at most once per build.
        sa.UniqueConstraint('build_id', 'label', name='unq_buildphase_key')
    )
def downgrade():
    """Drop the ``buildphase`` table created in :func:`upgrade`."""
    op.drop_table('buildphase')
| {
"repo_name": "wfxiang08/changes",
"path": "migrations/versions/4276d58dd1e6_add_buildphase.py",
"copies": "4",
"size": "1469",
"license": "apache-2.0",
"hash": -4509778670649305000,
"line_mean": 35.725,
"line_max": 89,
"alpha_frac": 0.6466984343,
"autogenerated": false,
"ratio": 3.489311163895487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6136009598195488,
"avg_score": null,
"num_lines": null
} |
"""Add build plans
Revision ID: ff220d76c11
Revises: 153b703a46ea
Create Date: 2013-12-11 16:12:18.309606
"""
# revision identifiers, used by Alembic.
revision = 'ff220d76c11'
down_revision = '153b703a46ea'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the build-plan schema: ``plan``, ``step``, ``buildfamily`` and ``buildplan``."""
    # A plan is a named, JSON-configurable build template.
    op.create_table(
        'plan',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('label', sa.String(length=128), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=False),
        sa.Column('date_modified', sa.DateTime(), nullable=False),
        sa.Column('data', sa.JSONEncodedDict(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # Steps are ordered implementation entries belonging to a plan.
    op.create_table(
        'step',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('plan_id', sa.GUID(), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=False),
        sa.Column('date_modified', sa.DateTime(), nullable=False),
        sa.Column('implementation', sa.String(length=128), nullable=False),
        sa.Column('order', sa.Integer(), nullable=False),
        sa.Column('data', sa.JSONEncodedDict(), nullable=True),
        # "order" is quoted because it is an SQL keyword.
        sa.CheckConstraint('step."order" >= 0', name='chk_step_order_positive'),
        sa.ForeignKeyConstraint(['plan_id'], ['plan.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_step_plan_id', 'step', ['plan_id'])
    # A buildfamily groups related builds (by revision/patch) for a project.
    op.create_table(
        'buildfamily',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('repository_id', sa.GUID(), nullable=False),
        sa.Column('revision_sha', sa.String(length=40), nullable=True),
        sa.Column('patch_id', sa.GUID(), nullable=True),
        sa.Column('author_id', sa.GUID(), nullable=True),
        sa.Column('cause', sa.Enum(), nullable=False),
        sa.Column('label', sa.String(length=128), nullable=False),
        sa.Column('target', sa.String(length=128), nullable=True),
        sa.Column('status', sa.Enum(), nullable=False),
        sa.Column('result', sa.Enum(), nullable=False),
        sa.Column('message', sa.Text(), nullable=True),
        sa.Column('duration', sa.Integer(), nullable=True),
        sa.Column('date_started', sa.DateTime(), nullable=True),
        sa.Column('date_finished', sa.DateTime(), nullable=True),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('data', sa.JSONEncodedDict(), nullable=True),
        sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
        sa.ForeignKeyConstraint(['patch_id'], ['patch.id'], ),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
        sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_buildfamily_project_id', 'buildfamily', ['project_id'])
    op.create_index('idx_buildfamily_repository_revision', 'buildfamily', ['repository_id', 'revision_sha'])
    op.create_index('idx_buildfamily_patch_id', 'buildfamily', ['patch_id'])
    op.create_index('idx_buildfamily_author_id', 'buildfamily', ['author_id'])
    # buildplan links an individual build to its plan within a family.
    op.create_table(
        'buildplan',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('family_id', sa.GUID(), nullable=False),
        sa.Column('build_id', sa.GUID(), nullable=False),
        sa.Column('plan_id', sa.GUID(), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
        sa.ForeignKeyConstraint(['family_id'], ['buildfamily.id'], ),
        sa.ForeignKeyConstraint(['plan_id'], ['plan.id'], ),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_buildplan_project_id', 'buildplan', ['project_id'])
    op.create_index('idx_buildplan_family_id', 'buildplan', ['family_id'])
    op.create_index('idx_buildplan_build_id', 'buildplan', ['build_id'])
    op.create_index('idx_buildplan_plan_id', 'buildplan', ['plan_id'])
def downgrade():
    """Drop the build-plan tables in reverse dependency order."""
    for table_name in ('buildplan', 'buildfamily', 'step', 'plan'):
        op.drop_table(table_name)
| {
"repo_name": "bowlofstew/changes",
"path": "migrations/versions/ff220d76c11_add_build_plans.py",
"copies": "4",
"size": "4399",
"license": "apache-2.0",
"hash": 6204675761219085000,
"line_mean": 44.3505154639,
"line_max": 108,
"alpha_frac": 0.6228688338,
"autogenerated": false,
"ratio": 3.522017614091273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6144886447891272,
"avg_score": null,
"num_lines": null
} |
"""Add bulletin_message field to Guild.
Revision ID: 8010376daba7
Revises: afc57bd04606
Create Date: 2017-03-14 20:32:02.467432
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '8010376daba7'
down_revision = 'afc57bd04606'
branch_labels = None
depends_on = None
def upgrade():
    """Add announcement/bulletin columns to guild; drop the old stock column."""
    for column_name in ('announcement_channel', 'bulletin_channel',
                        'bulletin_message'):
        op.add_column('guild', sa.Column(column_name, sa.BigInteger(), nullable=True))
    op.drop_column('guild', 'stock_announcement_id')
def downgrade():
    """Restore stock_announcement_id and remove the bulletin columns."""
    op.add_column('guild', sa.Column('stock_announcement_id', sa.BIGINT(), autoincrement=False, nullable=True))
    for column_name in ('bulletin_message', 'bulletin_channel',
                        'announcement_channel'):
        op.drop_column('guild', column_name)
| {
"repo_name": "MJB47/Jokusoramame",
"path": "migrations/versions/8010376daba7_add_bulletin_message_field_to_guild.py",
"copies": "1",
"size": "1187",
"license": "mit",
"hash": 9222128314364971000,
"line_mean": 33.9117647059,
"line_max": 111,
"alpha_frac": 0.7034540859,
"autogenerated": false,
"ratio": 3.3249299719887957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45283840578887957,
"avg_score": null,
"num_lines": null
} |
"""Exploratory driver for the WaveletSAT decoder (Python 2 script).

Loads a precomputed ``.wnc4`` file, queries the histogram of a fixed spatial
window for every time step, and plots the resulting time/bin heat map.
Timing of the load and of the query loop is collected in ``time_queue``.
"""
# ADD-BY-LEETEN 2013/09/24-BEGIN
import numpy as np;
import time;
# ADD-BY-LEETEN 2013/09/24-END
import sat_dwt_decoder;
# ADD-BY-LEETEN 2013/09/24-BEGIN
time_queue = [];
time_queue.append(time.time());
# ADD-BY-LEETEN 2013/09/24-END
waveletsat_decoder = sat_dwt_decoder.decoder();
# waveletsat_decoder.load("D:/data/WaveletSAT/video/vis08.wnc4");
waveletsat_decoder.load("D:/data/WaveletSAT/video/AVSS_PV_Hard.wnc4");
time_queue[-1] = time.time() - time_queue[-1]; # ADD-BY-LEETEN 2013/09/24
# # MOD-BY-LEETEN 2013/09/14-FROM:
# data_size = waveletsat_decoder.get_size();
# # MOD-BY-LEETEN 2013/09/14-TO:
# get_size() fills the passed list in place (out-parameter style).
data_size = [];
waveletsat_decoder.get_size(data_size);
# # MOD-BY-LEETEN 2013/09/14-END
print data_size;
# ADD-BY-LEETEN 2013/09/14-BEGIN
n_bins = waveletsat_decoder.get_n_bins();
print n_bins;
# One histogram column per time step; data_size[2] presumably counts frames.
time_hist = np.ndarray(shape=(n_bins, data_size[2]-1), dtype=float, order='C');
# ADD-BY-LEETEN 2013/09/14-END
# # MOD-BY-LEETEN 2013/09/14-FROM:
# for t in range(0, data_size[2]-1):
# region_hist = waveletsat_decoder.get_region_histogram([0, 0, t], [data_size[0]-1, data_size[1]-1, t+1]);
# print region_hist;
# # MOD-BY-LEETEN 2013/09/14-TO:
# Fixed query window: origin and extent in the first two (spatial) dimensions.
spatial_left = [110, 102];
spatial_size = [44, 33];
spatial_right = [];
for d in range(0, len(spatial_left)):
    spatial_right.append(spatial_left[d] + spatial_size[d]);
time_queue.append(time.time());
# Query the window's histogram for each time slab [t, t+1).
for t in range(0, data_size[2]-1):
    region_hist = []; # ADD-BY-LEETEN 2013/10/20
    waveletsat_decoder.get_region_histogram([spatial_left[0], spatial_left[1], t], [spatial_right[0], spatial_right[1], t+1], region_hist);
    for b in range(0, n_bins):
        time_hist[b, t] = region_hist[b];
time_queue[-1] = time.time() - time_queue[-1];
print time_queue;
import matplotlib.pyplot as plt;
plt.imshow(time_hist, origin='lower');
plt.show();
# # MOD-BY-LEETEN 2013/09/14-END
# import matplotlib.pyplot as plt;
# plt.bar(range(0, len(region_hist)), region_hist);
# plt.show();
############################################################
# Copyright (c) 2013 Teng-Yok Lee
#
# See the file LICENSE.txt for copying permission.
############################################################
| {
"repo_name": "recheliu/wavelet-sat",
"path": "python/test_sat_dwt_decoder.py",
"copies": "1",
"size": "2202",
"license": "mit",
"hash": 4481217178081077000,
"line_mean": 31.3636363636,
"line_max": 136,
"alpha_frac": 0.6262488647,
"autogenerated": false,
"ratio": 2.519450800915332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3645699665615332,
"avg_score": null,
"num_lines": null
} |
"""Add cache per rlms type
Revision ID: 1a3e5ec9c2fd
Revises: 7f806cb0445
Create Date: 2015-05-06 16:33:34.158433
"""
# revision identifiers, used by Alembic.
revision = '1a3e5ec9c2fd'
down_revision = '7f806cb0445'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create ``rlmstype_cache``: a key/value cache scoped per RLMS type."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('rlmstype_cache',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('datetime', sa.DateTime(), nullable=True),
    sa.Column('rlms_type', sa.Unicode(length=255), nullable=False),
    sa.Column('key', sa.Unicode(length=255), nullable=True),
    sa.Column('value', sa.UnicodeText(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Lookups are by (rlms_type, key); both get non-unique indexes.
    op.create_index(u'ix_rlmstype_cache_key', 'rlmstype_cache', ['key'], unique=False)
    op.create_index(u'ix_rlmstype_cache_rlms_type', 'rlmstype_cache', ['rlms_type'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the rlmstype_cache table and its two indexes."""
    for index_name in (u'ix_rlmstype_cache_rlms_type', u'ix_rlmstype_cache_key'):
        op.drop_index(index_name, table_name='rlmstype_cache')
    op.drop_table('rlmstype_cache')
| {
"repo_name": "labsland/labmanager",
"path": "alembic/versions/1a3e5ec9c2fd_add_cache_per_rlms_type.py",
"copies": "5",
"size": "1242",
"license": "bsd-2-clause",
"hash": -6374285486393555000,
"line_mean": 32.5675675676,
"line_max": 98,
"alpha_frac": 0.6859903382,
"autogenerated": false,
"ratio": 3.0145631067961167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6200553444996116,
"avg_score": null,
"num_lines": null
} |
"""Add calls to action.
Revision ID: 62214735b8
Revises: 564d901de0c
Create Date: 2015-06-22 03:13:32.048821
"""
# revision identifiers, used by Alembic.
revision = '62214735b8'
down_revision = '564d901de0c'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
    """Create ``calls_to_action``: per-event CTA links with optional begin/end times."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('calls_to_action',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=False),
    sa.Column('url', sqlalchemy_utils.types.url.URLType(), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=False),
    sa.Column('begins', sqlalchemy_utils.types.arrow.ArrowType(), nullable=True),
    sa.Column('ends', sqlalchemy_utils.types.arrow.ArrowType(), nullable=True),
    sa.Column('event_id', sa.Integer(), nullable=False),
    # op.f(...) applies the configured naming convention to constraint names.
    sa.ForeignKeyConstraint(['event_id'], ['events.id'], name=op.f('calls_to_action_event_id_fkey')),
    sa.PrimaryKeyConstraint('id', name=op.f('calls_to_action_pkey'))
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the ``calls_to_action`` table created in :func:`upgrade`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('calls_to_action')
    ### end Alembic commands ###
| {
"repo_name": "pathunstrom/pygotham",
"path": "migrations/versions/62214735b8_.py",
"copies": "3",
"size": "1229",
"license": "bsd-3-clause",
"hash": 5882270780175383000,
"line_mean": 32.2162162162,
"line_max": 101,
"alpha_frac": 0.6867371847,
"autogenerated": false,
"ratio": 3.330623306233062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5517360490933062,
"avg_score": null,
"num_lines": null
} |
"""add cancelled to PM
Revision ID: 2b328e8e5eb8
Revises: b4f6eb55d503
Create Date: 2018-09-25 14:27:46.744408
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import PhysicalMeasurementsStatus
# revision identifiers, used by Alembic.
revision = "2b328e8e5eb8"
down_revision = "b4f6eb55d503"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific handler, e.g. ``upgrade_rdr``."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific handler, e.g. ``downgrade_rdr``."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add cancellation-tracking columns to ``physical_measurements`` (RDR db)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("physical_measurements", sa.Column("cancelled_site_id", sa.Integer(), nullable=True))
    op.add_column("physical_measurements", sa.Column("cancelled_time", model.utils.UTCDateTime(), nullable=True))
    op.add_column("physical_measurements", sa.Column("cancelled_username", sa.String(length=255), nullable=True))
    op.add_column(
        "physical_measurements", sa.Column("status", model.utils.Enum(PhysicalMeasurementsStatus), nullable=True)
    )
    # Unnamed FK: Alembic derives the constraint name from naming conventions.
    op.create_foreign_key(None, "physical_measurements", "site", ["cancelled_site_id"], ["site_id"])
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the cancellation-tracking columns from ``physical_measurements``."""
    op.drop_constraint(None, "physical_measurements", type_="foreignkey")
    for column_name in ("status", "cancelled_username",
                        "cancelled_time", "cancelled_site_id"):
        op.drop_column("physical_measurements", column_name)
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/2b328e8e5eb8_add_cancelled_to_pm.py",
"copies": "1",
"size": "1950",
"license": "bsd-3-clause",
"hash": 706350705678529200,
"line_mean": 31.5,
"line_max": 113,
"alpha_frac": 0.6958974359,
"autogenerated": false,
"ratio": 3.5845588235294117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4780456259429412,
"avg_score": null,
"num_lines": null
} |
"""Add Canons of Dort
Revision ID: 43c0cf2239c4
Revises: a1dbd23261c3
Create Date: 2017-10-11 23:43:21.682227
"""
from alembic import op
import sqlalchemy as sa
from pathlib import Path
from json import load
from collections import OrderedDict
# revision identifiers, used by Alembic.
revision = '43c0cf2239c4'
down_revision = 'a1dbd23261c3'
branch_labels = None
depends_on = None
# Load the confession text shipped alongside this revision file; the
# OrderedDict hook preserves the chapter/paragraph ordering of the JSON.
with (Path(__file__).resolve().parent / f'{revision}_dort.json').open() as f:
    dort_data = load(f, object_pairs_hook=lambda x: OrderedDict(x))
# Lightweight table definitions for the data migration (kept independent of
# the application's model classes).
metadata = sa.MetaData()
confessions = sa.Table('confessions', metadata,
                       sa.Column('id', sa.Integer, primary_key=True),
                       sa.Column('command', sa.String, unique=True),
                       sa.Column('name', sa.String))
confession_chapters = sa.Table('confession_chapters', metadata,
                               sa.Column('id', sa.Integer, primary_key=True),
                               sa.Column('confession_id', sa.Integer, sa.ForeignKey('confessions.id')),
                               sa.Column('chapter_number', sa.Integer),
                               sa.Column('title', sa.String))
confession_paragraphs = sa.Table('confession_paragraphs', metadata,
                                 sa.Column('id', sa.Integer, primary_key=True),
                                 sa.Column('confession_id', sa.Integer, sa.ForeignKey('confessions.id')),
                                 sa.Column('chapter_number', sa.Integer),
                                 sa.Column('paragraph_number', sa.Integer),
                                 sa.Column('text', sa.Text))
def upgrade():
    """Insert the Canons of Dort confession with all chapters and paragraphs."""
    conn = op.get_bind()
    result = conn.execute(confessions.insert(),
                          dict(command='dort', name='The Canons of Dort'))
    confession_id = result.inserted_primary_key[0]
    # JSON object keys are strings; convert to ints for the numeric columns.
    for chapter_str, chapter in dort_data['chapters'].items():
        chapter_number = int(chapter_str)
        conn.execute(confession_chapters.insert(),
                     dict(confession_id=confession_id, chapter_number=chapter_number,
                          title=chapter['title']))
        # Bulk-insert every paragraph of the chapter in one executemany call.
        conn.execute(confession_paragraphs.insert(), *[
            dict(confession_id=confession_id, chapter_number=chapter_number,
                 paragraph_number=int(paragraph_str), text=text) for paragraph_str, text in
            chapter['paragraphs'].items()
        ])
def downgrade():
    """Delete the Canons of Dort rows inserted by :func:`upgrade`."""
    conn = op.get_bind()
    result = conn.execute(confessions.select().where(confessions.c.command == 'dort'))
    row = result.fetchone()
    confession_id = row['id']
    # Children first, then the parent confession row.
    for child in (confession_paragraphs, confession_chapters):
        conn.execute(child.delete().where(child.c.confession_id == confession_id))
    conn.execute(confessions.delete().where(confessions.c.id == confession_id))
    result.close()
| {
"repo_name": "bryanforbes/Erasmus",
"path": "alembic/versions/43c0cf2239c4_add_canons_of_dort.py",
"copies": "1",
"size": "2903",
"license": "bsd-3-clause",
"hash": 2491588952159875000,
"line_mean": 37.7066666667,
"line_max": 106,
"alpha_frac": 0.6017912504,
"autogenerated": false,
"ratio": 3.809711286089239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4911502536489239,
"avg_score": null,
"num_lines": null
} |
"""add capabilities table
Revision ID: 38c8ec357e0
Revises: e64cd1af67
Create Date: 2015-03-10 13:24:16.971933
"""
# revision identifiers, used by Alembic.
revision = '38c8ec357e0'
down_revision = 'e64cd1af67'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create ``capabilities``: per-user roles on a production."""
    op.create_table('capabilities',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('production_id', sa.Integer(), nullable=False),
        # Creates a Postgres enum type named 'capability' as a side effect;
        # downgrade() drops it explicitly.
        sa.Column('type', sa.Enum('member', 'operator', 'admin', name='capability'), nullable=False),
        sa.ForeignKeyConstraint(['production_id'], ['productions.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_capabilities_production', 'capabilities', ['production_id'], unique=False)
    op.create_index('idx_capabilities_user', 'capabilities', ['user_id'], unique=False)
def downgrade():
    """Drop capabilities (indexes, table, then the backing enum type)."""
    for index_name in ('idx_capabilities_user', 'idx_capabilities_production'):
        op.drop_index(index_name, table_name='capabilities')
    op.drop_table('capabilities')
    # The enum created for the type column is not dropped with the table.
    op.execute('DROP TYPE capability')
| {
"repo_name": "rjw57/cubbie",
"path": "migrations/versions/38c8ec357e0_add_capabilities_table.py",
"copies": "1",
"size": "1216",
"license": "mit",
"hash": -4955954649725083000,
"line_mean": 31.8648648649,
"line_max": 99,
"alpha_frac": 0.6965460526,
"autogenerated": false,
"ratio": 3.4842406876790832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4680786740279083,
"avg_score": null,
"num_lines": null
} |
"""Add captain column to Teams
Revision ID: b5551cd26764
Revises: 4e4d5a9ea000
Create Date: 2019-04-12 00:29:08.021141
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column, table
from CTFd.models import db
# revision identifiers, used by Alembic.
revision = "b5551cd26764"
down_revision = "4e4d5a9ea000"
branch_labels = None
depends_on = None
# Minimal SQL constructs used by the data backfill in upgrade(); only the
# columns the migration actually touches are declared.
teams_table = table("teams", column("id", db.Integer), column("captain_id", db.Integer))
users_table = table("users", column("id", db.Integer), column("team_id", db.Integer))
def upgrade():
    """Add ``teams.captain_id`` and backfill it with each team's lowest-id member."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("teams", sa.Column("captain_id", sa.Integer(), nullable=True))
    bind = op.get_bind()
    url = str(bind.engine.url)
    # SQLite cannot add a foreign key via ALTER TABLE, so skip it there.
    if url.startswith("sqlite") is False:
        op.create_foreign_key(
            "team_captain_id", "teams", "users", ["captain_id"], ["id"]
        )
    connection = op.get_bind()
    for team in connection.execute(teams_table.select()):
        # Pick the member with the smallest user id (presumably the first to
        # join the team — confirm intent with product).
        users = connection.execute(
            users_table.select()
            .where(users_table.c.team_id == team.id)
            .order_by(users_table.c.id)
            .limit(1)
        )
        for user in users:
            connection.execute(
                teams_table.update()
                .where(teams_table.c.id == team.id)
                .values(captain_id=user.id)
            )
    # ### end Alembic commands ###
def downgrade():
    """Drop the captain FK (where it exists) and the ``captain_id`` column.

    Bug fix: upgrade() only creates the ``team_captain_id`` constraint on
    non-SQLite backends, but this function previously dropped it
    unconditionally — so downgrading on SQLite failed on a constraint that
    was never created. Mirror the same dialect guard here.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    bind = op.get_bind()
    url = str(bind.engine.url)
    if url.startswith("sqlite") is False:
        op.drop_constraint("team_captain_id", "teams", type_="foreignkey")
    op.drop_column("teams", "captain_id")
    # ### end Alembic commands ###
| {
"repo_name": "LosFuzzys/CTFd",
"path": "migrations/versions/b5551cd26764_add_captain_column_to_teams.py",
"copies": "4",
"size": "1690",
"license": "apache-2.0",
"hash": 744503992530115600,
"line_mean": 28.649122807,
"line_max": 88,
"alpha_frac": 0.6153846154,
"autogenerated": false,
"ratio": 3.373253493013972,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5988638108413972,
"avg_score": null,
"num_lines": null
} |
"""ADd cascades
Revision ID: def46e09c9ef
Revises: 3efdb537f933
Create Date: 2017-12-28 11:44:14.263012
"""
# revision identifiers, used by Alembic.
revision = 'def46e09c9ef'
down_revision = '3efdb537f933'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate RepositoryApp* foreign keys with ON UPDATE/DELETE CASCADE."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Each FK is dropped by its MySQL autogenerated name and recreated
    # (unnamed) with cascading update/delete behavior.
    op.drop_constraint(u'RepositoryApp2languages_ibfk_2', 'RepositoryApp2languages', type_='foreignkey')
    op.drop_constraint(u'RepositoryApp2languages_ibfk_1', 'RepositoryApp2languages', type_='foreignkey')
    op.create_foreign_key(None, 'RepositoryApp2languages', 'RepositoryApps', ['repository_app_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    op.create_foreign_key(None, 'RepositoryApp2languages', 'Languages', ['language_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    op.drop_constraint(u'RepositoryAppCheckUrls_ibfk_1', 'RepositoryAppCheckUrls', type_='foreignkey')
    op.create_foreign_key(None, 'RepositoryAppCheckUrls', 'RepositoryApps', ['repository_app_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    op.drop_constraint(u'RepositoryAppFailures_ibfk_1', 'RepositoryAppFailures', type_='foreignkey')
    op.create_foreign_key(None, 'RepositoryAppFailures', 'RepositoryAppCheckUrls', ['repository_app_check_url_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original (non-cascading) RepositoryApp* foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the unnamed cascading FKs, then recreate them under their original
    # MySQL autogenerated names without cascade options.
    op.drop_constraint(None, 'RepositoryAppFailures', type_='foreignkey')
    op.create_foreign_key(u'RepositoryAppFailures_ibfk_1', 'RepositoryAppFailures', 'RepositoryAppCheckUrls', ['repository_app_check_url_id'], ['id'])
    op.drop_constraint(None, 'RepositoryAppCheckUrls', type_='foreignkey')
    op.create_foreign_key(u'RepositoryAppCheckUrls_ibfk_1', 'RepositoryAppCheckUrls', 'RepositoryApps', ['repository_app_id'], ['id'])
    op.drop_constraint(None, 'RepositoryApp2languages', type_='foreignkey')
    op.drop_constraint(None, 'RepositoryApp2languages', type_='foreignkey')
    op.create_foreign_key(u'RepositoryApp2languages_ibfk_1', 'RepositoryApp2languages', 'Languages', ['language_id'], ['id'])
    op.create_foreign_key(u'RepositoryApp2languages_ibfk_2', 'RepositoryApp2languages', 'RepositoryApps', ['repository_app_id'], ['id'])
    # ### end Alembic commands ###
| {
"repo_name": "morelab/appcomposer",
"path": "alembic/versions/def46e09c9ef_add_cascades.py",
"copies": "3",
"size": "2355",
"license": "bsd-2-clause",
"hash": -6597934979457355000,
"line_mean": 57.875,
"line_max": 163,
"alpha_frac": 0.7290870488,
"autogenerated": false,
"ratio": 3.437956204379562,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5667043253179562,
"avg_score": null,
"num_lines": null
} |
"""add cascades to foreign keys
Revision ID: 2d235ac4b9e
Revises: 7d8199c632
Create Date: 2015-03-20 10:11:40.618169
"""
# revision identifiers, used by Alembic.
revision = '2d235ac4b9e'
down_revision = '7d8199c632'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate three foreign keys with ON DELETE CASCADE."""
    fk_specs = (
        ('user_identities_user_id_fkey', 'user_identities', 'users', 'user_id'),
        ('capabilities_user_id_fkey', 'capabilities', 'users', 'user_id'),
        ('sales_performance_id_fkey', 'sales', 'performances', 'performance_id'),
    )
    for fk_name, source, referent, source_col in fk_specs:
        op.drop_constraint(fk_name, source)
        op.create_foreign_key(
            fk_name, source, referent,
            [source_col], ['id'], ondelete='CASCADE'
        )
def downgrade():
    """Recreate the same three foreign keys without cascade behavior."""
    fk_specs = (
        ('user_identities_user_id_fkey', 'user_identities', 'users', 'user_id'),
        ('capabilities_user_id_fkey', 'capabilities', 'users', 'user_id'),
        ('sales_performance_id_fkey', 'sales', 'performances', 'performance_id'),
    )
    for fk_name, source, referent, source_col in fk_specs:
        op.drop_constraint(fk_name, source)
        op.create_foreign_key(
            fk_name, source, referent,
            [source_col], ['id']
        )
| {
"repo_name": "rjw57/cubbie",
"path": "migrations/versions/2d235ac4b9e_add_cascades_to_foreign_keys.py",
"copies": "1",
"size": "1536",
"license": "mit",
"hash": -4293875563438055400,
"line_mean": 27.9811320755,
"line_max": 73,
"alpha_frac": 0.6360677083,
"autogenerated": false,
"ratio": 3.1604938271604937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9282979605559739,
"avg_score": 0.0027163859801508793,
"num_lines": 53
} |
"""Add cascading delete to dynamic challenges
Revision ID: b37fb68807ea
Revises:
Create Date: 2020-05-06 12:21:39.373983
"""
# revision identifiers, used by Alembic.
revision = "b37fb68807ea"
down_revision = None
branch_labels = None
depends_on = None
def upgrade(op=None):
    """Recreate the dynamic_challenge FK with ON DELETE CASCADE.

    The existing constraint's autogenerated name differs per backend, so it
    is chosen from the engine URL; other backends fall through without a drop.
    """
    engine_url = str(op.get_bind().engine.url)
    if engine_url.startswith("mysql"):
        fk_name = "dynamic_challenge_ibfk_1"
    elif engine_url.startswith("postgres"):
        fk_name = "dynamic_challenge_id_fkey"
    else:
        fk_name = None
    if fk_name is not None:
        op.drop_constraint(fk_name, "dynamic_challenge", type_="foreignkey")
    op.create_foreign_key(
        None, "dynamic_challenge", "challenges", ["id"], ["id"], ondelete="CASCADE"
    )
def downgrade(op=None):
    """Recreate the dynamic_challenge FK without the cascade option.

    Mirrors upgrade(): the constraint name is backend-specific, and unknown
    backends fall through without dropping anything.
    """
    engine_url = str(op.get_bind().engine.url)
    if engine_url.startswith("mysql"):
        fk_name = "dynamic_challenge_ibfk_1"
    elif engine_url.startswith("postgres"):
        fk_name = "dynamic_challenge_id_fkey"
    else:
        fk_name = None
    if fk_name is not None:
        op.drop_constraint(fk_name, "dynamic_challenge", type_="foreignkey")
    op.create_foreign_key(None, "dynamic_challenge", "challenges", ["id"], ["id"])
| {
"repo_name": "ajvpot/CTFd",
"path": "CTFd/plugins/dynamic_challenges/migrations/b37fb68807ea_add_cascading_delete_to_dynamic_.py",
"copies": "4",
"size": "1265",
"license": "apache-2.0",
"hash": -983648834779568100,
"line_mean": 27.1111111111,
"line_max": 83,
"alpha_frac": 0.6229249012,
"autogenerated": false,
"ratio": 3.4752747252747254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6098199626474726,
"avg_score": null,
"num_lines": null
} |
"""add case_number, completed_date, received_date; remove division, bureau, resident_weapon_used on BPD UOF table
Revision ID: 77ad8047becf
Revises: 43c4c512514
Create Date: 2017-01-09 23:48:43.764788
"""
# revision identifiers, used by Alembic.
revision = '77ad8047becf'
down_revision = '43c4c512514'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add case tracking columns and drop unused columns on the BPD UOF table."""
    table_name = 'use_of_force_incidents_bpd'
    op.add_column(table_name, sa.Column('case_number', sa.String(length=128), nullable=True))
    for date_col in ('completed_date', 'received_date'):
        op.add_column(table_name, sa.Column(date_col, sa.DateTime(), nullable=True))
    for old_col in ('division', 'bureau', 'resident_weapon_used'):
        op.drop_column(table_name, old_col)
def downgrade():
    """Restore the removed varchar columns; drop the case tracking columns."""
    table_name = 'use_of_force_incidents_bpd'
    for restored_col in ('resident_weapon_used', 'bureau', 'division'):
        op.add_column(table_name, sa.Column(restored_col, sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    for new_col in ('received_date', 'completed_date', 'case_number'):
        op.drop_column(table_name, new_col)
| {
"repo_name": "codeforamerica/comport",
"path": "migrations/versions/77ad8047becf_.py",
"copies": "1",
"size": "1508",
"license": "bsd-3-clause",
"hash": -635128640786223500,
"line_mean": 44.696969697,
"line_max": 142,
"alpha_frac": 0.7267904509,
"autogenerated": false,
"ratio": 2.9,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9096059049745218,
"avg_score": 0.006146280230956557,
"num_lines": 33
} |
""" Add Cast operation
Revision ID: ea5b31a667ff
Revises: faf6f1c74416
Create Date: 2021-02-22 19:49:56.634787
"""
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column
import sqlalchemy as sa
import json
# revision identifiers, used by Alembic.
revision = 'ea5b31a667ff'
down_revision = 'faf6f1c74416'
branch_labels = None
depends_on = None
# Fixed primary-key value for the new Cast operation row.
CAST_OP_ID = 140
# Operation ids added by this revision; the insert helpers iterate this list.
ALL_OPS = [CAST_OP_ID]
# Base ids from which dependent rows are numbered (each helper increments
# past its base before inserting). BASE_FORM_ID / BASE_FORM_FIELD_ID are
# presumably used by form-insert helpers beyond this chunk — confirm.
BASE_PORT_ID = 325
BASE_FORM_ID = 153
BASE_FORM_FIELD_ID = 584
def _insert_operation():
    """Insert the ``cast`` row into the ``operation`` table."""
    operation_tb = table(
        'operation',
        column('id', Integer),
        column('slug', String),
        column('enabled', String),
        column('type', String),
        column('icon', String),
        column('css_class', String),
    )
    field_names = [c.name for c in operation_tb.columns]
    values = [
        (CAST_OP_ID, 'cast', 1, 'TRANSFORMATION', ' ', None),
    ]
    op.bulk_insert(operation_tb, [dict(zip(field_names, v)) for v in values])
def _insert_operation_translation():
    """Insert the English and Portuguese name/description for the Cast operation."""
    translation_table = table(
        'operation_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String), )
    entries = [
        (CAST_OP_ID, 'en', 'Change data types',
         'Allow to change attribute type (casting).'),
        (CAST_OP_ID, 'pt', 'Alterar tipos de dados',
         'Permite alterar o tipo de um ou mais atributos.'),
    ]
    field_names = [c.name for c in translation_table.columns]
    op.bulk_insert(translation_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_platform():
    """Make the Cast operation available on platforms 1 and 4."""
    link_table = table(
        'operation_platform',
        column('operation_id', Integer),
        column('platform_id', Integer))
    field_names = [c.name for c in link_table.columns]
    entries = [(CAST_OP_ID, 1), (CAST_OP_ID, 4)]
    op.bulk_insert(link_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_port():
    """Create one output port and one input port for each new operation.

    Port ids are allocated sequentially from BASE_PORT_ID + 1; the
    translation and interface helpers below rely on this same numbering.
    """
    port_table = table(
        'operation_port',
        column('id', Integer),
        column('type', String),
        column('tags', String),
        column('operation_id', Integer),
        column('order', Integer),
        column('multiplicity', String),
        column('slug', String))
    entries = []
    port_id = BASE_PORT_ID
    for operation_id in ALL_OPS:
        port_id += 1
        entries.append([port_id, 'OUTPUT', None, operation_id, 1, 'MANY', 'output data'])
        port_id += 1
        entries.append([port_id, 'INPUT', None, operation_id, 1, 'ONE', 'input data'])
    field_names = [c.name for c in port_table.columns]
    op.bulk_insert(port_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_port_translation():
    """Insert en/pt names and descriptions for the ports created above."""
    translation_table = table(
        'operation_port_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String), )
    entries = []
    port_id = BASE_PORT_ID
    for _ in ALL_OPS:
        port_id += 1  # output port (same numbering as _insert_operation_port)
        entries.append([port_id, 'en', 'output data', 'Output data'])
        entries.append([port_id, 'pt', 'dados de saída', 'Dados de saída'])
        port_id += 1  # matching input port
        entries.append([port_id, 'en', 'input data', 'Input data'])
        entries.append([port_id, 'pt', 'dados de entrada', 'Dados de entrada'])
    field_names = [c.name for c in translation_table.columns]
    op.bulk_insert(translation_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_port_interface_operation_port():
    """Associate every new port (output and input) with port interface id 1."""
    link_table = table(
        'operation_port_interface_operation_port',
        column('operation_port_id', Integer),
        column('operation_port_interface_id', Integer))
    field_names = [c.name for c in link_table.columns]
    entries = []
    port_id = BASE_PORT_ID
    for _ in ALL_OPS:
        port_id += 1  # output port
        entries.append([port_id, 1])
        port_id += 1  # input port
        entries.append([port_id, 1])
    op.bulk_insert(link_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_category_operation():
    """Attach each new operation to operation category 7."""
    link_table = table(
        'operation_category_operation',
        column('operation_id', Integer),
        column('operation_category_id', Integer))
    field_names = [c.name for c in link_table.columns]
    entries = [[operation_id, 7] for operation_id in ALL_OPS]
    op.bulk_insert(link_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_form():
    """Create one enabled 'execution' form per new operation.

    Form ids are allocated sequentially from BASE_FORM_ID + 1.
    """
    form_table = table(
        'operation_form',
        column('id', Integer),
        column('enabled', Integer),
        column('order', Integer),
        column('category', String), )
    field_names = [c.name for c in form_table.columns]
    entries = [[BASE_FORM_ID + 1 + offset, 1, 1, 'execution']
               for offset, _ in enumerate(ALL_OPS)]
    op.bulk_insert(form_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_operation_form():
    """Link each new operation to its forms.

    Every operation gets the shared forms 41 (appearance) and 110
    (result/reports) plus its own execution form created above.
    """
    link_table = table(
        'operation_operation_form',
        column('operation_id', Integer),
        column('operation_form_id', Integer))
    field_names = [c.name for c in link_table.columns]
    entries = []
    for offset, operation_id in enumerate(ALL_OPS):
        entries.append([operation_id, 41])   # appearance
        entries.append([operation_id, 110])  # result/reports
        entries.append([operation_id, BASE_FORM_ID + 1 + offset])
    op.bulk_insert(link_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_form_translation():
    """Insert en/pt titles for each execution form created above."""
    translation_table = table(
        'operation_form_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String)
    )
    field_names = [c.name for c in translation_table.columns]
    entries = []
    for offset, _ in enumerate(ALL_OPS):
        form_id = BASE_FORM_ID + 1 + offset
        entries.append([form_id, 'en', 'Execution'])
        entries.append([form_id, 'pt', 'Execução'])
    op.bulk_insert(translation_table,
                   [dict(zip(field_names, entry)) for entry in entries])
def _insert_operation_form_field():
    """Insert the two fields of the Cast operation's execution form.

    Field 585 ('attributes', widget 'cast') lets the user pick attributes and
    target types; field 586 ('errors', widget 'dropdown') selects the
    error-handling strategy.  The available options are stored JSON-encoded
    in the ``values`` column.
    NOTE(review): the ids 585/586 are hard-coded rather than derived from
    BASE_FORM_FIELD_ID (584) defined at module level -- TODO confirm intent.
    """
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('enable_conditions', String),
        column('form_id', Integer), )
    # Target types offered by the 'cast' widget (en/pt labels + stored value).
    types = [
        {"en": "Array", "value": "Array", "key": "Array", "pt": "Array"},
        {"en": "Boolean", "value": "Boolean", "key": "Boolean", "pt": "Booleano (lógico)"},
        {"en": "Date", "value": "Date", "key": "Date", "pt": "Data"},
        {"en": "Decimal", "value": "Decimal", "key": "Decimal", "pt": "Decimal"},
        {"en": "Integer", "value": "Integer", "key": "Integer", "pt": "Inteiro"},
        {"en": "JSON", "value": "JSON", "key": "JSON", "pt": "JSON"},
        {"en": "Time", "value": "Time", "key": "Time", "pt": "Hora"},
    ]
    # Error-handling strategies offered by the 'errors' dropdown.
    errors = [
        {"en": "Coerce value (invalid become null)", "value": "coerce", "key": "coerce", "pt": "Forçar conversão (inválidos viram nulo)"},
        {"en": "Fail", "value": "raise", "key": "raise", "pt": "Falhar"},
        {"en": "Ignore value (may cause errors)", "value": "ignore", "key": "ignore", "pt": "Ignorar valor (pode causar erros)"},
    ]
    data = [
        # CAST_OP_ID
        [585, 'attributes', 'TEXT', 0, 0, None, 'cast', None, json.dumps(types), 'EXECUTION', None,
         BASE_FORM_ID + 1],
        [586, 'errors', 'TEXT', 0, 0, None, 'dropdown', None, json.dumps(errors), 'EXECUTION', None,
         BASE_FORM_ID + 1],
    ]
    columns = [c.name for c in tb.columns]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
    """Insert en/pt labels and help texts for form fields 585 and 586."""
    translation_table = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String), )
    field_names = [c.name for c in translation_table.columns]
    entries = [
        # Field 585: attribute/type selection.
        [585, 'en', 'Attributes', 'Attributes to change their types.'],
        [585, 'pt', 'Atributos', 'Atributos que terão o seu tipo alterado.'],
        # Field 586: error-handling strategy.
        [586, 'en', 'Action if error', 'Action to be taken in case of error.'],
        [586, 'pt', 'Ação em caso de erro', 'Ação a ser tomada em caso de erro.'],
    ]
    op.bulk_insert(translation_table,
                   [dict(zip(field_names, entry)) for entry in entries])
# Form ids created by this migration occupy exactly
# BASE_FORM_ID + 1 .. BASE_FORM_ID + len(ALL_OPS).
# BUGFIX: the delete ranges previously ended at BASE_FORM_ID + 1 + len(ALL_OPS),
# one id past the last inserted form, so downgrade could also wipe a form (and
# its translations/fields) belonging to another revision.
FIRST_NEW_FORM_ID = BASE_FORM_ID + 1
LAST_NEW_FORM_ID = BASE_FORM_ID + len(ALL_OPS)

# Pairs of (upgrade action, downgrade SQL).  upgrade() runs the first element
# of each pair in order; downgrade() executes the second element in reverse
# order to undo the inserts.
all_commands = [
    (_insert_operation, 'DELETE FROM operation WHERE id BETWEEN {s} AND {e}'.format(s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_translation,
     'DELETE FROM operation_translation WHERE id BETWEEN {s} AND {e}'.format(s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_port,
     'DELETE FROM operation_port '
     'WHERE operation_id BETWEEN {s} AND {e}'.format(s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_port_translation,
     'DELETE FROM operation_port_translation WHERE id IN '
     '(SELECT id FROM operation_port '
     ' WHERE operation_id BETWEEN {s} AND {e})'.format(s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_port_interface_operation_port,
     'DELETE FROM operation_port_interface_operation_port '
     'WHERE operation_port_id IN (SELECT id FROM operation_port '
     'WHERE operation_id BETWEEN {s} AND {e})'.format(s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_category_operation,
     'DELETE FROM operation_category_operation '
     'WHERE operation_id BETWEEN {s} AND {e}'.format(s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_platform, 'DELETE FROM operation_platform '
                                 'WHERE operation_id BETWEEN {s} AND {e}'.format(s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_form,
     'DELETE FROM operation_form WHERE id BETWEEN {s} AND {e}'.format(
         s=FIRST_NEW_FORM_ID, e=LAST_NEW_FORM_ID)),
    (_insert_operation_operation_form, 'DELETE FROM operation_operation_form '
                                       'WHERE operation_id BETWEEN {s} AND {e} '.format(
                                           s=CAST_OP_ID, e=CAST_OP_ID)),
    (_insert_operation_form_translation,
     'DELETE FROM operation_form_translation WHERE id BETWEEN {s} AND {e} '.format(
         s=FIRST_NEW_FORM_ID, e=LAST_NEW_FORM_ID)),
    (_insert_operation_form_field, """DELETE FROM operation_form_field
        WHERE form_id BETWEEN {s} AND {e}""".format(
        s=FIRST_NEW_FORM_ID, e=LAST_NEW_FORM_ID)),
    (_insert_operation_form_field_translation,
     'DELETE FROM operation_form_field_translation WHERE id IN (' +
     'SELECT id FROM operation_form_field WHERE form_id BETWEEN {s} AND {e})'.format(
         s=FIRST_NEW_FORM_ID, e=LAST_NEW_FORM_ID)),
]
def upgrade():
    """Apply the migration: run every entry of ``all_commands`` in order.

    Each entry's first element may be a SQL string, a list of SQL strings,
    or a callable performing inserts via the alembic ``op`` helpers.  On any
    failure the session is rolled back and the exception re-raised.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for cmd in all_commands:
            upgrade_step = cmd[0]
            if isinstance(upgrade_step, str):
                # Single SQL statement.
                connection.execute(upgrade_step)
            elif isinstance(upgrade_step, list):
                # Sequence of SQL statements.
                for statement in upgrade_step:
                    connection.execute(statement)
            else:
                # Callable insert helper.
                upgrade_step()
    except Exception:
        # BUGFIX: was a bare `except:`, which also intercepted SystemExit and
        # KeyboardInterrupt; rollback + re-raise is now limited to Exception.
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Undo the migration: run each entry's delete command in reverse order.

    Foreign-key checks are disabled while the deletes run and re-enabled in a
    ``finally`` block, so the connection is never left with checks off.  On
    failure the session is rolled back and the exception re-raised.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        connection.execute('SET FOREIGN_KEY_CHECKS=0;')
        try:
            for cmd in reversed(all_commands):
                downgrade_step = cmd[1]
                if isinstance(downgrade_step, str):
                    # Single SQL statement.
                    connection.execute(downgrade_step)
                elif isinstance(downgrade_step, list):
                    # Sequence of SQL statements.
                    for statement in downgrade_step:
                        connection.execute(statement)
                else:
                    # Callable cleanup helper.
                    downgrade_step()
        finally:
            # BUGFIX: previously only executed on success, leaving the
            # connection with FK checks disabled after a failed delete.
            connection.execute('SET FOREIGN_KEY_CHECKS=1;')
    except Exception:
        session.rollback()
        raise
    session.commit()
| {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/ea5b31a667ff_add_cast_operation.py",
"copies": "1",
"size": "12005",
"license": "apache-2.0",
"hash": -1637578508204244700,
"line_mean": 31.1501340483,
"line_max": 138,
"alpha_frac": 0.5702968646,
"autogenerated": false,
"ratio": 3.4204221334854537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44907189980854534,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.