file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
stat.go | stock_map := smap.New(true)
stock_map.Set(kline_data.Stockcode, stat)
acc_map = smap.New(true)
acc_map.Set(account, stock_map)
mapResult.Set(user, acc_map)
}
DoCalculateSTK(kline_data, stat)
}
func GetTransaction() {
for {
var con *kdb.KDBConn
var err error
con, err = kdb.DialKDB("127.0.0.1", 3900, "")
// con, err = kdb.DialKDB("139.196.77.165", 5033, "")
if err != nil {
fmt.Printf("Failed to connect kdb: %s", err.Error())
return
}
err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "response"}, &kdb.K{-kdb.KS, kdb.NONE, ""})
if err != nil {
fmt.Println("Subscribe: %s", err.Error())
return
}
// ignore type print output
res, _, err := con.ReadMessage()
if err != nil {
fmt.Println("Error processing message: ", err.Error())
return
}
data_list := res.Data.([]*kdb.K)
fmt.Println("data_list:", data_list)
table := data_list[2].Data.(kdb.Table)
fmt.Println("table:", table)
for i := 0; i < int(table.Data[0].Len()); i++ {
kline_data := &Response{}
kline_data2 := &ResponseInt64{}
err := kdb.UnmarshalDict(table.Index(i), kline_data)
if err != nil {
fmt.Println("Failed to unmrshall dict ", err)
continue
}
err2 := kdb.UnmarshalDict(table.Index(i), kline_data2)
if err2 != nil {
fmt.Println("Failed to unmrshall dict ", err2)
continue
}
// fmt.Println("get:", kline_data)
// fmt.Println("get2:", kline_data2)
if kline_data.Askvol == 0 && kline_data2.Askvol != 0 {
kline_data.Askvol = int32(kline_data2.Askvol)
kline_data.Withdraw = int32(kline_data2.Withdraw)
kline_data.Status = int32(kline_data2.Status)
kline_data.Bidvol = int32(kline_data2.Bidvol)
}
handleData(kline_data)
}
}
}
//获取行情数据来统计map内每个票的浮动盈亏
func GetMarket() {
for {
fmt.Println("==GetMarket==", time.Now())
var con *kdb.KDBConn
var err error
// con, err = kdb.DialKDB("10.0.0.71", 5010, "")
con, err = kdb.DialKDB("139.196.77.165", 5031, "")
if err != nil {
fmt.Printf("Failed to connect kdb: %s", err.Error())
return
}
err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "Market"}, &kdb.K{-kdb.KS, kdb.NONE, ""})
if err != nil {
fmt.Println("Subscribe: %s", err.Error())
return
}
res, _, err := con.ReadMessage()
if err != nil {
fmt.Println("Error processing message: ", err.Error())
return
}
data_list := res.Data.([]*kdb.K)
table := data_list[2].Data.(kdb.Table)
for i := 0; i < int(table.Data[0].Len()); i++ {
kline_data := &Market{}
err := kdb.UnmarshalDict(table.Index(i), kline_data)
if err != nil {
fmt.Println("Failed to unmrshall dict ", err)
continue
}
fmt.Println("getMarket:", kline_data)
for _, user_map := range mapResult.Values() {
for _, account_map := range (user_map.(smap.Map)).Values() {
for _, stock_map := range (account_map.(smap.Map)).Values() {
stat := stock_map.(*STK)
if stat.SpaceStk.Stockcode == kline_data.Sym {
DoRefresh(float64(kline_data.NMatch/10000), stat)
}
}
}
}
}
}
marketChan <- 0
}
//再算每个订单之前,要判断是不是增量。算完之后,把订单存到数组
func DoCalculateSTK(newOrder *Response, stk *STK) {
fmt.Println("---DoCalculateSTK newOrder:", newOrder)
fmt.Println("---DoCalculateSTK stk:", stk)
// //清除
// stk.SpaceStk.AvgPrice = 0
// stk.SpaceStk.OnlineProfit = 0
// stk.SpaceStk.SpaceVol = 0
// stk.ProfitStk.BidCount = 0
// stk.ProfitStk.BidMoneySum = 0
// stk.ProfitStk.BidNum = 0
// stk.ProfitStk.PastProfit = 0
// stk.ProfitStk.TotalTax = 0
// //之前的全部统计一遍
// for _, order := range stk.orderArray {
// if order.Bidvol != 0 && (order.Status == 2 || order.Status == 5 || order.Status == 4) {
// CalculateSingle(order, stk)
// }
// }
//先统计新订单,再更新订单数组
if newOrder.Status == 4 {
CalculateSingle(newOrder, stk)
var index int
flag := false
for i, order := range stk.orderArray {
// fmt.Println("iiiii ", i)
if newOrder.Entrustno == order.Entrustno && order.Status != 4 {
index = i
flag = true
break
}
}
if flag {
updateArray(stk, index, newOrder)
} else {
stk.orderArray = append(stk.orderArray, newOrder)
}
} else if newOrder.Status == 2 || newOrder.Status == 5 {
var index int
flag := false
for i, order := range stk.orderArray {
if newOrder.Entrustno == order.Entrustno && order.Status != 4 {
//算增量
fmt.Println("---算增量----")
x := &Response{}
x.Bidvol = newOrder.Bidvol - order.Bidvol
x.Bidprice = (newOrder.Bidprice*float64(newOrder.Bidvol) - order.Bidprice*float64(order.Bidvol)) / float64(newOrder.Bidvol-order.Bidvol)
CalculateSingle(x, stk)
index = i
flag = true
break
}
}
if flag {
updateArray(stk, index, newOrder)
} else {
CalculateSingle(newOrder, stk)
stk.orderArray = append(stk.orderArray, newOrder)
}
} else {
stk.orderArray = append(stk.orderArray, newOrder)
}
}
func CalculateSingle(newOrder *Response, stat *STK) {
fmt.Println("CalculateSingle--- vol:", newOrder.Bidvol, " price:", newOrder.Bidprice, " status:", newOrder.Status)
stat.Lock()
//StaticsResult为实时统计对象,每一个交易完成,刷下统计
if newOrder.Bidvol != 0 {
//每次买入刷新均价。然后每次实时价格减去均价不断出现浮动盈利
//算仓位 不管买还是卖,仓位都是相加减
var spaceTemp int32 = stat.SpaceStk.SpaceVol //临时对象记录下之前的仓位量
var avgTemp float64 = stat.SpaceStk.AvgPr | ice //临时对象记录下之前的均价
//卖的大于原有仓位
var flag bool = false
if AbsInt(newOrder.Bidvol) >= AbsInt(stat.SpaceStk.SpaceVol) {
flag = true
}
stat.SpaceStk.SpaceVol = stat.SpaceStk.SpaceVol + newOrder.Bidvol
fmt.Println("算仓位", stat.SpaceStk.SpaceVol)
if newOrder.Bidvol > 0 {
//算均价
if spaceTemp < 0 {
if flag {
stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice)
}
} else {
stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol))
} | identifier_body | |
stat.go | go GetTransaction()
printMap()
// <-marketChan
fmt.Println("==stat=over===")
}
func SelectTransaction() {
fmt.Println("==SelectTransaction==")
var con *kdb.KDBConn
var err error
con, err = kdb.DialKDB("139.224.9.75", 52800, "")
// con, err = kdb.DialKDB("139.196.77.165", 5033, "")
if err != nil {
fmt.Printf("Failed to connect kdb: %s", err.Error())
return
}
res, err := con.Call("0!select from response")
if err != nil {
fmt.Println("Subscribe: %s", err.Error())
return
}
// ignore type print output
// fmt.Println("res:", res)
table := res.Data.(kdb.Table)
// fmt.Println("table:", table)
for i := 0; i < int(table.Data[0].Len()); i++ {
kline_data := &Response{}
kline_data2 := &ResponseInt64{}
err := kdb.UnmarshalDict(table.Index(i), kline_data)
if err != nil {
fmt.Println("Failed to unmrshall dict ", err)
continue
}
err2 := kdb.UnmarshalDict(table.Index(i), kline_data2)
if err2 != nil {
fmt.Println("Failed to unmrshall dict ", err2)
continue
}
if kline_data.Askvol == 0 && kline_data2.Askvol != 0 {
kline_data.Askvol = int32(kline_data2.Askvol)
kline_data.Withdraw = int32(kline_data2.Withdraw)
kline_data.Status = int32(kline_data2.Status)
kline_data.Bidvol = int32(kline_data2.Bidvol)
kline_data.Entrustno = int32(kline_data2.Entrustno)
}
handleData(kline_data)
}
// fmt.Println("==SelectTransaction is over ==")
}
//按照 用户->账户->股票 结构初始化map容器下每一个STK统计对象。每个STK对应的是哪个用户下哪个账户的哪个票。然后新订单来了,拿对应的STK来做统计
func handleData(kline_data *Response) {
fmt.Println("select:", kline_data)
user := kline_data.Sym
account := kline_data.Accountname
stat := &STK{}
p := ProfitSTK{}
s := SpaceSTK{}
stat.ProfitStk = p
stat.SpaceStk = s
arr := []*Response{}
stat.orderArray = arr
stat.ProfitStk.Sym = kline_data.Sym
stat.ProfitStk.Accountname = kline_data.Accountname
stat.ProfitStk.Stockcode = kline_data.Stockcode
stat.SpaceStk.Sym = kline_data.Sym
stat.SpaceStk.Accountname = kline_data.Accountname
stat.SpaceStk.Stockcode = kline_data.Stockcode
var acc_map smap.Map
if mapResult.Exists(user) {
acc_map = (mapResult.Value(user)).(smap.Map)
if acc_map.Exists(account) {
stock_map := acc_map.Value(account).(smap.Map)
if stock_map.Exists(kline_data.Stockcode) {
stat = (stock_map.Value(kline_data.Stockcode)).(*STK)
} else {
stock_map.Set(kline_data.Stockcode, stat)
}
} else {
stock_map := smap.New(true)
stock_map.Set(kline_data.Stockcode, stat)
acc_map.Set(account, stock_map)
}
} else {
stock_map := smap.New(true)
stock_map.Set(kline_data.Stockcode, stat)
acc_map = smap.New(true)
acc_map.Set(account, stock_map)
mapResult.Set(user, acc_map)
}
DoCalculateSTK(kline_data, stat)
}
func GetTransaction() {
for {
var con *kdb.KDBConn
var err error
con, err = kdb.DialKDB("127.0.0.1", 3900, "")
// con, err = kdb.DialKDB("139.196.77.165", 5033, "")
if err != nil {
fmt.Printf("Failed to connect kdb: %s", err.Error())
return
}
err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "response"}, &kdb.K{-kdb.KS, kdb.NONE, ""})
if err != nil {
fmt.Println("Subscribe: %s", err.Error())
return
}
// ignore type print output
res, _, err := con.ReadMessage()
if err != nil {
fmt.Println("Error processing message: ", err.Error())
return
}
data_list := res.Data.([]*kdb.K)
fmt.Println("data_list:", data_list)
table := data_list[2].Data.(kdb.Table)
fmt.Println("table:", table)
for i := 0; i < int(table.Data[0].Len()); i++ {
kline_data := &Response{}
kline_data2 := &ResponseInt64{}
err := kdb.UnmarshalDict(table.Index(i), kline_data)
if err != nil {
fmt.Println("Failed to unmrshall dict ", err)
continue
}
err2 := kdb.UnmarshalDict(table.Index(i), kline_data2)
if err2 != nil {
fmt.Println("Failed to unmrshall dict ", err2)
continue
}
// fmt.Println("get:", kline_data)
// fmt.Println("get2:", kline_data2)
if kline_data.Askvol == 0 && kline_data2.Askvol != 0 {
kline_data.Askvol = int32(kline_data2.Askvol)
kline_data.Withdraw = int32(kline_data2.Withdraw)
kline_data.Status = int32(kline_data2.Status)
kline_data.Bidvol = int32(kline_data2.Bidvol)
}
handleData(kline_data)
}
}
}
//获取行情数据来统计map内每个票的浮动盈亏
func GetMarket() {
for {
fmt.Println("==GetMarket==", time.Now())
var con *kdb.KDBConn
var err error
// con, err = kdb.DialKDB("10.0.0.71", 5010, "")
con, err = kdb.DialKDB("139.196.77.165", 5031, "")
if err != nil {
fmt.Printf("Fa | onnect kdb: %s", err.Error())
return
}
err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "Market"}, &kdb.K{-kdb.KS, kdb.NONE, ""})
if err != nil {
fmt.Println("Subscribe: %s", err.Error())
return
}
res, _, err := con.ReadMessage()
if err != nil {
fmt.Println("Error processing message: ", err.Error())
return
}
data_list := res.Data.([]*kdb.K)
table := data_list[2].Data.(kdb.Table)
for i := 0; i < int(table.Data[0].Len()); i++ {
kline_data := &Market{}
err := kdb.UnmarshalDict(table.Index(i), kline_data)
if err != nil {
fmt.Println("Failed to unmrshall dict ", err)
continue
}
fmt.Println("getMarket:", kline_data)
for _, user_map := range mapResult.Values() {
for _, account_map := range (user_map.(smap.Map)).Values() {
for _, stock_map := range (account_map.(smap.Map)).Values() {
stat := stock_map.(*STK)
if stat.SpaceStk.Stockcode == kline_data.Sym {
DoRefresh(float64(kline_data.NMatch/10000), stat)
}
}
}
}
}
}
marketChan <- 0
}
//再算每个订单之前,要判断是不是增量。算完之后,把订单存到数组
func DoCalculateSTK(newOrder *Response, stk *STK) {
fmt.Println("---DoCalculateSTK newOrder:", newOrder)
fmt.Println("---DoCalculateSTK stk:", stk)
// //清除
// stk.SpaceStk.AvgPrice = 0
// stk.SpaceStk.OnlineProfit = 0
// stk.SpaceStk.SpaceVol = 0
// stk.ProfitStk.BidCount = 0
// stk.ProfitStk.BidMoneySum = 0
// stk.ProfitStk.BidNum = | iled to c | identifier_name |
expdescription.py | .isDataChanged():
self.writeExperimentConfiguration(ask=True)
Qt.QWidget.closeEvent(self, event)
def setModel(self, model):
'''reimplemented from :class:`TaurusBaseWidget`'''
TaurusBaseWidget.setModel(self, model)
self._reloadConf(force=True)
#set the model of some child widgets
door = self.getModelObj()
if door is None: return
tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead
msname = door.macro_server.getFullName()
self.ui.taurusModelTree.setModel(tghost)
self.ui.sardanaElementTree.setModel(msname)
def _reloadConf(self, force=False):
if not force and self.isDataChanged():
op = Qt.QMessageBox.question(self, "Reload info from door",
"If you reload, all current experiment configuration changes will be lost. Reload?",
Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel)
if op != Qt.QMessageBox.Yes:
return
door = self.getModelObj()
if door is None: return
conf = door.getExperimentConfiguration()
self._originalConfiguration = copy.deepcopy(conf)
self.setLocalConfig(conf)
self._setDirty(False)
self._dirtyMntGrps = set()
#set a list of available channels
avail_channels = {}
for ch_info in door.macro_server.getExpChannelElements().values():
avail_channels[ch_info.full_name] = ch_info.getData()
self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels)
def _setDirty(self, dirty):
self._dirty = dirty
self._updateButtonBox()
def isDataChanged(self):
"""Tells if the local data has been modified since it was last refreshed
:return: (bool) True if he local data has been modified since it was last refreshed
"""
return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps)
def _updateButtonBox(self, *args, **kwargs):
self.ui.buttonBox.setEnabled(self.isDataChanged())
def getLocalConfig(self):
return self._localConfig
def setLocalConfig(self, conf):
'''gets a ExpDescription dictionary and sets up the widget'''
self._localConfig = conf
#set the Channel Editor
activeMntGrpName = self._localConfig['ActiveMntGrp'] or ''
if activeMntGrpName in self._localConfig['MntGrpConfigs']:
mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName]
self.ui.channelEditor.getQModel().setDataSource(mgconfig)
#set the measurement group ComboBox
self.ui.activeMntGrpCB.clear()
mntGrpLabels = []
for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items():
# get labels to visualize names with lower and upper case
mntGrpLabels.append(mntGrpConf['label'])
self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels))
idx = self.ui.activeMntGrpCB.findText(activeMntGrpName,
# case insensitive find
Qt.Qt.MatchFixedString)
self.ui.activeMntGrpCB.setCurrentIndex(idx)
#set the system snapshot list
psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig
# TODO: For Taurus 4 compatibility
psl_fullname = []
for name, display in psl:
psl_fullname.append(("tango://%s" % name, display))
self.ui.preScanList.clear()
self.ui.preScanList.addModels(psl_fullname)
#other settings
self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile']))
self.ui.pathLE.setText(self._localConfig['ScanDir'] or '')
self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1)
def writeExperimentConfiguration(self, ask=True):
'''sends the current local configuration to the door
:param ask: (bool) If True (default) prompts the user before saving.
'''
if ask:
op = Qt.QMessageBox.question(self, "Save configuration?",
'Do you want to save the current configuration?\n(if not, any changes will be lost)',
Qt.QMessageBox.Yes | Qt.QMessageBox.No)
if op != Qt.QMessageBox.Yes:
return False
conf = self.getLocalConfig()
#make sure that no empty measurement groups are written
for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items():
if mgconfig is not None and not mgconfig.get('controllers'):
mglabel = mgconfig['label']
Qt.QMessageBox.information(self, "Empty Measurement group",
"The measurement group '%s' is empty. Fill it (or delete it) before applying" % mglabel,
Qt.QMessageBox.Ok)
self.changeActiveMntGrp(mgname)
return False
#check if the currently displayed mntgrp is changed
if self.ui.channelEditor.getQModel().isDataChanged():
self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp'])
door = self.getModelObj()
door.setExperimentConfiguration(conf, mnt_grps=self._dirtyMntGrps)
self._originalConfiguration = copy.deepcopy(conf)
self._dirtyMntGrps = set()
self.ui.channelEditor.getQModel().setDataChanged(False)
self._setDirty(False)
self.emit(Qt.SIGNAL('experimentConfigurationChanged'), copy.deepcopy(conf))
return True
def changeActiveMntGrp(self, activeMntGrpName):
activeMntGrpName = str(activeMntGrpName)
if self._localConfig is None:
return
if activeMntGrpName == self._localConfig['ActiveMntGrp']:
return #nothing changed
if activeMntGrpName not in self._localConfig['MntGrpConfigs']:
raise KeyError('Unknown measurement group "%s"' % activeMntGrpName)
#add the previous measurement group to the list of "dirty" groups if something was changed
if self.ui.channelEditor.getQModel().isDataChanged():
self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp'])
self._localConfig['ActiveMntGrp'] = activeMntGrpName
i = self.ui.activeMntGrpCB.findText(activeMntGrpName,
# case insensitive find
Qt.Qt.MatchFixedString)
self.ui.activeMntGrpCB.setCurrentIndex(i)
mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName]
self.ui.channelEditor.getQModel().setDataSource(mgconfig)
self._setDirty(True)
def createMntGrp(self):
'''creates a new Measurement Group'''
if self._localConfig is None:
return
mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group",
"Enter a name for the new measurement Group")
if not ok: return
mntGrpName = str(mntGrpName)
#check that the given name is not an existing pool element
ms = self.getModelObj().macro_server
poolElementNames = [v.name for v in ms.getElementsWithInterface("PoolElement").values()]
while mntGrpName in poolElementNames:
Qt.QMessageBox.warning(self, "Cannot create Measurement group",
"The name '%s' already is used for another pool element. Please Choose a different one." % mntGrpName,
Qt.QMessageBox.Ok)
mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group",
"Enter a name for the new measurement Group",
Qt.QLineEdit.Normal,
mntGrpName)
if not ok: return
mntGrpName = str(mntGrpName)
#check that the measurement group is not already in the localConfig
if mntGrpName in self._localConfig['MntGrpConfigs']:
Qt.QMessageBox.warning(self, "%s already exists" % mntGrpName,
'A measurement group named "%s" already exists. A new one will not be created' % mntGrpName)
return
#add an empty configuration dictionary to the local config
mgconfig = {'label': mntGrpName, 'controllers':{} }
self._localConfig['MntGrpConfigs'][mntGrpName] = mgconfig
#add the new measurement group to the list of "dirty" groups
self._dirtyMntGrps.add(mntGrpName)
#add the name to the combobox
self.ui.activeMntGrpCB.addItem(mntGrpName)
#make it the Active MntGrp
self.changeActiveMntGrp(mntGrpName)
| def deleteMntGrp(self):
'''creates a new Measurement Group'''
activeMntGrpName = str(self.ui.activeMntGrpCB.currentText())
op = Qt.QMessageBox.question(self, "Delete Measurement Group", | random_line_split | |
expdescription.py | (Qt.QWidget, TaurusBaseWidget):
'''
A widget for editing the configuration of a experiment (measurement groups,
plot and storage parameters, etc).
It receives a Sardana Door name as its model and gets/sets the configuration
using the `ExperimentConfiguration` environmental variable for that Door.
'''
def __init__(self, parent=None, door=None, plotsButton=True):
Qt.QWidget.__init__(self, parent)
TaurusBaseWidget.__init__(self, 'ExpDescriptionEditor')
self.loadUi()
self.ui.buttonBox.setStandardButtons(Qt.QDialogButtonBox.Reset | Qt.QDialogButtonBox.Apply)
newperspectivesDict = copy.deepcopy(self.ui.sardanaElementTree.KnownPerspectives)
#newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'] = [SardanaAcquirableProxyModel, SardanaElementPlainModel]
newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'][0] = SardanaAcquirableProxyModel
self.ui.sardanaElementTree.KnownPerspectives = newperspectivesDict #assign a copy because if just a key of this class memberwas modified, all instances of this class would be affected
self.ui.sardanaElementTree._setPerspective(self.ui.sardanaElementTree.DftPerspective)
self._localConfig = None
self._originalConfiguration = None
self._dirty = False
self._dirtyMntGrps = set()
self.connect(self.ui.activeMntGrpCB, Qt.SIGNAL('activated (QString)'), self.changeActiveMntGrp)
self.connect(self.ui.createMntGrpBT, Qt.SIGNAL('clicked ()'), self.createMntGrp)
self.connect(self.ui.deleteMntGrpBT, Qt.SIGNAL('clicked ()'), self.deleteMntGrp)
self.connect(self.ui.compressionCB, Qt.SIGNAL('currentIndexChanged (int)'), self.onCompressionCBChanged)
self.connect(self.ui.pathLE, Qt.SIGNAL('textEdited (QString)'), self.onPathLEEdited)
self.connect(self.ui.filenameLE, Qt.SIGNAL('textEdited (QString)'), self.onFilenameLEEdited)
self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('dataChanged (QModelIndex, QModelIndex)'), self._updateButtonBox)
self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('modelReset ()'), self._updateButtonBox)
preScanList = self.ui.preScanList
self.connect(preScanList, Qt.SIGNAL('dataChanged'),
self.onPreScanSnapshotChanged)
#TODO: For Taurus 4 compatibility
if hasattr(preScanList, "dataChangedSignal"):
preScanList.dataChangedSignal.connect(self.onPreScanSnapshotChanged)
self.connect(self.ui.choosePathBT, Qt.SIGNAL('clicked ()'), self.onChooseScanDirButtonClicked)
self.__plotManager = None
icon = resource.getIcon(":/actions/view.svg")
self.togglePlotsAction = Qt.QAction(icon, "Show/Hide plots", self)
self.togglePlotsAction.setCheckable(True)
self.togglePlotsAction.setChecked(False)
self.togglePlotsAction.setEnabled(plotsButton)
self.addAction(self.togglePlotsAction)
self.connect(self.togglePlotsAction, Qt.SIGNAL("toggled(bool)"),
self.onPlotsButtonToggled)
self.ui.plotsButton.setDefaultAction(self.togglePlotsAction)
if door is not None:
self.setModel(door)
self.connect(self.ui.buttonBox, Qt.SIGNAL("clicked(QAbstractButton *)"), self.onDialogButtonClicked)
#Taurus Configuration properties and delegates
self.registerConfigDelegate(self.ui.channelEditor)
def getModelClass(self):
'''reimplemented from :class:`TaurusBaseWidget`'''
return taurus.core.taurusdevice.TaurusDevice
def onChooseScanDirButtonClicked(self):
ret = Qt.QFileDialog.getExistingDirectory (self, 'Choose directory for saving files', self.ui.pathLE.text())
if ret:
self.ui.pathLE.setText(ret)
self.ui.pathLE.emit(Qt.SIGNAL('textEdited (QString)'), ret)
def onDialogButtonClicked(self, button):
role = self.ui.buttonBox.buttonRole(button)
if role == Qt.QDialogButtonBox.ApplyRole:
self.writeExperimentConfiguration(ask=False)
elif role == Qt.QDialogButtonBox.ResetRole:
self._reloadConf()
def closeEvent(self, event):
'''This event handler receives widget close events'''
if self.isDataChanged():
self.writeExperimentConfiguration(ask=True)
Qt.QWidget.closeEvent(self, event)
def setModel(self, model):
'''reimplemented from :class:`TaurusBaseWidget`'''
TaurusBaseWidget.setModel(self, model)
self._reloadConf(force=True)
#set the model of some child widgets
door = self.getModelObj()
if door is None: return
tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead
msname = door.macro_server.getFullName()
self.ui.taurusModelTree.setModel(tghost)
self.ui.sardanaElementTree.setModel(msname)
def _reloadConf(self, force=False):
if not force and self.isDataChanged():
op = Qt.QMessageBox.question(self, "Reload info from door",
"If you reload, all current experiment configuration changes will be lost. Reload?",
Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel)
if op != Qt.QMessageBox.Yes:
return
door = self.getModelObj()
if door is None: return
conf = door.getExperimentConfiguration()
self._originalConfiguration = copy.deepcopy(conf)
self.setLocalConfig(conf)
self._setDirty(False)
self._dirtyMntGrps = set()
#set a list of available channels
avail_channels = {}
for ch_info in door.macro_server.getExpChannelElements().values():
avail_channels[ch_info.full_name] = ch_info.getData()
self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels)
def _setDirty(self, dirty):
self._dirty = dirty
self._updateButtonBox()
def isDataChanged(self):
"""Tells if the local data has been modified since it was last refreshed
:return: (bool) True if he local data has been modified since it was last refreshed
"""
return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps)
def _updateButtonBox(self, *args, **kwargs):
self.ui.buttonBox.setEnabled(self.isDataChanged())
def getLocalConfig(self):
return self._localConfig
def setLocalConfig(self, conf):
'''gets a ExpDescription dictionary and sets up the widget'''
self._localConfig = conf
#set the Channel Editor
activeMntGrpName = self._localConfig['ActiveMntGrp'] or ''
if activeMntGrpName in self._localConfig['MntGrpConfigs']:
mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName]
self.ui.channelEditor.getQModel().setDataSource(mgconfig)
#set the measurement group ComboBox
self.ui.activeMntGrpCB.clear()
mntGrpLabels = []
for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items():
# get labels to visualize names with lower and upper case
mntGrpLabels.append(mntGrpConf['label'])
self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels))
idx = self.ui.activeMntGrpCB.findText(activeMntGrpName,
# case insensitive find
Qt.Qt.MatchFixedString)
self.ui.activeMntGrpCB.setCurrentIndex(idx)
#set the system snapshot list
psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig
# TODO: For Taurus 4 compatibility
psl_fullname = []
for name, display in psl:
psl_fullname.append(("tango://%s" % name, display))
self.ui.preScanList.clear()
self.ui.preScanList.addModels(psl_fullname)
#other settings
self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile']))
self.ui.pathLE.setText(self._localConfig['ScanDir'] or '')
self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1)
def writeExperimentConfiguration(self, ask=True):
'''sends the current local configuration to the door
:param ask: (bool) If True (default) prompts the user before saving.
'''
if ask:
op = Qt.QMessageBox.question(self, "Save configuration?",
'Do you want to save the current configuration?\n(if not, any changes will be lost)',
Qt.QMessageBox.Yes | Qt.QMessageBox.No)
if op != Qt.QMessageBox.Yes:
return False
conf = self.getLocalConfig()
#make sure that no empty measurement groups are written
for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items():
if mgconfig is not None and not mgconfig.get('controllers'):
mglabel = mgconfig['label | ExpDescriptionEditor | identifier_name | |
expdescription.py | self.togglePlotsAction.setEnabled(plotsButton)
self.addAction(self.togglePlotsAction)
self.connect(self.togglePlotsAction, Qt.SIGNAL("toggled(bool)"),
self.onPlotsButtonToggled)
self.ui.plotsButton.setDefaultAction(self.togglePlotsAction)
if door is not None:
self.setModel(door)
self.connect(self.ui.buttonBox, Qt.SIGNAL("clicked(QAbstractButton *)"), self.onDialogButtonClicked)
#Taurus Configuration properties and delegates
self.registerConfigDelegate(self.ui.channelEditor)
def getModelClass(self):
'''reimplemented from :class:`TaurusBaseWidget`'''
return taurus.core.taurusdevice.TaurusDevice
def onChooseScanDirButtonClicked(self):
ret = Qt.QFileDialog.getExistingDirectory (self, 'Choose directory for saving files', self.ui.pathLE.text())
if ret:
self.ui.pathLE.setText(ret)
self.ui.pathLE.emit(Qt.SIGNAL('textEdited (QString)'), ret)
def onDialogButtonClicked(self, button):
role = self.ui.buttonBox.buttonRole(button)
if role == Qt.QDialogButtonBox.ApplyRole:
self.writeExperimentConfiguration(ask=False)
elif role == Qt.QDialogButtonBox.ResetRole:
self._reloadConf()
def closeEvent(self, event):
'''This event handler receives widget close events'''
if self.isDataChanged():
self.writeExperimentConfiguration(ask=True)
Qt.QWidget.closeEvent(self, event)
def setModel(self, model):
'''reimplemented from :class:`TaurusBaseWidget`'''
TaurusBaseWidget.setModel(self, model)
self._reloadConf(force=True)
#set the model of some child widgets
door = self.getModelObj()
if door is None: return
tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead
msname = door.macro_server.getFullName()
self.ui.taurusModelTree.setModel(tghost)
self.ui.sardanaElementTree.setModel(msname)
def _reloadConf(self, force=False):
if not force and self.isDataChanged():
op = Qt.QMessageBox.question(self, "Reload info from door",
"If you reload, all current experiment configuration changes will be lost. Reload?",
Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel)
if op != Qt.QMessageBox.Yes:
return
door = self.getModelObj()
if door is None: return
conf = door.getExperimentConfiguration()
self._originalConfiguration = copy.deepcopy(conf)
self.setLocalConfig(conf)
self._setDirty(False)
self._dirtyMntGrps = set()
#set a list of available channels
avail_channels = {}
for ch_info in door.macro_server.getExpChannelElements().values():
avail_channels[ch_info.full_name] = ch_info.getData()
self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels)
def _setDirty(self, dirty):
self._dirty = dirty
self._updateButtonBox()
def isDataChanged(self):
"""Tells if the local data has been modified since it was last refreshed
:return: (bool) True if he local data has been modified since it was last refreshed
"""
return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps)
def _updateButtonBox(self, *args, **kwargs):
self.ui.buttonBox.setEnabled(self.isDataChanged())
def getLocalConfig(self):
return self._localConfig
def setLocalConfig(self, conf):
'''gets a ExpDescription dictionary and sets up the widget'''
self._localConfig = conf
#set the Channel Editor
activeMntGrpName = self._localConfig['ActiveMntGrp'] or ''
if activeMntGrpName in self._localConfig['MntGrpConfigs']:
mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName]
self.ui.channelEditor.getQModel().setDataSource(mgconfig)
#set the measurement group ComboBox
self.ui.activeMntGrpCB.clear()
mntGrpLabels = []
for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items():
# get labels to visualize names with lower and upper case
mntGrpLabels.append(mntGrpConf['label'])
self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels))
idx = self.ui.activeMntGrpCB.findText(activeMntGrpName,
# case insensitive find
Qt.Qt.MatchFixedString)
self.ui.activeMntGrpCB.setCurrentIndex(idx)
#set the system snapshot list
psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig
# TODO: For Taurus 4 compatibility
psl_fullname = []
for name, display in psl:
psl_fullname.append(("tango://%s" % name, display))
self.ui.preScanList.clear()
self.ui.preScanList.addModels(psl_fullname)
#other settings
self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile']))
self.ui.pathLE.setText(self._localConfig['ScanDir'] or '')
self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1)
def writeExperimentConfiguration(self, ask=True):
'''sends the current local configuration to the door
:param ask: (bool) If True (default) prompts the user before saving.
'''
if ask:
op = Qt.QMessageBox.question(self, "Save configuration?",
'Do you want to save the current configuration?\n(if not, any changes will be lost)',
Qt.QMessageBox.Yes | Qt.QMessageBox.No)
if op != Qt.QMessageBox.Yes:
return False
conf = self.getLocalConfig()
#make sure that no empty measurement groups are written
for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items():
if mgconfig is not None and not mgconfig.get('controllers'):
mglabel = mgconfig['label']
Qt.QMessageBox.information(self, "Empty Measurement group",
"The measurement group '%s' is empty. Fill it (or delete it) before applying" % mglabel,
Qt.QMessageBox.Ok)
self.changeActiveMntGrp(mgname)
return False
#check if the currently displayed mntgrp is changed
if self.ui.channelEditor.getQModel().isDataChanged():
self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp'])
door = self.getModelObj()
door.setExperimentConfiguration(conf, mnt_grps=self._dirtyMntGrps)
self._originalConfiguration = copy.deepcopy(conf)
self._dirtyMntGrps = set()
self.ui.channelEditor.getQModel().setDataChanged(False)
self._setDirty(False)
self.emit(Qt.SIGNAL('experimentConfigurationChanged'), copy.deepcopy(conf))
return True
def changeActiveMntGrp(self, activeMntGrpName):
activeMntGrpName = str(activeMntGrpName)
if self._localConfig is None:
return
if activeMntGrpName == self._localConfig['ActiveMntGrp']:
return #nothing changed
if activeMntGrpName not in self._localConfig['MntGrpConfigs']:
raise KeyError('Unknown measurement group "%s"' % activeMntGrpName)
#add the previous measurement group to the list of "dirty" groups if something was changed
if self.ui.channelEditor.getQModel().isDataChanged():
self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp'])
self._localConfig['ActiveMntGrp'] = activeMntGrpName
i = self.ui.activeMntGrpCB.findText(activeMntGrpName,
# case insensitive find
Qt.Qt.MatchFixedString)
self.ui.activeMntGrpCB.setCurrentIndex(i)
mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName]
self.ui.channelEditor.getQModel().setDataSource(mgconfig)
self._setDirty(True)
def createMntGrp(self):
| '''creates a new Measurement Group'''
if self._localConfig is None:
return
mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group",
"Enter a name for the new measurement Group")
if not ok: return
mntGrpName = str(mntGrpName)
#check that the given name is not an existing pool element
ms = self.getModelObj().macro_server
poolElementNames = [v.name for v in ms.getElementsWithInterface("PoolElement").values()]
while mntGrpName in poolElementNames:
Qt.QMessageBox.warning(self, "Cannot create Measurement group",
"The name '%s' already is used for another pool element. Please Choose a different one." % mntGrpName,
Qt.QMessageBox.Ok)
mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group",
"Enter a name for the new measurement Group",
Qt.QLineEdit.Normal, | identifier_body | |
expdescription.py | BaseWidget.__init__(self, 'ExpDescriptionEditor')
self.loadUi()
self.ui.buttonBox.setStandardButtons(Qt.QDialogButtonBox.Reset | Qt.QDialogButtonBox.Apply)
newperspectivesDict = copy.deepcopy(self.ui.sardanaElementTree.KnownPerspectives)
#newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'] = [SardanaAcquirableProxyModel, SardanaElementPlainModel]
newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'][0] = SardanaAcquirableProxyModel
self.ui.sardanaElementTree.KnownPerspectives = newperspectivesDict #assign a copy because if just a key of this class memberwas modified, all instances of this class would be affected
self.ui.sardanaElementTree._setPerspective(self.ui.sardanaElementTree.DftPerspective)
self._localConfig = None
self._originalConfiguration = None
self._dirty = False
self._dirtyMntGrps = set()
self.connect(self.ui.activeMntGrpCB, Qt.SIGNAL('activated (QString)'), self.changeActiveMntGrp)
self.connect(self.ui.createMntGrpBT, Qt.SIGNAL('clicked ()'), self.createMntGrp)
self.connect(self.ui.deleteMntGrpBT, Qt.SIGNAL('clicked ()'), self.deleteMntGrp)
self.connect(self.ui.compressionCB, Qt.SIGNAL('currentIndexChanged (int)'), self.onCompressionCBChanged)
self.connect(self.ui.pathLE, Qt.SIGNAL('textEdited (QString)'), self.onPathLEEdited)
self.connect(self.ui.filenameLE, Qt.SIGNAL('textEdited (QString)'), self.onFilenameLEEdited)
self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('dataChanged (QModelIndex, QModelIndex)'), self._updateButtonBox)
self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('modelReset ()'), self._updateButtonBox)
preScanList = self.ui.preScanList
self.connect(preScanList, Qt.SIGNAL('dataChanged'),
self.onPreScanSnapshotChanged)
#TODO: For Taurus 4 compatibility
if hasattr(preScanList, "dataChangedSignal"):
preScanList.dataChangedSignal.connect(self.onPreScanSnapshotChanged)
self.connect(self.ui.choosePathBT, Qt.SIGNAL('clicked ()'), self.onChooseScanDirButtonClicked)
self.__plotManager = None
icon = resource.getIcon(":/actions/view.svg")
self.togglePlotsAction = Qt.QAction(icon, "Show/Hide plots", self)
self.togglePlotsAction.setCheckable(True)
self.togglePlotsAction.setChecked(False)
self.togglePlotsAction.setEnabled(plotsButton)
self.addAction(self.togglePlotsAction)
self.connect(self.togglePlotsAction, Qt.SIGNAL("toggled(bool)"),
self.onPlotsButtonToggled)
self.ui.plotsButton.setDefaultAction(self.togglePlotsAction)
if door is not None:
self.setModel(door)
self.connect(self.ui.buttonBox, Qt.SIGNAL("clicked(QAbstractButton *)"), self.onDialogButtonClicked)
#Taurus Configuration properties and delegates
self.registerConfigDelegate(self.ui.channelEditor)
def getModelClass(self):
'''reimplemented from :class:`TaurusBaseWidget`'''
return taurus.core.taurusdevice.TaurusDevice
def onChooseScanDirButtonClicked(self):
ret = Qt.QFileDialog.getExistingDirectory (self, 'Choose directory for saving files', self.ui.pathLE.text())
if ret:
self.ui.pathLE.setText(ret)
self.ui.pathLE.emit(Qt.SIGNAL('textEdited (QString)'), ret)
def onDialogButtonClicked(self, button):
role = self.ui.buttonBox.buttonRole(button)
if role == Qt.QDialogButtonBox.ApplyRole:
self.writeExperimentConfiguration(ask=False)
elif role == Qt.QDialogButtonBox.ResetRole:
self._reloadConf()
def closeEvent(self, event):
'''This event handler receives widget close events'''
if self.isDataChanged():
self.writeExperimentConfiguration(ask=True)
Qt.QWidget.closeEvent(self, event)
def setModel(self, model):
'''reimplemented from :class:`TaurusBaseWidget`'''
TaurusBaseWidget.setModel(self, model)
self._reloadConf(force=True)
#set the model of some child widgets
door = self.getModelObj()
if door is None: return
tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead
msname = door.macro_server.getFullName()
self.ui.taurusModelTree.setModel(tghost)
self.ui.sardanaElementTree.setModel(msname)
def _reloadConf(self, force=False):
if not force and self.isDataChanged():
|
door = self.getModelObj()
if door is None: return
conf = door.getExperimentConfiguration()
self._originalConfiguration = copy.deepcopy(conf)
self.setLocalConfig(conf)
self._setDirty(False)
self._dirtyMntGrps = set()
#set a list of available channels
avail_channels = {}
for ch_info in door.macro_server.getExpChannelElements().values():
avail_channels[ch_info.full_name] = ch_info.getData()
self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels)
def _setDirty(self, dirty):
self._dirty = dirty
self._updateButtonBox()
def isDataChanged(self):
"""Tells if the local data has been modified since it was last refreshed
:return: (bool) True if he local data has been modified since it was last refreshed
"""
return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps)
def _updateButtonBox(self, *args, **kwargs):
self.ui.buttonBox.setEnabled(self.isDataChanged())
def getLocalConfig(self):
return self._localConfig
def setLocalConfig(self, conf):
'''gets a ExpDescription dictionary and sets up the widget'''
self._localConfig = conf
#set the Channel Editor
activeMntGrpName = self._localConfig['ActiveMntGrp'] or ''
if activeMntGrpName in self._localConfig['MntGrpConfigs']:
mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName]
self.ui.channelEditor.getQModel().setDataSource(mgconfig)
#set the measurement group ComboBox
self.ui.activeMntGrpCB.clear()
mntGrpLabels = []
for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items():
# get labels to visualize names with lower and upper case
mntGrpLabels.append(mntGrpConf['label'])
self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels))
idx = self.ui.activeMntGrpCB.findText(activeMntGrpName,
# case insensitive find
Qt.Qt.MatchFixedString)
self.ui.activeMntGrpCB.setCurrentIndex(idx)
#set the system snapshot list
psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig
# TODO: For Taurus 4 compatibility
psl_fullname = []
for name, display in psl:
psl_fullname.append(("tango://%s" % name, display))
self.ui.preScanList.clear()
self.ui.preScanList.addModels(psl_fullname)
#other settings
self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile']))
self.ui.pathLE.setText(self._localConfig['ScanDir'] or '')
self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1)
def writeExperimentConfiguration(self, ask=True):
'''sends the current local configuration to the door
:param ask: (bool) If True (default) prompts the user before saving.
'''
if ask:
op = Qt.QMessageBox.question(self, "Save configuration?",
'Do you want to save the current configuration?\n(if not, any changes will be lost)',
Qt.QMessageBox.Yes | Qt.QMessageBox.No)
if op != Qt.QMessageBox.Yes:
return False
conf = self.getLocalConfig()
#make sure that no empty measurement groups are written
for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items():
if mgconfig is not None and not mgconfig.get('controllers'):
mglabel = mgconfig['label']
Qt.QMessageBox.information(self, "Empty Measurement group",
"The measurement group '%s' is empty. Fill it (or delete it) before applying" % mglabel,
Qt.QMessageBox.Ok)
self.changeActiveMntGrp(mgname)
return False
#check if the currently displayed mntgrp is changed
if self.ui.channelEditor.getQModel().isDataChanged():
self._dirtyMntGrps.add(self._localConfig[' | op = Qt.QMessageBox.question(self, "Reload info from door",
"If you reload, all current experiment configuration changes will be lost. Reload?",
Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel)
if op != Qt.QMessageBox.Yes:
return | conditional_block |
a1.py | AG", "CTGA")
True
>>> pair_genes("TCAG", "CCAG")
False
'''
# declare a boolean that indicates whether the two genes are pairable
can_pair = False
# create a sample of gene that can pair
sample_gene = ""
for nucleotide in first_gene:
if (nucleotide == "A"):
sample_gene += "T"
elif (nucleotide == "T"):
sample_gene += "A"
elif (nucleotide == "G"):
sample_gene += "C"
else:
sample_gene += "G"
# check if the sample gene matches the second gene
if (second_gene == sample_gene):
can_pair = True
# genes can also pair either direction
if (second_gene[::-1] == sample_gene):
can_pair = True
# returns the boolean that indicates whether the two genes can pair
return can_pair
def zip_length(gene):
''' (str) -> int
Genes can partially pair with itself in a process called zipping.
Zipping occurs when at either end of a gene can form a pair bond,
and continues until the pair of nucleotides can no longer form a bond.
Guanines pair with cytosines, and adenines pair with thymines.
This function returns an integer value that indicates the maximum
number of nucleotides pairs that the gene can zip.
REQ: genes must be consisted of letters {A, G, C, T}
>>> zip_length("AGTCTCGCT")
2
>>> zip_length("AGTCTCGAG")
0
'''
# declare a variable that counts the zip length
zip_length_count = 0
# for loop that is in charge of each nucleotides from the left
for left_index in range(len(gene)):
# declare a variable that is in charge of the indices of
# each nucleotides from the right
right_index = len(gene) - 1 - left_index
# checks if either end of the gene can form a bond
if (gene[left_index] == "A" and gene[right_index] == "T"):
zip_length_count += 1
elif (gene[left_index] == "C" and gene[right_index] == "G"):
zip_length_count += 1
elif (gene[left_index] == "G" and gene[right_index] == "C"):
zip_length_count += 1
elif (gene[left_index] == "T" and gene[right_index] == "A"):
zip_length_count += 1
# once the gene can no longer zip,
# returns the zip length right away
else:
return zip_length_count
def splice_gene(source, destination, start_anchor, end_anchor):
''' (list, list, str, str) -> None
This function performs splicing of gene sequences.
Splicing of genes can be done by taking a nucleotide sequence
from one gene and replace it with a nucleotide sequence from another.
First, find the anchor sequences, which are the sequences found
within the starting and end anchor given by the user (anchors can be
found from either end of the gene). Then, if the starting anchor and
the end anchor is found in both genes, the anchor sequence extracted
from the source (the first gene) replaces the anchor sequence from
the destination (the second gene). If the anchor is not found in
both genes, the splice or the mutation does not occur.
REQ: the anchors must be consisted of letters {A, G, C, T}
'''
# convert the source gene into a string
source_gene = ""
for i in range(len(source)):
source_gene += source[i]
# convert the destination gene into a string
destination_gene = ""
for j in range(len(destination)):
destination_gene += destination[j]
# find the index of start and end anchor from the source
source_start_anchor = source_gene.find(start_anchor)
source_end_anchor = source_gene.find(end_anchor, source_start_anchor)
# start and end anchor can be found in reverse order
if (source_start_anchor == -1 and source_end_anchor == -1):
source_start_anchor = source_gene.rfind(start_anchor)
source_end_anchor = source_gene.rfind(end_anchor,
source_start_anchor)
# find the index of start and end anchor from the destination
destination_start_anchor = destination_gene.find(start_anchor)
destination_end_anchor = destination_gene.find(end_anchor,
destination_start_anchor)
# start and end anchor can be found in reverse order
if (destination_start_anchor == -1 and destination_end_anchor == -1):
|
# check if the indices are found in source gene
if (source_start_anchor != -1 and source_end_anchor != -1):
# check if the indices are found in destination gene
if (destination_start_anchor != -1 and destination_end_anchor != -1):
# for loop to find the anchor sequence from the source
source_anchor_sequence = ""
for i in range(source_start_anchor,
source_end_anchor + len(end_anchor)):
source_anchor_sequence += source[i]
# remove the nucleotides of anchor sequence from the source
count = 0
while (count < len(source_anchor_sequence)):
del source[source_start_anchor]
count += 1
# for loop to find the anchor sequence from the destination
destination_anchor_sequence = ""
for i in range(destination_start_anchor,
destination_end_anchor + len(end_anchor)):
destination_anchor_sequence += destination[i]
# remove the nucleotides from start and end anchor from
# the destination
count = 0
while (count < len(destination_anchor_sequence)):
del destination[destination_start_anchor]
count += 1
# splice the anchor sequence into the destination
for l in range(len(source_anchor_sequence) - 1, -1, -1):
destination.insert(destination_start_anchor,
source_anchor_sequence[l])
# if not found, splice does not occur
else:
pass
# if not found, splice does not occur
else:
pass
def match_mask(gene, mask):
''' (str, str) -> int
This function creates a mask to find a specific pattern in the gene.
Masks can pair with parts of genes, but does not necessarily pair
with the entire gene. Masks can be consisted of multis which are
the special nucleotides, represented inside square brackets,
that can mimic the bonding behaviour of multiple nucleotides.
It can also create a nucleotide that is capapble of pairing with
any other nucleotide, called stars. In addition, if there are
repeated sequences of nucleotides in masks, it can be denoted
by using numbers. An example of a mask would be [AG]C3*, which
can be paired with any gene sequences that starts with T or C,
followed by three G, followed by any other nucleotides.
This function will take in a string representation of a gene,
and a mask, and returns the index of the first nucleotide in
the sequence that is matched by the given mask. If it is not
found anywhere in the sequence, it returns -1.
REQ: masks are strings consisted of '[', ']', numbers, and '*'
REQ: the letters inside the square brackets, should be
consisted of letters {A, G, C, T}
REQ: masks cannot start with integers
>>> match_mask("CCCAGGGGTT", "[TC]G")
3
>>> match_mask("CCCAGGGGTT", "*")
0
>>> match_mask("CCCCGGGG", "A")
-1
'''
# declare a variable for set of nucleotides and integers
nucleotides = "AGCT"
numbers = "123456789"
# declare a variable that is in charge of keeping the index
# of the first nucleotide in gene that matches mask
match_index = -1
# declare variables that make a gene sequence from the mask
# four mask off gene sequences since there are four types of nucleotides
mask_off = []
# for loop to read through the mask
for index in range(len(mask)):
# star is a special character that can pair with any nucleotides
if (mask[index] == "*"):
mask_off.append(nucleotides)
# nucleotides pair with specific nucleotides
elif (mask[index] in nucleotides):
# adenine pairs with thymine
if (mask[index] == "A"):
mask_off.append("T")
# thymine pairs with adenine
elif (mask[index] == "T"):
mask_off.append("A")
# cytosine pairs with guanine
elif (mask[index] == "C"):
mask_off.append("G")
# guanine pairs with cytosine
else:
mask_off.append("C")
# repeated sequences of nucleotides can be denoted using numbers
elif (mask[index] in numbers):
# multiple adenines
if (mask[index - 1] | destination_start_anchor = destination_gene.rfind(start_anchor)
destination_end_anchor = destination_gene.rfind(
end_anchor, destination_start_anchor) | conditional_block |
a1.py | ] == "A"):
zip_length_count += 1
# once the gene can no longer zip,
# returns the zip length right away
else:
return zip_length_count
def splice_gene(source, destination, start_anchor, end_anchor):
''' (list, list, str, str) -> None
This function performs splicing of gene sequences.
Splicing of genes can be done by taking a nucleotide sequence
from one gene and replace it with a nucleotide sequence from another.
First, find the anchor sequences, which are the sequences found
within the starting and end anchor given by the user (anchors can be
found from either end of the gene). Then, if the starting anchor and
the end anchor is found in both genes, the anchor sequence extracted
from the source (the first gene) replaces the anchor sequence from
the destination (the second gene). If the anchor is not found in
both genes, the splice or the mutation does not occur.
REQ: the anchors must be consisted of letters {A, G, C, T}
'''
# convert the source gene into a string
source_gene = ""
for i in range(len(source)):
source_gene += source[i]
# convert the destination gene into a string
destination_gene = ""
for j in range(len(destination)):
destination_gene += destination[j]
# find the index of start and end anchor from the source
source_start_anchor = source_gene.find(start_anchor)
source_end_anchor = source_gene.find(end_anchor, source_start_anchor)
# start and end anchor can be found in reverse order
if (source_start_anchor == -1 and source_end_anchor == -1):
source_start_anchor = source_gene.rfind(start_anchor)
source_end_anchor = source_gene.rfind(end_anchor,
source_start_anchor)
# find the index of start and end anchor from the destination
destination_start_anchor = destination_gene.find(start_anchor)
destination_end_anchor = destination_gene.find(end_anchor,
destination_start_anchor)
# start and end anchor can be found in reverse order
if (destination_start_anchor == -1 and destination_end_anchor == -1):
destination_start_anchor = destination_gene.rfind(start_anchor)
destination_end_anchor = destination_gene.rfind(
end_anchor, destination_start_anchor)
# check if the indices are found in source gene
if (source_start_anchor != -1 and source_end_anchor != -1):
# check if the indices are found in destination gene
if (destination_start_anchor != -1 and destination_end_anchor != -1):
# for loop to find the anchor sequence from the source
source_anchor_sequence = ""
for i in range(source_start_anchor,
source_end_anchor + len(end_anchor)):
source_anchor_sequence += source[i]
# remove the nucleotides of anchor sequence from the source
count = 0
while (count < len(source_anchor_sequence)):
del source[source_start_anchor]
count += 1
# for loop to find the anchor sequence from the destination
destination_anchor_sequence = ""
for i in range(destination_start_anchor,
destination_end_anchor + len(end_anchor)):
destination_anchor_sequence += destination[i]
# remove the nucleotides from start and end anchor from
# the destination
count = 0
while (count < len(destination_anchor_sequence)):
del destination[destination_start_anchor]
count += 1
# splice the anchor sequence into the destination
for l in range(len(source_anchor_sequence) - 1, -1, -1):
destination.insert(destination_start_anchor,
source_anchor_sequence[l])
# if not found, splice does not occur
else:
pass
# if not found, splice does not occur
else:
pass
def match_mask(gene, mask):
''' (str, str) -> int
This function creates a mask to find a specific pattern in the gene.
Masks can pair with parts of genes, but does not necessarily pair
with the entire gene. Masks can be consisted of multis which are
the special nucleotides, represented inside square brackets,
that can mimic the bonding behaviour of multiple nucleotides.
It can also create a nucleotide that is capapble of pairing with
any other nucleotide, called stars. In addition, if there are
repeated sequences of nucleotides in masks, it can be denoted
by using numbers. An example of a mask would be [AG]C3*, which
can be paired with any gene sequences that starts with T or C,
followed by three G, followed by any other nucleotides.
This function will take in a string representation of a gene,
and a mask, and returns the index of the first nucleotide in
the sequence that is matched by the given mask. If it is not
found anywhere in the sequence, it returns -1.
REQ: masks are strings consisted of '[', ']', numbers, and '*'
REQ: the letters inside the square brackets, should be
consisted of letters {A, G, C, T}
REQ: masks cannot start with integers
>>> match_mask("CCCAGGGGTT", "[TC]G")
3
>>> match_mask("CCCAGGGGTT", "*")
0
>>> match_mask("CCCCGGGG", "A")
-1
'''
# declare a variable for set of nucleotides and integers
nucleotides = "AGCT"
numbers = "123456789"
# declare a variable that is in charge of keeping the index
# of the first nucleotide in gene that matches mask
match_index = -1
# declare variables that make a gene sequence from the mask
# four mask off gene sequences since there are four types of nucleotides
mask_off = []
# for loop to read through the mask
for index in range(len(mask)):
# star is a special character that can pair with any nucleotides
if (mask[index] == "*"):
mask_off.append(nucleotides)
# nucleotides pair with specific nucleotides
elif (mask[index] in nucleotides):
# adenine pairs with thymine
if (mask[index] == "A"):
mask_off.append("T")
# thymine pairs with adenine
elif (mask[index] == "T"):
mask_off.append("A")
# cytosine pairs with guanine
elif (mask[index] == "C"):
mask_off.append("G")
# guanine pairs with cytosine
else:
mask_off.append("C")
# repeated sequences of nucleotides can be denoted using numbers
elif (mask[index] in numbers):
# multiple adenines
if (mask[index - 1] == "A"):
mask_off.extend("A" * int(mask[index]))
# multiple thymines
elif (mask[index - 1] == "T"):
mask_off.extend("T" * int(mask[index]))
# multiple cytosine
elif (mask[index - 1] == "C"):
mask_off.extend("C" * int(mask[index]))
# multiple guanine
else:
mask_off.extend("G" * int(mask[index]))
# masks can have special nucleotides called multis
else:
mask_start = mask.find("[")
mask_end = mask.rfind("]")
# for loop to get the multis inside the bracket
multis = ""
# get the multi from the mask
for m in range(index + 1, mask_end):
multis += mask[m]
# convert multis into pairable nucleotides
multi_pair = ""
# for loop to go through the multis
for multi in range(len(multis)):
# adenine pairs with thymine
if (multis[multi] == "A"):
multi_pair += "T"
# thymine pairs with adenine
elif (multis[multi] == "T"):
multi_pair += "A"
# cytosine pairs with guanine
elif (multis[multi] == "C"):
multi_pair += "G"
# guanine pairs with cytosine
else:
multi_pair += "C"
# add multi_pair to the mask off list
mask_off.append(multi_pair)
# remove multis from the mask
for i in range(mask_end + 1):
del mask_off[mask_start+1]
# declare a boolean that indicates whether the gene matches the mask
match = False
# temporary counter
g = 0
# while loop to go through the gene while matching index is not found
while(not match and g < len(gene)):
# if the element in mask off gene is a single letter
if (len(mask_off[g]) == 1):
# check if the nucleotide is the same as the elemnet
# in mask off gene
if (mask_off[g] == gene[g]):
match_index = g
match = True
# if the element in mask off gene is not a single letter
else:
if (gene[g] in mask_off[g]):
match_index = g
match = True
g += 1
# return the the first matching index of the gene
return match_index
def | process_gene_file | identifier_name | |
a1.py | AG", "CTGA")
True
>>> pair_genes("TCAG", "CCAG")
False
'''
# declare a boolean that indicates whether the two genes are pairable
can_pair = False
# create a sample of gene that can pair
sample_gene = ""
for nucleotide in first_gene:
if (nucleotide == "A"):
sample_gene += "T"
elif (nucleotide == "T"):
sample_gene += "A"
elif (nucleotide == "G"):
sample_gene += "C"
else:
sample_gene += "G"
# check if the sample gene matches the second gene
if (second_gene == sample_gene):
can_pair = True
# genes can also pair either direction
if (second_gene[::-1] == sample_gene):
can_pair = True
# returns the boolean that indicates whether the two genes can pair
return can_pair
def zip_length(gene):
''' (str) -> int
Genes can partially pair with itself in a process called zipping.
Zipping occurs when at either end of a gene can form a pair bond,
and continues until the pair of nucleotides can no longer form a bond.
Guanines pair with cytosines, and adenines pair with thymines.
This function returns an integer value that indicates the maximum
number of nucleotides pairs that the gene can zip.
REQ: genes must be consisted of letters {A, G, C, T}
>>> zip_length("AGTCTCGCT")
2
>>> zip_length("AGTCTCGAG")
0
'''
# declare a variable that counts the zip length
zip_length_count = 0
# for loop that is in charge of each nucleotides from the left
for left_index in range(len(gene)):
# declare a variable that is in charge of the indices of
# each nucleotides from the right
right_index = len(gene) - 1 - left_index
# checks if either end of the gene can form a bond
if (gene[left_index] == "A" and gene[right_index] == "T"):
zip_length_count += 1
elif (gene[left_index] == "C" and gene[right_index] == "G"):
zip_length_count += 1
elif (gene[left_index] == "G" and gene[right_index] == "C"):
zip_length_count += 1
elif (gene[left_index] == "T" and gene[right_index] == "A"):
zip_length_count += 1
# once the gene can no longer zip,
# returns the zip length right away
else:
return zip_length_count
def splice_gene(source, destination, start_anchor, end_anchor):
| destination_gene = ""
for j in range(len(destination)):
destination_gene += destination[j]
# find the index of start and end anchor from the source
source_start_anchor = source_gene.find(start_anchor)
source_end_anchor = source_gene.find(end_anchor, source_start_anchor)
# start and end anchor can be found in reverse order
if (source_start_anchor == -1 and source_end_anchor == -1):
source_start_anchor = source_gene.rfind(start_anchor)
source_end_anchor = source_gene.rfind(end_anchor,
source_start_anchor)
# find the index of start and end anchor from the destination
destination_start_anchor = destination_gene.find(start_anchor)
destination_end_anchor = destination_gene.find(end_anchor,
destination_start_anchor)
# start and end anchor can be found in reverse order
if (destination_start_anchor == -1 and destination_end_anchor == -1):
destination_start_anchor = destination_gene.rfind(start_anchor)
destination_end_anchor = destination_gene.rfind(
end_anchor, destination_start_anchor)
# check if the indices are found in source gene
if (source_start_anchor != -1 and source_end_anchor != -1):
# check if the indices are found in destination gene
if (destination_start_anchor != -1 and destination_end_anchor != -1):
# for loop to find the anchor sequence from the source
source_anchor_sequence = ""
for i in range(source_start_anchor,
source_end_anchor + len(end_anchor)):
source_anchor_sequence += source[i]
# remove the nucleotides of anchor sequence from the source
count = 0
while (count < len(source_anchor_sequence)):
del source[source_start_anchor]
count += 1
# for loop to find the anchor sequence from the destination
destination_anchor_sequence = ""
for i in range(destination_start_anchor,
destination_end_anchor + len(end_anchor)):
destination_anchor_sequence += destination[i]
# remove the nucleotides from start and end anchor from
# the destination
count = 0
while (count < len(destination_anchor_sequence)):
del destination[destination_start_anchor]
count += 1
# splice the anchor sequence into the destination
for l in range(len(source_anchor_sequence) - 1, -1, -1):
destination.insert(destination_start_anchor,
source_anchor_sequence[l])
# if not found, splice does not occur
else:
pass
# if not found, splice does not occur
else:
pass
def match_mask(gene, mask):
''' (str, str) -> int
This function creates a mask to find a specific pattern in the gene.
Masks can pair with parts of genes, but does not necessarily pair
with the entire gene. Masks can be consisted of multis which are
the special nucleotides, represented inside square brackets,
that can mimic the bonding behaviour of multiple nucleotides.
It can also create a nucleotide that is capapble of pairing with
any other nucleotide, called stars. In addition, if there are
repeated sequences of nucleotides in masks, it can be denoted
by using numbers. An example of a mask would be [AG]C3*, which
can be paired with any gene sequences that starts with T or C,
followed by three G, followed by any other nucleotides.
This function will take in a string representation of a gene,
and a mask, and returns the index of the first nucleotide in
the sequence that is matched by the given mask. If it is not
found anywhere in the sequence, it returns -1.
REQ: masks are strings consisted of '[', ']', numbers, and '*'
REQ: the letters inside the square brackets, should be
consisted of letters {A, G, C, T}
REQ: masks cannot start with integers
>>> match_mask("CCCAGGGGTT", "[TC]G")
3
>>> match_mask("CCCAGGGGTT", "*")
0
>>> match_mask("CCCCGGGG", "A")
-1
'''
# declare a variable for set of nucleotides and integers
nucleotides = "AGCT"
numbers = "123456789"
# declare a variable that is in charge of keeping the index
# of the first nucleotide in gene that matches mask
match_index = -1
# declare variables that make a gene sequence from the mask
# four mask off gene sequences since there are four types of nucleotides
mask_off = []
# for loop to read through the mask
for index in range(len(mask)):
# star is a special character that can pair with any nucleotides
if (mask[index] == "*"):
mask_off.append(nucleotides)
# nucleotides pair with specific nucleotides
elif (mask[index] in nucleotides):
# adenine pairs with thymine
if (mask[index] == "A"):
mask_off.append("T")
# thymine pairs with adenine
elif (mask[index] == "T"):
mask_off.append("A")
# cytosine pairs with guanine
elif (mask[index] == "C"):
mask_off.append("G")
# guanine pairs with cytosine
else:
mask_off.append("C")
# repeated sequences of nucleotides can be denoted using numbers
elif (mask[index] in numbers):
# multiple adenines
if (mask[index - 1] | ''' (list, list, str, str) -> None
This function performs splicing of gene sequences.
Splicing of genes can be done by taking a nucleotide sequence
from one gene and replace it with a nucleotide sequence from another.
First, find the anchor sequences, which are the sequences found
within the starting and end anchor given by the user (anchors can be
found from either end of the gene). Then, if the starting anchor and
the end anchor is found in both genes, the anchor sequence extracted
from the source (the first gene) replaces the anchor sequence from
the destination (the second gene). If the anchor is not found in
both genes, the splice or the mutation does not occur.
REQ: the anchors must be consisted of letters {A, G, C, T}
'''
# convert the source gene into a string
source_gene = ""
for i in range(len(source)):
source_gene += source[i]
# convert the destination gene into a string
| identifier_body |
a1.py | AG", "CTGA")
True
>>> pair_genes("TCAG", "CCAG")
False
'''
# declare a boolean that indicates whether the two genes are pairable
can_pair = False
# create a sample of gene that can pair
sample_gene = ""
for nucleotide in first_gene:
if (nucleotide == "A"):
sample_gene += "T"
elif (nucleotide == "T"):
sample_gene += "A"
elif (nucleotide == "G"):
sample_gene += "C"
else:
sample_gene += "G"
# check if the sample gene matches the second gene
if (second_gene == sample_gene):
can_pair = True
# genes can also pair either direction
if (second_gene[::-1] == sample_gene):
can_pair = True
# returns the boolean that indicates whether the two genes can pair
return can_pair
def zip_length(gene):
''' (str) -> int
Genes can partially pair with itself in a process called zipping.
Zipping occurs when at either end of a gene can form a pair bond,
and continues until the pair of nucleotides can no longer form a bond.
Guanines pair with cytosines, and adenines pair with thymines.
| >>> zip_length("AGTCTCGCT")
2
>>> zip_length("AGTCTCGAG")
0
'''
# declare a variable that counts the zip length
zip_length_count = 0
# for loop that is in charge of each nucleotides from the left
for left_index in range(len(gene)):
# declare a variable that is in charge of the indices of
# each nucleotides from the right
right_index = len(gene) - 1 - left_index
# checks if either end of the gene can form a bond
if (gene[left_index] == "A" and gene[right_index] == "T"):
zip_length_count += 1
elif (gene[left_index] == "C" and gene[right_index] == "G"):
zip_length_count += 1
elif (gene[left_index] == "G" and gene[right_index] == "C"):
zip_length_count += 1
elif (gene[left_index] == "T" and gene[right_index] == "A"):
zip_length_count += 1
# once the gene can no longer zip,
# returns the zip length right away
else:
return zip_length_count
def splice_gene(source, destination, start_anchor, end_anchor):
''' (list, list, str, str) -> None
This function performs splicing of gene sequences.
Splicing of genes can be done by taking a nucleotide sequence
from one gene and replace it with a nucleotide sequence from another.
First, find the anchor sequences, which are the sequences found
within the starting and end anchor given by the user (anchors can be
found from either end of the gene). Then, if the starting anchor and
the end anchor is found in both genes, the anchor sequence extracted
from the source (the first gene) replaces the anchor sequence from
the destination (the second gene). If the anchor is not found in
both genes, the splice or the mutation does not occur.
REQ: the anchors must be consisted of letters {A, G, C, T}
'''
# convert the source gene into a string
source_gene = ""
for i in range(len(source)):
source_gene += source[i]
# convert the destination gene into a string
destination_gene = ""
for j in range(len(destination)):
destination_gene += destination[j]
# find the index of start and end anchor from the source
source_start_anchor = source_gene.find(start_anchor)
source_end_anchor = source_gene.find(end_anchor, source_start_anchor)
# start and end anchor can be found in reverse order
if (source_start_anchor == -1 and source_end_anchor == -1):
source_start_anchor = source_gene.rfind(start_anchor)
source_end_anchor = source_gene.rfind(end_anchor,
source_start_anchor)
# find the index of start and end anchor from the destination
destination_start_anchor = destination_gene.find(start_anchor)
destination_end_anchor = destination_gene.find(end_anchor,
destination_start_anchor)
# start and end anchor can be found in reverse order
if (destination_start_anchor == -1 and destination_end_anchor == -1):
destination_start_anchor = destination_gene.rfind(start_anchor)
destination_end_anchor = destination_gene.rfind(
end_anchor, destination_start_anchor)
# check if the indices are found in source gene
if (source_start_anchor != -1 and source_end_anchor != -1):
# check if the indices are found in destination gene
if (destination_start_anchor != -1 and destination_end_anchor != -1):
# for loop to find the anchor sequence from the source
source_anchor_sequence = ""
for i in range(source_start_anchor,
source_end_anchor + len(end_anchor)):
source_anchor_sequence += source[i]
# remove the nucleotides of anchor sequence from the source
count = 0
while (count < len(source_anchor_sequence)):
del source[source_start_anchor]
count += 1
# for loop to find the anchor sequence from the destination
destination_anchor_sequence = ""
for i in range(destination_start_anchor,
destination_end_anchor + len(end_anchor)):
destination_anchor_sequence += destination[i]
# remove the nucleotides from start and end anchor from
# the destination
count = 0
while (count < len(destination_anchor_sequence)):
del destination[destination_start_anchor]
count += 1
# splice the anchor sequence into the destination
for l in range(len(source_anchor_sequence) - 1, -1, -1):
destination.insert(destination_start_anchor,
source_anchor_sequence[l])
# if not found, splice does not occur
else:
pass
# if not found, splice does not occur
else:
pass
def match_mask(gene, mask):
''' (str, str) -> int
This function creates a mask to find a specific pattern in the gene.
Masks can pair with parts of genes, but does not necessarily pair
with the entire gene. Masks can be consisted of multis which are
the special nucleotides, represented inside square brackets,
that can mimic the bonding behaviour of multiple nucleotides.
It can also create a nucleotide that is capapble of pairing with
any other nucleotide, called stars. In addition, if there are
repeated sequences of nucleotides in masks, it can be denoted
by using numbers. An example of a mask would be [AG]C3*, which
can be paired with any gene sequences that starts with T or C,
followed by three G, followed by any other nucleotides.
This function will take in a string representation of a gene,
and a mask, and returns the index of the first nucleotide in
the sequence that is matched by the given mask. If it is not
found anywhere in the sequence, it returns -1.
REQ: masks are strings consisted of '[', ']', numbers, and '*'
REQ: the letters inside the square brackets, should be
consisted of letters {A, G, C, T}
REQ: masks cannot start with integers
>>> match_mask("CCCAGGGGTT", "[TC]G")
3
>>> match_mask("CCCAGGGGTT", "*")
0
>>> match_mask("CCCCGGGG", "A")
-1
'''
# declare a variable for set of nucleotides and integers
nucleotides = "AGCT"
numbers = "123456789"
# declare a variable that is in charge of keeping the index
# of the first nucleotide in gene that matches mask
match_index = -1
# declare variables that make a gene sequence from the mask
# four mask off gene sequences since there are four types of nucleotides
mask_off = []
# for loop to read through the mask
for index in range(len(mask)):
# star is a special character that can pair with any nucleotides
if (mask[index] == "*"):
mask_off.append(nucleotides)
# nucleotides pair with specific nucleotides
elif (mask[index] in nucleotides):
# adenine pairs with thymine
if (mask[index] == "A"):
mask_off.append("T")
# thymine pairs with adenine
elif (mask[index] == "T"):
mask_off.append("A")
# cytosine pairs with guanine
elif (mask[index] == "C"):
mask_off.append("G")
# guanine pairs with cytosine
else:
mask_off.append("C")
# repeated sequences of nucleotides can be denoted using numbers
elif (mask[index] in numbers):
# multiple adenines
if (mask[index - 1 | This function returns an integer value that indicates the maximum
number of nucleotides pairs that the gene can zip.
REQ: genes must be consisted of letters {A, G, C, T}
| random_line_split |
runner.rs | of threads to use to run tasks.
pub fn jobs(&mut self, jobs: usize) {
self.jobs = jobs;
}
/// Adds a path to Lua's require path for modules.
pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) {
self.spec.include_paths.push(path.into());
}
/// Sets a variable value.
pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) {
self.spec.variables.push((name.as_ref().to_string(), value.into()));
}
/// Load the script.
pub fn load(&mut self) -> Result<(), Box<Error>> {
if self.runtime.is_none() {
self.runtime = Some(try!(self.spec.create()));
}
Ok(())
}
/// Prints the list of named tasks for a script.
pub fn print_task_list(&mut self) {
let mut tasks = self.runtime().environment().tasks();
tasks.sort_by(|a, b| a.name().cmp(b.name()));
let mut out = term::stdout().unwrap();
println!("Available tasks:");
for task in tasks {
out.fg(term::color::BRIGHT_GREEN).unwrap();
write!(out, " {:16}", task.name()).unwrap();
out.reset().unwrap();
if let Some(ref description) = task.description() {
write!(out, "{}", description).unwrap();
}
writeln!(out, "").unwrap();
}
if let Some(ref default) = self.runtime().environment().default_task() {
println!("");
println!("Default task: {}", default);
}
}
/// Run the default task.
pub fn run_default(&mut self) -> Result<(), Box<Error>> {
if let Some(ref name) = self.runtime().environment().default_task() {
let tasks = vec![name];
self.run(&tasks)
} else {
Err("no default task defined".into())
}
}
/// Runs the specified list of tasks.
///
/// Tasks are run in parallel when possible during execution. The maximum number of parallel
/// jobs can be set with the `jobs()` method.
pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> {
// Resolve all tasks given.
for task in tasks {
try!(self.resolve_task(task));
}
// Determine the schedule of tasks to execute.
let mut queue = try!(self.graph.solve(!self.spec.always_run));
let task_count = queue.len();
let thread_count = cmp::min(self.jobs, task_count);
debug!("running {} task(s) across {} thread(s)",
task_count,
thread_count);
// Spawn one thread for each job.
let mut threads = Vec::new();
let mut free_threads: HashSet<usize> = HashSet::new();
let mut channels = Vec::new();
let (sender, receiver) = mpsc::channel::<Result<usize, usize>>();
// Spawn `jobs` number of threads (but no more than the task count!).
for thread_id in 0..thread_count {
let spec = self.spec.clone();
let thread_sender = sender.clone();
let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0);
channels.push(parent_sender);
threads.push(thread::spawn(move || {
// Prepare a new runtime.
let runtime = spec.create().unwrap_or_else(|e| {
error!("{}", e);
panic!();
});
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
}
// Begin executing tasks!
while let Ok((name, task_id)) = thread_receiver.recv() {
info!("running task '{}' ({} of {})", name, task_id, task_count);
// Lookup the task to run.
let task = {
// Lookup the task to run.
if let Some(task) = runtime.environment().get_task(&name) {
task as Rc<Task>
}
// Find a rule that matches the task name.
else if let Some(rule) = runtime.environment()
.rules()
.iter()
.find(|rule| rule.matches(&name)) {
Rc::new(rule.create_task(name).unwrap()) as Rc<Task>
}
// No matching task.
else {
panic!("no matching task or rule for '{}'", name);
}
};
// Check for dry run.
if !spec.dry_run {
if let Err(e) = task.run() {
// If we ought to keep going, just issue a warning.
if spec.keep_going {
warn!("ignoring error: {}", e);
} else {
error!("{}", e);
thread_sender.send(Err(thread_id)).unwrap();
return;
}
}
} else {
info!("would run task '{}'", task.name());
}
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
break;
}
}
}))
}
drop(sender);
// Keep track of tasks completed and tasks in progress.
let mut completed_tasks: HashSet<String> = HashSet::new();
let mut current_tasks: HashMap<usize, String> = HashMap::new();
let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect();
while !queue.is_empty() || !current_tasks.is_empty() {
// Wait for a thread to request a task.
let result = receiver.recv().unwrap();
// If the thread sent an error, we should stop everything if keep_going isn't enabled.
if let Err(thread_id) = result {
debug!("thread {} errored, waiting for remaining tasks...",
thread_id);
return Err("not all tasks completed successfully".into());
}
let thread_id = result.unwrap();
free_threads.insert(thread_id);
trace!("thread {} is idle", thread_id);
// If the thread was previously running a task, mark it as completed.
if let Some(task) = current_tasks.remove(&thread_id) {
trace!("task '{}' completed", task);
completed_tasks.insert(task);
}
// Attempt to schedule more tasks to run. The most we can schedule is the number of free
// threads, but it is limited by the number of tasks that have their dependencies already
// finished.
'schedule: for _ in 0..free_threads.len() {
// If the queue is empty, we are done.
if queue.is_empty() {
break;
}
// Check the next task in the queue. If any of its dependencies have not yet been
// completed, we cannot schedule it yet.
for dependency in queue.front().unwrap().dependencies() {
// Check that the dependency needs scheduled at all (some are already satisfied),
// and that it hasn't already finished.
if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) {
// We can't run the next task, so we're done scheduling for now until another
// thread finishes.
break 'schedule;
}
}
// Get the available task from the queue.
let task = queue.front().unwrap().clone();
// Pick a free thread to run the task in.
if let Some(thread_id) = free_threads.iter().next().map(|t| *t) {
trace!("scheduling task '{}' on thread {}", task.name(), thread_id);
let data = (task.name().to_string(), task_count - queue.len() + 1);
// Send the task name.
if channels[thread_id].send(data).is_ok() {
current_tasks.insert(thread_id, task.name().to_string());
free_threads.remove(&thread_id);
// Scheduling was successful, so remove the task frome the queue.
queue.pop_front().unwrap();
} else {
trace!("failed to send channel to thread {}", thread_id);
}
} else {
// We can schedule now, but there aren't any free threads. 😢
break;
}
}
}
// Close the input and wait for any remaining threads to finish.
drop(channels);
for (thread_id, thread) in threads.into_iter().enumerate() {
if let Err(e) = thread.join() {
trace!("thread {} closed with panic: {:?}", thread_id, e);
}
}
info!("all tasks up to date");
Ok(())
}
fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> {
if !self.graph.contains(&name) {
// Lookup the task to run.
if let Some(task) = self.runtime().environment().get_task(&name) {
debug!("task '{}' matches named task", name.as_ref());
self.graph.insert(task.clone());
}
// Find a rule that matches the task name.
else if let Some(rule) = self.runtime()
.environment()
.rules()
.iter()
.find(|rule| rule.matches(&name)) {
debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern);
// Create a task for the rule and insert it in the graph.
self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap()));
}
// No matching task.
else {
| return Err(format!("no matching task or rule for '{}'", name.as_ref()).into());
}
| conditional_block | |
runner.rs | _all(&runtime);
// Set include paths.
for path in &self.include_paths {
runtime.include_path(&path);
}
// Set the OS
runtime.state().push_string(if cfg!(windows) {
"windows"
} else {
"unix"
});
runtime.state().set_global("OS");
// Set configured variables.
for &(ref name, ref value) in &self.variables {
runtime.state().push(value.clone());
runtime.state().set_global(&name);
}
// Load the script.
try!(runtime.load());
Ok(runtime)
}
}
/// A task runner object that holds the state for defined tasks, dependencies, and the scripting
/// runtime.
pub struct Runner {
/// The current DAG for tasks.
graph: Graph,
/// The number of threads to use.
jobs: usize,
/// Environment specification.
spec: EnvironmentSpec,
/// Runtime local owned by the master thread.
runtime: Option<Runtime>,
}
impl Runner {
/// Creates a new runner instance.
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> {
// By default, set the number of jobs to be one less than the number of available CPU cores.
let jobs = cmp::max(1, num_cpus::get() - 1);
let path = path.into();
let directory: PathBuf = match path.parent() {
Some(path) => path.into(),
None => {
return Err("failed to parse script directory".into());
}
};
Ok(Runner {
graph: Graph::new(),
jobs: jobs as usize,
spec: EnvironmentSpec {
path: path.into(),
directory: directory,
include_paths: Vec::new(),
variables: Vec::new(),
dry_run: false,
always_run: false,
keep_going: false,
},
runtime: None,
})
}
pub fn path(&self) -> &Path {
&self.spec.path
}
pub fn directory(&self) -> &Path {
&self.spec.directory
}
/// Sets "dry run" mode.
///
/// When in "dry run" mode, running tasks will operate as normal, except that no task's actions
/// will be actually run.
pub fn dry_run(&mut self) {
self.spec.dry_run = true;
}
/// Run all tasks even if they are up-to-date.
pub fn | (&mut self) {
self.spec.always_run = true;
}
/// Run all tasks even if they throw errors.
pub fn keep_going(&mut self) {
self.spec.keep_going = true;
}
/// Sets the number of threads to use to run tasks.
pub fn jobs(&mut self, jobs: usize) {
self.jobs = jobs;
}
/// Adds a path to Lua's require path for modules.
pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) {
self.spec.include_paths.push(path.into());
}
/// Sets a variable value.
pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) {
self.spec.variables.push((name.as_ref().to_string(), value.into()));
}
/// Load the script.
pub fn load(&mut self) -> Result<(), Box<Error>> {
if self.runtime.is_none() {
self.runtime = Some(try!(self.spec.create()));
}
Ok(())
}
/// Prints the list of named tasks for a script.
pub fn print_task_list(&mut self) {
let mut tasks = self.runtime().environment().tasks();
tasks.sort_by(|a, b| a.name().cmp(b.name()));
let mut out = term::stdout().unwrap();
println!("Available tasks:");
for task in tasks {
out.fg(term::color::BRIGHT_GREEN).unwrap();
write!(out, " {:16}", task.name()).unwrap();
out.reset().unwrap();
if let Some(ref description) = task.description() {
write!(out, "{}", description).unwrap();
}
writeln!(out, "").unwrap();
}
if let Some(ref default) = self.runtime().environment().default_task() {
println!("");
println!("Default task: {}", default);
}
}
/// Run the default task.
pub fn run_default(&mut self) -> Result<(), Box<Error>> {
if let Some(ref name) = self.runtime().environment().default_task() {
let tasks = vec![name];
self.run(&tasks)
} else {
Err("no default task defined".into())
}
}
/// Runs the specified list of tasks.
///
/// Tasks are run in parallel when possible during execution. The maximum number of parallel
/// jobs can be set with the `jobs()` method.
pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> {
// Resolve all tasks given.
for task in tasks {
try!(self.resolve_task(task));
}
// Determine the schedule of tasks to execute.
let mut queue = try!(self.graph.solve(!self.spec.always_run));
let task_count = queue.len();
let thread_count = cmp::min(self.jobs, task_count);
debug!("running {} task(s) across {} thread(s)",
task_count,
thread_count);
// Spawn one thread for each job.
let mut threads = Vec::new();
let mut free_threads: HashSet<usize> = HashSet::new();
let mut channels = Vec::new();
let (sender, receiver) = mpsc::channel::<Result<usize, usize>>();
// Spawn `jobs` number of threads (but no more than the task count!).
for thread_id in 0..thread_count {
let spec = self.spec.clone();
let thread_sender = sender.clone();
let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0);
channels.push(parent_sender);
threads.push(thread::spawn(move || {
// Prepare a new runtime.
let runtime = spec.create().unwrap_or_else(|e| {
error!("{}", e);
panic!();
});
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
}
// Begin executing tasks!
while let Ok((name, task_id)) = thread_receiver.recv() {
info!("running task '{}' ({} of {})", name, task_id, task_count);
// Lookup the task to run.
let task = {
// Lookup the task to run.
if let Some(task) = runtime.environment().get_task(&name) {
task as Rc<Task>
}
// Find a rule that matches the task name.
else if let Some(rule) = runtime.environment()
.rules()
.iter()
.find(|rule| rule.matches(&name)) {
Rc::new(rule.create_task(name).unwrap()) as Rc<Task>
}
// No matching task.
else {
panic!("no matching task or rule for '{}'", name);
}
};
// Check for dry run.
if !spec.dry_run {
if let Err(e) = task.run() {
// If we ought to keep going, just issue a warning.
if spec.keep_going {
warn!("ignoring error: {}", e);
} else {
error!("{}", e);
thread_sender.send(Err(thread_id)).unwrap();
return;
}
}
} else {
info!("would run task '{}'", task.name());
}
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
break;
}
}
}))
}
drop(sender);
// Keep track of tasks completed and tasks in progress.
let mut completed_tasks: HashSet<String> = HashSet::new();
let mut current_tasks: HashMap<usize, String> = HashMap::new();
let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect();
while !queue.is_empty() || !current_tasks.is_empty() {
// Wait for a thread to request a task.
let result = receiver.recv().unwrap();
// If the thread sent an error, we should stop everything if keep_going isn't enabled.
if let Err(thread_id) = result {
debug!("thread {} errored, waiting for remaining tasks...",
thread_id);
return Err("not all tasks completed successfully".into());
}
let thread_id = result.unwrap();
free_threads.insert(thread_id);
trace!("thread {} is idle", thread_id);
// If the thread was previously running a task, mark it as completed.
if let Some(task) = current_tasks.remove(&thread_id) {
trace!("task '{}' completed", task);
completed_tasks.insert(task);
}
// Attempt to schedule more tasks to run. The most we can schedule is the number of free
// threads, but it is limited by the number of tasks that have their dependencies already
// finished.
'schedule: for _ in 0..free_threads.len() {
// If the queue is empty, we are done.
if queue.is_empty() {
break;
}
// Check the next task in the queue. If any of its dependencies have not yet been
// completed, we cannot schedule it yet.
| always_run | identifier_name |
runner.rs | (&runtime);
// Set include paths.
for path in &self.include_paths {
runtime.include_path(&path);
}
// Set the OS
runtime.state().push_string(if cfg!(windows) {
"windows"
} else {
"unix"
});
runtime.state().set_global("OS");
// Set configured variables.
for &(ref name, ref value) in &self.variables {
runtime.state().push(value.clone());
runtime.state().set_global(&name);
}
// Load the script.
try!(runtime.load());
Ok(runtime)
}
}
/// A task runner object that holds the state for defined tasks, dependencies, and the scripting
/// runtime.
pub struct Runner {
/// The current DAG for tasks.
graph: Graph,
/// The number of threads to use.
jobs: usize,
/// Environment specification.
spec: EnvironmentSpec,
/// Runtime local owned by the master thread.
runtime: Option<Runtime>,
}
impl Runner {
/// Creates a new runner instance.
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> {
// By default, set the number of jobs to be one less than the number of available CPU cores.
let jobs = cmp::max(1, num_cpus::get() - 1);
let path = path.into();
let directory: PathBuf = match path.parent() {
Some(path) => path.into(),
None => {
return Err("failed to parse script directory".into());
}
};
Ok(Runner {
graph: Graph::new(),
jobs: jobs as usize,
spec: EnvironmentSpec {
path: path.into(),
directory: directory,
include_paths: Vec::new(),
variables: Vec::new(),
dry_run: false,
always_run: false,
keep_going: false,
},
runtime: None,
})
}
pub fn path(&self) -> &Path {
&self.spec.path
}
pub fn directory(&self) -> &Path {
&self.spec.directory
}
/// Sets "dry run" mode.
///
/// When in "dry run" mode, running tasks will operate as normal, except that no task's actions
/// will be actually run.
pub fn dry_run(&mut self) {
self.spec.dry_run = true;
}
/// Run all tasks even if they are up-to-date.
pub fn always_run(&mut self) |
/// Run all tasks even if they throw errors.
pub fn keep_going(&mut self) {
self.spec.keep_going = true;
}
/// Sets the number of threads to use to run tasks.
pub fn jobs(&mut self, jobs: usize) {
self.jobs = jobs;
}
/// Adds a path to Lua's require path for modules.
pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) {
self.spec.include_paths.push(path.into());
}
/// Sets a variable value.
pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) {
self.spec.variables.push((name.as_ref().to_string(), value.into()));
}
/// Load the script.
pub fn load(&mut self) -> Result<(), Box<Error>> {
if self.runtime.is_none() {
self.runtime = Some(try!(self.spec.create()));
}
Ok(())
}
/// Prints the list of named tasks for a script.
pub fn print_task_list(&mut self) {
let mut tasks = self.runtime().environment().tasks();
tasks.sort_by(|a, b| a.name().cmp(b.name()));
let mut out = term::stdout().unwrap();
println!("Available tasks:");
for task in tasks {
out.fg(term::color::BRIGHT_GREEN).unwrap();
write!(out, " {:16}", task.name()).unwrap();
out.reset().unwrap();
if let Some(ref description) = task.description() {
write!(out, "{}", description).unwrap();
}
writeln!(out, "").unwrap();
}
if let Some(ref default) = self.runtime().environment().default_task() {
println!("");
println!("Default task: {}", default);
}
}
/// Run the default task.
pub fn run_default(&mut self) -> Result<(), Box<Error>> {
if let Some(ref name) = self.runtime().environment().default_task() {
let tasks = vec![name];
self.run(&tasks)
} else {
Err("no default task defined".into())
}
}
/// Runs the specified list of tasks.
///
/// Tasks are run in parallel when possible during execution. The maximum number of parallel
/// jobs can be set with the `jobs()` method.
pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> {
// Resolve all tasks given.
for task in tasks {
try!(self.resolve_task(task));
}
// Determine the schedule of tasks to execute.
let mut queue = try!(self.graph.solve(!self.spec.always_run));
let task_count = queue.len();
let thread_count = cmp::min(self.jobs, task_count);
debug!("running {} task(s) across {} thread(s)",
task_count,
thread_count);
// Spawn one thread for each job.
let mut threads = Vec::new();
let mut free_threads: HashSet<usize> = HashSet::new();
let mut channels = Vec::new();
let (sender, receiver) = mpsc::channel::<Result<usize, usize>>();
// Spawn `jobs` number of threads (but no more than the task count!).
for thread_id in 0..thread_count {
let spec = self.spec.clone();
let thread_sender = sender.clone();
let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0);
channels.push(parent_sender);
threads.push(thread::spawn(move || {
// Prepare a new runtime.
let runtime = spec.create().unwrap_or_else(|e| {
error!("{}", e);
panic!();
});
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
}
// Begin executing tasks!
while let Ok((name, task_id)) = thread_receiver.recv() {
info!("running task '{}' ({} of {})", name, task_id, task_count);
// Lookup the task to run.
let task = {
// Lookup the task to run.
if let Some(task) = runtime.environment().get_task(&name) {
task as Rc<Task>
}
// Find a rule that matches the task name.
else if let Some(rule) = runtime.environment()
.rules()
.iter()
.find(|rule| rule.matches(&name)) {
Rc::new(rule.create_task(name).unwrap()) as Rc<Task>
}
// No matching task.
else {
panic!("no matching task or rule for '{}'", name);
}
};
// Check for dry run.
if !spec.dry_run {
if let Err(e) = task.run() {
// If we ought to keep going, just issue a warning.
if spec.keep_going {
warn!("ignoring error: {}", e);
} else {
error!("{}", e);
thread_sender.send(Err(thread_id)).unwrap();
return;
}
}
} else {
info!("would run task '{}'", task.name());
}
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
break;
}
}
}))
}
drop(sender);
// Keep track of tasks completed and tasks in progress.
let mut completed_tasks: HashSet<String> = HashSet::new();
let mut current_tasks: HashMap<usize, String> = HashMap::new();
let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect();
while !queue.is_empty() || !current_tasks.is_empty() {
// Wait for a thread to request a task.
let result = receiver.recv().unwrap();
// If the thread sent an error, we should stop everything if keep_going isn't enabled.
if let Err(thread_id) = result {
debug!("thread {} errored, waiting for remaining tasks...",
thread_id);
return Err("not all tasks completed successfully".into());
}
let thread_id = result.unwrap();
free_threads.insert(thread_id);
trace!("thread {} is idle", thread_id);
// If the thread was previously running a task, mark it as completed.
if let Some(task) = current_tasks.remove(&thread_id) {
trace!("task '{}' completed", task);
completed_tasks.insert(task);
}
// Attempt to schedule more tasks to run. The most we can schedule is the number of free
// threads, but it is limited by the number of tasks that have their dependencies already
// finished.
'schedule: for _ in 0..free_threads.len() {
// If the queue is empty, we are done.
if queue.is_empty() {
break;
}
// Check the next task in the queue. If any of its dependencies have not yet been
// completed, we cannot schedule it yet.
| {
self.spec.always_run = true;
} | identifier_body |
runner.rs | pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> {
// By default, set the number of jobs to be one less than the number of available CPU cores.
let jobs = cmp::max(1, num_cpus::get() - 1);
let path = path.into();
let directory: PathBuf = match path.parent() {
Some(path) => path.into(),
None => {
return Err("failed to parse script directory".into());
}
};
Ok(Runner {
graph: Graph::new(),
jobs: jobs as usize,
spec: EnvironmentSpec {
path: path.into(),
directory: directory,
include_paths: Vec::new(),
variables: Vec::new(),
dry_run: false,
always_run: false,
keep_going: false,
},
runtime: None,
})
}
pub fn path(&self) -> &Path {
&self.spec.path
}
pub fn directory(&self) -> &Path {
&self.spec.directory
}
/// Sets "dry run" mode.
///
/// When in "dry run" mode, running tasks will operate as normal, except that no task's actions
/// will be actually run.
pub fn dry_run(&mut self) {
self.spec.dry_run = true;
}
/// Run all tasks even if they are up-to-date.
pub fn always_run(&mut self) {
self.spec.always_run = true;
}
/// Run all tasks even if they throw errors.
pub fn keep_going(&mut self) {
self.spec.keep_going = true;
}
/// Sets the number of threads to use to run tasks.
pub fn jobs(&mut self, jobs: usize) {
self.jobs = jobs;
}
/// Adds a path to Lua's require path for modules.
pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) {
self.spec.include_paths.push(path.into());
}
/// Sets a variable value.
pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) {
self.spec.variables.push((name.as_ref().to_string(), value.into()));
}
/// Load the script.
pub fn load(&mut self) -> Result<(), Box<Error>> {
if self.runtime.is_none() {
self.runtime = Some(try!(self.spec.create()));
}
Ok(())
}
/// Prints the list of named tasks for a script.
pub fn print_task_list(&mut self) {
let mut tasks = self.runtime().environment().tasks();
tasks.sort_by(|a, b| a.name().cmp(b.name()));
let mut out = term::stdout().unwrap();
println!("Available tasks:");
for task in tasks {
out.fg(term::color::BRIGHT_GREEN).unwrap();
write!(out, " {:16}", task.name()).unwrap();
out.reset().unwrap();
if let Some(ref description) = task.description() {
write!(out, "{}", description).unwrap();
}
writeln!(out, "").unwrap();
}
if let Some(ref default) = self.runtime().environment().default_task() {
println!("");
println!("Default task: {}", default);
}
}
/// Run the default task.
pub fn run_default(&mut self) -> Result<(), Box<Error>> {
if let Some(ref name) = self.runtime().environment().default_task() {
let tasks = vec![name];
self.run(&tasks)
} else {
Err("no default task defined".into())
}
}
/// Runs the specified list of tasks.
///
/// Tasks are run in parallel when possible during execution. The maximum number of parallel
/// jobs can be set with the `jobs()` method.
pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> {
// Resolve all tasks given.
for task in tasks {
try!(self.resolve_task(task));
}
// Determine the schedule of tasks to execute.
let mut queue = try!(self.graph.solve(!self.spec.always_run));
let task_count = queue.len();
let thread_count = cmp::min(self.jobs, task_count);
debug!("running {} task(s) across {} thread(s)",
task_count,
thread_count);
// Spawn one thread for each job.
let mut threads = Vec::new();
let mut free_threads: HashSet<usize> = HashSet::new();
let mut channels = Vec::new();
let (sender, receiver) = mpsc::channel::<Result<usize, usize>>();
// Spawn `jobs` number of threads (but no more than the task count!).
for thread_id in 0..thread_count {
let spec = self.spec.clone();
let thread_sender = sender.clone();
let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0);
channels.push(parent_sender);
threads.push(thread::spawn(move || {
// Prepare a new runtime.
let runtime = spec.create().unwrap_or_else(|e| {
error!("{}", e);
panic!();
});
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
}
// Begin executing tasks!
while let Ok((name, task_id)) = thread_receiver.recv() {
info!("running task '{}' ({} of {})", name, task_id, task_count);
// Lookup the task to run.
let task = {
// Lookup the task to run.
if let Some(task) = runtime.environment().get_task(&name) {
task as Rc<Task>
}
// Find a rule that matches the task name.
else if let Some(rule) = runtime.environment()
.rules()
.iter()
.find(|rule| rule.matches(&name)) {
Rc::new(rule.create_task(name).unwrap()) as Rc<Task>
}
// No matching task.
else {
panic!("no matching task or rule for '{}'", name);
}
};
// Check for dry run.
if !spec.dry_run {
if let Err(e) = task.run() {
// If we ought to keep going, just issue a warning.
if spec.keep_going {
warn!("ignoring error: {}", e);
} else {
error!("{}", e);
thread_sender.send(Err(thread_id)).unwrap();
return;
}
}
} else {
info!("would run task '{}'", task.name());
}
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
break;
}
}
}))
}
drop(sender);
// Keep track of tasks completed and tasks in progress.
let mut completed_tasks: HashSet<String> = HashSet::new();
let mut current_tasks: HashMap<usize, String> = HashMap::new();
let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect();
while !queue.is_empty() || !current_tasks.is_empty() {
// Wait for a thread to request a task.
let result = receiver.recv().unwrap();
// If the thread sent an error, we should stop everything if keep_going isn't enabled.
if let Err(thread_id) = result {
debug!("thread {} errored, waiting for remaining tasks...",
thread_id);
return Err("not all tasks completed successfully".into());
}
let thread_id = result.unwrap();
free_threads.insert(thread_id);
trace!("thread {} is idle", thread_id);
// If the thread was previously running a task, mark it as completed.
if let Some(task) = current_tasks.remove(&thread_id) {
trace!("task '{}' completed", task);
completed_tasks.insert(task);
}
// Attempt to schedule more tasks to run. The most we can schedule is the number of free
// threads, but it is limited by the number of tasks that have their dependencies already
// finished.
'schedule: for _ in 0..free_threads.len() {
// If the queue is empty, we are done.
if queue.is_empty() {
break;
}
// Check the next task in the queue. If any of its dependencies have not yet been
// completed, we cannot schedule it yet.
for dependency in queue.front().unwrap().dependencies() {
// Check that the dependency needs scheduled at all (some are already satisfied),
// and that it hasn't already finished.
if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) {
// We can't run the next task, so we're done scheduling for now until another
// thread finishes.
break 'schedule;
}
}
// Get the available task from the queue.
let task = queue.front().unwrap().clone();
// Pick a free thread to run the task in.
if let Some(thread_id) = free_threads.iter().next().map(|t| *t) {
trace!("scheduling task '{}' on thread {}", task.name(), thread_id);
let data = (task.name().to_string(), task_count - queue.len() + 1);
// Send the task name.
if channels[thread_id].send(data).is_ok() { | current_tasks.insert(thread_id, task.name().to_string());
free_threads.remove(&thread_id);
| random_line_split | |
assembly_finishing_objects.py | um_sequences[self.contigs[current+1].search_true_first_sequence(start, step)].start): # could it fit after the next contig
self.swap_contigs(current, current + 1)
continue # restart the loop
if(j == len(c.mum_sequences)): # all mum_sequences fit on the already treated part, so we discard it
current += 1
c.futur = None
continue # restart the loop
res = c.verify_heights(j, self.contigs[0].id)
if (res == 0 or res == 1): #### maybe : find between which seqs is the vertical gap and use the orientaton of the one at the start to set orientation
if (len(c.mum_sequences) > j+1): # change to a while? in case 0 1 0 gap 1
if ((c.mum_sequences[j+1].start - c.mum_sequences[j].end > step) and (c.mum_sequences[j] != c.mum_sequences[j+1])):
c.futur = 0
if (c.futur == None):
if c.mum_sequences[j].orientation == orientation:
c.futur = 0
else:
c.futur = 1
if (res == 1): # rolling case
print "\n\nHERE\n\n"
# search for vertical gap between a and b
# while (compare_heights(a, b)): ## TODO need a function to check when it goes from top to bottom gap, if direct/direct, fisrt higher, if reverse/reverse, first lower
res = c.find_rolling_gap()
# call recursion on current + 1 with start = a.end and end = b.start
# if (c.futur == 0):
# if (c.mum_sequences[res].orientation == 0):
# orientation = 1
# else:
# orientation = 0
# else:
if (c.futur == 1):
if (c.mum_sequences[res].orientation == 0):
orientation = 1
else:
orientation = 0
else:
orientation = c.mum_sequences[res].orientation
self.orientate(current + 1, orientation, c.mum_sequences[res].end, c.mum_sequences[res+1].start, True, step)
### TODO restaurer l'orientation!! à celle de b
if (c.futur == 1):
if (c.mum_sequences[res+1].orientation == 0):
orientation = 1
else:
orientation = 0
else:
orientation = c.mum_sequences[res+1].orientation
current += 1
continue
elif (res == 2):
c.futur = 1
if c.mum_sequences[j].orientation == 0: # since it will be reversed
orientation = 1
else:
orientation = 0
j += 1
while (j < len(c.mum_sequences)):
if (c.mum_sequences[j].start < start - step):
j += 1
continue # not to change the start value
if (c.mum_sequences[j].start - c.mum_sequences[j-1].end < step): # maybe n times step?
if (c.futur == 0): # the contig is in the good orientation, so .orientation is true
orientation = c.mum_sequences[j].orientation
elif c.mum_sequences[j].orientation == 0: # the contig will be reversed, so the actual orientation is the opposite of .orientation
orientation = 1
else:
orientation = 0
else: # a gap can only happen when the two sequences around it are in different orientations
###### FALSE when there's need to roll, and it is of lower height
if (c.compare_heights(j-1, j)): #c.mum_sequences[j-1].height < c.mum_sequences[j].height): # good
if (not recursion):
if orientation == 0:
orientation = 1
# else:
# orientation = 0
else:
break
# print "\n\n\n\n\n\nINVERSED CONCATENATION ORDER\n\n\n\n\n\n"
tmp_start = c.get_position_before_gap(step)
self.orientate(current + 1, orientation, tmp_start, c.mum_sequences[j].start, True, step)
# search from here to which contig of the list, the first mums start are smaller than rev, and reverse that length
rev = self.search_rev(current+1, c.mum_sequences[j].start)
# self.contigs[current + 1: rev] = reversed(self.contigs[current + 1:rev])
####################
tmp = None
if (c.mum_sequences[j-1].orientation == 1): # if the first mum sequence is reverse
if (c.futur == 0):
tmp = current
else:
tmp = current + 1
else:
if (c.futur == 0):
tmp = current +1
else:
tmp = current
self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev])
####################
orientation = 0
current = rev-1
j += 1
break # because a gap means that j is the last mum_sequence of the list
# else: # this one should be reversed, unless there's translocation
# if c.futur == 0:
# print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen
# else:
# print "\n\n\n\n\nHERE\n\n\n\n"
j += 1
# start = c.get_position_before_gap(step)
start = c.mum_sequences[j-1].end
current += 1
@staticmethod
def sort_mums(index, mums):
mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)]
for m in mums:
mums_of_contigs[m[4].id - 1].append(m)
return mums_of_contigs
def search_rev(self, i, limit):
while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit):
i += 1
return i
def swap_contigs(self, first, second):
tmp_contig = self.contigs[first]
self.contigs[first] = self.contigs[second]
self.contigs[second] = tmp_contig
class Contig:
def __init__(self, n, mums, step, big_enough):
self.id = n
self.mum_sequences = []
if (len(mums) > 1):
# print "kept : " + str(self.id) + " " + str(mums[0][2])
# mums = Contig.clean(mums, step)
mums.sort(key = lambda info: info[0])
Contig.clean(mums, step)
if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results
print "Discarding this conting or appending it to the end of the sequence"
self.futur = -1 # removal
self.first_mum = float("inf")
else:
self.futur = None
self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation
self.first_mum = self.mum_sequences[0].mums[0][0]
def make_mum_sequences(self, mums):
i = 1
orientation = mums[0][5] | while(i < len(mums)):
if (mums[i][5] != orientation):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump that could happen, but smaller than the size of the genome
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
i += 1
# if (i != 0):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
@staticmethod
def clean(mums, step):
continuating = [False for i in mums]
for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum
if (continuating[i | j = 0
if (len(mums) == 1):
self.mum_sequences.append(Mum_sequence(orientation, mums))
return | random_line_split |
assembly_finishing_objects.py | = current
else:
tmp = current + 1
else:
if (c.futur == 0):
tmp = current +1
else:
tmp = current
self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev])
####################
orientation = 0
current = rev-1
j += 1
break # because a gap means that j is the last mum_sequence of the list
# else: # this one should be reversed, unless there's translocation
# if c.futur == 0:
# print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen
# else:
# print "\n\n\n\n\nHERE\n\n\n\n"
j += 1
# start = c.get_position_before_gap(step)
start = c.mum_sequences[j-1].end
current += 1
@staticmethod
def sort_mums(index, mums):
mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)]
for m in mums:
mums_of_contigs[m[4].id - 1].append(m)
return mums_of_contigs
def search_rev(self, i, limit):
while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit):
i += 1
return i
def swap_contigs(self, first, second):
tmp_contig = self.contigs[first]
self.contigs[first] = self.contigs[second]
self.contigs[second] = tmp_contig
class Contig:
def __init__(self, n, mums, step, big_enough):
self.id = n
self.mum_sequences = []
if (len(mums) > 1):
# print "kept : " + str(self.id) + " " + str(mums[0][2])
# mums = Contig.clean(mums, step)
mums.sort(key = lambda info: info[0])
Contig.clean(mums, step)
if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results
print "Discarding this conting or appending it to the end of the sequence"
self.futur = -1 # removal
self.first_mum = float("inf")
else:
self.futur = None
self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation
self.first_mum = self.mum_sequences[0].mums[0][0]
def make_mum_sequences(self, mums):
i = 1
orientation = mums[0][5]
j = 0
if (len(mums) == 1):
self.mum_sequences.append(Mum_sequence(orientation, mums))
return
while(i < len(mums)):
if (mums[i][5] != orientation):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump that could happen, but smaller than the size of the genome
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
i += 1
# if (i != 0):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
@staticmethod
def clean(mums, step):
continuating = [False for i in mums]
for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum
if (continuating[i] is False):
if i != 0: # if NOT the first one --> check continuity with previous
for j in xrange(i): # searching if any mum before it continues it
if mums[i][5] == 0:
if ((mums[j][1] + mums[j][2] < mums[i][1] + step)
and (mums[j][1] + mums[j][2] > mums[i][1] - step)
and (mums[j][0] + mums[j][2] < mums[i][0] + step)
and (mums[j][0] + mums[j][2] > mums[i][0] - step)):
continuating[j] = True
continuating[j] = True
break
else:
if ((mums[j][1] < mums[i][1] + mums[i][2] + step)
and (mums[j][1] > mums[i][1] + mums[i][2] - step)
and (mums[j][0] + mums[j][2] < mums[i][0] + step)
and (mums[j][0] + mums[j][2] > mums[i][0] - step)):
continuating[j] = True
continuating[i] = True
break
if i != len(mums): # if NOT the last one --> check continuity with next
for j in xrange(i+1, len(mums)): # searching if any mum after it continues it
if mums[i][5] == 0:
if ((mums[j][1] > mums[i][1] + mums[i][2] - step)
and (mums[j][1] < mums[i][1] + mums[i][2] + step)
and (mums[j][0] > mums[i][0] + mums[i][2] - step)
and (mums[j][0] < mums[i][0] + mums[i][2] + step)):
continuating[j] = True
continuating[i] = True
break
else:
if ((mums[j][1] + mums[j][2] > mums[i][1] - step)
and (mums[j][1] + mums[j][2] < mums[i][1] + step)
and (mums[j][0] < mums[i][0] + mums[i][2] + step)
and (mums[j][0] > mums[i][0] + mums[i][2] - step)):
continuating[j] = True
continuating[i] = True
break
i = 0
while (i < len(mums)):
if (continuating[i] is False):
continuating.pop(i)
mums.pop(i)
# print "Removing a mum of size " + str(tmp[2])
continue
i += 1
# return mums
def verify_heights(self, j, first_s_id):
i = j + 1
start_height = self.mum_sequences[j].height
current_height = start_height
restarted = False
while (i < len(self.mum_sequences)):
if (self.mum_sequences[i].height > current_height):
# current_height = self.mum_sequences[i].height ## ADDED THIS
i += 1
continue
elif (self.mum_sequences[i].height < start_height and not restarted and first_s_id == self.id):
restarted = True
current_height = self.mum_sequences[i].height
else:
if (i+1 == len(self.mum_sequences)):
return 2 # need to reverse this contig
return -1 # error somewhere
i += 1
if (restarted == True):
return 1 # will need to roll
return 0 # linear (but might still have a gap and two different orientations)
def search_true_first_sequence(self, start, step):
j = 0
while (j < len(self.mum_sequences) and (self.mum_sequences[j].start < start - step)):
j += 1
return j
def get_position_before_gap(self, step):
i = 1
position = self.mum_sequences[0].end
while (i < len(self.mum_sequences)):
if (self.mum_sequences[i].start > position + step):
return position
else:
position = self.mum_sequences[i].end
i += 1
return position
def compare_heights(self, i, j): # True = i lower than j, False = i higher than j, with regards to futur orientation
if (self.mum_sequences[i].height < self.mum_sequences[j].height):
if (self.futur == 0):
return True
else:
return False
if (self.futur == 0):
return False
return True
def f | ind_rolling_gap( | identifier_name | |
assembly_finishing_objects.py | _sequences[j].height): # good
if (not recursion):
if orientation == 0:
orientation = 1
# else:
# orientation = 0
else:
break
# print "\n\n\n\n\n\nINVERSED CONCATENATION ORDER\n\n\n\n\n\n"
tmp_start = c.get_position_before_gap(step)
self.orientate(current + 1, orientation, tmp_start, c.mum_sequences[j].start, True, step)
# search from here to which contig of the list, the first mums start are smaller than rev, and reverse that length
rev = self.search_rev(current+1, c.mum_sequences[j].start)
# self.contigs[current + 1: rev] = reversed(self.contigs[current + 1:rev])
####################
tmp = None
if (c.mum_sequences[j-1].orientation == 1): # if the first mum sequence is reverse
if (c.futur == 0):
tmp = current
else:
tmp = current + 1
else:
if (c.futur == 0):
tmp = current +1
else:
tmp = current
self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev])
####################
orientation = 0
current = rev-1
j += 1
break # because a gap means that j is the last mum_sequence of the list
# else: # this one should be reversed, unless there's translocation
# if c.futur == 0:
# print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen
# else:
# print "\n\n\n\n\nHERE\n\n\n\n"
j += 1
# start = c.get_position_before_gap(step)
start = c.mum_sequences[j-1].end
current += 1
@staticmethod
def sort_mums(index, mums):
mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)]
for m in mums:
mums_of_contigs[m[4].id - 1].append(m)
return mums_of_contigs
def search_rev(self, i, limit):
while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit):
i += 1
return i
def swap_contigs(self, first, second):
tmp_contig = self.contigs[first]
self.contigs[first] = self.contigs[second]
self.contigs[second] = tmp_contig
class Contig:
def __init__(self, n, mums, step, big_enough):
self.id = n
self.mum_sequences = []
if (len(mums) > 1):
# print "kept : " + str(self.id) + " " + str(mums[0][2])
# mums = Contig.clean(mums, step)
mums.sort(key = lambda info: info[0])
Contig.clean(mums, step)
if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results
print "Discarding this conting or appending it to the end of the sequence"
self.futur = -1 # removal
self.first_mum = float("inf")
else:
self.futur = None
self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation
self.first_mum = self.mum_sequences[0].mums[0][0]
def make_mum_sequences(self, mums):
i = 1
orientation = mums[0][5]
j = 0
if (len(mums) == 1):
self.mum_sequences.append(Mum_sequence(orientation, mums))
return
while(i < len(mums)):
if (mums[i][5] != orientation):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump that could happen, but smaller than the size of the genome
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
i += 1
# if (i != 0):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
@staticmethod
def clean(mums, step):
continuating = [False for i in mums]
for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum
if (continuating[i] is False):
if i != 0: # if NOT the first one --> check continuity with previous
for j in xrange(i): # searching if any mum before it continues it
if mums[i][5] == 0:
if ((mums[j][1] + mums[j][2] < mums[i][1] + step)
and (mums[j][1] + mums[j][2] > mums[i][1] - step)
and (mums[j][0] + mums[j][2] < mums[i][0] + step)
and (mums[j][0] + mums[j][2] > mums[i][0] - step)):
continuating[j] = True
continuating[j] = True
break
else:
if ((mums[j][1] < mums[i][1] + mums[i][2] + step)
and (mums[j][1] > mums[i][1] + mums[i][2] - step)
and (mums[j][0] + mums[j][2] < mums[i][0] + step)
and (mums[j][0] + mums[j][2] > mums[i][0] - step)):
continuating[j] = True
continuating[i] = True
break
if i != len(mums): # if NOT the last one --> check continuity with next
for j in xrange(i+1, len(mums)): # searching if any mum after it continues it
if mums[i][5] == 0:
if ((mums[j][1] > mums[i][1] + mums[i][2] - step)
and (mums[j][1] < mums[i][1] + mums[i][2] + step)
and (mums[j][0] > mums[i][0] + mums[i][2] - step)
and (mums[j][0] < mums[i][0] + mums[i][2] + step)):
continuating[j] = True
continuating[i] = True
break
else:
if ((mums[j][1] + mums[j][2] > mums[i][1] - step)
and (mums[j][1] + mums[j][2] < mums[i][1] + step)
and (mums[j][0] < mums[i][0] + mums[i][2] + step)
and (mums[j][0] > mums[i][0] + mums[i][2] - step)):
continuating[j] = True
continuating[i] = True
break
i = 0
while (i < len(mums)):
if (continuating[i] is False):
continuating.pop(i)
mums.pop(i)
# print "Removing a mum of size " + str(tmp[2])
continue
i += 1
# return mums
def verify_heights(self, j, first_s_id):
i | = j + 1
start_height = self.mum_sequences[j].height
current_height = start_height
restarted = False
while (i < len(self.mum_sequences)):
if (self.mum_sequences[i].height > current_height):
# current_height = self.mum_sequences[i].height ## ADDED THIS
i += 1
continue
elif (self.mum_sequences[i].height < start_height and not restarted and first_s_id == self.id):
restarted = True
current_height = self.mum_sequences[i].height
else:
if (i+1 == len(self.mum_sequences)):
return 2 # need to reverse this contig
return -1 # error somewhere
i += 1
if (restarted == True):
return 1 # will need to roll
return 0 # linear (but might still have a gap and two different orientations) | identifier_body | |
assembly_finishing_objects.py | _sequences[self.contigs[current+1].search_true_first_sequence(start, step)].start): # could it fit after the next contig
self.swap_contigs(current, current + 1)
continue # restart the loop
if(j == len(c.mum_sequences)): # all mum_sequences fit on the already treated part, so we discard it
current += 1
c.futur = None
continue # restart the loop
res = c.verify_heights(j, self.contigs[0].id)
if (res == 0 or res == 1): #### maybe : find between which seqs is the vertical gap and use the orientaton of the one at the start to set orientation
if (len(c.mum_sequences) > j+1): # change to a while? in case 0 1 0 gap 1
if ((c.mum_sequences[j+1].start - c.mum_sequences[j].end > step) and (c.mum_sequences[j] != c.mum_sequences[j+1])):
c.futur = 0
if (c.futur == None):
if c.mum_sequences[j].orientation == orientation:
c.futur = 0
else:
|
if (res == 1): # rolling case
print "\n\nHERE\n\n"
# search for vertical gap between a and b
# while (compare_heights(a, b)): ## TODO need a function to check when it goes from top to bottom gap, if direct/direct, fisrt higher, if reverse/reverse, first lower
res = c.find_rolling_gap()
# call recursion on current + 1 with start = a.end and end = b.start
# if (c.futur == 0):
# if (c.mum_sequences[res].orientation == 0):
# orientation = 1
# else:
# orientation = 0
# else:
if (c.futur == 1):
if (c.mum_sequences[res].orientation == 0):
orientation = 1
else:
orientation = 0
else:
orientation = c.mum_sequences[res].orientation
self.orientate(current + 1, orientation, c.mum_sequences[res].end, c.mum_sequences[res+1].start, True, step)
### TODO restaurer l'orientation!! à celle de b
if (c.futur == 1):
if (c.mum_sequences[res+1].orientation == 0):
orientation = 1
else:
orientation = 0
else:
orientation = c.mum_sequences[res+1].orientation
current += 1
continue
elif (res == 2):
c.futur = 1
if c.mum_sequences[j].orientation == 0: # since it will be reversed
orientation = 1
else:
orientation = 0
j += 1
while (j < len(c.mum_sequences)):
if (c.mum_sequences[j].start < start - step):
j += 1
continue # not to change the start value
if (c.mum_sequences[j].start - c.mum_sequences[j-1].end < step): # maybe n times step?
if (c.futur == 0): # the contig is in the good orientation, so .orientation is true
orientation = c.mum_sequences[j].orientation
elif c.mum_sequences[j].orientation == 0: # the contig will be reversed, so the actual orientation is the opposite of .orientation
orientation = 1
else:
orientation = 0
else: # a gap can only happen when the two sequences around it are in different orientations
###### FALSE when there's need to roll, and it is of lower height
if (c.compare_heights(j-1, j)): #c.mum_sequences[j-1].height < c.mum_sequences[j].height): # good
if (not recursion):
if orientation == 0:
orientation = 1
# else:
# orientation = 0
else:
break
# print "\n\n\n\n\n\nINVERSED CONCATENATION ORDER\n\n\n\n\n\n"
tmp_start = c.get_position_before_gap(step)
self.orientate(current + 1, orientation, tmp_start, c.mum_sequences[j].start, True, step)
# search from here to which contig of the list, the first mums start are smaller than rev, and reverse that length
rev = self.search_rev(current+1, c.mum_sequences[j].start)
# self.contigs[current + 1: rev] = reversed(self.contigs[current + 1:rev])
####################
tmp = None
if (c.mum_sequences[j-1].orientation == 1): # if the first mum sequence is reverse
if (c.futur == 0):
tmp = current
else:
tmp = current + 1
else:
if (c.futur == 0):
tmp = current +1
else:
tmp = current
self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev])
####################
orientation = 0
current = rev-1
j += 1
break # because a gap means that j is the last mum_sequence of the list
# else: # this one should be reversed, unless there's translocation
# if c.futur == 0:
# print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen
# else:
# print "\n\n\n\n\nHERE\n\n\n\n"
j += 1
# start = c.get_position_before_gap(step)
start = c.mum_sequences[j-1].end
current += 1
@staticmethod
def sort_mums(index, mums):
mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)]
for m in mums:
mums_of_contigs[m[4].id - 1].append(m)
return mums_of_contigs
def search_rev(self, i, limit):
while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit):
i += 1
return i
def swap_contigs(self, first, second):
tmp_contig = self.contigs[first]
self.contigs[first] = self.contigs[second]
self.contigs[second] = tmp_contig
class Contig:
def __init__(self, n, mums, step, big_enough):
self.id = n
self.mum_sequences = []
if (len(mums) > 1):
# print "kept : " + str(self.id) + " " + str(mums[0][2])
# mums = Contig.clean(mums, step)
mums.sort(key = lambda info: info[0])
Contig.clean(mums, step)
if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results
print "Discarding this conting or appending it to the end of the sequence"
self.futur = -1 # removal
self.first_mum = float("inf")
else:
self.futur = None
self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation
self.first_mum = self.mum_sequences[0].mums[0][0]
def make_mum_sequences(self, mums):
i = 1
orientation = mums[0][5]
j = 0
if (len(mums) == 1):
self.mum_sequences.append(Mum_sequence(orientation, mums))
return
while(i < len(mums)):
if (mums[i][5] != orientation):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump that could happen, but smaller than the size of the genome
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
orientation = mums[i][5]
j = i
i += 1
# if (i != 0):
self.mum_sequences.append(Mum_sequence(orientation, mums[j:i]))
@staticmethod
def clean(mums, step):
continuating = [False for i in mums]
for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum
if (continuating[i | c.futur = 1 | conditional_block |
Ch11. Ex.py | first batch of input the user would enter
Chris
Jesse
Sally
# this is the second batch of input the user would enter
Grade for Chris: 90
Grade for Jesse: 80
Grade for Sally: 70
# below is what your program should output
Class roster:
Chris (90.0)
Jesse (80.0)
Sally (70.0)
Average grade: 80.0
"""
import sys
sys.setExecutionLimit(70000)
students = []
grades = []
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students += [name]
name = input("Enter the name of a student. (When finished, enter nothing)")
for i in range(len(students)):
score = float(input("Grade for {0}:".format(students[i])))
grades += [score]
print("Class roster:")
for index, student in enumerate(students):
total_score += grades[index]
print("{0} ({1:.1})".format(student, grades[index]))
print("\nAverage grade:", (total_score / len(students)))
#3. Implement the functionality of the above program using a dictionary instead of a list.
import sys
sys.setExecutionLimit(70000)
students = {}
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students[name] = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
print("Class roster:")
for student in students.keys():
score = float(input("Grade for {0}:".format(student)))
students[student] = score
total_score += students[student]
print("{0} ({1:.1})".format(student, students[student]))
print("\nAverage grade:", (total_score / len(students)))
"""4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out.
clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value.
clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list.
To make this program a little easier, we’re entering the clock in and clock out times as integers. As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. And you can do this exercise either by aliasing or copying the dictionary."""
def clock_in(worker_dict, name, clock_in_time):
worker_dict[name][0] = clock_in_time
def clock_out(worker_dict, name, clock_out_time):
worker_dict[name][1] = clock_out_time
worker_dict[name][2] = clock_out_time - worker_dict[name][0]
def main():
workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]}
print(workers.get("George Spelvin")) # should print [0,0,0]
clock_in(workers, "George Spelvin", 8)
clock_out(workers, "George Spelvin", 17)
print(workers.get("George Spelvin")) # should print [8, 17, 9]
if __name__ == "__main__":
main()
"""5. Here’s a table of English to Pirate translations:
English Pirate
sir matey
hotel fleabag inn
student swabbie
boy matey
madam proud beauty
professor foul blaggart
restaurant galley
your yer
excuse arr
students swabbies
are be
lawyer foul blaggart
restroom th’ head
my me
hello avast
is be
man matey
Write a program that asks the user for a sentence in English and then translates that sentence to Pirate."""
from test import testEqual
def translate(text):
# your code here!
pirate_text = ""
my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey",
"madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley",
"your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be",
"lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'",
"hello" : "avast", "is" : "be", "man" : "matey"}
word_list = text.split()
index = 0
pirate_words = []
for word in word_list:
char = ""
pirate_word = ""
if (word.isalpha() == False):
char = word[len(word)-1]
new_word = word[:len(word)-1]
else:
new_word = word
#print("!"+new_word+"!")
if new_word in my_dict:
pirate_word = my_dict[new_word]
else:
pirate_word = new_word
pirate_word += char
index += 1
pirate_words += [pirate_word]
pirate_text = " ".join(pirate_words)
#print(my_dict, "\n\n")
print(pirate_text)
return pirate_text
text = "hello my man, please excuse your professor to the restroom!"
testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!")
"""6. Give the Python interpreter’s response to each of the following from a continuous interpreter session:
>>> d = {'apples': 15, 'bananas': 35, 'grapes': 12}
>>> d['bananas']
>>> d['oranges'] = 20
>>> len(d)
>>> 'grapes' in d
>>> d['pears']
>>> d.get('pears', 0)
>>> fruits = d.keys()
>>> sorted(fruits)
>>> print(fruits)
>>> del d['apples']
>>> 'apples' in d
Be sure you understand why you get each result. """
from test import testEqual
# Note: The pass is a placeholder to allow
# the code to compile. Remove it when you
# begin coding.
def set_inventory(inventory, fruit, quantity=0):
inventory[fruit] = quantity
# make these tests work...
new_inventory = {}
set_inventory(new_inventory, 'strawberries', 10)
testEqual('strawberries' in new_inventory, True)
testEqual(new_inventory['strawberries'], 10)
set_inventory(new_inventory, 'strawberries', 25)
testEqual(new_inventory['strawberries'] , 25)
"""Weekly Graded Assignment
Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple.
The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact.
contacts = {name: (phone, email), name: (phone, email), etc.}
The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function.
For example, given a dictionary argument of:
{"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"),
"Welles, Orson": ("1-312-720-8888", "orson@notlive.com"),
"Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")}
sort_contacts should return this:
[('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')]
"""
# Create sort_contacts function
def sort_contacts( | contacts):
| identifier_name | |
Ch11. Ex.py | 0 percentage scale. Use 2 lists (grades and students) and the enumerate function in your solution.
A test run of this program would yield the following:
# this is the first batch of input the user would enter
Chris
Jesse
Sally
# this is the second batch of input the user would enter
Grade for Chris: 90
Grade for Jesse: 80
Grade for Sally: 70
# below is what your program should output
Class roster:
Chris (90.0)
Jesse (80.0)
Sally (70.0)
Average grade: 80.0
"""
import sys
sys.setExecutionLimit(70000)
students = []
grades = []
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students += [name]
name = input("Enter the name of a student. (When finished, enter nothing)")
for i in range(len(students)):
score = float(input("Grade for {0}:".format(students[i])))
grades += [score]
print("Class roster:")
for index, student in enumerate(students):
to |
print("\nAverage grade:", (total_score / len(students)))
#3. Implement the functionality of the above program using a dictionary instead of a list.
import sys
sys.setExecutionLimit(70000)
students = {}
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students[name] = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
print("Class roster:")
for student in students.keys():
score = float(input("Grade for {0}:".format(student)))
students[student] = score
total_score += students[student]
print("{0} ({1:.1})".format(student, students[student]))
print("\nAverage grade:", (total_score / len(students)))
"""4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out.
clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value.
clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list.
To make this program a little easier, we’re entering the clock in and clock out times as integers. As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. And you can do this exercise either by aliasing or copying the dictionary."""
def clock_in(worker_dict, name, clock_in_time):
worker_dict[name][0] = clock_in_time
def clock_out(worker_dict, name, clock_out_time):
worker_dict[name][1] = clock_out_time
worker_dict[name][2] = clock_out_time - worker_dict[name][0]
def main():
workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]}
print(workers.get("George Spelvin")) # should print [0,0,0]
clock_in(workers, "George Spelvin", 8)
clock_out(workers, "George Spelvin", 17)
print(workers.get("George Spelvin")) # should print [8, 17, 9]
if __name__ == "__main__":
main()
"""5. Here’s a table of English to Pirate translations:
English Pirate
sir matey
hotel fleabag inn
student swabbie
boy matey
madam proud beauty
professor foul blaggart
restaurant galley
your yer
excuse arr
students swabbies
are be
lawyer foul blaggart
restroom th’ head
my me
hello avast
is be
man matey
Write a program that asks the user for a sentence in English and then translates that sentence to Pirate."""
from test import testEqual
def translate(text):
# your code here!
pirate_text = ""
my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey",
"madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley",
"your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be",
"lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'",
"hello" : "avast", "is" : "be", "man" : "matey"}
word_list = text.split()
index = 0
pirate_words = []
for word in word_list:
char = ""
pirate_word = ""
if (word.isalpha() == False):
char = word[len(word)-1]
new_word = word[:len(word)-1]
else:
new_word = word
#print("!"+new_word+"!")
if new_word in my_dict:
pirate_word = my_dict[new_word]
else:
pirate_word = new_word
pirate_word += char
index += 1
pirate_words += [pirate_word]
pirate_text = " ".join(pirate_words)
#print(my_dict, "\n\n")
print(pirate_text)
return pirate_text
text = "hello my man, please excuse your professor to the restroom!"
testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!")
"""6. Give the Python interpreter’s response to each of the following from a continuous interpreter session:
>>> d = {'apples': 15, 'bananas': 35, 'grapes': 12}
>>> d['bananas']
>>> d['oranges'] = 20
>>> len(d)
>>> 'grapes' in d
>>> d['pears']
>>> d.get('pears', 0)
>>> fruits = d.keys()
>>> sorted(fruits)
>>> print(fruits)
>>> del d['apples']
>>> 'apples' in d
Be sure you understand why you get each result. """
from test import testEqual
# Note: The pass is a placeholder to allow
# the code to compile. Remove it when you
# begin coding.
def set_inventory(inventory, fruit, quantity=0):
inventory[fruit] = quantity
# make these tests work...
new_inventory = {}
set_inventory(new_inventory, 'strawberries', 10)
testEqual('strawberries' in new_inventory, True)
testEqual(new_inventory['strawberries'], 10)
set_inventory(new_inventory, 'strawberries', 25)
testEqual(new_inventory['strawberries'] , 25)
"""Weekly Graded Assignment
Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple.
The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact.
contacts = {name: (phone, email), name: (phone, email), etc.}
The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function.
For example, given a dictionary argument of:
{"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"),
"Welles, Orson": ("1-312-720-8888", "orson@notlive.com"),
"Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")}
sort_contacts should return this:
[('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', | tal_score += grades[index]
print("{0} ({1:.1})".format(student, grades[index]))
| conditional_block |
Ch11. Ex.py |
if __name__ == "__main__":
main()
"""2. Write a program that will function as a grade book, allowing a user (a professor or teacher) to enter the class roster for a course, along with each student’s cumulative grade. It then prints the class roster along with the average cumulative grade. Grades are on a 0-100 percentage scale. Use 2 lists (grades and students) and the enumerate function in your solution.
A test run of this program would yield the following:
# this is the first batch of input the user would enter
Chris
Jesse
Sally
# this is the second batch of input the user would enter
Grade for Chris: 90
Grade for Jesse: 80
Grade for Sally: 70
# below is what your program should output
Class roster:
Chris (90.0)
Jesse (80.0)
Sally (70.0)
Average grade: 80.0
"""
import sys
sys.setExecutionLimit(70000)
students = []
grades = []
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students += [name]
name = input("Enter the name of a student. (When finished, enter nothing)")
for i in range(len(students)):
score = float(input("Grade for {0}:".format(students[i])))
grades += [score]
print("Class roster:")
for index, student in enumerate(students):
total_score += grades[index]
print("{0} ({1:.1})".format(student, grades[index]))
print("\nAverage grade:", (total_score / len(students)))
#3. Implement the functionality of the above program using a dictionary instead of a list.
import sys
sys.setExecutionLimit(70000)
students = {}
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students[name] = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
print("Class roster:")
for student in students.keys():
score = float(input("Grade for {0}:".format(student)))
students[student] = score
total_score += students[student]
print("{0} ({1:.1})".format(student, students[student]))
print("\nAverage grade:", (total_score / len(students)))
"""4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out.
clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value.
clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list.
To make this program a little easier, we’re entering the clock in and clock out times as integers. As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. And you can do this exercise either by aliasing or copying the dictionary."""
def clock_in(worker_dict, name, clock_in_time):
worker_dict[name][0] = clock_in_time
def clock_out(worker_dict, name, clock_out_time):
worker_dict[name][1] = clock_out_time
worker_dict[name][2] = clock_out_time - worker_dict[name][0]
def main():
workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]}
print(workers.get("George Spelvin")) # should print [0,0,0]
clock_in(workers, "George Spelvin", 8)
clock_out(workers, "George Spelvin", 17)
print(workers.get("George Spelvin")) # should print [8, 17, 9]
if __name__ == "__main__":
main()
"""5. Here’s a table of English to Pirate translations:
English Pirate
sir matey
hotel fleabag inn
student swabbie
boy matey
madam proud beauty
professor foul blaggart
restaurant galley
your yer
excuse arr
students swabbies
are be
lawyer foul blaggart
restroom th’ head
my me
hello avast
is be
man matey
Write a program that asks the user for a sentence in English and then translates that sentence to Pirate."""
from test import testEqual
def translate(text):
# your code here!
pirate_text = ""
my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey",
"madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley",
"your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be",
"lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'",
"hello" : "avast", "is" : "be", "man" : "matey"}
word_list = text.split()
index = 0
pirate_words = []
for word in word_list:
char = ""
pirate_word = ""
if (word.isalpha() == False):
char = word[len(word)-1]
new_word = word[:len(word)-1]
else:
new_word = word
#print("!"+new_word+"!")
if new_word in my_dict:
pirate_word = my_dict[new_word]
else:
pirate_word = new_word
pirate_word += char
index += 1
pirate_words += [pirate_word]
pirate_text = " ".join(pirate_words)
#print(my_dict, "\n\n")
print(pirate_text)
return pirate_text
text = "hello my man, please excuse your professor to the restroom!"
testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!")
"""6. Give the Python interpreter’s response to each of the following from a continuous interpreter session:
>>> d = {'apples': 15, 'bananas': 35, 'grapes': 12}
>>> d['bananas']
>>> d['oranges'] = 20
>>> len(d)
>>> 'grapes' in d
>>> d['pears']
>>> d.get('pears', 0)
>>> fruits = d.keys()
>>> sorted(fruits)
>>> print(fruits)
>>> del d['apples']
>>> 'apples' in d
Be sure you understand why you get each result. """
from test import testEqual
# Note: The pass is a placeholder to allow
# the code to compile. Remove it when you
# begin coding.
def set_inventory(inventory, fruit, quantity=0):
inventory[fruit] = quantity
# make these tests work...
new_inventory = {}
set_inventory(new_inventory, 'strawberries', 10)
testEqual('strawberries' in new_inventory, True)
testEqual(new_inventory['strawberries'], 10)
set_inventory(new_inventory, 'strawberries', 25)
testEqual(new_inventory['strawberries'] , 25)
"""Weekly Graded Assignment
Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple.
The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact.
contacts = {name: (phone, email), name: (phone, email), etc.}
The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function.
For example, given a dictionary argument of:
{"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"),
"Welles, Orson": ("1-312-720-8888", "orson@notlive.com"),
"Freud, Anna": ("1-541 | text = input("Please enter a sentence: ")
chars = create_dict(text)
print_dict(chars) | identifier_body | |
Ch11. Ex.py | 0 percentage scale. Use 2 lists (grades and students) and the enumerate function in your solution.
A test run of this program would yield the following:
# this is the first batch of input the user would enter
Chris
Jesse
Sally
# this is the second batch of input the user would enter
Grade for Chris: 90
Grade for Jesse: 80
Grade for Sally: 70
# below is what your program should output
Class roster:
Chris (90.0)
Jesse (80.0)
Sally (70.0)
Average grade: 80.0
"""
import sys
sys.setExecutionLimit(70000)
| students = []
grades = []
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students += [name]
name = input("Enter the name of a student. (When finished, enter nothing)")
for i in range(len(students)):
score = float(input("Grade for {0}:".format(students[i])))
grades += [score]
print("Class roster:")
for index, student in enumerate(students):
total_score += grades[index]
print("{0} ({1:.1})".format(student, grades[index]))
print("\nAverage grade:", (total_score / len(students)))
#3. Implement the functionality of the above program using a dictionary instead of a list.
import sys
sys.setExecutionLimit(70000)
students = {}
total_score = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
while (name != ""):
students[name] = 0.0
name = input("Enter the name of a student. (When finished, enter nothing)")
print("Class roster:")
for student in students.keys():
score = float(input("Grade for {0}:".format(student)))
students[student] = score
total_score += students[student]
print("{0} ({1:.1})".format(student, students[student]))
print("\nAverage grade:", (total_score / len(students)))
"""4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out.
clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value.
clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list.
To make this program a little easier, we’re entering the clock in and clock out times as integers. As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. And you can do this exercise either by aliasing or copying the dictionary."""
def clock_in(worker_dict, name, clock_in_time):
worker_dict[name][0] = clock_in_time
def clock_out(worker_dict, name, clock_out_time):
worker_dict[name][1] = clock_out_time
worker_dict[name][2] = clock_out_time - worker_dict[name][0]
def main():
workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]}
print(workers.get("George Spelvin")) # should print [0,0,0]
clock_in(workers, "George Spelvin", 8)
clock_out(workers, "George Spelvin", 17)
print(workers.get("George Spelvin")) # should print [8, 17, 9]
if __name__ == "__main__":
main()
"""5. Here’s a table of English to Pirate translations:
English Pirate
sir matey
hotel fleabag inn
student swabbie
boy matey
madam proud beauty
professor foul blaggart
restaurant galley
your yer
excuse arr
students swabbies
are be
lawyer foul blaggart
restroom th’ head
my me
hello avast
is be
man matey
Write a program that asks the user for a sentence in English and then translates that sentence to Pirate."""
from test import testEqual
def translate(text):
# your code here!
pirate_text = ""
my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey",
"madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley",
"your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be",
"lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'",
"hello" : "avast", "is" : "be", "man" : "matey"}
word_list = text.split()
index = 0
pirate_words = []
for word in word_list:
char = ""
pirate_word = ""
if (word.isalpha() == False):
char = word[len(word)-1]
new_word = word[:len(word)-1]
else:
new_word = word
#print("!"+new_word+"!")
if new_word in my_dict:
pirate_word = my_dict[new_word]
else:
pirate_word = new_word
pirate_word += char
index += 1
pirate_words += [pirate_word]
pirate_text = " ".join(pirate_words)
#print(my_dict, "\n\n")
print(pirate_text)
return pirate_text
text = "hello my man, please excuse your professor to the restroom!"
testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!")
"""6. Give the Python interpreter’s response to each of the following from a continuous interpreter session:
>>> d = {'apples': 15, 'bananas': 35, 'grapes': 12}
>>> d['bananas']
>>> d['oranges'] = 20
>>> len(d)
>>> 'grapes' in d
>>> d['pears']
>>> d.get('pears', 0)
>>> fruits = d.keys()
>>> sorted(fruits)
>>> print(fruits)
>>> del d['apples']
>>> 'apples' in d
Be sure you understand why you get each result. """
from test import testEqual
# Note: The pass is a placeholder to allow
# the code to compile. Remove it when you
# begin coding.
def set_inventory(inventory, fruit, quantity=0):
inventory[fruit] = quantity
# make these tests work...
new_inventory = {}
set_inventory(new_inventory, 'strawberries', 10)
testEqual('strawberries' in new_inventory, True)
testEqual(new_inventory['strawberries'], 10)
set_inventory(new_inventory, 'strawberries', 25)
testEqual(new_inventory['strawberries'] , 25)
"""Weekly Graded Assignment
Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple.
The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact.
contacts = {name: (phone, email), name: (phone, email), etc.}
The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function.
For example, given a dictionary argument of:
{"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"),
"Welles, Orson": ("1-312-720-8888", "orson@notlive.com"),
"Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")}
sort_contacts should return this:
[('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1 | random_line_split | |
train_folds.py | anaconda3\envs\arc105\bin'.format(homepath), \
r'{}\anaconda3\condabin'.format(homepath)]
for i in list_lib:
os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH'])
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import numpy as np
#print(np.__version__)
import argparse, copy, shutil
import random
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
#import network_factory as factory
import network_factory_refactor_speed as factory
import dataloader
import torch.nn as nn
import torch
from torch.optim.lr_scheduler import StepLR
from skimage.io import imsave
import numpy as np
import logging
# Fix seed's for reproducibility
random.seed(42)
torch.manual_seed(42)
def get_indices(id_fold, data):
# Instantiate Folds
list_folds = np.array(range(5))
# Roll in axis
tmp_set = np.roll(list_folds,id_fold)
indices_train = []
indices_val = []
indices_test = []
# Train
for i in tmp_set[:3]:
indices_train += list(data[data[:,1].astype(int) == i][:,0])
# Val
for i in tmp_set[3:4]:
indices_val += list(data[data[:,1].astype(int) == i][:,0])
# Test
for i in tmp_set[4:]:
indices_test += list(data[data[:,1].astype(int) == i][:,0])
#print(indices_train)
#print(indices_val)
#print(indices_test)
indices = {}
indices['train'] = indices_train
indices['val'] = indices_val
indices['test'] = indices_test
return indices
def main():
parser = argparse.ArgumentParser(description='Semantic Segmentation General')
parser.add_argument('--dataset_path', type=str, required=True,
help='Path to dataset')
parser.add_argument('--inRasterReference', type=str, required=True,
help='Path to inRasterReference')
parser.add_argument('--output_path', type=str, required=True,
help='Path to folder where models and stats will be saved')
parser.add_argument('--batch', type=int, required=True,
help='Batch Size')
parser.add_argument('--epochs', type=int, required=True,
help='Number of epochs')
parser.add_argument('--learning_rate', type=float, required=False, default=0.001,
help='Learning rate. Default:0.001')
parser.add_argument('--network_type', type=str, required=True,
help = 'Choose network type')
parser.add_argument('--optimizer_type', type=str, required=False, default='adam',
help = 'Optimizer: adam, sgd')
parser.add_argument('--early_stop', type=int, required=True,
help='Number of epochs to activate early stop.')
parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False,
help='set fine tunning on imagenet.')
parser.add_argument('--feature_extract', type= bool, required=False, default=False,
help='Train just the classifier.')
parser.add_argument('--only_top_layers', type= str, required=False, default='True',
help='Train only the top layers (classifier).')
parser.add_argument('--ignore_zero', type= bool, required=False, default=True,
help='Ignore class 0 (background).')
parser.add_argument('--modelpath', type=str, required=False, default=False,
help='Ignore class 0 (background).')
parser.add_argument('--isRGB', type=str, required=False, default='False',
help='Ignore class 0 (background).')
parser.add_argument('--use_weight_decay', type=str, required=False, default='False',
help='Use weight_decay.')
args = parser.parse_args()
dataset_dir = args.dataset_path
inRasterReference = args.inRasterReference
out_dir = args.output_path
batch_size = args.batch
epochs = args.epochs
learning_rate = args.learning_rate
net_type = args.network_type
opt_type = args.optimizer_type
fine_tunning = args.fine_tunning_imagenet
early_stop = args.early_stop
feature_extract = args.feature_extract
only_top_layers = args.only_top_layers
ignore_zero = args.ignore_zero
modelpath = args.modelpath
isRGB = True if args.isRGB == 'True' else False
use_weight_decay = True if args.use_weight_decay == 'True' else False
print(args)
# Get classes from mask
list_classes = factory.get_classes(inRasterReference, ignore_zero)
num_classes = len(list_classes)
#Delete
base_output = os.path.join(out_dir, '2_Segmentation_BASE')
if os.path.exists(base_output):
shutil.rmtree(base_output, ignore_errors=True)
#Recreate folders
os.makedirs(base_output, exist_ok=True)
#if (not os.path.exists(out_dir)):
# os.makedirs(out_dir)
num_folds = 5
#data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',')
data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',')
#for id_fold in range(num_folds):
for id_fold in range(num_folds):
print ('.......Creating model.......')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if modelpath:
print('Loading model in: {}'.format(modelpath))
model = torch.load(modelpath)
else:
print('Creating a new model: {}'.format(net_type))
model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB)
# Use multiples GPUS
#if torch.cuda.device_count() > 1:
# print("Let's use", torch.cuda.device_count(), "GPUs!")
# batch_size = int(torch.cuda.device_count())*batch_size
# model = nn.DataParallel(model, device_ids=[0, 1])
model = model.to(device)
print(model)
print ('......Model created.......')
indices = get_indices(id_fold, data)
print ('......Creating dataloader......')
dataloaders_dict = {}
dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB)
dataloaders_dict['train'] = torch.utils.data.DataLoader(dataset, | num_workers=4,
drop_last=True)
dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB)
dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val,
batch_size=batch_size,
num_workers=4)
#Compute mean,std from training data
mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None)
dataloaders_dict['train'].dataset.transform = transform_train
dataloaders_dict['train'].dataset.mean = mean
dataloaders_dict['train'].dataset.std = std
dataloaders_dict['train'].dataset.iscomputed = True
_, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std)
dataloaders_dict['val'].dataset.transform = transform_val
dataloaders_dict['val'].dataset.mean = mean
dataloaders_dict['val'].dataset.std = std
print ('......Dataloader created......')
print(dataloaders_dict['train'].dataset.mean)
print(dataloaders_dict['train'].dataset.std)
#print(only_top_layers)
# FOr default all parameters have requires_grad = True
# So, unselect all layers from backbone if only top is needed
if only_top_layers == 'True':
print('TRAINING: ONLY TOP LAYERS')
# Freeze backbone parameters
for param in model.backbone.parameters():
param.requires_grad = False
else:
print('TRAINING FULL LAYERS')
# Show trainable layers
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Get parameters to pass to optimizer
params_to_update = model.parameters()
"""
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
"""
#self.weight_class = 1. / np.unique(np.array(self.list_labels), return_counts=True)[1]
#self.samples_weights = self.weight_class[self. list_labels]
#criterion = nn.CrossEntropyLoss(weight=class_weights)
#defining optimizer and loss
scheduler = None
if opt_type == 'adam':
optimizer = optim.Adam | batch_size=batch_size, | random_line_split |
train_folds.py | anaconda3\envs\arc105\bin'.format(homepath), \
r'{}\anaconda3\condabin'.format(homepath)]
for i in list_lib:
os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH'])
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import numpy as np
#print(np.__version__)
import argparse, copy, shutil
import random
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
#import network_factory as factory
import network_factory_refactor_speed as factory
import dataloader
import torch.nn as nn
import torch
from torch.optim.lr_scheduler import StepLR
from skimage.io import imsave
import numpy as np
import logging
# Fix seed's for reproducibility
random.seed(42)
torch.manual_seed(42)
def get_indices(id_fold, data):
# Instantiate Folds
|
#print(indices_train)
#print(indices_val)
#print(indices_test)
indices = {}
indices['train'] = indices_train
indices['val'] = indices_val
indices['test'] = indices_test
return indices
def main():
parser = argparse.ArgumentParser(description='Semantic Segmentation General')
parser.add_argument('--dataset_path', type=str, required=True,
help='Path to dataset')
parser.add_argument('--inRasterReference', type=str, required=True,
help='Path to inRasterReference')
parser.add_argument('--output_path', type=str, required=True,
help='Path to folder where models and stats will be saved')
parser.add_argument('--batch', type=int, required=True,
help='Batch Size')
parser.add_argument('--epochs', type=int, required=True,
help='Number of epochs')
parser.add_argument('--learning_rate', type=float, required=False, default=0.001,
help='Learning rate. Default:0.001')
parser.add_argument('--network_type', type=str, required=True,
help = 'Choose network type')
parser.add_argument('--optimizer_type', type=str, required=False, default='adam',
help = 'Optimizer: adam, sgd')
parser.add_argument('--early_stop', type=int, required=True,
help='Number of epochs to activate early stop.')
parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False,
help='set fine tunning on imagenet.')
parser.add_argument('--feature_extract', type= bool, required=False, default=False,
help='Train just the classifier.')
parser.add_argument('--only_top_layers', type= str, required=False, default='True',
help='Train only the top layers (classifier).')
parser.add_argument('--ignore_zero', type= bool, required=False, default=True,
help='Ignore class 0 (background).')
parser.add_argument('--modelpath', type=str, required=False, default=False,
help='Ignore class 0 (background).')
parser.add_argument('--isRGB', type=str, required=False, default='False',
help='Ignore class 0 (background).')
parser.add_argument('--use_weight_decay', type=str, required=False, default='False',
help='Use weight_decay.')
args = parser.parse_args()
dataset_dir = args.dataset_path
inRasterReference = args.inRasterReference
out_dir = args.output_path
batch_size = args.batch
epochs = args.epochs
learning_rate = args.learning_rate
net_type = args.network_type
opt_type = args.optimizer_type
fine_tunning = args.fine_tunning_imagenet
early_stop = args.early_stop
feature_extract = args.feature_extract
only_top_layers = args.only_top_layers
ignore_zero = args.ignore_zero
modelpath = args.modelpath
isRGB = True if args.isRGB == 'True' else False
use_weight_decay = True if args.use_weight_decay == 'True' else False
print(args)
# Get classes from mask
list_classes = factory.get_classes(inRasterReference, ignore_zero)
num_classes = len(list_classes)
#Delete
base_output = os.path.join(out_dir, '2_Segmentation_BASE')
if os.path.exists(base_output):
shutil.rmtree(base_output, ignore_errors=True)
#Recreate folders
os.makedirs(base_output, exist_ok=True)
#if (not os.path.exists(out_dir)):
# os.makedirs(out_dir)
num_folds = 5
#data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',')
data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',')
#for id_fold in range(num_folds):
for id_fold in range(num_folds):
print ('.......Creating model.......')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if modelpath:
print('Loading model in: {}'.format(modelpath))
model = torch.load(modelpath)
else:
print('Creating a new model: {}'.format(net_type))
model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB)
# Use multiples GPUS
#if torch.cuda.device_count() > 1:
# print("Let's use", torch.cuda.device_count(), "GPUs!")
# batch_size = int(torch.cuda.device_count())*batch_size
# model = nn.DataParallel(model, device_ids=[0, 1])
model = model.to(device)
print(model)
print ('......Model created.......')
indices = get_indices(id_fold, data)
print ('......Creating dataloader......')
dataloaders_dict = {}
dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB)
dataloaders_dict['train'] = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=4,
drop_last=True)
dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB)
dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val,
batch_size=batch_size,
num_workers=4)
#Compute mean,std from training data
mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None)
dataloaders_dict['train'].dataset.transform = transform_train
dataloaders_dict['train'].dataset.mean = mean
dataloaders_dict['train'].dataset.std = std
dataloaders_dict['train'].dataset.iscomputed = True
_, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std)
dataloaders_dict['val'].dataset.transform = transform_val
dataloaders_dict['val'].dataset.mean = mean
dataloaders_dict['val'].dataset.std = std
print ('......Dataloader created......')
print(dataloaders_dict['train'].dataset.mean)
print(dataloaders_dict['train'].dataset.std)
#print(only_top_layers)
# FOr default all parameters have requires_grad = True
# So, unselect all layers from backbone if only top is needed
if only_top_layers == 'True':
print('TRAINING: ONLY TOP LAYERS')
# Freeze backbone parameters
for param in model.backbone.parameters():
param.requires_grad = False
else:
print('TRAINING FULL LAYERS')
# Show trainable layers
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Get parameters to pass to optimizer
params_to_update = model.parameters()
"""
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
"""
#self.weight_class = 1. / np.unique(np.array(self.list_labels), return_counts=True)[1]
#self.samples_weights = self.weight_class[self. list_labels]
#criterion = nn.CrossEntropyLoss(weight=class_weights)
#defining optimizer and loss
scheduler = None
if opt_type == 'adam':
optimizer = optim.Adam | list_folds = np.array(range(5))
# Roll in axis
tmp_set = np.roll(list_folds,id_fold)
indices_train = []
indices_val = []
indices_test = []
# Train
for i in tmp_set[:3]:
indices_train += list(data[data[:,1].astype(int) == i][:,0])
# Val
for i in tmp_set[3:4]:
indices_val += list(data[data[:,1].astype(int) == i][:,0])
# Test
for i in tmp_set[4:]:
indices_test += list(data[data[:,1].astype(int) == i][:,0]) | identifier_body |
train_folds.py | anaconda3\envs\arc105\bin'.format(homepath), \
r'{}\anaconda3\condabin'.format(homepath)]
for i in list_lib:
os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH'])
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import numpy as np
#print(np.__version__)
import argparse, copy, shutil
import random
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
#import network_factory as factory
import network_factory_refactor_speed as factory
import dataloader
import torch.nn as nn
import torch
from torch.optim.lr_scheduler import StepLR
from skimage.io import imsave
import numpy as np
import logging
# Fix seed's for reproducibility
random.seed(42)
torch.manual_seed(42)
def get_indices(id_fold, data):
# Instantiate Folds
list_folds = np.array(range(5))
# Roll in axis
tmp_set = np.roll(list_folds,id_fold)
indices_train = []
indices_val = []
indices_test = []
# Train
for i in tmp_set[:3]:
indices_train += list(data[data[:,1].astype(int) == i][:,0])
# Val
for i in tmp_set[3:4]:
|
# Test
for i in tmp_set[4:]:
indices_test += list(data[data[:,1].astype(int) == i][:,0])
#print(indices_train)
#print(indices_val)
#print(indices_test)
indices = {}
indices['train'] = indices_train
indices['val'] = indices_val
indices['test'] = indices_test
return indices
def main():
parser = argparse.ArgumentParser(description='Semantic Segmentation General')
parser.add_argument('--dataset_path', type=str, required=True,
help='Path to dataset')
parser.add_argument('--inRasterReference', type=str, required=True,
help='Path to inRasterReference')
parser.add_argument('--output_path', type=str, required=True,
help='Path to folder where models and stats will be saved')
parser.add_argument('--batch', type=int, required=True,
help='Batch Size')
parser.add_argument('--epochs', type=int, required=True,
help='Number of epochs')
parser.add_argument('--learning_rate', type=float, required=False, default=0.001,
help='Learning rate. Default:0.001')
parser.add_argument('--network_type', type=str, required=True,
help = 'Choose network type')
parser.add_argument('--optimizer_type', type=str, required=False, default='adam',
help = 'Optimizer: adam, sgd')
parser.add_argument('--early_stop', type=int, required=True,
help='Number of epochs to activate early stop.')
parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False,
help='set fine tunning on imagenet.')
parser.add_argument('--feature_extract', type= bool, required=False, default=False,
help='Train just the classifier.')
parser.add_argument('--only_top_layers', type= str, required=False, default='True',
help='Train only the top layers (classifier).')
parser.add_argument('--ignore_zero', type= bool, required=False, default=True,
help='Ignore class 0 (background).')
parser.add_argument('--modelpath', type=str, required=False, default=False,
help='Ignore class 0 (background).')
parser.add_argument('--isRGB', type=str, required=False, default='False',
help='Ignore class 0 (background).')
parser.add_argument('--use_weight_decay', type=str, required=False, default='False',
help='Use weight_decay.')
args = parser.parse_args()
dataset_dir = args.dataset_path
inRasterReference = args.inRasterReference
out_dir = args.output_path
batch_size = args.batch
epochs = args.epochs
learning_rate = args.learning_rate
net_type = args.network_type
opt_type = args.optimizer_type
fine_tunning = args.fine_tunning_imagenet
early_stop = args.early_stop
feature_extract = args.feature_extract
only_top_layers = args.only_top_layers
ignore_zero = args.ignore_zero
modelpath = args.modelpath
isRGB = True if args.isRGB == 'True' else False
use_weight_decay = True if args.use_weight_decay == 'True' else False
print(args)
# Get classes from mask
list_classes = factory.get_classes(inRasterReference, ignore_zero)
num_classes = len(list_classes)
#Delete
base_output = os.path.join(out_dir, '2_Segmentation_BASE')
if os.path.exists(base_output):
shutil.rmtree(base_output, ignore_errors=True)
#Recreate folders
os.makedirs(base_output, exist_ok=True)
#if (not os.path.exists(out_dir)):
# os.makedirs(out_dir)
num_folds = 5
#data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',')
data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',')
#for id_fold in range(num_folds):
for id_fold in range(num_folds):
print ('.......Creating model.......')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if modelpath:
print('Loading model in: {}'.format(modelpath))
model = torch.load(modelpath)
else:
print('Creating a new model: {}'.format(net_type))
model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB)
# Use multiples GPUS
#if torch.cuda.device_count() > 1:
# print("Let's use", torch.cuda.device_count(), "GPUs!")
# batch_size = int(torch.cuda.device_count())*batch_size
# model = nn.DataParallel(model, device_ids=[0, 1])
model = model.to(device)
print(model)
print ('......Model created.......')
indices = get_indices(id_fold, data)
print ('......Creating dataloader......')
dataloaders_dict = {}
dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB)
dataloaders_dict['train'] = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=4,
drop_last=True)
dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB)
dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val,
batch_size=batch_size,
num_workers=4)
#Compute mean,std from training data
mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None)
dataloaders_dict['train'].dataset.transform = transform_train
dataloaders_dict['train'].dataset.mean = mean
dataloaders_dict['train'].dataset.std = std
dataloaders_dict['train'].dataset.iscomputed = True
_, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std)
dataloaders_dict['val'].dataset.transform = transform_val
dataloaders_dict['val'].dataset.mean = mean
dataloaders_dict['val'].dataset.std = std
print ('......Dataloader created......')
print(dataloaders_dict['train'].dataset.mean)
print(dataloaders_dict['train'].dataset.std)
#print(only_top_layers)
# FOr default all parameters have requires_grad = True
# So, unselect all layers from backbone if only top is needed
if only_top_layers == 'True':
print('TRAINING: ONLY TOP LAYERS')
# Freeze backbone parameters
for param in model.backbone.parameters():
param.requires_grad = False
else:
print('TRAINING FULL LAYERS')
# Show trainable layers
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Get parameters to pass to optimizer
params_to_update = model.parameters()
"""
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
"""
#self.weight_class = 1. / np.unique(np.array(self.list_labels), return_counts=True)[1]
#self.samples_weights = self.weight_class[self. list_labels]
#criterion = nn.CrossEntropyLoss(weight=class_weights)
#defining optimizer and loss
scheduler = None
if opt_type == 'adam':
optimizer = optim.Adam | indices_val += list(data[data[:,1].astype(int) == i][:,0]) | conditional_block |
train_folds.py | anaconda3\envs\arc105\bin'.format(homepath), \
r'{}\anaconda3\condabin'.format(homepath)]
for i in list_lib:
os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH'])
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import numpy as np
#print(np.__version__)
import argparse, copy, shutil
import random
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
#import network_factory as factory
import network_factory_refactor_speed as factory
import dataloader
import torch.nn as nn
import torch
from torch.optim.lr_scheduler import StepLR
from skimage.io import imsave
import numpy as np
import logging
# Fix seed's for reproducibility
random.seed(42)
torch.manual_seed(42)
def | (id_fold, data):
# Instantiate Folds
list_folds = np.array(range(5))
# Roll in axis
tmp_set = np.roll(list_folds,id_fold)
indices_train = []
indices_val = []
indices_test = []
# Train
for i in tmp_set[:3]:
indices_train += list(data[data[:,1].astype(int) == i][:,0])
# Val
for i in tmp_set[3:4]:
indices_val += list(data[data[:,1].astype(int) == i][:,0])
# Test
for i in tmp_set[4:]:
indices_test += list(data[data[:,1].astype(int) == i][:,0])
#print(indices_train)
#print(indices_val)
#print(indices_test)
indices = {}
indices['train'] = indices_train
indices['val'] = indices_val
indices['test'] = indices_test
return indices
def main():
parser = argparse.ArgumentParser(description='Semantic Segmentation General')
parser.add_argument('--dataset_path', type=str, required=True,
help='Path to dataset')
parser.add_argument('--inRasterReference', type=str, required=True,
help='Path to inRasterReference')
parser.add_argument('--output_path', type=str, required=True,
help='Path to folder where models and stats will be saved')
parser.add_argument('--batch', type=int, required=True,
help='Batch Size')
parser.add_argument('--epochs', type=int, required=True,
help='Number of epochs')
parser.add_argument('--learning_rate', type=float, required=False, default=0.001,
help='Learning rate. Default:0.001')
parser.add_argument('--network_type', type=str, required=True,
help = 'Choose network type')
parser.add_argument('--optimizer_type', type=str, required=False, default='adam',
help = 'Optimizer: adam, sgd')
parser.add_argument('--early_stop', type=int, required=True,
help='Number of epochs to activate early stop.')
parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False,
help='set fine tunning on imagenet.')
parser.add_argument('--feature_extract', type= bool, required=False, default=False,
help='Train just the classifier.')
parser.add_argument('--only_top_layers', type= str, required=False, default='True',
help='Train only the top layers (classifier).')
parser.add_argument('--ignore_zero', type= bool, required=False, default=True,
help='Ignore class 0 (background).')
parser.add_argument('--modelpath', type=str, required=False, default=False,
help='Ignore class 0 (background).')
parser.add_argument('--isRGB', type=str, required=False, default='False',
help='Ignore class 0 (background).')
parser.add_argument('--use_weight_decay', type=str, required=False, default='False',
help='Use weight_decay.')
args = parser.parse_args()
dataset_dir = args.dataset_path
inRasterReference = args.inRasterReference
out_dir = args.output_path
batch_size = args.batch
epochs = args.epochs
learning_rate = args.learning_rate
net_type = args.network_type
opt_type = args.optimizer_type
fine_tunning = args.fine_tunning_imagenet
early_stop = args.early_stop
feature_extract = args.feature_extract
only_top_layers = args.only_top_layers
ignore_zero = args.ignore_zero
modelpath = args.modelpath
isRGB = True if args.isRGB == 'True' else False
use_weight_decay = True if args.use_weight_decay == 'True' else False
print(args)
# Get classes from mask
list_classes = factory.get_classes(inRasterReference, ignore_zero)
num_classes = len(list_classes)
#Delete
base_output = os.path.join(out_dir, '2_Segmentation_BASE')
if os.path.exists(base_output):
shutil.rmtree(base_output, ignore_errors=True)
#Recreate folders
os.makedirs(base_output, exist_ok=True)
#if (not os.path.exists(out_dir)):
# os.makedirs(out_dir)
num_folds = 5
#data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',')
data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',')
#for id_fold in range(num_folds):
for id_fold in range(num_folds):
print ('.......Creating model.......')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if modelpath:
print('Loading model in: {}'.format(modelpath))
model = torch.load(modelpath)
else:
print('Creating a new model: {}'.format(net_type))
model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB)
# Use multiples GPUS
#if torch.cuda.device_count() > 1:
# print("Let's use", torch.cuda.device_count(), "GPUs!")
# batch_size = int(torch.cuda.device_count())*batch_size
# model = nn.DataParallel(model, device_ids=[0, 1])
model = model.to(device)
print(model)
print ('......Model created.......')
indices = get_indices(id_fold, data)
print ('......Creating dataloader......')
dataloaders_dict = {}
dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB)
dataloaders_dict['train'] = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=4,
drop_last=True)
dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB)
dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val,
batch_size=batch_size,
num_workers=4)
#Compute mean,std from training data
mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None)
dataloaders_dict['train'].dataset.transform = transform_train
dataloaders_dict['train'].dataset.mean = mean
dataloaders_dict['train'].dataset.std = std
dataloaders_dict['train'].dataset.iscomputed = True
_, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std)
dataloaders_dict['val'].dataset.transform = transform_val
dataloaders_dict['val'].dataset.mean = mean
dataloaders_dict['val'].dataset.std = std
print ('......Dataloader created......')
print(dataloaders_dict['train'].dataset.mean)
print(dataloaders_dict['train'].dataset.std)
#print(only_top_layers)
# FOr default all parameters have requires_grad = True
# So, unselect all layers from backbone if only top is needed
if only_top_layers == 'True':
print('TRAINING: ONLY TOP LAYERS')
# Freeze backbone parameters
for param in model.backbone.parameters():
param.requires_grad = False
else:
print('TRAINING FULL LAYERS')
# Show trainable layers
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Get parameters to pass to optimizer
params_to_update = model.parameters()
"""
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model.named_parameters():
if param.requires_grad == True:
print("\t",name)
"""
#self.weight_class = 1. / np.unique(np.array(self.list_labels), return_counts=True)[1]
#self.samples_weights = self.weight_class[self. list_labels]
#criterion = nn.CrossEntropyLoss(weight=class_weights)
#defining optimizer and loss
scheduler = None
if opt_type == 'adam':
optimizer = optim | get_indices | identifier_name |
backtrace.rs | ;
use intrinsics;
use io;
use libc;
use mem;
use path::Path;
use ptr;
use str;
use sync::{StaticMutex, MUTEX_INIT};
use sys_common::backtrace::*;
#[allow(non_snake_case)]
extern "system" {
fn GetCurrentProcess() -> libc::HANDLE;
fn GetCurrentThread() -> libc::HANDLE;
fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
}
type SymFromAddrFn =
extern "system" fn(libc::HANDLE, u64, *mut u64,
*mut SYMBOL_INFO) -> libc::BOOL;
type SymInitializeFn =
extern "system" fn(libc::HANDLE, *mut libc::c_void,
libc::BOOL) -> libc::BOOL;
type SymCleanupFn =
extern "system" fn(libc::HANDLE) -> libc::BOOL;
type StackWalk64Fn =
extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
*mut STACKFRAME64, *mut arch::CONTEXT,
*mut libc::c_void, *mut libc::c_void,
*mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
const MAX_SYM_NAME: usize = 2000;
const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
#[repr(C)]
struct SYMBOL_INFO {
SizeOfStruct: libc::c_ulong,
TypeIndex: libc::c_ulong,
Reserved: [u64; 2],
Index: libc::c_ulong,
Size: libc::c_ulong,
ModBase: u64,
Flags: libc::c_ulong,
Value: u64,
Address: u64,
Register: libc::c_ulong,
Scope: libc::c_ulong,
Tag: libc::c_ulong,
NameLen: libc::c_ulong,
MaxNameLen: libc::c_ulong,
// note that windows has this as 1, but it basically just means that
// the name is inline at the end of the struct. For us, we just bump
// the struct size up to MAX_SYM_NAME.
Name: [libc::c_char; MAX_SYM_NAME],
}
#[repr(C)]
enum ADDRESS_MODE {
AddrMode1616,
AddrMode1632,
AddrModeReal,
AddrModeFlat,
}
struct | {
Offset: u64,
Segment: u16,
Mode: ADDRESS_MODE,
}
pub struct STACKFRAME64 {
AddrPC: ADDRESS64,
AddrReturn: ADDRESS64,
AddrFrame: ADDRESS64,
AddrStack: ADDRESS64,
AddrBStore: ADDRESS64,
FuncTableEntry: *mut libc::c_void,
Params: [u64; 4],
Far: libc::BOOL,
Virtual: libc::BOOL,
Reserved: [u64; 3],
KdHelp: KDHELP64,
}
struct KDHELP64 {
Thread: u64,
ThCallbackStack: libc::DWORD,
ThCallbackBStore: libc::DWORD,
NextCallback: libc::DWORD,
FramePointer: libc::DWORD,
KiCallUserMode: u64,
KeUserCallbackDispatcher: u64,
SystemRangeStart: u64,
KiUserExceptionDispatcher: u64,
StackBase: u64,
StackLimit: u64,
Reserved: [u64; 5],
}
#[cfg(target_arch = "x86")]
mod arch {
use libc;
const MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
#[repr(C)]
pub struct CONTEXT {
ContextFlags: libc::DWORD,
Dr0: libc::DWORD,
Dr1: libc::DWORD,
Dr2: libc::DWORD,
Dr3: libc::DWORD,
Dr6: libc::DWORD,
Dr7: libc::DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: libc::DWORD,
SegFs: libc::DWORD,
SegEs: libc::DWORD,
SegDs: libc::DWORD,
Edi: libc::DWORD,
Esi: libc::DWORD,
Ebx: libc::DWORD,
Edx: libc::DWORD,
Ecx: libc::DWORD,
Eax: libc::DWORD,
Ebp: libc::DWORD,
Eip: libc::DWORD,
SegCs: libc::DWORD,
EFlags: libc::DWORD,
Esp: libc::DWORD,
SegSs: libc::DWORD,
ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION],
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
ControlWord: libc::DWORD,
StatusWord: libc::DWORD,
TagWord: libc::DWORD,
ErrorOffset: libc::DWORD,
ErrorSelector: libc::DWORD,
DataOffset: libc::DWORD,
DataSelector: libc::DWORD,
RegisterArea: [u8; 80],
Cr0NpxState: libc::DWORD,
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> libc::DWORD {
frame.AddrPC.Offset = ctx.Eip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Esp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Ebp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_I386
}
}
#[cfg(target_arch = "x86_64")]
mod arch {
use libc::{c_longlong, c_ulonglong};
use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
use simd;
#[repr(C)]
pub struct CONTEXT {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
P1Home: DWORDLONG,
P2Home: DWORDLONG,
P3Home: DWORDLONG,
P4Home: DWORDLONG,
P5Home: DWORDLONG,
P6Home: DWORDLONG,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORDLONG,
Dr1: DWORDLONG,
Dr2: DWORDLONG,
Dr3: DWORDLONG,
Dr6: DWORDLONG,
Dr7: DWORDLONG,
Rax: DWORDLONG,
Rcx: DWORDLONG,
Rdx: DWORDLONG,
Rbx: DWORDLONG,
Rsp: DWORDLONG,
Rbp: DWORDLONG,
Rsi: DWORDLONG,
Rdi: DWORDLONG,
R8: DWORDLONG,
R9: DWORDLONG,
R10: DWORDLONG,
R11: DWORDLONG,
R12: DWORDLONG,
R13: DWORDLONG,
R14: DWORDLONG,
R15: DWORDLONG,
Rip: DWORDLONG,
FltSave: FLOATING_SAVE_AREA,
VectorRegister: [M128A; 26],
VectorControl: DWORDLONG,
DebugControl: DWORDLONG,
LastBranchToRip: DWORDLONG,
LastBranchFromRip: DWORDLONG,
LastExceptionToRip: DWORDLONG,
LastExceptionFromRip: DWORDLONG,
}
#[repr(C)]
pub struct M128A {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
Low: c_ulonglong,
High: c_longlong
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
_Dummy: [u8; 512] // FIXME: Fill this out
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> DWORD {
frame.AddrPC.Offset = ctx.Rip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Rsp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Rbp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_AMD64
}
}
struct Cleanup {
handle: libc::HANDLE,
SymCleanup: SymCleanupFn,
}
impl Drop for Cleanup {
fn drop(&mut self) { (self.SymCleanup)(self.handle); }
}
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = MUTEX | ADDRESS64 | identifier_name |
backtrace.rs | ;
use intrinsics;
use io;
use libc;
use mem;
use path::Path;
use ptr;
use str;
use sync::{StaticMutex, MUTEX_INIT};
use sys_common::backtrace::*;
#[allow(non_snake_case)]
extern "system" {
fn GetCurrentProcess() -> libc::HANDLE;
fn GetCurrentThread() -> libc::HANDLE;
fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
}
type SymFromAddrFn =
extern "system" fn(libc::HANDLE, u64, *mut u64, | type SymInitializeFn =
extern "system" fn(libc::HANDLE, *mut libc::c_void,
libc::BOOL) -> libc::BOOL;
type SymCleanupFn =
extern "system" fn(libc::HANDLE) -> libc::BOOL;
type StackWalk64Fn =
extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
*mut STACKFRAME64, *mut arch::CONTEXT,
*mut libc::c_void, *mut libc::c_void,
*mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
const MAX_SYM_NAME: usize = 2000;
const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
#[repr(C)]
struct SYMBOL_INFO {
SizeOfStruct: libc::c_ulong,
TypeIndex: libc::c_ulong,
Reserved: [u64; 2],
Index: libc::c_ulong,
Size: libc::c_ulong,
ModBase: u64,
Flags: libc::c_ulong,
Value: u64,
Address: u64,
Register: libc::c_ulong,
Scope: libc::c_ulong,
Tag: libc::c_ulong,
NameLen: libc::c_ulong,
MaxNameLen: libc::c_ulong,
// note that windows has this as 1, but it basically just means that
// the name is inline at the end of the struct. For us, we just bump
// the struct size up to MAX_SYM_NAME.
Name: [libc::c_char; MAX_SYM_NAME],
}
#[repr(C)]
enum ADDRESS_MODE {
AddrMode1616,
AddrMode1632,
AddrModeReal,
AddrModeFlat,
}
struct ADDRESS64 {
Offset: u64,
Segment: u16,
Mode: ADDRESS_MODE,
}
pub struct STACKFRAME64 {
AddrPC: ADDRESS64,
AddrReturn: ADDRESS64,
AddrFrame: ADDRESS64,
AddrStack: ADDRESS64,
AddrBStore: ADDRESS64,
FuncTableEntry: *mut libc::c_void,
Params: [u64; 4],
Far: libc::BOOL,
Virtual: libc::BOOL,
Reserved: [u64; 3],
KdHelp: KDHELP64,
}
struct KDHELP64 {
Thread: u64,
ThCallbackStack: libc::DWORD,
ThCallbackBStore: libc::DWORD,
NextCallback: libc::DWORD,
FramePointer: libc::DWORD,
KiCallUserMode: u64,
KeUserCallbackDispatcher: u64,
SystemRangeStart: u64,
KiUserExceptionDispatcher: u64,
StackBase: u64,
StackLimit: u64,
Reserved: [u64; 5],
}
#[cfg(target_arch = "x86")]
mod arch {
use libc;
const MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
#[repr(C)]
pub struct CONTEXT {
ContextFlags: libc::DWORD,
Dr0: libc::DWORD,
Dr1: libc::DWORD,
Dr2: libc::DWORD,
Dr3: libc::DWORD,
Dr6: libc::DWORD,
Dr7: libc::DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: libc::DWORD,
SegFs: libc::DWORD,
SegEs: libc::DWORD,
SegDs: libc::DWORD,
Edi: libc::DWORD,
Esi: libc::DWORD,
Ebx: libc::DWORD,
Edx: libc::DWORD,
Ecx: libc::DWORD,
Eax: libc::DWORD,
Ebp: libc::DWORD,
Eip: libc::DWORD,
SegCs: libc::DWORD,
EFlags: libc::DWORD,
Esp: libc::DWORD,
SegSs: libc::DWORD,
ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION],
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
ControlWord: libc::DWORD,
StatusWord: libc::DWORD,
TagWord: libc::DWORD,
ErrorOffset: libc::DWORD,
ErrorSelector: libc::DWORD,
DataOffset: libc::DWORD,
DataSelector: libc::DWORD,
RegisterArea: [u8; 80],
Cr0NpxState: libc::DWORD,
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> libc::DWORD {
frame.AddrPC.Offset = ctx.Eip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Esp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Ebp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_I386
}
}
#[cfg(target_arch = "x86_64")]
mod arch {
use libc::{c_longlong, c_ulonglong};
use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
use simd;
#[repr(C)]
pub struct CONTEXT {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
P1Home: DWORDLONG,
P2Home: DWORDLONG,
P3Home: DWORDLONG,
P4Home: DWORDLONG,
P5Home: DWORDLONG,
P6Home: DWORDLONG,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORDLONG,
Dr1: DWORDLONG,
Dr2: DWORDLONG,
Dr3: DWORDLONG,
Dr6: DWORDLONG,
Dr7: DWORDLONG,
Rax: DWORDLONG,
Rcx: DWORDLONG,
Rdx: DWORDLONG,
Rbx: DWORDLONG,
Rsp: DWORDLONG,
Rbp: DWORDLONG,
Rsi: DWORDLONG,
Rdi: DWORDLONG,
R8: DWORDLONG,
R9: DWORDLONG,
R10: DWORDLONG,
R11: DWORDLONG,
R12: DWORDLONG,
R13: DWORDLONG,
R14: DWORDLONG,
R15: DWORDLONG,
Rip: DWORDLONG,
FltSave: FLOATING_SAVE_AREA,
VectorRegister: [M128A; 26],
VectorControl: DWORDLONG,
DebugControl: DWORDLONG,
LastBranchToRip: DWORDLONG,
LastBranchFromRip: DWORDLONG,
LastExceptionToRip: DWORDLONG,
LastExceptionFromRip: DWORDLONG,
}
#[repr(C)]
pub struct M128A {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
Low: c_ulonglong,
High: c_longlong
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
_Dummy: [u8; 512] // FIXME: Fill this out
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> DWORD {
frame.AddrPC.Offset = ctx.Rip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Rsp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Rbp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_AMD64
}
}
struct Cleanup {
handle: libc::HANDLE,
SymCleanup: SymCleanupFn,
}
impl Drop for Cleanup {
fn drop(&mut self) { (self.SymCleanup)(self.handle); }
}
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = MUTEX_INIT | *mut SYMBOL_INFO) -> libc::BOOL; | random_line_split |
backtrace.rs | size up to MAX_SYM_NAME.
Name: [libc::c_char; MAX_SYM_NAME],
}
#[repr(C)]
enum ADDRESS_MODE {
AddrMode1616,
AddrMode1632,
AddrModeReal,
AddrModeFlat,
}
struct ADDRESS64 {
Offset: u64,
Segment: u16,
Mode: ADDRESS_MODE,
}
pub struct STACKFRAME64 {
AddrPC: ADDRESS64,
AddrReturn: ADDRESS64,
AddrFrame: ADDRESS64,
AddrStack: ADDRESS64,
AddrBStore: ADDRESS64,
FuncTableEntry: *mut libc::c_void,
Params: [u64; 4],
Far: libc::BOOL,
Virtual: libc::BOOL,
Reserved: [u64; 3],
KdHelp: KDHELP64,
}
struct KDHELP64 {
Thread: u64,
ThCallbackStack: libc::DWORD,
ThCallbackBStore: libc::DWORD,
NextCallback: libc::DWORD,
FramePointer: libc::DWORD,
KiCallUserMode: u64,
KeUserCallbackDispatcher: u64,
SystemRangeStart: u64,
KiUserExceptionDispatcher: u64,
StackBase: u64,
StackLimit: u64,
Reserved: [u64; 5],
}
#[cfg(target_arch = "x86")]
mod arch {
use libc;
const MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
#[repr(C)]
pub struct CONTEXT {
ContextFlags: libc::DWORD,
Dr0: libc::DWORD,
Dr1: libc::DWORD,
Dr2: libc::DWORD,
Dr3: libc::DWORD,
Dr6: libc::DWORD,
Dr7: libc::DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: libc::DWORD,
SegFs: libc::DWORD,
SegEs: libc::DWORD,
SegDs: libc::DWORD,
Edi: libc::DWORD,
Esi: libc::DWORD,
Ebx: libc::DWORD,
Edx: libc::DWORD,
Ecx: libc::DWORD,
Eax: libc::DWORD,
Ebp: libc::DWORD,
Eip: libc::DWORD,
SegCs: libc::DWORD,
EFlags: libc::DWORD,
Esp: libc::DWORD,
SegSs: libc::DWORD,
ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION],
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
ControlWord: libc::DWORD,
StatusWord: libc::DWORD,
TagWord: libc::DWORD,
ErrorOffset: libc::DWORD,
ErrorSelector: libc::DWORD,
DataOffset: libc::DWORD,
DataSelector: libc::DWORD,
RegisterArea: [u8; 80],
Cr0NpxState: libc::DWORD,
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> libc::DWORD {
frame.AddrPC.Offset = ctx.Eip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Esp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Ebp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_I386
}
}
#[cfg(target_arch = "x86_64")]
mod arch {
use libc::{c_longlong, c_ulonglong};
use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
use simd;
#[repr(C)]
pub struct CONTEXT {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
P1Home: DWORDLONG,
P2Home: DWORDLONG,
P3Home: DWORDLONG,
P4Home: DWORDLONG,
P5Home: DWORDLONG,
P6Home: DWORDLONG,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORDLONG,
Dr1: DWORDLONG,
Dr2: DWORDLONG,
Dr3: DWORDLONG,
Dr6: DWORDLONG,
Dr7: DWORDLONG,
Rax: DWORDLONG,
Rcx: DWORDLONG,
Rdx: DWORDLONG,
Rbx: DWORDLONG,
Rsp: DWORDLONG,
Rbp: DWORDLONG,
Rsi: DWORDLONG,
Rdi: DWORDLONG,
R8: DWORDLONG,
R9: DWORDLONG,
R10: DWORDLONG,
R11: DWORDLONG,
R12: DWORDLONG,
R13: DWORDLONG,
R14: DWORDLONG,
R15: DWORDLONG,
Rip: DWORDLONG,
FltSave: FLOATING_SAVE_AREA,
VectorRegister: [M128A; 26],
VectorControl: DWORDLONG,
DebugControl: DWORDLONG,
LastBranchToRip: DWORDLONG,
LastBranchFromRip: DWORDLONG,
LastExceptionToRip: DWORDLONG,
LastExceptionFromRip: DWORDLONG,
}
#[repr(C)]
pub struct M128A {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
Low: c_ulonglong,
High: c_longlong
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
_Dummy: [u8; 512] // FIXME: Fill this out
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> DWORD {
frame.AddrPC.Offset = ctx.Rip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Rsp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Rbp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_AMD64
}
}
struct Cleanup {
handle: libc::HANDLE,
SymCleanup: SymCleanupFn,
}
impl Drop for Cleanup {
fn drop(&mut self) { (self.SymCleanup)(self.handle); }
}
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = MUTEX_INIT;
let _g = LOCK.lock();
// Open up dbghelp.dll, we don't link to it explicitly because it can't
// always be found. Additionally, it's nice having fewer dependencies.
let path = Path::new("dbghelp.dll");
let lib = match DynamicLibrary::open(Some(&path)) {
Ok(lib) => lib,
Err(..) => return Ok(()),
};
macro_rules! sym{ ($e:expr, $t:ident) => (unsafe {
match lib.symbol($e) {
Ok(f) => mem::transmute::<*mut u8, $t>(f),
Err(..) => return Ok(())
}
}) }
// Fetch the symbols necessary from dbghelp.dll
let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
let SymInitialize = sym!("SymInitialize", SymInitializeFn);
let SymCleanup = sym!("SymCleanup", SymCleanupFn);
let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
// Allocate necessary structures for doing the stack walk
let process = unsafe { GetCurrentProcess() };
let thread = unsafe { GetCurrentThread() };
let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
unsafe { RtlCaptureContext(&mut context); }
let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
let image = arch::init_frame(&mut frame, &context);
// Initialize this process's symbols
let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE);
if ret != libc::TRUE { return Ok(()) }
let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
// And now that we're done with all the setup, do the stack walking!
let mut i = 0;
try!(write!(w, "stack backtrace:\n"));
while StackWalk64(image, process, thread, &mut frame, &mut context,
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut()) == libc::TRUE{
let addr = frame.AddrPC.Offset;
if addr == frame.AddrReturn.Offset || addr == 0 ||
frame.AddrReturn.Offset == 0 | { break } | conditional_block | |
platform_types.rs | (ref cp1), Ok(ref cp2)) if cp1 == cp2 => {
Equal
}
_ => {
p1.cmp(p2)
}
}
}
(Path(_), Scratch(_)) => {
Less
}
(Scratch(_), Path(_)) => {
Greater
}
(Scratch(n1), Scratch(n2)) => {
n1.cmp(n2)
}
}
});
impl BufferName {
#[must_use]
pub fn get_extension_or_empty(&self) -> &str {
use BufferName::*;
match self {
Path(p) => {
p.extension()
.and_then(std::ffi::OsStr::to_str)
.unwrap_or("")
},
Scratch(..) => "",
}
}
#[must_use]
pub fn size_in_bytes(&self) -> usize {
use core::mem;
// TODO Do other platforms need adjusting as well?
#[cfg(target_os = "windows")]
const BYTES_PER_UNIT: usize = 2;
#[cfg(not(target_os = "windows"))]
const BYTES_PER_UNIT: usize = 1;
match self {
Self::Path(p) => {
mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT
},
Self::Scratch(n) => mem::size_of_val(n),
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum CursorState {
None,
PressedAgainstWall(Move),
}
d!(for CursorState: CursorState::None);
fmt_debug!(for CursorState: s in "{}", match s {
CursorState::None => std::borrow::Cow::Borrowed("_"),
CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move))
});
ord!(for CursorState: state, other in {
use std::cmp::Ordering::*;
match (state, other) {
(CursorState::None, CursorState::None) => Equal,
(CursorState::None, CursorState::PressedAgainstWall(_)) => Less,
(CursorState::PressedAgainstWall(_), CursorState::None) => Greater,
(CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => {
m1.cmp(m2)
}
}
});
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct CursorView {
pub position: Position,
pub state: CursorState,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StatusLineView {
pub chars: String,
}
pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected.";
d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()});
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MenuMode {
Hidden,
FileSwitcher,
FindReplace(FindReplaceMode),
GoToPosition,
}
d!(for MenuMode: MenuMode::Hidden);
#[derive(Clone, Debug, PartialEq)]
pub enum MenuView {
None,
FileSwitcher(FileSwitcherView),
FindReplace(FindReplaceView),
GoToPosition(GoToPositionView)
}
d!(for MenuView: MenuView::None);
impl MenuView {
#[must_use]
pub fn get_mode(&self) -> MenuMode {
match self {
Self::None => MenuMode::Hidden,
Self::FileSwitcher(_) => MenuMode::FileSwitcher,
Self::FindReplace(v) => MenuMode::FindReplace(v.mode),
Self::GoToPosition(_) => MenuMode::GoToPosition,
}
}
}
#[must_use]
pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool {
u!{MenuMode}
match (kind, menu_mode) {
// We want this to be true for `Text` always since it would be completely
// reasonable behaviour for a different client to always show the text
// buffers.
(BufferIdKind::Text, _)
| (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_))
| (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher)
| (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true,
_ => {
false
},
}
}
pub type FileSwitcherResults = Vec<PathBuf>;
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FileSwitcherView {
pub search: BufferViewData,
pub results: FileSwitcherResults,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FindReplaceMode {
CurrentFile,
}
d!(for FindReplaceMode: FindReplaceMode::CurrentFile);
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FindReplaceView {
pub mode: FindReplaceMode,
pub find: BufferViewData,
pub replace: BufferViewData,
pub result_count: usize,
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct GoToPositionView {
pub go_to_position: BufferViewData,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EditedTransition {
ToEdited,
ToUnedited,
}
pub type IndexedEditedTransition = (g_i::Index, EditedTransition);
#[derive(Clone, Default, Debug, PartialEq, Eq)]
pub struct EditedTransitions(Vec<IndexedEditedTransition>);
impl EditedTransitions {
pub fn push(&mut self, iet: IndexedEditedTransition) {
self.0.push(iet);
}
pub fn clear(&mut self) {
self.0.clear();
}
#[must_use]
pub fn len(&self) -> usize {
self.0.len()
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn iter(&self) -> impl Iterator<Item = &IndexedEditedTransition> {
self.0.iter()
}
}
impl IntoIterator for EditedTransitions {
type Item = IndexedEditedTransition;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
#[derive(Clone, Default, PartialEq)]
pub struct BufferLabel {
pub name: BufferName,
/// Having an owned version of the result of `name.to_string()` simplifies
/// ownership in some cases.
// TODO this could be truncated to a fixed length/on the stack
pub name_string: String,
}
fmt_debug!(collapse default for BufferLabel: me {
blank_if_default!(name);
blank_if_default!(name_string, me.name_string.is_empty());
});
// This could arguably be ToOwned.
impl From<&BufferName> for BufferLabel {
fn from(name: &BufferName) -> Self {
Self {
name: name.clone(),
name_string: name.to_string(),
}
}
}
impl From<BufferName> for BufferLabel {
fn from(name: BufferName) -> Self {
let name_string = name.to_string();
Self {
name,
name_string,
}
}
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct View {
pub buffers: SelectableVec1<BufferLabel>,
pub menu: MenuView,
pub status_line: StatusLineView,
pub current_buffer_kind: BufferIdKind,
pub edited_transitions: EditedTransitions,
pub stats: ViewStats,
}
impl View {
#[must_use]
/// returns the currently visible editor buffer index.
pub fn current_text_index(&self) -> g_i::Index {
self.buffers.current_index()
}
#[must_use]
/// returns the currently visible editor buffer view's index and label.
pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) {
(
self.buffers.current_index(),
self.buffers.get_current_element()
)
}
#[must_use]
pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> {
self.buffers.get(index)
}
#[must_use]
pub fn current_buffer_id(&self) -> BufferId {
b_id!(
self.current_buffer_kind,
self.buffers.current_index()
)
}
#[must_use]
/// returns the selected menu's cursors if there is a menu containing a buffer
/// currently visible, or the current text buffer's cursors if not.
pub fn get_selected_cursors(&self) -> Option<&[CursorView]> {
use BufferIdKind::*;
match self.current_buffer_kind {
// Seems like we never actually need to access the Text buffer
// cursors here. If we want to later, then some additional restructuring
// will be needed, at least according to the comment this comment
// replaced. commmit `680d9507`
None | Text => Option::None,
Find => match &self.menu {
MenuView::FindReplace(ref fr) => Some(&fr.find),
_ => Option::None,
},
Replace => match &self.menu {
MenuView::FindReplace(ref fr) => Some(&fr.replace),
_ => Option::None,
},
FileSwitcher => match &self.menu { | MenuView::FileSwitcher(ref fs) => Some(&fs.search),
_ => Option::None, | random_line_split | |
platform_types.rs | other.into();
k.cmp(&o)
});
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HighlightKind {
User,
Result,
CurrentResult,
}
d!(for HighlightKind: HighlightKind::User);
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Highlight {
pub min: Position,
pub max: Position,
pub kind: HighlightKind,
}
impl Highlight {
#[must_use]
pub fn new((p1, p2): (Position, Position), kind: HighlightKind) -> Self {
Highlight {
min: std::cmp::min(p1, p2),
max: std::cmp::max(p1, p2),
kind,
}
}
#[must_use]
pub fn get(&self) -> (Position, Position) {
(self.min, self.max)
}
}
#[macro_export]
macro_rules! highlight {
(l $min_line:literal o $min_offset:literal l $max_line:literal o $max_offset:literal ) => {
Highlight::new(
(
Position {
line: $min_line,
offset: CharOffset($min_offset),
},
Position {
line: $max_line,
offset: CharOffset($max_offset),
},
),
d!()
)
};
(l $min_line:literal o $min_offset:literal l $max_line:literal o max ) => {
highlight!(l $min_line o $min_offset l $max_line o 0xFFFF_FFFF__FFFF_FFFF)
};
}
pub fn push_highlights<O: Into<Option<Position>>>(
highlights: &mut Vec<Highlight>,
position: Position,
highlight_position: O,
kind: HighlightKind,
) {
match highlight_position.into() {
Some(h) if h != position => {
let min = std::cmp::min(position, h);
let max = std::cmp::max(position, h);
if min.line == max.line {
highlights.push(Highlight::new((min, max), kind));
return;
}
// This early return is merely an optimization from three rectangles to two.
// TODO Is this optimization actually worth it? The sticky cursor offset does make this
// more likely than it would otherwise be.
if min.offset != 0 && min.offset == max.offset {
// [|_______________________|]
// ^min_middle max_middle^
let min_middle = min.line + if min.offset == 0 { 0 } else { 1 };
// Since We know the lines must be different, we know `max.line > 0`
let max_middle = max.line - 1;
let offset = min.offset;
highlights.push(Highlight::new(
(
Position {
offset,
line: min.line,
},
Position {
offset: CharOffset::max_value(),
line: max_middle,
},
),
kind,
));
highlights.push(Highlight::new(
(
Position {
offset: CharOffset(0),
line: min_middle,
},
Position {
offset,
line: max.line,
},
),
kind,
));
return;
}
if min.offset != 0 {
highlights.push(Highlight::new(
(
min,
Position {
offset: CharOffset::max_value(),
..min
},
),
kind,
));
}
let min_middle = min.line + if min.offset == 0 { 0 } else { 1 };
// Since We know the lines must be different, we know `max.line > 0`
let max_middle = max.line - 1;
if min_middle <= max_middle {
highlights.push(Highlight::new(
(
Position {
offset: CharOffset(0),
line: min_middle,
},
Position {
offset: CharOffset::max_value(),
line: max_middle,
},
),
kind,
));
}
if max.offset != 0 {
highlights.push(Highlight::new(
(
Position {
offset: CharOffset(0),
..max
},
max,
),
kind,
));
}
}
_ => {}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum BufferName {
Path(PathBuf),
Scratch(u32),
}
d!(for BufferName: BufferName::Scratch(d!()));
fmt_display!(for BufferName: name in "{}",
match name {
BufferName::Path(p) => p
.file_name()
.map_or_else(
|| "?Unknown Path?".to_string(),
|s| s.to_string_lossy().into_owned()
),
BufferName::Scratch(n) => format!("*scratch {}*", n),
}
);
ord!(for BufferName: name, other in {
use BufferName::*;
use std::cmp::Ordering::*;
match (name, other) {
(Path(p1), Path(p2)) => {
match (p1.canonicalize(), p2.canonicalize() ) {
(Ok(ref cp1), Ok(ref cp2)) if cp1 == cp2 => {
Equal
}
_ => {
p1.cmp(p2)
}
}
}
(Path(_), Scratch(_)) => {
Less
}
(Scratch(_), Path(_)) => {
Greater
}
(Scratch(n1), Scratch(n2)) => {
n1.cmp(n2)
}
}
});
impl BufferName {
#[must_use]
pub fn get_extension_or_empty(&self) -> &str {
use BufferName::*;
match self {
Path(p) => {
p.extension()
.and_then(std::ffi::OsStr::to_str)
.unwrap_or("")
},
Scratch(..) => "",
}
}
#[must_use]
pub fn size_in_bytes(&self) -> usize {
use core::mem;
// TODO Do other platforms need adjusting as well?
#[cfg(target_os = "windows")]
const BYTES_PER_UNIT: usize = 2;
#[cfg(not(target_os = "windows"))]
const BYTES_PER_UNIT: usize = 1;
match self {
Self::Path(p) => {
mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT
},
Self::Scratch(n) => mem::size_of_val(n),
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum CursorState {
None,
PressedAgainstWall(Move),
}
d!(for CursorState: CursorState::None);
fmt_debug!(for CursorState: s in "{}", match s {
CursorState::None => std::borrow::Cow::Borrowed("_"),
CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move))
});
ord!(for CursorState: state, other in {
use std::cmp::Ordering::*;
match (state, other) {
(CursorState::None, CursorState::None) => Equal,
(CursorState::None, CursorState::PressedAgainstWall(_)) => Less,
(CursorState::PressedAgainstWall(_), CursorState::None) => Greater,
(CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => {
m1.cmp(m2)
}
}
});
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct CursorView {
pub position: Position,
pub state: CursorState,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StatusLineView {
pub chars: String,
}
pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected.";
d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()});
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MenuMode {
Hidden,
FileSwitcher,
FindReplace(FindReplaceMode),
GoToPosition,
}
d!(for MenuMode: MenuMode::Hidden);
#[derive(Clone, Debug, PartialEq)]
pub enum MenuView {
None,
FileSwitcher(FileSwitcherView),
FindReplace(FindReplaceView),
GoToPosition(GoToPositionView)
}
d!(for MenuView: MenuView::None);
impl MenuView {
#[must_use]
pub fn | (&self) -> MenuMode {
match self {
Self::None => MenuMode::Hidden,
Self::FileSwitcher(_) => MenuMode::FileSwitcher,
Self::FindReplace(v) => MenuMode::FindReplace(v.mode),
Self::GoToPosition(_) => MenuMode::GoToPosition,
}
}
}
#[must_use]
pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool {
u!{MenuMode}
match (kind, menu_mode) {
// We want this to be true for `Text` always since it would be completely
// reasonable behaviour for a different client to always show the text
// buffers.
(BufferIdKind::Text, _)
| (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_))
| (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher)
| (BufferIdKind::GoToPosition, MenuMode:: | get_mode | identifier_name |
platform_types.rs | ), Ok(ref cp2)) if cp1 == cp2 => {
Equal
}
_ => {
p1.cmp(p2)
}
}
}
(Path(_), Scratch(_)) => {
Less
}
(Scratch(_), Path(_)) => {
Greater
}
(Scratch(n1), Scratch(n2)) => {
n1.cmp(n2)
}
}
});
impl BufferName {
#[must_use]
pub fn get_extension_or_empty(&self) -> &str {
use BufferName::*;
match self {
Path(p) => {
p.extension()
.and_then(std::ffi::OsStr::to_str)
.unwrap_or("")
},
Scratch(..) => "",
}
}
#[must_use]
pub fn size_in_bytes(&self) -> usize {
use core::mem;
// TODO Do other platforms need adjusting as well?
#[cfg(target_os = "windows")]
const BYTES_PER_UNIT: usize = 2;
#[cfg(not(target_os = "windows"))]
const BYTES_PER_UNIT: usize = 1;
match self {
Self::Path(p) => {
mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT
},
Self::Scratch(n) => mem::size_of_val(n),
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum CursorState {
None,
PressedAgainstWall(Move),
}
d!(for CursorState: CursorState::None);
fmt_debug!(for CursorState: s in "{}", match s {
CursorState::None => std::borrow::Cow::Borrowed("_"),
CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move))
});
ord!(for CursorState: state, other in {
use std::cmp::Ordering::*;
match (state, other) {
(CursorState::None, CursorState::None) => Equal,
(CursorState::None, CursorState::PressedAgainstWall(_)) => Less,
(CursorState::PressedAgainstWall(_), CursorState::None) => Greater,
(CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => {
m1.cmp(m2)
}
}
});
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct CursorView {
pub position: Position,
pub state: CursorState,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StatusLineView {
pub chars: String,
}
pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected.";
d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()});
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MenuMode {
Hidden,
FileSwitcher,
FindReplace(FindReplaceMode),
GoToPosition,
}
d!(for MenuMode: MenuMode::Hidden);
#[derive(Clone, Debug, PartialEq)]
pub enum MenuView {
None,
FileSwitcher(FileSwitcherView),
FindReplace(FindReplaceView),
GoToPosition(GoToPositionView)
}
d!(for MenuView: MenuView::None);
impl MenuView {
#[must_use]
pub fn get_mode(&self) -> MenuMode {
match self {
Self::None => MenuMode::Hidden,
Self::FileSwitcher(_) => MenuMode::FileSwitcher,
Self::FindReplace(v) => MenuMode::FindReplace(v.mode),
Self::GoToPosition(_) => MenuMode::GoToPosition,
}
}
}
#[must_use]
pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool {
u!{MenuMode}
match (kind, menu_mode) {
// We want this to be true for `Text` always since it would be completely
// reasonable behaviour for a different client to always show the text
// buffers.
(BufferIdKind::Text, _)
| (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_))
| (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher)
| (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true,
_ => {
false
},
}
}
pub type FileSwitcherResults = Vec<PathBuf>;
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FileSwitcherView {
pub search: BufferViewData,
pub results: FileSwitcherResults,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FindReplaceMode {
CurrentFile,
}
d!(for FindReplaceMode: FindReplaceMode::CurrentFile);
#[derive(Clone, Default, Debug, PartialEq)]
pub struct FindReplaceView {
pub mode: FindReplaceMode,
pub find: BufferViewData,
pub replace: BufferViewData,
pub result_count: usize,
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct GoToPositionView {
pub go_to_position: BufferViewData,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EditedTransition {
ToEdited,
ToUnedited,
}
pub type IndexedEditedTransition = (g_i::Index, EditedTransition);
#[derive(Clone, Default, Debug, PartialEq, Eq)]
pub struct EditedTransitions(Vec<IndexedEditedTransition>);
impl EditedTransitions {
pub fn push(&mut self, iet: IndexedEditedTransition) {
self.0.push(iet);
}
pub fn clear(&mut self) {
self.0.clear();
}
#[must_use]
pub fn len(&self) -> usize {
self.0.len()
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn iter(&self) -> impl Iterator<Item = &IndexedEditedTransition> {
self.0.iter()
}
}
impl IntoIterator for EditedTransitions {
type Item = IndexedEditedTransition;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
#[derive(Clone, Default, PartialEq)]
pub struct BufferLabel {
pub name: BufferName,
/// Having an owned version of the result of `name.to_string()` simplifies
/// ownership in some cases.
// TODO this could be truncated to a fixed length/on the stack
pub name_string: String,
}
fmt_debug!(collapse default for BufferLabel: me {
blank_if_default!(name);
blank_if_default!(name_string, me.name_string.is_empty());
});
// This could arguably be ToOwned.
impl From<&BufferName> for BufferLabel {
fn from(name: &BufferName) -> Self {
Self {
name: name.clone(),
name_string: name.to_string(),
}
}
}
impl From<BufferName> for BufferLabel {
fn from(name: BufferName) -> Self {
let name_string = name.to_string();
Self {
name,
name_string,
}
}
}
#[derive(Clone, Default, Debug, PartialEq)]
pub struct View {
pub buffers: SelectableVec1<BufferLabel>,
pub menu: MenuView,
pub status_line: StatusLineView,
pub current_buffer_kind: BufferIdKind,
pub edited_transitions: EditedTransitions,
pub stats: ViewStats,
}
impl View {
#[must_use]
/// returns the currently visible editor buffer index.
pub fn current_text_index(&self) -> g_i::Index {
self.buffers.current_index()
}
#[must_use]
/// returns the currently visible editor buffer view's index and label.
pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) {
(
self.buffers.current_index(),
self.buffers.get_current_element()
)
}
#[must_use]
pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> {
self.buffers.get(index)
}
#[must_use]
pub fn current_buffer_id(&self) -> BufferId {
b_id!(
self.current_buffer_kind,
self.buffers.current_index()
)
}
#[must_use]
/// returns the selected menu's cursors if there is a menu containing a buffer
/// currently visible, or the current text buffer's cursors if not.
pub fn get_selected_cursors(&self) -> Option<&[CursorView]> | {
use BufferIdKind::*;
match self.current_buffer_kind {
// Seems like we never actually need to access the Text buffer
// cursors here. If we want to later, then some additional restructuring
// will be needed, at least according to the comment this comment
// replaced. commmit `680d9507`
None | Text => Option::None,
Find => match &self.menu {
MenuView::FindReplace(ref fr) => Some(&fr.find),
_ => Option::None,
},
Replace => match &self.menu {
MenuView::FindReplace(ref fr) => Some(&fr.replace),
_ => Option::None,
},
FileSwitcher => match &self.menu {
MenuView::FileSwitcher(ref fs) => Some(&fs.search),
_ => Option::None,
}, | identifier_body | |
pvc-clone-controller.go | ")
}
if err := addCloneToken(dataVolume, pvc); err != nil {
return err
}
sourceNamespace := dataVolume.Spec.Source.PVC.Namespace
if sourceNamespace == "" {
sourceNamespace = dataVolume.Namespace
}
pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name
return nil
}
func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) {
syncState, err := r.syncClone(log, req)
if err == nil {
err = r.syncUpdate(log, &syncState)
}
return syncState.dvSyncResult, err
}
func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) {
syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare)
if syncErr != nil || syncRes.result != nil {
return syncRes, syncErr
}
pvc := syncRes.pvc
pvcSpec := syncRes.pvcSpec
datavolume := syncRes.dvMutated
pvcPopulated := pvcIsPopulated(pvc, datavolume)
staticProvisionPending := checkStaticProvisionPending(pvc, datavolume)
prePopulated := dvIsPrePopulated(datavolume)
if pvcPopulated || prePopulated || staticProvisionPending {
return syncRes, nil
}
if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil {
return syncRes, err
} else if addedToken {
// make sure token gets persisted before doing anything else
return syncRes, nil
}
if pvc == nil {
// Check if source PVC exists and do proper validation before attempting to clone
if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil {
return syncRes, err
} else if !done {
return syncRes, nil
}
// Always call detect size, it will handle the case where size is specified
// and detection pod not necessary
if datavolume.Spec.Storage != nil {
done, err := r.detectCloneSize(&syncRes)
if err != nil {
return syncRes, err
} else if !done {
// Check if the source PVC is ready to be cloned
if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil {
return syncRes, err
} else if !readyToClone {
if syncRes.result == nil {
syncRes.result = &reconcile.Result{}
}
syncRes.result.RequeueAfter = sourceInUseRequeueDuration
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
return syncRes, nil
}
}
pvcModifier := r.updateAnnotations
if syncRes.usePopulator {
if isCrossNamespaceClone(datavolume) {
if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) {
cc.AddFinalizer(datavolume, crossNamespaceFinalizer)
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
}
pvcModifier = r.updatePVCForPopulation
}
newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier)
if err != nil {
if cc.ErrQuotaExceeded(err) {
syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil,
Event{
eventType: corev1.EventTypeWarning,
reason: cc.ErrExceededQuota,
message: err.Error(),
})
if syncErr != nil {
log.Error(syncErr, "failed to sync DataVolume status with event")
}
}
return syncRes, err
}
pvc = newPvc
}
if syncRes.usePopulator {
if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil {
return syncRes, err
}
ct, ok := pvc.Annotations[cc.AnnCloneType]
if ok {
cc.AddAnnotation(datavolume, cc.AnnCloneType, ct)
}
} else {
cc.AddAnnotation(datavolume, cc.AnnCloneType, string(cdiv1.CloneStrategyHostAssisted))
if err := r.fallbackToHostAssisted(pvc); err != nil {
return syncRes, err
}
}
if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil {
return syncRes, err
}
return syncRes, syncErr
}
// Verify that the source PVC has been completely populated.
func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) {
sourcePvc := &corev1.PersistentVolumeClaim{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil {
return false, err
}
return cc.IsPopulated(sourcePvc, r.client)
}
func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) {
pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false)
if err != nil {
return false, err
}
for _, pod := range pods {
r.log.V(1).Info("Cannot snapshot",
"namespace", dv.Namespace, "name", dv.Name, "pod namespace", pod.Namespace, "pod name", pod.Name)
r.recorder.Eventf(dv, corev1.EventTypeWarning, eventReason,
"pod %s/%s using PersistentVolumeClaim %s", pod.Namespace, pod.Name, dv.Spec.Source.PVC.Name)
}
return len(pods) > 0, nil
}
func (r *PvcCloneReconciler) findSourcePvc(dataVolume *cdiv1.DataVolume) (*corev1.PersistentVolumeClaim, error) {
sourcePvcSpec := dataVolume.Spec.Source.PVC
if sourcePvcSpec == nil {
return nil, errors.New("no source PVC provided")
}
// Find source PVC
sourcePvcNs := sourcePvcSpec.Namespace
if sourcePvcNs == "" {
sourcePvcNs = dataVolume.Namespace
}
pvc := &corev1.PersistentVolumeClaim{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: sourcePvcNs, Name: sourcePvcSpec.Name}, pvc); err != nil {
if k8serrors.IsNotFound(err) {
r.log.V(3).Info("Source PVC is missing", "source namespace", sourcePvcSpec.Namespace, "source name", sourcePvcSpec.Name)
}
return nil, err
}
return pvc, nil
}
// validateCloneAndSourcePVC checks if the source PVC of a clone exists and does proper validation
func (r *PvcCloneReconciler) validateCloneAndSourcePVC(syncState *dvSyncState, log logr.Logger) (bool, error) {
datavolume := syncState.dvMutated
sourcePvc, err := r.findSourcePvc(datavolume)
if err != nil {
// Clone without source
if k8serrors.IsNotFound(err) {
syncErr := r.syncDataVolumeStatusPhaseWithEvent(syncState, datavolume.Status.Phase, nil,
Event{
eventType: corev1.EventTypeWarning,
reason: CloneWithoutSource,
message: fmt.Sprintf(MessageCloneWithoutSource, "pvc", datavolume.Spec.Source.PVC.Name),
})
if syncErr != nil {
log.Error(syncErr, "failed to sync DataVolume status with event")
}
return false, nil
}
return false, err
}
err = cc.ValidateClone(sourcePvc, &datavolume.Spec)
if err != nil {
r.recorder.Event(datavolume, corev1.EventTypeWarning, CloneValidationFailed, MessageCloneValidationFailed)
return false, err
}
return true, nil
}
// isSourceReadyToClone handles the reconciling process of a clone when the source PVC is not ready
func (r *PvcCloneReconciler) isSourceReadyToClone(datavolume *cdiv1.DataVolume) (bool, error) | {
// TODO preper const
eventReason := "CloneSourceInUse"
// Check if any pods are using the source PVC
inUse, err := r.sourceInUse(datavolume, eventReason)
if err != nil {
return false, err
}
// Check if the source PVC is fully populated
populated, err := r.isSourcePVCPopulated(datavolume)
if err != nil {
return false, err
}
if inUse || !populated {
return false, nil
}
return true, nil | identifier_body | |
pvc-clone-controller.go | error {
dv := syncState.dvMutated
if err := r.populateSourceIfSourceRef(dv); err != nil {
return err
}
return nil
}
func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error {
dv := syncState.dvMutated
if err := r.populateSourceIfSourceRef(dv); err != nil {
return err
}
if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded {
return nil
}
return r.reconcileVolumeCloneSourceCR(syncState)
}
func | (dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
// first clear out tokens that may have already been added
delete(pvc.Annotations, cc.AnnCloneToken)
delete(pvc.Annotations, cc.AnnExtendedCloneToken)
if isCrossNamespaceClone(dv) {
// only want this initially
// extended token is added later
token, ok := dv.Annotations[cc.AnnCloneToken]
if !ok {
return errors.Errorf("no clone token")
}
cc.AddAnnotation(pvc, cc.AnnCloneToken, token)
}
return nil
}
func volumeCloneSourceName(dv *cdiv1.DataVolume) string {
return fmt.Sprintf("%s-%s", volumeCloneSourcePrefix, dv.UID)
}
func (r *PvcCloneReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
if dataVolume.Spec.Source.PVC == nil {
return errors.Errorf("no source set for clone datavolume")
}
if err := addCloneToken(dataVolume, pvc); err != nil {
return err
}
sourceNamespace := dataVolume.Spec.Source.PVC.Namespace
if sourceNamespace == "" {
sourceNamespace = dataVolume.Namespace
}
pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name
return nil
}
func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) {
syncState, err := r.syncClone(log, req)
if err == nil {
err = r.syncUpdate(log, &syncState)
}
return syncState.dvSyncResult, err
}
func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) {
syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare)
if syncErr != nil || syncRes.result != nil {
return syncRes, syncErr
}
pvc := syncRes.pvc
pvcSpec := syncRes.pvcSpec
datavolume := syncRes.dvMutated
pvcPopulated := pvcIsPopulated(pvc, datavolume)
staticProvisionPending := checkStaticProvisionPending(pvc, datavolume)
prePopulated := dvIsPrePopulated(datavolume)
if pvcPopulated || prePopulated || staticProvisionPending {
return syncRes, nil
}
if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil {
return syncRes, err
} else if addedToken {
// make sure token gets persisted before doing anything else
return syncRes, nil
}
if pvc == nil {
// Check if source PVC exists and do proper validation before attempting to clone
if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil {
return syncRes, err
} else if !done {
return syncRes, nil
}
// Always call detect size, it will handle the case where size is specified
// and detection pod not necessary
if datavolume.Spec.Storage != nil {
done, err := r.detectCloneSize(&syncRes)
if err != nil {
return syncRes, err
} else if !done {
// Check if the source PVC is ready to be cloned
if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil {
return syncRes, err
} else if !readyToClone {
if syncRes.result == nil {
syncRes.result = &reconcile.Result{}
}
syncRes.result.RequeueAfter = sourceInUseRequeueDuration
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
return syncRes, nil
}
}
pvcModifier := r.updateAnnotations
if syncRes.usePopulator {
if isCrossNamespaceClone(datavolume) {
if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) {
cc.AddFinalizer(datavolume, crossNamespaceFinalizer)
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
}
pvcModifier = r.updatePVCForPopulation
}
newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier)
if err != nil {
if cc.ErrQuotaExceeded(err) {
syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil,
Event{
eventType: corev1.EventTypeWarning,
reason: cc.ErrExceededQuota,
message: err.Error(),
})
if syncErr != nil {
log.Error(syncErr, "failed to sync DataVolume status with event")
}
}
return syncRes, err
}
pvc = newPvc
}
if syncRes.usePopulator {
if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil {
return syncRes, err
}
ct, ok := pvc.Annotations[cc.AnnCloneType]
if ok {
cc.AddAnnotation(datavolume, cc.AnnCloneType, ct)
}
} else {
cc.AddAnnotation(datavolume, cc.AnnCloneType, string(cdiv1.CloneStrategyHostAssisted))
if err := r.fallbackToHostAssisted(pvc); err != nil {
return syncRes, err
}
}
if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil {
return syncRes, err
}
return syncRes, syncErr
}
// Verify that the source PVC has been completely populated.
func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) {
sourcePvc := &corev1.PersistentVolumeClaim{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil {
return false, err
}
return cc.IsPopulated(sourcePvc, r.client)
}
func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) {
pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false)
if err != nil {
return false, err
}
for _, pod := range pods {
r.log.V(1).Info("Cannot snapshot",
"namespace", dv.Namespace, "name", dv.Name, "pod namespace", pod.Namespace, "pod name", pod.Name)
r.recorder.Eventf(dv, corev1.EventTypeWarning, eventReason,
"pod %s/%s using PersistentVolumeClaim %s", pod.Namespace, pod.Name, dv.Spec.Source.PVC.Name)
}
return len(pods) > 0, nil
}
func (r *PvcCloneReconciler) findSourcePvc(dataVolume *cdiv1.DataVolume) (*corev1.PersistentVolumeClaim, error) {
sourcePvcSpec := dataVolume.Spec.Source.PVC
if sourcePvcSpec == nil {
return nil, errors.New("no source PVC provided")
}
// Find source PVC
sourcePvcNs := sourcePvcSpec.Namespace
if sourcePvcNs == "" {
sourcePvcNs = dataVolume.Namespace
}
pvc := &corev1.PersistentVolumeClaim{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: sourcePvcNs, Name: sourcePvcSpec.Name}, pvc); err != nil {
if k8serrors.IsNotFound(err) {
r.log.V(3).Info("Source PVC is missing", "source namespace", sourcePvcSpec.Namespace, "source name", sourcePvcSpec.Name)
}
return nil, err
}
return pvc, nil
}
// validateCloneAndSourcePVC checks if the source PVC of a clone exists and does proper validation
func (r *PvcCloneReconciler) validateCloneAndSourcePVC(syncState *dvSyncState, log logr.Logger) (bool, error) {
datavolume := syncState.dvMutated
sourcePvc, err := r.findSourcePvc(datavolume)
if err != nil {
// Clone without source
| addCloneToken | identifier_name |
pvc-clone-controller.go | getKey := func(namespace, name string) string {
return namespace + "/" + name
}
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &cdiv1.DataVolume{}, dvDataSourceField, func(obj client.Object) []string {
if sourceRef := obj.(*cdiv1.DataVolume).Spec.SourceRef; sourceRef != nil && sourceRef.Kind == cdiv1.DataVolumeDataSource {
ns := obj.GetNamespace()
if sourceRef.Namespace != nil && *sourceRef.Namespace != "" {
ns = *sourceRef.Namespace
}
return []string{getKey(ns, sourceRef.Name)}
}
return nil
}); err != nil {
return err
}
mapToDataVolume := func(obj client.Object) (reqs []reconcile.Request) {
var dvs cdiv1.DataVolumeList
matchingFields := client.MatchingFields{dvDataSourceField: getKey(obj.GetNamespace(), obj.GetName())}
if err := mgr.GetClient().List(context.TODO(), &dvs, matchingFields); err != nil {
c.GetLogger().Error(err, "Unable to list DataVolumes", "matchingFields", matchingFields)
return
}
for _, dv := range dvs.Items {
reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}})
}
return
}
if err := c.Watch(&source.Kind{Type: &cdiv1.DataSource{}},
handler.EnqueueRequestsFromMapFunc(mapToDataVolume),
); err != nil {
return err
}
return nil
}
// Reconcile loop for the clone data volumes
func (r *PvcCloneReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
return r.reconcile(ctx, req, r)
}
func (r *PvcCloneReconciler) prepare(syncState *dvSyncState) error {
dv := syncState.dvMutated
if err := r.populateSourceIfSourceRef(dv); err != nil {
return err
}
return nil
}
func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error {
dv := syncState.dvMutated
if err := r.populateSourceIfSourceRef(dv); err != nil {
return err
}
if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded {
return nil
}
return r.reconcileVolumeCloneSourceCR(syncState)
}
func addCloneToken(dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
// first clear out tokens that may have already been added
delete(pvc.Annotations, cc.AnnCloneToken)
delete(pvc.Annotations, cc.AnnExtendedCloneToken)
if isCrossNamespaceClone(dv) {
// only want this initially
// extended token is added later
token, ok := dv.Annotations[cc.AnnCloneToken]
if !ok {
return errors.Errorf("no clone token")
}
cc.AddAnnotation(pvc, cc.AnnCloneToken, token)
}
return nil
}
func volumeCloneSourceName(dv *cdiv1.DataVolume) string {
return fmt.Sprintf("%s-%s", volumeCloneSourcePrefix, dv.UID)
}
func (r *PvcCloneReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
if dataVolume.Spec.Source.PVC == nil {
return errors.Errorf("no source set for clone datavolume")
}
if err := addCloneToken(dataVolume, pvc); err != nil {
return err
}
sourceNamespace := dataVolume.Spec.Source.PVC.Namespace
if sourceNamespace == "" {
sourceNamespace = dataVolume.Namespace
}
pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name
return nil
}
func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) {
syncState, err := r.syncClone(log, req)
if err == nil {
err = r.syncUpdate(log, &syncState)
}
return syncState.dvSyncResult, err
}
func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) {
syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare)
if syncErr != nil || syncRes.result != nil {
return syncRes, syncErr
}
pvc := syncRes.pvc
pvcSpec := syncRes.pvcSpec
datavolume := syncRes.dvMutated
pvcPopulated := pvcIsPopulated(pvc, datavolume)
staticProvisionPending := checkStaticProvisionPending(pvc, datavolume)
prePopulated := dvIsPrePopulated(datavolume)
if pvcPopulated || prePopulated || staticProvisionPending {
return syncRes, nil
}
if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil {
return syncRes, err
} else if addedToken {
// make sure token gets persisted before doing anything else
return syncRes, nil
}
if pvc == nil {
// Check if source PVC exists and do proper validation before attempting to clone
if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil {
return syncRes, err
} else if !done {
return syncRes, nil
}
// Always call detect size, it will handle the case where size is specified
// and detection pod not necessary
if datavolume.Spec.Storage != nil {
done, err := r.detectCloneSize(&syncRes)
if err != nil {
return syncRes, err
} else if !done {
// Check if the source PVC is ready to be cloned
if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil {
return syncRes, err
} else if !readyToClone {
if syncRes.result == nil {
syncRes.result = &reconcile.Result{}
}
syncRes.result.RequeueAfter = sourceInUseRequeueDuration
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
return syncRes, nil
}
}
pvcModifier := r.updateAnnotations
if syncRes.usePopulator {
if isCrossNamespaceClone(datavolume) {
if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) {
cc.AddFinalizer(datavolume, crossNamespaceFinalizer)
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
}
pvcModifier = r.updatePVCForPopulation
}
newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier)
if err != nil {
if cc.ErrQuotaExceeded(err) {
syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil,
Event{
eventType: corev1.EventTypeWarning,
reason: cc.ErrExceededQuota,
message: err.Error(),
})
if syncErr != nil {
log.Error(syncErr, "failed to sync DataVolume status with event")
}
}
return syncRes, err
}
pvc = newPvc
}
if syncRes.usePopulator {
if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil {
return syncRes, err
}
ct, ok := pvc.Annotations[cc.AnnCloneType]
if ok {
cc.AddAnnotation(datavolume, cc.AnnCloneType, ct)
}
} else {
cc.AddAnnotation(datavolume, cc.AnnCloneType, string(cdiv1.CloneStrategyHostAssisted))
if err := r.fallbackToHostAssisted(pvc); err != nil {
return syncRes, err
}
}
if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil {
return syncRes, err
}
return syncRes, syncErr
}
// Verify that the source PVC has been completely populated.
func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) {
sourcePvc := &corev1.PersistentVolumeClaim{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil {
return false, err
}
return cc.IsPopulated(sourcePvc, r.client)
}
func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) {
pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false)
if err != nil {
| random_line_split | ||
pvc-clone-controller.go | {
dv := syncState.dvMutated
if err := r.populateSourceIfSourceRef(dv); err != nil {
return err
}
return nil
}
func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error {
dv := syncState.dvMutated
if err := r.populateSourceIfSourceRef(dv); err != nil {
return err
}
if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded {
return nil
}
return r.reconcileVolumeCloneSourceCR(syncState)
}
func addCloneToken(dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
// first clear out tokens that may have already been added
delete(pvc.Annotations, cc.AnnCloneToken)
delete(pvc.Annotations, cc.AnnExtendedCloneToken)
if isCrossNamespaceClone(dv) {
// only want this initially
// extended token is added later
token, ok := dv.Annotations[cc.AnnCloneToken]
if !ok {
return errors.Errorf("no clone token")
}
cc.AddAnnotation(pvc, cc.AnnCloneToken, token)
}
return nil
}
func volumeCloneSourceName(dv *cdiv1.DataVolume) string {
return fmt.Sprintf("%s-%s", volumeCloneSourcePrefix, dv.UID)
}
func (r *PvcCloneReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
if dataVolume.Spec.Source.PVC == nil {
return errors.Errorf("no source set for clone datavolume")
}
if err := addCloneToken(dataVolume, pvc); err != nil {
return err
}
sourceNamespace := dataVolume.Spec.Source.PVC.Namespace
if sourceNamespace == "" {
sourceNamespace = dataVolume.Namespace
}
pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name
return nil
}
func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) {
syncState, err := r.syncClone(log, req)
if err == nil {
err = r.syncUpdate(log, &syncState)
}
return syncState.dvSyncResult, err
}
func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) {
syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare)
if syncErr != nil || syncRes.result != nil {
return syncRes, syncErr
}
pvc := syncRes.pvc
pvcSpec := syncRes.pvcSpec
datavolume := syncRes.dvMutated
pvcPopulated := pvcIsPopulated(pvc, datavolume)
staticProvisionPending := checkStaticProvisionPending(pvc, datavolume)
prePopulated := dvIsPrePopulated(datavolume)
if pvcPopulated || prePopulated || staticProvisionPending {
return syncRes, nil
}
if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil {
return syncRes, err
} else if addedToken {
// make sure token gets persisted before doing anything else
return syncRes, nil
}
if pvc == nil {
// Check if source PVC exists and do proper validation before attempting to clone
if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil {
return syncRes, err
} else if !done {
return syncRes, nil
}
// Always call detect size, it will handle the case where size is specified
// and detection pod not necessary
if datavolume.Spec.Storage != nil {
done, err := r.detectCloneSize(&syncRes)
if err != nil {
return syncRes, err
} else if !done {
// Check if the source PVC is ready to be cloned
if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil {
return syncRes, err
} else if !readyToClone {
if syncRes.result == nil {
syncRes.result = &reconcile.Result{}
}
syncRes.result.RequeueAfter = sourceInUseRequeueDuration
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
return syncRes, nil
}
}
pvcModifier := r.updateAnnotations
if syncRes.usePopulator {
if isCrossNamespaceClone(datavolume) {
if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) {
cc.AddFinalizer(datavolume, crossNamespaceFinalizer)
return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
}
}
pvcModifier = r.updatePVCForPopulation
}
newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier)
if err != nil |
pvc = newPvc
}
if syncRes.usePopulator {
if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil {
return syncRes, err
}
ct, ok := pvc.Annotations[cc.AnnCloneType]
if ok {
cc.AddAnnotation(datavolume, cc.AnnCloneType, ct)
}
} else {
cc.AddAnnotation(datavolume, cc.AnnCloneType, string(cdiv1.CloneStrategyHostAssisted))
if err := r.fallbackToHostAssisted(pvc); err != nil {
return syncRes, err
}
}
if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil {
return syncRes, err
}
return syncRes, syncErr
}
// Verify that the source PVC has been completely populated.
func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) {
sourcePvc := &corev1.PersistentVolumeClaim{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil {
return false, err
}
return cc.IsPopulated(sourcePvc, r.client)
}
func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) {
pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false)
if err != nil {
return false, err
}
for _, pod := range pods {
r.log.V(1).Info("Cannot snapshot",
"namespace", dv.Namespace, "name", dv.Name, "pod namespace", pod.Namespace, "pod name", pod.Name)
r.recorder.Eventf(dv, corev1.EventTypeWarning, eventReason,
"pod %s/%s using PersistentVolumeClaim %s", pod.Namespace, pod.Name, dv.Spec.Source.PVC.Name)
}
return len(pods) > 0, nil
}
func (r *PvcCloneReconciler) findSourcePvc(dataVolume *cdiv1.DataVolume) (*corev1.PersistentVolumeClaim, error) {
sourcePvcSpec := dataVolume.Spec.Source.PVC
if sourcePvcSpec == nil {
return nil, errors.New("no source PVC provided")
}
// Find source PVC
sourcePvcNs := sourcePvcSpec.Namespace
if sourcePvcNs == "" {
sourcePvcNs = dataVolume.Namespace
}
pvc := &corev1.PersistentVolumeClaim{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: sourcePvcNs, Name: sourcePvcSpec.Name}, pvc); err != nil {
if k8serrors.IsNotFound(err) {
r.log.V(3).Info("Source PVC is missing", "source namespace", sourcePvcSpec.Namespace, "source name", sourcePvcSpec.Name)
}
return nil, err
}
return pvc, nil
}
// validateCloneAndSourcePVC checks if the source PVC of a clone exists and does proper validation
func (r *PvcCloneReconciler) validateCloneAndSourcePVC(syncState *dvSyncState, log logr.Logger) (bool, error) {
datavolume := syncState.dvMutated
sourcePvc, err := r.findSourcePvc(datavolume)
if err != nil {
// Clone without source
| {
if cc.ErrQuotaExceeded(err) {
syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil,
Event{
eventType: corev1.EventTypeWarning,
reason: cc.ErrExceededQuota,
message: err.Error(),
})
if syncErr != nil {
log.Error(syncErr, "failed to sync DataVolume status with event")
}
}
return syncRes, err
} | conditional_block |
migration.ts | const existingImport = findImportSpecifier(node.elements, oldImport);
if (!existingImport) {
throw new Error(`Could not find an import to replace using ${oldImport}.`);
}
return ts.updateNamedImports(node, [
...node.elements.filter(current => current !== existingImport),
// Create a new import while trying to preserve the alias of the old one.
ts.createImportSpecifier(
existingImport.propertyName ? ts.createIdentifier(newImport) : undefined,
existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport))
]);
}
/**
* Migrates a function call expression from `Renderer` to `Renderer2`.
* Returns null if the expression should be dropped.
*/
export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker):
{node: ts.Node|null, requiredHelpers?: HelperFunction[]} {
if (isPropertyAccessCallExpression(node)) {
switch (node.expression.name.getText()) {
case 'setElementProperty':
return {node: renameMethodCall(node, 'setProperty')};
case 'setText':
return {node: renameMethodCall(node, 'setValue')};
case 'listenGlobal':
return {node: renameMethodCall(node, 'listen')};
case 'selectRootElement':
return {node: migrateSelectRootElement(node)};
case 'setElementClass':
return {node: migrateSetElementClass(node)};
case 'setElementStyle':
return {node: migrateSetElementStyle(node, typeChecker)};
case 'invokeElementMethod':
return {node: migrateInvokeElementMethod(node)};
case 'setBindingDebugInfo':
return {node: null};
case 'createViewRoot':
return {node: migrateCreateViewRoot(node)};
case 'setElementAttribute':
return {
node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments),
requiredHelpers: [
HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute
]
};
case 'createElement':
return {
node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)),
requiredHelpers:
[HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement]
};
case 'createText':
return {
node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)),
requiredHelpers: [HelperFunction.any, HelperFunction.createText]
};
case 'createTemplateAnchor':
return {
node: switchToHelperCall(
node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)),
requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor]
};
case 'projectNodes':
return {
node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes]
};
case 'animate':
return {
node: migrateAnimateCall(),
requiredHelpers: [HelperFunction.any, HelperFunction.animate]
};
case 'destroyView':
return {
node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]),
requiredHelpers: [HelperFunction.any, HelperFunction.destroyView]
};
case 'detachView':
return {
node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]),
requiredHelpers: [HelperFunction.any, HelperFunction.detachView]
};
case 'attachViewAfter':
return {
node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter]
};
}
}
return {node};
}
/** Checks whether a node is a PropertyAccessExpression. */
function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression {
return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression);
}
/** Renames a method call while keeping all of the parameters in place. */
function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression {
const newExpression = ts.updatePropertyAccess(
node.expression, node.expression.expression, ts.createIdentifier(newName));
return ts.updateCall(node, newExpression, node.typeArguments, node.arguments);
}
/**
* Migrates a `selectRootElement` call by removing the last argument which is no longer supported.
*/
function migrateSelectRootElement(node: ts.CallExpression): ts.Node {
// The only thing we need to do is to drop the last argument
// (`debugInfo`), if the consumer was passing it in.
if (node.arguments.length > 1) {
return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]);
}
return node;
}
/**
* Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or
* to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`.
*/
function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node {
// Clone so we don't mutate by accident. Note that we assume that
// the user's code is providing all three required arguments.
const outputMethodArgs = node.arguments.slice();
const isAddArgument = outputMethodArgs.pop()!;
const createRendererCall = (isAdd: boolean) => {
const innerExpression = node.expression.expression;
const topExpression =
ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass');
return ts.createCall(topExpression, [], node.arguments.slice(0, 2));
};
// If the call has the `isAdd` argument as a literal boolean, we can map it directly to
// `addClass` or `removeClass`. Note that we can't use the type checker here, because it
// won't tell us whether the value resolves to true or false.
if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword ||
isAddArgument.kind === ts.SyntaxKind.FalseKeyword) {
return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword);
}
// Otherwise create a ternary on the variable.
return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false));
}
/**
* Migrates a call to `setElementStyle` call either to a call to
* `setStyle` or `removeStyle`. or to an expression like
* `value == null ? removeStyle(el, key) : setStyle(el, key, value)`.
*/
function migrateSetElementStyle(
node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node {
const args = node.arguments;
const addMethodName = 'setStyle';
const removeMethodName = 'removeStyle';
const lastArgType = args[2] ?
typeChecker.typeToString(
typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) :
null;
// Note that for a literal null, TS considers it a `NullKeyword`,
// whereas a literal `undefined` is just an Identifier.
if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') {
// If we've got a call with two arguments, or one with three arguments where the last one is
// `undefined` or `null`, we can safely switch to a `removeStyle` call.
const innerExpression = node.expression.expression;
const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName);
return ts.createCall(topExpression, [], args.slice(0, 2));
} else if (args.length === 3) {
// We need the checks for string literals, because the type of something
// like `"blue"` is the literal `blue`, not `string`.
if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) ||
ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) {
// If we've got three arguments and the last one is a string literal or a number, we
// can safely rename to `setStyle`.
return renameMethodCall(node, addMethodName);
} else {
// Otherwise migrate to a ternary that looks like:
// `value == null ? removeStyle(el, key) : setStyle(el, key, value)`
const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull());
const whenNullCall = renameMethodCall(
ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression,
removeMethodName);
return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName));
}
}
return node;
}
/**
* Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to
* `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`.
*/
function | (node: ts.CallExpression): ts.Node {
const [target, name, args] = node.arguments;
const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name);
const isArgsStatic = !args || ts.isArrayLiteralExpression(args);
if (isNameStatic && isArgsStatic) {
// If the name is a static string and the arguments are an array literal,
// we can safely convert the node | migrateInvokeElementMethod | identifier_name |
migration.ts | const existingImport = findImportSpecifier(node.elements, oldImport);
if (!existingImport) {
throw new Error(`Could not find an import to replace using ${oldImport}.`);
}
return ts.updateNamedImports(node, [
...node.elements.filter(current => current !== existingImport),
// Create a new import while trying to preserve the alias of the old one.
ts.createImportSpecifier(
existingImport.propertyName ? ts.createIdentifier(newImport) : undefined,
existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport))
]);
}
/**
* Migrates a function call expression from `Renderer` to `Renderer2`.
* Returns null if the expression should be dropped.
*/
export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker):
{node: ts.Node|null, requiredHelpers?: HelperFunction[]} {
if (isPropertyAccessCallExpression(node)) {
switch (node.expression.name.getText()) {
case 'setElementProperty':
return {node: renameMethodCall(node, 'setProperty')};
case 'setText':
return {node: renameMethodCall(node, 'setValue')};
case 'listenGlobal':
return {node: renameMethodCall(node, 'listen')};
case 'selectRootElement':
return {node: migrateSelectRootElement(node)};
case 'setElementClass':
return {node: migrateSetElementClass(node)};
case 'setElementStyle':
return {node: migrateSetElementStyle(node, typeChecker)};
case 'invokeElementMethod':
return {node: migrateInvokeElementMethod(node)};
case 'setBindingDebugInfo':
return {node: null};
case 'createViewRoot':
return {node: migrateCreateViewRoot(node)};
case 'setElementAttribute':
return {
node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments),
requiredHelpers: [
HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute
]
};
case 'createElement':
return {
node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)),
requiredHelpers:
[HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement]
};
case 'createText':
return {
node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)),
requiredHelpers: [HelperFunction.any, HelperFunction.createText]
};
case 'createTemplateAnchor':
return {
node: switchToHelperCall(
node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)),
requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor]
};
case 'projectNodes':
return {
node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes]
};
case 'animate':
return {
node: migrateAnimateCall(),
requiredHelpers: [HelperFunction.any, HelperFunction.animate]
};
case 'destroyView':
return {
node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]),
requiredHelpers: [HelperFunction.any, HelperFunction.destroyView]
};
case 'detachView':
return {
node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]),
requiredHelpers: [HelperFunction.any, HelperFunction.detachView]
};
case 'attachViewAfter':
return {
node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter]
};
}
}
return {node};
}
/** Checks whether a node is a PropertyAccessExpression. */
function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression {
return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression);
}
/** Renames a method call while keeping all of the parameters in place. */
function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression {
const newExpression = ts.updatePropertyAccess(
node.expression, node.expression.expression, ts.createIdentifier(newName));
return ts.updateCall(node, newExpression, node.typeArguments, node.arguments);
}
/**
* Migrates a `selectRootElement` call by removing the last argument which is no longer supported.
*/
function migrateSelectRootElement(node: ts.CallExpression): ts.Node {
// The only thing we need to do is to drop the last argument
// (`debugInfo`), if the consumer was passing it in.
if (node.arguments.length > 1) {
return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]);
}
return node;
}
/**
* Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or
* to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`.
*/
function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node {
// Clone so we don't mutate by accident. Note that we assume that
// the user's code is providing all three required arguments.
const outputMethodArgs = node.arguments.slice();
const isAddArgument = outputMethodArgs.pop()!;
const createRendererCall = (isAdd: boolean) => {
const innerExpression = node.expression.expression;
const topExpression =
ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass');
return ts.createCall(topExpression, [], node.arguments.slice(0, 2));
};
// If the call has the `isAdd` argument as a literal boolean, we can map it directly to
// `addClass` or `removeClass`. Note that we can't use the type checker here, because it
// won't tell us whether the value resolves to true or false.
if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword ||
isAddArgument.kind === ts.SyntaxKind.FalseKeyword) {
return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword);
}
// Otherwise create a ternary on the variable.
return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false));
}
/**
* Migrates a call to `setElementStyle` call either to a call to
* `setStyle` or `removeStyle`. or to an expression like
* `value == null ? removeStyle(el, key) : setStyle(el, key, value)`.
*/
function migrateSetElementStyle(
node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node {
const args = node.arguments;
const addMethodName = 'setStyle';
const removeMethodName = 'removeStyle';
const lastArgType = args[2] ?
typeChecker.typeToString(
typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) :
null;
// Note that for a literal null, TS considers it a `NullKeyword`,
// whereas a literal `undefined` is just an Identifier.
if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') {
// If we've got a call with two arguments, or one with three arguments where the last one is
// `undefined` or `null`, we can safely switch to a `removeStyle` call.
const innerExpression = node.expression.expression;
const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName);
return ts.createCall(topExpression, [], args.slice(0, 2));
} else if (args.length === 3) |
return node;
}
/**
* Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to
* `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`.
*/
function migrateInvokeElementMethod(node: ts.CallExpression): ts.Node {
const [target, name, args] = node.arguments;
const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name);
const isArgsStatic = !args || ts.isArrayLiteralExpression(args);
if (isNameStatic && isArgsStatic) {
// If the name is a static string and the arguments are an array literal,
// we can safely convert the node | {
// We need the checks for string literals, because the type of something
// like `"blue"` is the literal `blue`, not `string`.
if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) ||
ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) {
// If we've got three arguments and the last one is a string literal or a number, we
// can safely rename to `setStyle`.
return renameMethodCall(node, addMethodName);
} else {
// Otherwise migrate to a ternary that looks like:
// `value == null ? removeStyle(el, key) : setStyle(el, key, value)`
const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull());
const whenNullCall = renameMethodCall(
ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression,
removeMethodName);
return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName));
}
} | conditional_block |
migration.ts | const existingImport = findImportSpecifier(node.elements, oldImport);
if (!existingImport) {
throw new Error(`Could not find an import to replace using ${oldImport}.`);
}
return ts.updateNamedImports(node, [
...node.elements.filter(current => current !== existingImport),
// Create a new import while trying to preserve the alias of the old one.
ts.createImportSpecifier(
existingImport.propertyName ? ts.createIdentifier(newImport) : undefined,
existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport))
]);
}
/**
* Migrates a function call expression from `Renderer` to `Renderer2`.
* Returns null if the expression should be dropped.
*/
export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker):
{node: ts.Node|null, requiredHelpers?: HelperFunction[]} {
if (isPropertyAccessCallExpression(node)) {
switch (node.expression.name.getText()) {
case 'setElementProperty':
return {node: renameMethodCall(node, 'setProperty')};
case 'setText':
return {node: renameMethodCall(node, 'setValue')};
case 'listenGlobal':
return {node: renameMethodCall(node, 'listen')};
case 'selectRootElement':
return {node: migrateSelectRootElement(node)};
case 'setElementClass':
return {node: migrateSetElementClass(node)};
case 'setElementStyle':
return {node: migrateSetElementStyle(node, typeChecker)};
case 'invokeElementMethod':
return {node: migrateInvokeElementMethod(node)};
case 'setBindingDebugInfo':
return {node: null};
case 'createViewRoot':
return {node: migrateCreateViewRoot(node)};
case 'setElementAttribute':
return {
node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments),
requiredHelpers: [
HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute
]
};
case 'createElement':
return {
node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)),
requiredHelpers:
[HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement]
};
case 'createText':
return {
node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)),
requiredHelpers: [HelperFunction.any, HelperFunction.createText]
};
case 'createTemplateAnchor':
return {
node: switchToHelperCall(
node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)),
requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor]
};
case 'projectNodes':
return {
node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes]
};
case 'animate':
return {
node: migrateAnimateCall(),
requiredHelpers: [HelperFunction.any, HelperFunction.animate]
};
case 'destroyView':
return {
node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]),
requiredHelpers: [HelperFunction.any, HelperFunction.destroyView]
};
case 'detachView':
return {
node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]),
requiredHelpers: [HelperFunction.any, HelperFunction.detachView]
};
case 'attachViewAfter':
return {
node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter]
};
}
}
return {node};
}
/** Checks whether a node is a PropertyAccessExpression. */
function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression {
return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression);
}
/** Renames a method call while keeping all of the parameters in place. */
function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression {
const newExpression = ts.updatePropertyAccess(
node.expression, node.expression.expression, ts.createIdentifier(newName));
return ts.updateCall(node, newExpression, node.typeArguments, node.arguments);
}
/**
* Migrates a `selectRootElement` call by removing the last argument which is no longer supported.
*/
function migrateSelectRootElement(node: ts.CallExpression): ts.Node {
// The only thing we need to do is to drop the last argument
// (`debugInfo`), if the consumer was passing it in.
if (node.arguments.length > 1) {
return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]);
}
return node;
}
/**
* Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or
* to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`.
*/
function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node | // Otherwise create a ternary on the variable.
return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false));
}
/**
* Migrates a call to `setElementStyle` call either to a call to
* `setStyle` or `removeStyle`. or to an expression like
* `value == null ? removeStyle(el, key) : setStyle(el, key, value)`.
*/
function migrateSetElementStyle(
node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node {
const args = node.arguments;
const addMethodName = 'setStyle';
const removeMethodName = 'removeStyle';
const lastArgType = args[2] ?
typeChecker.typeToString(
typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) :
null;
// Note that for a literal null, TS considers it a `NullKeyword`,
// whereas a literal `undefined` is just an Identifier.
if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') {
// If we've got a call with two arguments, or one with three arguments where the last one is
// `undefined` or `null`, we can safely switch to a `removeStyle` call.
const innerExpression = node.expression.expression;
const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName);
return ts.createCall(topExpression, [], args.slice(0, 2));
} else if (args.length === 3) {
// We need the checks for string literals, because the type of something
// like `"blue"` is the literal `blue`, not `string`.
if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) ||
ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) {
// If we've got three arguments and the last one is a string literal or a number, we
// can safely rename to `setStyle`.
return renameMethodCall(node, addMethodName);
} else {
// Otherwise migrate to a ternary that looks like:
// `value == null ? removeStyle(el, key) : setStyle(el, key, value)`
const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull());
const whenNullCall = renameMethodCall(
ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression,
removeMethodName);
return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName));
}
}
return node;
}
/**
* Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to
* `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`.
*/
function migrateInvokeElementMethod(node: ts.CallExpression): ts.Node {
const [target, name, args] = node.arguments;
const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name);
const isArgsStatic = !args || ts.isArrayLiteralExpression(args);
if (isNameStatic && isArgsStatic) {
// If the name is a static string and the arguments are an array literal,
// we can safely convert the node into | {
// Clone so we don't mutate by accident. Note that we assume that
// the user's code is providing all three required arguments.
const outputMethodArgs = node.arguments.slice();
const isAddArgument = outputMethodArgs.pop()!;
const createRendererCall = (isAdd: boolean) => {
const innerExpression = node.expression.expression;
const topExpression =
ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass');
return ts.createCall(topExpression, [], node.arguments.slice(0, 2));
};
// If the call has the `isAdd` argument as a literal boolean, we can map it directly to
// `addClass` or `removeClass`. Note that we can't use the type checker here, because it
// won't tell us whether the value resolves to true or false.
if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword ||
isAddArgument.kind === ts.SyntaxKind.FalseKeyword) {
return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword);
}
| identifier_body |
migration.ts | const existingImport = findImportSpecifier(node.elements, oldImport);
if (!existingImport) {
throw new Error(`Could not find an import to replace using ${oldImport}.`);
}
return ts.updateNamedImports(node, [
...node.elements.filter(current => current !== existingImport),
// Create a new import while trying to preserve the alias of the old one.
ts.createImportSpecifier(
existingImport.propertyName ? ts.createIdentifier(newImport) : undefined,
existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport))
]);
}
/**
* Migrates a function call expression from `Renderer` to `Renderer2`.
* Returns null if the expression should be dropped.
*/
export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker):
{node: ts.Node|null, requiredHelpers?: HelperFunction[]} {
if (isPropertyAccessCallExpression(node)) {
switch (node.expression.name.getText()) {
case 'setElementProperty':
return {node: renameMethodCall(node, 'setProperty')};
case 'setText':
return {node: renameMethodCall(node, 'setValue')};
case 'listenGlobal':
return {node: renameMethodCall(node, 'listen')};
case 'selectRootElement':
return {node: migrateSelectRootElement(node)};
case 'setElementClass':
return {node: migrateSetElementClass(node)};
case 'setElementStyle':
return {node: migrateSetElementStyle(node, typeChecker)};
case 'invokeElementMethod':
return {node: migrateInvokeElementMethod(node)};
case 'setBindingDebugInfo':
return {node: null};
case 'createViewRoot':
return {node: migrateCreateViewRoot(node)};
case 'setElementAttribute':
return {
node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments),
requiredHelpers: [
HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute
]
};
case 'createElement':
return {
node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)),
requiredHelpers:
[HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement]
};
case 'createText':
return {
node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)),
requiredHelpers: [HelperFunction.any, HelperFunction.createText]
};
case 'createTemplateAnchor':
return {
node: switchToHelperCall(
node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)),
requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor]
};
case 'projectNodes':
return {
node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes]
};
case 'animate':
return {
node: migrateAnimateCall(),
requiredHelpers: [HelperFunction.any, HelperFunction.animate]
};
case 'destroyView':
return {
node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]),
requiredHelpers: [HelperFunction.any, HelperFunction.destroyView]
};
case 'detachView':
return {
node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]),
requiredHelpers: [HelperFunction.any, HelperFunction.detachView]
};
case 'attachViewAfter':
return {
node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments),
requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter]
};
}
}
return {node};
}
/** Checks whether a node is a PropertyAccessExpression. */
function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression {
return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression);
}
/** Renames a method call while keeping all of the parameters in place. */
function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression {
const newExpression = ts.updatePropertyAccess(
node.expression, node.expression.expression, ts.createIdentifier(newName));
return ts.updateCall(node, newExpression, node.typeArguments, node.arguments);
}
/**
* Migrates a `selectRootElement` call by removing the last argument which is no longer supported.
*/
function migrateSelectRootElement(node: ts.CallExpression): ts.Node {
// The only thing we need to do is to drop the last argument
// (`debugInfo`), if the consumer was passing it in.
if (node.arguments.length > 1) {
return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]);
}
| /**
* Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or
* to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`.
*/
function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node {
// Clone so we don't mutate by accident. Note that we assume that
// the user's code is providing all three required arguments.
const outputMethodArgs = node.arguments.slice();
const isAddArgument = outputMethodArgs.pop()!;
const createRendererCall = (isAdd: boolean) => {
const innerExpression = node.expression.expression;
const topExpression =
ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass');
return ts.createCall(topExpression, [], node.arguments.slice(0, 2));
};
// If the call has the `isAdd` argument as a literal boolean, we can map it directly to
// `addClass` or `removeClass`. Note that we can't use the type checker here, because it
// won't tell us whether the value resolves to true or false.
if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword ||
isAddArgument.kind === ts.SyntaxKind.FalseKeyword) {
return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword);
}
// Otherwise create a ternary on the variable.
return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false));
}
/**
* Migrates a call to `setElementStyle` call either to a call to
* `setStyle` or `removeStyle`. or to an expression like
* `value == null ? removeStyle(el, key) : setStyle(el, key, value)`.
*/
function migrateSetElementStyle(
node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node {
const args = node.arguments;
const addMethodName = 'setStyle';
const removeMethodName = 'removeStyle';
const lastArgType = args[2] ?
typeChecker.typeToString(
typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) :
null;
// Note that for a literal null, TS considers it a `NullKeyword`,
// whereas a literal `undefined` is just an Identifier.
if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') {
// If we've got a call with two arguments, or one with three arguments where the last one is
// `undefined` or `null`, we can safely switch to a `removeStyle` call.
const innerExpression = node.expression.expression;
const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName);
return ts.createCall(topExpression, [], args.slice(0, 2));
} else if (args.length === 3) {
// We need the checks for string literals, because the type of something
// like `"blue"` is the literal `blue`, not `string`.
if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) ||
ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) {
// If we've got three arguments and the last one is a string literal or a number, we
// can safely rename to `setStyle`.
return renameMethodCall(node, addMethodName);
} else {
// Otherwise migrate to a ternary that looks like:
// `value == null ? removeStyle(el, key) : setStyle(el, key, value)`
const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull());
const whenNullCall = renameMethodCall(
ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression,
removeMethodName);
return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName));
}
}
return node;
}
/**
* Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to
* `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`.
*/
function migrateInvokeElementMethod(node: ts.CallExpression): ts.Node {
const [target, name, args] = node.arguments;
const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name);
const isArgsStatic = !args || ts.isArrayLiteralExpression(args);
if (isNameStatic && isArgsStatic) {
// If the name is a static string and the arguments are an array literal,
// we can safely convert the node into a | return node;
}
| random_line_split |
page.js | ( req, res ) {
var cmds = {};
function cmd( group, url, link, help )
{
this.url = url;
this.link = link;
this.help = help;
if(!cmds[group]) { cmds[group] = {}; cmds[group].items = [ ] };
cmds[group].items.push(this);
}
var user = loginstate.getUser(req);
new cmd('safeharbor', '/about', 'About', 'Learn about Safe Harbor');
new cmd('safeharbor', '/learn', 'Learn', 'Learn about your rights and the DMCA');
new cmd('safeharbor', '/support', 'Support', 'Ask us stuff');
if( user )
{
new cmd('user', '/dash', 'Dashboard', 'Manage your disputes' );
// new cmd('user', '/passwordreset', 'Password reset', 'Change your password');
new cmd('user', '/account', 'Account settings', 'Change your email and other settings');
new cmd('user', '/accountdeleter', 'Delete your account', 'hrumph');
new cmd('user', '/logout', 'Log out', 'bye for now' );
new cmd('site', '/siteeditor','Edit your site properties');
new cmd('tablinks', '/disputes', 'Past Disputes', 'Your dispute history' );
new cmd('tablinks', '/form', 'Future Disputes', 'Your dispute future' );
var r = user.role>>>0; // WTF?
if( r <= ROLES.admin )
{
new cmd('admin', '/admin', 'Admin stuff', '' );
if( r == ROLES.developer )
{
new cmd('developer', '/dev', 'Developer stuff', '' );
}
}
}
else
{
new cmd( 'user', '/login', 'Login', 'For existing accounts' );
new cmd( 'user', '/reg', 'Register', 'For creating new accounts' );
new cmd( 'user', '/lostpassword', 'Lost password', 'For existing, if forgetful accounts');
}
return cmds;
}
exports.Message = function( msgLevel, msgTitle, text, opts )
{
utils.copy( this, opts || {});
this.level = msgLevel;
this.title = msgTitle;
this.text = text;
if( !this.status )
{
switch( this.level )
{
case MESSAGE_LEVELS.info:
case MESSAGE_LEVELS.success:
this.status = 'ok';
break;
case MESSAGE_LEVELS.warning:
case MESSAGE_LEVELS.danger:
case MESSAGE_LEVELS.error:
this.status = '??'; // TODO fill these info
break;
}
}
}
exports.setup = function(app) {
var Handlebars = require('handlebars');
Handlebars.registerHelper('loggedInStatusClass', function() {
var isLoggedIn = loginstate.isLoggedIn();
if( isLoggedIn )
return('loggedin'); // that's a CSS selector name
else
return('loggedout');
});
Handlebars.registerHelper('contextDumper', function(a) {
// I haven't figured out if this context blob
// is a copy or an actual instance of something
// important and shared, so we remove the 'app'
// thingy so the dump is managable...
var app = a.app;
a.app = null;
var text = require('util').inspect(a,true,null);
// ...and then restore it just in case someone
// else was using it
a.app = app;
return text;
});
app.register('.html', Handlebars);
app.set('view engine', 'handlebars');
app.dynamicHelpers( {
// these will all be passed to every page...
user: function( req, res ) { var u = loginstate.getUser(req);
if( u && u.password ) u.password = '****';
return u; },
isLoggedIn: function( req, res ) { return !!loginstate.getUser(req); },
isAdmin: function( req, res ) { var u = loginstate.getUser(req);
return u && (u.role>>>0 <= ROLES.admin>>>0); },
menu: buildMenu, // we should consider not outputting this on Ajax
messages: function( req, res ) { return res.sh_output_messages || [ ] }
} );
app.use( function setupPage(req,res,next)
{
/**
Override of the express.response.render method
in order put our application specific standard
templates into the call stream.
@method render
@for Response
@param {string} view Path to template file (relative to './view')
@param {Object} opts Can include things like body_text, pageTitle
**/
var oldRender = res.render;
res.render = function(view, opts, fn, parent, sub )
{
if( typeof view != 'string' )
{
opts = view;
view = exports.BODY_TEXT_VIEW;
}
if( view == exports.BODY_TEXT_VIEW )
{
if( !opts.body_text )
opts.body_text = '';
}
if( !opts.layout )
{
opts.layout = exports.DEFAULT_LAYOUT;
}
if( !opts.bodyClass )
{
try { opts.bodyClass = view.match(/([a-z0-9]+)\/[^\/]+$/)[1]; } catch( e ) { }
}
res.render = oldRender;
return res.render(view, opts, fn, parent, sub );
}
if( !res.outputMessage )
{
/**
Call this to setup a message to be ouput during the
res.render() call.
@method outputMessage
@for Response
@param {MESSAGE_LEVELS} msgLevel
@param {STRING} msgTitle
@param {STRING} text
@param {Object} [opts]
**/
res.outputMessage = function( msgLevel, msgTitle, text, opts ) {
if( !res.sh_output_messages )
res.sh_output_messages = [ ];
res.sh_output_messages.push( new exports.Message(msgLevel,msgTitle,text,opts) );
return res;
}
}
next();
});
}
exports.countryList = function(selectedElementName){
var json = [
{name:"Afghanistan","data-alternative-spellings":"AF افغانستان"}
,{name:"Åland Islands","data-alternative-spellings":"AX Aaland Aland","data-relevancy-booster":"0.5"}
,{name:"Albania","data-alternative-spellings":"AL"}
,{name:"Algeria","data-alternative-spellings":"DZ الجزائر"}
,{name:"American Samoa","data-alternative-spellings":"AS","data-relevancy-booster":"0.5"}
,{name:"Andorra","data-alternative-spellings":"AD","data-relevancy-booster":"0.5"}
,{name:"Angola","data-alternative-spellings":"AO"}
,{name:"Anguilla","data-alternative-spellings":"AI","data-relevancy-booster":"0.5"}
,{name:"Antarctica","data-alternative-spellings":"AQ","data-relevancy-booster":"0.5"}
,{name:"Antigua And Barbuda","data-alternative-spellings":"AG","data-relevancy-booster":"0.5"}
,{name:"Argentina","data-alternative-spellings":"AR"}
,{name:"Armenia","data-alternative-spellings":"AM Հայաստան"}
,{name:"Aruba","data-alternative-spellings":"AW","data-relevancy-booster":"0.5"}
,{name:"Australia","data-alternative-spellings":"AU","data-relevancy-booster":"1.5"}
,{name:"Austria","data-alternative-spellings":"AT Österreich Osterreich Oesterreich "}
,{name:"Azerbaijan","data-alternative-spellings":"AZ"}
,{name:"Bahamas","data-alternative-spellings":"BS"}
,{name:"Bahrain","data-alternative-spellings":"BH البحرين"}
,{name:"Bangladesh","data-alternative-spellings":"BD বাংলাদেশ","data-relevancy-booster":"2"}
,{name:"Barbados","data-alternative-spellings":"BB"}
,{name:"Belarus","data-alternative-spellings":"BY Беларусь"}
,{name:"Belgium","data-alternative-spellings":"BE België Belgie Belgien Belgique","data-relevancy-booster":"1.5"}
,{name:"Belize","data-alternative-spellings":"BZ"}
,{name:"Benin","data-alternative-spellings":"BJ"}
,{name:"Bermuda","data-alternative-spellings":"BM","data-relevancy-booster":"0.5"}
,{name:"Bhutan","data-alternative-spellings":"BT भूटान"}
,{name:"Bolivia","data-alternative-spellings":"BO"}
,{ | buildMenu | identifier_name | |
page.js | ;
cmds[group].items.push(this);
}
var user = loginstate.getUser(req);
new cmd('safeharbor', '/about', 'About', 'Learn about Safe Harbor');
new cmd('safeharbor', '/learn', 'Learn', 'Learn about your rights and the DMCA');
new cmd('safeharbor', '/support', 'Support', 'Ask us stuff');
if( user )
{
new cmd('user', '/dash', 'Dashboard', 'Manage your disputes' );
// new cmd('user', '/passwordreset', 'Password reset', 'Change your password');
new cmd('user', '/account', 'Account settings', 'Change your email and other settings');
new cmd('user', '/accountdeleter', 'Delete your account', 'hrumph');
new cmd('user', '/logout', 'Log out', 'bye for now' );
new cmd('site', '/siteeditor','Edit your site properties');
new cmd('tablinks', '/disputes', 'Past Disputes', 'Your dispute history' );
new cmd('tablinks', '/form', 'Future Disputes', 'Your dispute future' );
var r = user.role>>>0; // WTF?
if( r <= ROLES.admin )
{
new cmd('admin', '/admin', 'Admin stuff', '' );
if( r == ROLES.developer )
{
new cmd('developer', '/dev', 'Developer stuff', '' );
}
}
}
else
{
new cmd( 'user', '/login', 'Login', 'For existing accounts' );
new cmd( 'user', '/reg', 'Register', 'For creating new accounts' );
new cmd( 'user', '/lostpassword', 'Lost password', 'For existing, if forgetful accounts');
}
return cmds;
}
exports.Message = function( msgLevel, msgTitle, text, opts )
{
utils.copy( this, opts || {});
this.level = msgLevel;
this.title = msgTitle;
this.text = text;
if( !this.status )
{
switch( this.level )
{
case MESSAGE_LEVELS.info:
case MESSAGE_LEVELS.success:
this.status = 'ok';
break;
case MESSAGE_LEVELS.warning:
case MESSAGE_LEVELS.danger:
case MESSAGE_LEVELS.error:
this.status = '??'; // TODO fill these info
break;
}
}
}
exports.setup = function(app) {
var Handlebars = require('handlebars');
Handlebars.registerHelper('loggedInStatusClass', function() {
var isLoggedIn = loginstate.isLoggedIn();
if( isLoggedIn )
return('loggedin'); // that's a CSS selector name
else
return('loggedout');
});
Handlebars.registerHelper('contextDumper', function(a) {
// I haven't figured out if this context blob
// is a copy or an actual instance of something
// important and shared, so we remove the 'app'
// thingy so the dump is managable...
var app = a.app;
a.app = null;
var text = require('util').inspect(a,true,null);
// ...and then restore it just in case someone
// else was using it
a.app = app;
return text;
});
app.register('.html', Handlebars);
app.set('view engine', 'handlebars');
app.dynamicHelpers( {
// these will all be passed to every page...
user: function( req, res ) { var u = loginstate.getUser(req);
if( u && u.password ) u.password = '****';
return u; },
isLoggedIn: function( req, res ) { return !!loginstate.getUser(req); },
isAdmin: function( req, res ) { var u = loginstate.getUser(req);
return u && (u.role>>>0 <= ROLES.admin>>>0); },
menu: buildMenu, // we should consider not outputting this on Ajax
messages: function( req, res ) { return res.sh_output_messages || [ ] }
} );
app.use( function setupPage(req,res,next)
{
/**
Override of the express.response.render method
in order put our application specific standard
templates into the call stream.
@method render
@for Response
@param {string} view Path to template file (relative to './view')
@param {Object} opts Can include things like body_text, pageTitle
**/
var oldRender = res.render;
res.render = function(view, opts, fn, parent, sub )
{
if( typeof view != 'string' )
{
opts = view;
view = exports.BODY_TEXT_VIEW;
}
if( view == exports.BODY_TEXT_VIEW )
{
if( !opts.body_text )
opts.body_text = '';
}
if( !opts.layout )
{
opts.layout = exports.DEFAULT_LAYOUT;
}
if( !opts.bodyClass )
{
try { opts.bodyClass = view.match(/([a-z0-9]+)\/[^\/]+$/)[1]; } catch( e ) { }
}
res.render = oldRender;
return res.render(view, opts, fn, parent, sub );
}
if( !res.outputMessage )
{
/**
Call this to setup a message to be ouput during the
res.render() call.
@method outputMessage
@for Response
@param {MESSAGE_LEVELS} msgLevel
@param {STRING} msgTitle
@param {STRING} text
@param {Object} [opts]
**/
res.outputMessage = function( msgLevel, msgTitle, text, opts ) {
if( !res.sh_output_messages )
res.sh_output_messages = [ ];
res.sh_output_messages.push( new exports.Message(msgLevel,msgTitle,text,opts) );
return res;
}
}
next();
});
}
exports.countryList = function(selectedElementName){
var json = [
{name:"Afghanistan","data-alternative-spellings":"AF افغانستان"}
,{name:"Åland Islands","data-alternative-spellings":"AX Aaland Aland","data-relevancy-booster":"0.5"}
,{name:"Albania","data-alternative-spellings":"AL"}
,{name:"Algeria","data-alternative-spellings":"DZ الجزائر"}
,{name:"American Samoa","data-alternative-spellings":"AS","data-relevancy-booster":"0.5"}
,{name:"Andorra","data-alternative-spellings":"AD","data-relevancy-booster":"0.5"}
,{name:"Angola","data-alternative-spellings":"AO"}
,{name:"Anguilla","data-alternative-spellings":"AI","data-relevancy-booster":"0.5"}
,{name:"Antarctica","data-alternative-spellings":"AQ","data-relevancy-booster":"0.5"}
,{name:"Antigua And Barbuda","data-alternative-spellings":"AG","data-relevancy-booster":"0.5"}
,{name:"Argentina","data-alternative-spellings":"AR"}
,{name:"Armenia","data-alternative-spellings":"AM Հայաստան"}
,{name:"Aruba","data-alternative-spellings":"AW","data-relevancy-booster":"0.5"}
,{name:"Australia","data-alternative-spellings":"AU","data-relevancy-booster":"1.5"}
,{name:"Austria","data-alternative-spellings":"AT Österreich Osterreich Oesterreich "}
,{name:"Azerbaijan","data-alternative-spellings":"AZ"}
,{name:"Bahamas","data-alternative-spellings":"BS"}
,{name:"Bahrain","data-alternative-spellings":"BH البحرين"}
,{name:"Bangladesh","data-alternative-spellings":"BD বাংলাদেশ","data-relevancy-booster":"2"}
,{name:"Barbados","data-alternative-spellings":"BB"}
,{name:"Belarus","data-alternative-spellings":"BY Беларусь"}
,{name:"Belgium","data-alternative-spellings":"BE België Belgie Belgien Belgique","data-relevancy-booster":"1.5"}
,{name:"Belize","data-alternative-spellings":"BZ"}
,{name:"Benin","data-alternative-spellings":"BJ"}
,{name:"Bermuda","data-alternative-spellings":"BM","data-relevancy-booster":"0.5"}
,{name:"Bhutan","data-alternative-spellings":"BT भूटान"}
,{name:"Bolivia","data-alternative-spellings":"BO"}
,{name:"Bonaire,Sint Eustatius and Saba","data-alternative-spellings":"BQ"}
,{name:"Bosnia and Herzegovina","data-alternative-spellings":"BA Босна и Х | { cmds[group] = {}; cmds[group].items = [ ] } | conditional_block | |
page.js | ellings":"PA"}
,{name:"Papua New Guinea","data-alternative-spellings":"PG"}
,{name:"Paraguay","data-alternative-spellings":"PY"}
,{name:"Peru","data-alternative-spellings":"PE"}
,{name:"Philippines","data-alternative-spellings":"PH Pilipinas","data-relevancy-booster":"1.5"}
,{name:"Pitcairn","data-alternative-spellings":"PN","data-relevancy-booster":"0.5"}
,{name:"Poland","data-alternative-spellings":"PL Polska","data-relevancy-booster":"1.25"}
,{name:"Portugal","data-alternative-spellings":"PT Portuguesa","data-relevancy-booster":"1.5"}
,{name:"Puerto Rico","data-alternative-spellings":"PR"}
,{name:"Qatar","data-alternative-spellings":"QA قطر"}
,{name:"Réunion","data-alternative-spellings":"RE Reunion"}
,{name:"Romania","data-alternative-spellings":"RO Rumania Roumania România"}
,{name:"Russian Federation","data-alternative-spellings":"RU Rossiya Российская Россия","data-relevancy-booster":"2.5"}
,{name:"Rwanda","data-alternative-spellings":"RW"}
,{name:"Saint Barthélemy","data-alternative-spellings":"BL St. Barthelemy"}
,{name:"Saint Helena","data-alternative-spellings":"SH St."}
,{name:"Saint Kitts and Nevis","data-alternative-spellings":"KN St."}
,{name:"Saint Lucia","data-alternative-spellings":"LC St."}
,{name:"Saint Martin (French Part)","data-alternative-spellings":"MF St."}
,{name:"Saint Pierre and Miquelon","data-alternative-spellings":"PM St."}
,{name:"Saint Vincent and the Grenadines","data-alternative-spellings":"VC St."}
,{name:"Samoa","data-alternative-spellings":"WS"}
,{name:"San Marino","data-alternative-spellings":"SM"}
,{name:"Sao Tome and Principe","data-alternative-spellings":"ST"}
,{name:"Saudi Arabia","data-alternative-spellings":"SA السعودية"}
,{name:"Senegal","data-alternative-spellings":"SN Sénégal"}
,{name:"Serbia","data-alternative-spellings":"RS Србија Srbija"}
,{name:"Seychelles","data-alternative-spellings":"SC","data-relevancy-booster":"0.5"}
,{name:"Sierra Leone","data-alternative-spellings":"SL"}
,{name:"Singapore","data-alternative-spellings":"SG Singapura சிங்கப்பூர் குடியரசு 新加坡共和国"}
,{name:"Sint Maarten (Dutch Part)","data-alternative-spellings":"SX"}
,{name:"Slovakia","data-alternative-spellings":"SK Slovenská Slovensko"}
,{name:"Slovenia","data-alternative-spellings":"SI Slovenija"}
,{name:"Solomon Islands","data-alternative-spellings":"SB"}
,{name:"Somalia","data-alternative-spellings":"SO الصومال"}
,{name:"South Africa","data-alternative-spellings":"ZA RSA Suid-Afrika"}
,{name:"South Georgia and the South Sandwich Islands","data-alternative-spellings":"GS"}
,{name:"South Sudan","data-alternative-spellings":"SS"}
,{name:"Spain","data-alternative-spellings":"ES España","data-relevancy-booster":"2"}
,{name:"Sri Lanka","data-alternative-spellings":"LK ශ්රී ලංකා இலங்கை Ceylon"}
,{name:"Sudan","data-alternative-spellings":"SD السودان"}
,{name:"Suriname","data-alternative-spellings":"SR शर्नम् Sarnam Sranangron"}
,{name:"Svalbard and Jan Mayen","data-alternative-spellings":"SJ","data-relevancy-booster":"0.5"}
,{name:"Swaziland","data-alternative-spellings":"SZ weSwatini Swatini Ngwane"}
,{name:"Sweden","data-alternative-spellings":"SE Sverige","data-relevancy-booster":"1.5"}
,{name:"Switzerland","data-alternative-spellings":"CH Swiss Confederation Schweiz Suisse Svizzera Svizra","data-relevancy-booster":"1.5"}
,{name:"Syrian Arab Republic","data-alternative-spellings":"SY Syria سورية"}
,{name:"Taiwan,Province of China","data-alternative-spellings":"TW 台灣 臺灣"}
,{name:"Tajikistan","data-alternative-spellings":"TJ Тоҷикистон Toçikiston"}
,{name:"Tanzania,United Republic of","data-alternative-spellings":"TZ"}
,{name:"Thailand","data-alternative-spellings":"TH ประเทศไทย Prathet Thai"}
,{name:"Timor-Leste","data-alternative-spellings":"TL"}
,{name:"Togo","data-alternative-spellings":"TG Togolese"}
,{name:"Tokelau","data-alternative-spellings":"TK","data-relevancy-booster":"0.5"}
,{name:"Tonga","data-alternative-spellings":"TO"}
,{name:"Trinidad and Tobago","data-alternative-spellings":"TT"}
,{name:"Tunisia","data-alternative-spellings":"TN تونس"}
,{name:"Turkey","data-alternative-spellings":"TR Türkiye Turkiye"}
,{name:"Turkmenistan","data-alternative-spellings":"TM Türkmenistan"}
,{name:"Turks and Caicos Islands","data-alternative-spellings":"TC","data-relevancy-booster":"0.5"}
,{name:"Tuvalu","data-alternative-spellings":"TV","data-relevancy-booster":"0.5"}
,{name:"Uganda","data-alternative-spellings":"UG"}
,{name:"Ukraine","data-alternative-spellings":"UA Ukrayina Україна"}
,{name:"United Arab Emirates","data-alternative-spellings":"AE UAE الإمارات"}
,{name:"United Kingdom","data-alternative-spellings":"GB Great Britain England UK Wales Scotland Northern Ireland","data-relevancy-booster":"2.5"}
,{name:"United States","data-relevancy-booster":"3.5","data-alternative-spellings":"US USA United States of America"}
,{name:"United States Minor Outlying Islands","data-alternative-spellings":"UM"}
,{name:"Uruguay","data-alternative-spellings":"UY"}
,{name:"Uzbekistan","data-alternative-spellings":"UZ Ўзбекистон O'zbekstan O‘zbekiston"}
,{name:"Vanuatu","data-alternative-spellings":"VU"}
,{name:"Venezuela","data-alternative-spellings":"VE"}
,{name:"Vietnam","data-alternative-spellings":"VN Việt Nam","data-relevancy-booster":"1.5"}
,{name:"Virgin Islands,British","data-alternative-spellings":"VG","data-relevancy-booster":"0.5"}
,{name:"Virgin Islands,U.S.","data-alternative-spellings":"VI","data-relevancy-booster":"0.5"}
,{name:"Wallis and Futuna","data-alternative-spellings":"WF","data-relevancy-booster":"0.5"}
,{name:"Western Sahara","data-alternative-spellings":"EH لصحراء الغربية"}
,{name:"Yemen","data-alternative-spellings":"YE اليمن"}
,{name:"Zambia","data-alternative-spellings":"ZM"}
,{name:"Zimbabwe","data-alternative-spellings":"ZW"}
];
var html = "";
// Provide a "Select Country" leader element, but only if there is no pre-selected item.
// This is to prevent users who have previously selected a country from setting an empty country.
if( !selectedElementName || selectedElementName.length < 1)
html = '<option value="" selected="selected">Select Country</option>\n';
json.forEach(function(element, index, array){
var str = '<option value="' + element.name+'"';
if( element.name == selectedElementName )
str += " selected ";
var helper=function(field){
if( typeof element[field] != "string" ) return("");
if( element[field].length == 0 ) return(""); | random_line_split | ||
page.js |
var user = loginstate.getUser(req);
new cmd('safeharbor', '/about', 'About', 'Learn about Safe Harbor');
new cmd('safeharbor', '/learn', 'Learn', 'Learn about your rights and the DMCA');
new cmd('safeharbor', '/support', 'Support', 'Ask us stuff');
if( user )
{
new cmd('user', '/dash', 'Dashboard', 'Manage your disputes' );
// new cmd('user', '/passwordreset', 'Password reset', 'Change your password');
new cmd('user', '/account', 'Account settings', 'Change your email and other settings');
new cmd('user', '/accountdeleter', 'Delete your account', 'hrumph');
new cmd('user', '/logout', 'Log out', 'bye for now' );
new cmd('site', '/siteeditor','Edit your site properties');
new cmd('tablinks', '/disputes', 'Past Disputes', 'Your dispute history' );
new cmd('tablinks', '/form', 'Future Disputes', 'Your dispute future' );
var r = user.role>>>0; // WTF?
if( r <= ROLES.admin )
{
new cmd('admin', '/admin', 'Admin stuff', '' );
if( r == ROLES.developer )
{
new cmd('developer', '/dev', 'Developer stuff', '' );
}
}
}
else
{
new cmd( 'user', '/login', 'Login', 'For existing accounts' );
new cmd( 'user', '/reg', 'Register', 'For creating new accounts' );
new cmd( 'user', '/lostpassword', 'Lost password', 'For existing, if forgetful accounts');
}
return cmds;
}
exports.Message = function( msgLevel, msgTitle, text, opts )
{
utils.copy( this, opts || {});
this.level = msgLevel;
this.title = msgTitle;
this.text = text;
if( !this.status )
{
switch( this.level )
{
case MESSAGE_LEVELS.info:
case MESSAGE_LEVELS.success:
this.status = 'ok';
break;
case MESSAGE_LEVELS.warning:
case MESSAGE_LEVELS.danger:
case MESSAGE_LEVELS.error:
this.status = '??'; // TODO fill these info
break;
}
}
}
exports.setup = function(app) {
var Handlebars = require('handlebars');
Handlebars.registerHelper('loggedInStatusClass', function() {
var isLoggedIn = loginstate.isLoggedIn();
if( isLoggedIn )
return('loggedin'); // that's a CSS selector name
else
return('loggedout');
});
Handlebars.registerHelper('contextDumper', function(a) {
// I haven't figured out if this context blob
// is a copy or an actual instance of something
// important and shared, so we remove the 'app'
// thingy so the dump is managable...
var app = a.app;
a.app = null;
var text = require('util').inspect(a,true,null);
// ...and then restore it just in case someone
// else was using it
a.app = app;
return text;
});
app.register('.html', Handlebars);
app.set('view engine', 'handlebars');
app.dynamicHelpers( {
// these will all be passed to every page...
user: function( req, res ) { var u = loginstate.getUser(req);
if( u && u.password ) u.password = '****';
return u; },
isLoggedIn: function( req, res ) { return !!loginstate.getUser(req); },
isAdmin: function( req, res ) { var u = loginstate.getUser(req);
return u && (u.role>>>0 <= ROLES.admin>>>0); },
menu: buildMenu, // we should consider not outputting this on Ajax
messages: function( req, res ) { return res.sh_output_messages || [ ] }
} );
app.use( function setupPage(req,res,next)
{
/**
Override of the express.response.render method
in order put our application specific standard
templates into the call stream.
@method render
@for Response
@param {string} view Path to template file (relative to './view')
@param {Object} opts Can include things like body_text, pageTitle
**/
var oldRender = res.render;
res.render = function(view, opts, fn, parent, sub )
{
if( typeof view != 'string' )
{
opts = view;
view = exports.BODY_TEXT_VIEW;
}
if( view == exports.BODY_TEXT_VIEW )
{
if( !opts.body_text )
opts.body_text = '';
}
if( !opts.layout )
{
opts.layout = exports.DEFAULT_LAYOUT;
}
if( !opts.bodyClass )
{
try { opts.bodyClass = view.match(/([a-z0-9]+)\/[^\/]+$/)[1]; } catch( e ) { }
}
res.render = oldRender;
return res.render(view, opts, fn, parent, sub );
}
if( !res.outputMessage )
{
/**
Call this to setup a message to be ouput during the
res.render() call.
@method outputMessage
@for Response
@param {MESSAGE_LEVELS} msgLevel
@param {STRING} msgTitle
@param {STRING} text
@param {Object} [opts]
**/
res.outputMessage = function( msgLevel, msgTitle, text, opts ) {
if( !res.sh_output_messages )
res.sh_output_messages = [ ];
res.sh_output_messages.push( new exports.Message(msgLevel,msgTitle,text,opts) );
return res;
}
}
next();
});
}
exports.countryList = function(selectedElementName){
var json = [
{name:"Afghanistan","data-alternative-spellings":"AF افغانستان"}
,{name:"Åland Islands","data-alternative-spellings":"AX Aaland Aland","data-relevancy-booster":"0.5"}
,{name:"Albania","data-alternative-spellings":"AL"}
,{name:"Algeria","data-alternative-spellings":"DZ الجزائر"}
,{name:"American Samoa","data-alternative-spellings":"AS","data-relevancy-booster":"0.5"}
,{name:"Andorra","data-alternative-spellings":"AD","data-relevancy-booster":"0.5"}
,{name:"Angola","data-alternative-spellings":"AO"}
,{name:"Anguilla","data-alternative-spellings":"AI","data-relevancy-booster":"0.5"}
,{name:"Antarctica","data-alternative-spellings":"AQ","data-relevancy-booster":"0.5"}
,{name:"Antigua And Barbuda","data-alternative-spellings":"AG","data-relevancy-booster":"0.5"}
,{name:"Argentina","data-alternative-spellings":"AR"}
,{name:"Armenia","data-alternative-spellings":"AM Հայաստան"}
,{name:"Aruba","data-alternative-spellings":"AW","data-relevancy-booster":"0.5"}
,{name:"Australia","data-alternative-spellings":"AU","data-relevancy-booster":"1.5"}
,{name:"Austria","data-alternative-spellings":"AT Österreich Osterreich Oesterreich "}
,{name:"Azerbaijan","data-alternative-spellings":"AZ"}
,{name:"Bahamas","data-alternative-spellings":"BS"}
,{name:"Bahrain","data-alternative-spellings":"BH البحرين"}
,{name:"Bangladesh","data-alternative-spellings":"BD বাংলাদেশ","data-relevancy-booster":"2"}
,{name:"Barbados","data-alternative-spellings":"BB"}
,{name:"Belarus","data-alternative-spellings":"BY Беларусь"}
,{name:"Belgium","data-alternative-spellings":"BE België Belgie Belgien Belgique","data-relevancy-booster":"1.5"}
,{name:"Belize","data-alternative-spellings":"BZ"}
,{name:"Benin","data-alternative-spellings":"BJ"}
,{name:"Bermuda","data-alternative-spellings":"BM","data-relevancy-booster":"0.5"}
,{name:"Bhutan","data-alternative-spellings":"BT भूटान"}
,{name:"Bolivia","data-alternative-spellings":"BO"}
,{name:"Bonaire,Sint Eustatius and Saba","data-alternative-spellings":"BQ"}
,{ | {
this.url = url;
this.link = link;
this.help = help;
if(!cmds[group]) { cmds[group] = {}; cmds[group].items = [ ] };
cmds[group].items.push(this);
} | identifier_body | |
eval.rs | Val> {
unwrap_from_context("Variable", id, self.vars.get(id))
}
pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret {
self.vars.insert(id.clone(), val);
self.types.add_var_type(id, ty)
}
pub fn find_type(&self, id: &Ident) -> Ret<Type> {
self.types.find_type(id)
}
pub fn add_type(&mut self, id: String, ty: Type) -> Ret {
self.types.add_type(id, ty)
}
pub fn | (&mut self, id: String, variants: Vec<Ident>) -> Ret {
let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()});
for (i, variant) in variants.iter().enumerate() {
self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?;
}
self.add_type(id, Type::Data(rc))
}
pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret {
self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */)
}
pub fn import(&self, path: &str) -> Ret<Module> {
use regex::Regex;
use std::path::Path;
use resource;
use stdlib;
use parser;
let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())}
else {
let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy"));
let mut import_dir = import_path.clone();
import_dir.pop();
let file = import_path.to_string_lossy().to_string();
let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?;
(ctx, file)
};
let exp = parser::parse_resource(&file)?;
Ok(Module {path: file.to_string(), exp: exp, ctx: ctx})
}
pub fn import_eval(&self, path: &str) -> Ret<RunVal> {
let mut module = self.import(path)?;
Ok(eval_exp_inline(&module.exp, &mut module.ctx))
}
}
#[derive(Clone,Debug,PartialEq)]
pub struct Module {
pub path: String,
pub exp: Exp,
pub ctx: Context,
}
pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal {
match exp {
&Exp::Index(n) => RunVal::Index(n),
&Exp::String(ref s) => RunVal::String(s.to_string()),
&Exp::Var(ref id) => ctx.find_var(id).unwrap(),
&Exp::Scope(ref decls, ref ret) => {
let mut child = ctx.create_child();
for decl in decls {
eval_decl(decl, &mut child).unwrap();
}
eval_exp(ret, &child)
},
&Exp::Expand(_) => panic!("No context for expansion"),
&Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)),
&Exp::Concat(ref args) => {
//TODO adjacent gates
if args.len() == 1 {
if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) {
return RunVal::Gate(gate)
}
}
let div = (args.len() as f32).sqrt();
let states = args.iter()
.map(|e| build_state_typed(eval_exp(e, ctx)))
.collect::<Ret<Vec<(State, Type)>>>().unwrap();
RunVal::State(states.iter()
.flat_map(|(s, _)| s)
.map(|n| n / div)
.collect(),
Type::Concat(states.into_iter()
.map(|(_, t)| t)
.collect()))
},
&Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => {
let val = eval_exp(cond_exp, ctx);
if let Some(b) = build_bool(&val) {
eval_exp(if b {then_exp} else {else_exp}, ctx)
}
else {
// TODO: consider removing in favor of using extract gates for explicitness
// let state = build_state(val);
// if state.len() > 2 {
// panic!("Conditional state cannot be {}-dimensional", state.len())
// }
// RunVal::State(state.extract(vec![
// build_state(eval_exp(else_exp, ctx)),
// build_state(eval_exp(then_exp, ctx)),
// ]), Type::Any /* TODO determine from then/else types */)
panic!("Non-boolean value: {}", val)
}
},
&Exp::Lambda(ref pat, ref body) => {
let ty = infer_type(exp, ctx.types()).unwrap();
RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty)
},
&Exp::Invoke(ref target, ref arg) => {
match eval_exp(target, ctx) {
// TODO proper tuple function evaluation
RunVal::Func(fn_ctx_rc, pat, body, _ty) => {
let mut fn_ctx = (*fn_ctx_rc).clone();
assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap();
eval_exp(&body, &fn_ctx)
},
RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(),
RunVal::Gate(gate) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s.extract(gate), t)
},
val => {
let msg = &format!("Cannot invoke {}", val);
let state = build_state(eval_exp(arg, ctx));
let gate = build_gate(&val, ctx).expect(msg);
RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */)
},
}
},
&Exp::Repeat(n, ref exp) => {
let val = eval_exp(&exp, ctx);
RunVal::Tuple((0..n).map(|_| val.clone()).collect())
},
&Exp::State(ref arg) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s, t)
},
&Exp::Phase(phase, ref arg) => {
let val = eval_exp(arg, ctx);
build_gate(&val, ctx)
.map(|g| RunVal::Gate(g.power(phase)))
.unwrap_or_else(|| {
let (s, t) = build_state_typed(val).unwrap();
RunVal::State(s.phase(phase), t)
})
},
&Exp::Extract(ref arg, ref cases) => {
let state = build_state(eval_exp(arg, ctx));
let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx);
RunVal::State(state.extract(gate), gt)
},
&Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(),
}
}
pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal {
match exp {
Exp::Scope(ref decls, ref exp) => {
for decl in decls {
eval_decl(decl, ctx).unwrap();
}
eval_exp(exp, ctx)
},
_ => eval_exp(exp, ctx),
}
}
pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> {
seq.iter().flat_map(|e| {
if let Exp::Expand(ref e) = e {
let val = eval_exp(e, ctx);
let err = Error(format!("Cannot expand value: {}", val));
iterate_val(val).ok_or(err).unwrap()
}
else {vec![eval_exp(e, ctx)]}
}).collect()
}
pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret {
match decl {
&Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx),
&Decl::Type(ref id, ref pat) => {
let ty = eval_type(pat, ctx.types())?;
ctx.add_type(id.clone(), ty)
},
&Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()),
&Decl::Assert(ref expect, ref result) => {
let a = eval_exp(expect, ctx);
let b = eval_exp(result, ctx);
let eq = match (&a, &b) {
(&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => {
a.iter().zip(b).map(|(a, b)| {
let abs = (a - b).norm();
abs * abs
}).sum::<f3 | add_datatype | identifier_name |
eval.rs | RunVal> {
unwrap_from_context("Variable", id, self.vars.get(id))
}
pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret {
self.vars.insert(id.clone(), val);
self.types.add_var_type(id, ty)
}
pub fn find_type(&self, id: &Ident) -> Ret<Type> {
self.types.find_type(id)
}
pub fn add_type(&mut self, id: String, ty: Type) -> Ret {
self.types.add_type(id, ty)
}
pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret {
let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()});
for (i, variant) in variants.iter().enumerate() {
self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?;
}
self.add_type(id, Type::Data(rc))
}
pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret {
self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */)
}
pub fn import(&self, path: &str) -> Ret<Module> {
use regex::Regex;
use std::path::Path;
use resource;
use stdlib;
use parser;
let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())}
else {
let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy"));
let mut import_dir = import_path.clone();
import_dir.pop();
let file = import_path.to_string_lossy().to_string();
let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?;
(ctx, file)
};
let exp = parser::parse_resource(&file)?;
Ok(Module {path: file.to_string(), exp: exp, ctx: ctx})
}
pub fn import_eval(&self, path: &str) -> Ret<RunVal> {
let mut module = self.import(path)?;
Ok(eval_exp_inline(&module.exp, &mut module.ctx))
}
}
#[derive(Clone,Debug,PartialEq)]
pub struct Module {
pub path: String,
pub exp: Exp,
pub ctx: Context,
}
pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal {
match exp {
&Exp::Index(n) => RunVal::Index(n),
&Exp::String(ref s) => RunVal::String(s.to_string()),
&Exp::Var(ref id) => ctx.find_var(id).unwrap(),
&Exp::Scope(ref decls, ref ret) => {
let mut child = ctx.create_child();
for decl in decls {
eval_decl(decl, &mut child).unwrap();
}
eval_exp(ret, &child)
},
&Exp::Expand(_) => panic!("No context for expansion"),
&Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)),
&Exp::Concat(ref args) => {
//TODO adjacent gates
if args.len() == 1 {
if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) {
return RunVal::Gate(gate)
}
}
let div = (args.len() as f32).sqrt();
let states = args.iter()
.map(|e| build_state_typed(eval_exp(e, ctx)))
.collect::<Ret<Vec<(State, Type)>>>().unwrap();
RunVal::State(states.iter()
.flat_map(|(s, _)| s)
.map(|n| n / div)
.collect(),
Type::Concat(states.into_iter()
.map(|(_, t)| t)
.collect()))
},
&Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => {
let val = eval_exp(cond_exp, ctx);
if let Some(b) = build_bool(&val) {
eval_exp(if b {then_exp} else {else_exp}, ctx)
}
else {
// TODO: consider removing in favor of using extract gates for explicitness
// let state = build_state(val);
// if state.len() > 2 {
// panic!("Conditional state cannot be {}-dimensional", state.len())
// }
// RunVal::State(state.extract(vec![
// build_state(eval_exp(else_exp, ctx)),
// build_state(eval_exp(then_exp, ctx)),
// ]), Type::Any /* TODO determine from then/else types */)
panic!("Non-boolean value: {}", val)
}
},
&Exp::Lambda(ref pat, ref body) => {
let ty = infer_type(exp, ctx.types()).unwrap();
RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty)
},
&Exp::Invoke(ref target, ref arg) => {
match eval_exp(target, ctx) {
// TODO proper tuple function evaluation
RunVal::Func(fn_ctx_rc, pat, body, _ty) => {
let mut fn_ctx = (*fn_ctx_rc).clone();
assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap();
eval_exp(&body, &fn_ctx)
},
RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(),
RunVal::Gate(gate) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s.extract(gate), t)
},
val => {
let msg = &format!("Cannot invoke {}", val);
let state = build_state(eval_exp(arg, ctx));
let gate = build_gate(&val, ctx).expect(msg);
RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */)
},
}
},
&Exp::Repeat(n, ref exp) => {
let val = eval_exp(&exp, ctx);
RunVal::Tuple((0..n).map(|_| val.clone()).collect())
},
&Exp::State(ref arg) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s, t)
},
&Exp::Phase(phase, ref arg) => {
let val = eval_exp(arg, ctx);
build_gate(&val, ctx)
.map(|g| RunVal::Gate(g.power(phase)))
.unwrap_or_else(|| {
let (s, t) = build_state_typed(val).unwrap();
RunVal::State(s.phase(phase), t)
})
},
&Exp::Extract(ref arg, ref cases) => {
let state = build_state(eval_exp(arg, ctx));
let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx);
RunVal::State(state.extract(gate), gt)
},
&Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(),
}
}
pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal {
match exp {
Exp::Scope(ref decls, ref exp) => { | }
eval_exp(exp, ctx)
},
_ => eval_exp(exp, ctx),
}
}
pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> {
seq.iter().flat_map(|e| {
if let Exp::Expand(ref e) = e {
let val = eval_exp(e, ctx);
let err = Error(format!("Cannot expand value: {}", val));
iterate_val(val).ok_or(err).unwrap()
}
else {vec![eval_exp(e, ctx)]}
}).collect()
}
pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret {
match decl {
&Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx),
&Decl::Type(ref id, ref pat) => {
let ty = eval_type(pat, ctx.types())?;
ctx.add_type(id.clone(), ty)
},
&Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()),
&Decl::Assert(ref expect, ref result) => {
let a = eval_exp(expect, ctx);
let b = eval_exp(result, ctx);
let eq = match (&a, &b) {
(&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => {
a.iter().zip(b).map(|(a, b)| {
let abs = (a - b).norm();
abs * abs
}).sum::<f3 | for decl in decls {
eval_decl(decl, ctx).unwrap(); | random_line_split |
eval.rs | , ctx)),
// build_state(eval_exp(then_exp, ctx)),
// ]), Type::Any /* TODO determine from then/else types */)
panic!("Non-boolean value: {}", val)
}
},
&Exp::Lambda(ref pat, ref body) => {
let ty = infer_type(exp, ctx.types()).unwrap();
RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty)
},
&Exp::Invoke(ref target, ref arg) => {
match eval_exp(target, ctx) {
// TODO proper tuple function evaluation
RunVal::Func(fn_ctx_rc, pat, body, _ty) => {
let mut fn_ctx = (*fn_ctx_rc).clone();
assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap();
eval_exp(&body, &fn_ctx)
},
RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(),
RunVal::Gate(gate) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s.extract(gate), t)
},
val => {
let msg = &format!("Cannot invoke {}", val);
let state = build_state(eval_exp(arg, ctx));
let gate = build_gate(&val, ctx).expect(msg);
RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */)
},
}
},
&Exp::Repeat(n, ref exp) => {
let val = eval_exp(&exp, ctx);
RunVal::Tuple((0..n).map(|_| val.clone()).collect())
},
&Exp::State(ref arg) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s, t)
},
&Exp::Phase(phase, ref arg) => {
let val = eval_exp(arg, ctx);
build_gate(&val, ctx)
.map(|g| RunVal::Gate(g.power(phase)))
.unwrap_or_else(|| {
let (s, t) = build_state_typed(val).unwrap();
RunVal::State(s.phase(phase), t)
})
},
&Exp::Extract(ref arg, ref cases) => {
let state = build_state(eval_exp(arg, ctx));
let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx);
RunVal::State(state.extract(gate), gt)
},
&Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(),
}
}
pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal {
match exp {
Exp::Scope(ref decls, ref exp) => {
for decl in decls {
eval_decl(decl, ctx).unwrap();
}
eval_exp(exp, ctx)
},
_ => eval_exp(exp, ctx),
}
}
pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> {
seq.iter().flat_map(|e| {
if let Exp::Expand(ref e) = e {
let val = eval_exp(e, ctx);
let err = Error(format!("Cannot expand value: {}", val));
iterate_val(val).ok_or(err).unwrap()
}
else {vec![eval_exp(e, ctx)]}
}).collect()
}
pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret {
match decl {
&Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx),
&Decl::Type(ref id, ref pat) => {
let ty = eval_type(pat, ctx.types())?;
ctx.add_type(id.clone(), ty)
},
&Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()),
&Decl::Assert(ref expect, ref result) => {
let a = eval_exp(expect, ctx);
let b = eval_exp(result, ctx);
let eq = match (&a, &b) {
(&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => {
a.iter().zip(b).map(|(a, b)| {
let abs = (a - b).norm();
abs * abs
}).sum::<f32>() < 0.00001_f32
},
(a, b) => a == b,
};
if !eq {err!("Assertion failed: {} != {}", a, b)}
else {Ok(())}
},
&Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))),
&Decl::Do(ref exp) => {
eval_exp(exp, ctx);
Ok(())
},
}
}
// TODO combine logic with eval_static::assign_pat_type()
pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret {
match (pat, val) {
(&Pat::Any, _) => Ok(()),
(&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type
(&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => {
if pats.len() != vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)}
else {
pats.iter().zip(vals)
.map(|(pat, val)| assign_pat(pat, val, ctx))
.collect::<Ret<_>>()
}
},
(&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx),
_ => err!("{:?} cannot deconstruct `{}`", pat, val),
}
}
pub fn get_val_type(val: &RunVal) -> Type {
match val {
&RunVal::Index(_) => Type::Any,
&RunVal::String(_) => Type::Any,
&RunVal::Data(ref dt, _) => Type::Data((*dt).clone()),
&RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()),
&RunVal::Func(_, _, _, ref ty) => ty.clone(),
&RunVal::Macro(_) => Type::Any, // TODO
&RunVal::State(_, ref ty) => ty.clone(),
&RunVal::Gate(_) => Type::Any, // TODO
}
}
pub fn build_bool(val: &RunVal) -> Option<bool> {
match val {
&RunVal::Index(n) => Some(n > 0),
&RunVal::Data(ref _ty, n) => Some(n > 0),
&RunVal::Tuple(ref vec) => Some(vec.len() > 0),
_ => None,
}
}
pub fn build_state(val: RunVal) -> State {
build_state_typed(val).unwrap().0
}
pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> {
match val {
RunVal::Index(n) => Ok((get_state(n), Type::Any)),
RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))),
RunVal::Tuple(vals) => {
let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?;
let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect());
Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty))
},
RunVal::State(state, ty) => Ok((state, ty)),
val => err!("Cannot build state from {}", val)
}
}
pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> {
match exp {
&Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0),
_ => None,
}
}
pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> {
match val {
&RunVal::Tuple(ref vals) => vals.iter()
.fold(Some(vec![get_state(0)]),
|a, b| a.and_then(|a| build_gate(b, ctx).map(|b| a.combine(b)))),
&RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type
&RunVal::Gate(ref gate) => Some(gate.clone()),
_ => None,
}
}
pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> {
match val {
RunVal::Index(i) => | {
Some((0..i).map(RunVal::Index).collect())
} | conditional_block | |
eval.rs |
}
#[derive(Clone,Debug,PartialEq)]
pub struct Context {
path: String,
vars: HashMap<Ident, RunVal>,
types: TypeContext,
}
impl Context {
pub fn new(path: String) -> Context {
Context {
path,
vars: HashMap::new(),
types: TypeContext::new(),
}
}
pub fn path(&self) -> &String {
&self.path
}
pub fn types(&self) -> &TypeContext {
&self.types
}
pub fn create_child(&self) -> Context {
self.clone()
}
pub fn find_var(&self, id: &Ident) -> Ret<RunVal> {
unwrap_from_context("Variable", id, self.vars.get(id))
}
pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret {
self.vars.insert(id.clone(), val);
self.types.add_var_type(id, ty)
}
pub fn find_type(&self, id: &Ident) -> Ret<Type> {
self.types.find_type(id)
}
pub fn add_type(&mut self, id: String, ty: Type) -> Ret {
self.types.add_type(id, ty)
}
pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret {
let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()});
for (i, variant) in variants.iter().enumerate() {
self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?;
}
self.add_type(id, Type::Data(rc))
}
pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret {
self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */)
}
pub fn import(&self, path: &str) -> Ret<Module> {
use regex::Regex;
use std::path::Path;
use resource;
use stdlib;
use parser;
let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())}
else {
let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy"));
let mut import_dir = import_path.clone();
import_dir.pop();
let file = import_path.to_string_lossy().to_string();
let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?;
(ctx, file)
};
let exp = parser::parse_resource(&file)?;
Ok(Module {path: file.to_string(), exp: exp, ctx: ctx})
}
pub fn import_eval(&self, path: &str) -> Ret<RunVal> {
let mut module = self.import(path)?;
Ok(eval_exp_inline(&module.exp, &mut module.ctx))
}
}
#[derive(Clone,Debug,PartialEq)]
pub struct Module {
pub path: String,
pub exp: Exp,
pub ctx: Context,
}
pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal {
match exp {
&Exp::Index(n) => RunVal::Index(n),
&Exp::String(ref s) => RunVal::String(s.to_string()),
&Exp::Var(ref id) => ctx.find_var(id).unwrap(),
&Exp::Scope(ref decls, ref ret) => {
let mut child = ctx.create_child();
for decl in decls {
eval_decl(decl, &mut child).unwrap();
}
eval_exp(ret, &child)
},
&Exp::Expand(_) => panic!("No context for expansion"),
&Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)),
&Exp::Concat(ref args) => {
//TODO adjacent gates
if args.len() == 1 {
if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) {
return RunVal::Gate(gate)
}
}
let div = (args.len() as f32).sqrt();
let states = args.iter()
.map(|e| build_state_typed(eval_exp(e, ctx)))
.collect::<Ret<Vec<(State, Type)>>>().unwrap();
RunVal::State(states.iter()
.flat_map(|(s, _)| s)
.map(|n| n / div)
.collect(),
Type::Concat(states.into_iter()
.map(|(_, t)| t)
.collect()))
},
&Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => {
let val = eval_exp(cond_exp, ctx);
if let Some(b) = build_bool(&val) {
eval_exp(if b {then_exp} else {else_exp}, ctx)
}
else {
// TODO: consider removing in favor of using extract gates for explicitness
// let state = build_state(val);
// if state.len() > 2 {
// panic!("Conditional state cannot be {}-dimensional", state.len())
// }
// RunVal::State(state.extract(vec![
// build_state(eval_exp(else_exp, ctx)),
// build_state(eval_exp(then_exp, ctx)),
// ]), Type::Any /* TODO determine from then/else types */)
panic!("Non-boolean value: {}", val)
}
},
&Exp::Lambda(ref pat, ref body) => {
let ty = infer_type(exp, ctx.types()).unwrap();
RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty)
},
&Exp::Invoke(ref target, ref arg) => {
match eval_exp(target, ctx) {
// TODO proper tuple function evaluation
RunVal::Func(fn_ctx_rc, pat, body, _ty) => {
let mut fn_ctx = (*fn_ctx_rc).clone();
assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap();
eval_exp(&body, &fn_ctx)
},
RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(),
RunVal::Gate(gate) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s.extract(gate), t)
},
val => {
let msg = &format!("Cannot invoke {}", val);
let state = build_state(eval_exp(arg, ctx));
let gate = build_gate(&val, ctx).expect(msg);
RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */)
},
}
},
&Exp::Repeat(n, ref exp) => {
let val = eval_exp(&exp, ctx);
RunVal::Tuple((0..n).map(|_| val.clone()).collect())
},
&Exp::State(ref arg) => {
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap();
RunVal::State(s, t)
},
&Exp::Phase(phase, ref arg) => {
let val = eval_exp(arg, ctx);
build_gate(&val, ctx)
.map(|g| RunVal::Gate(g.power(phase)))
.unwrap_or_else(|| {
let (s, t) = build_state_typed(val).unwrap();
RunVal::State(s.phase(phase), t)
})
},
&Exp::Extract(ref arg, ref cases) => {
let state = build_state(eval_exp(arg, ctx));
let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx);
RunVal::State(state.extract(gate), gt)
},
&Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign | {
match self {
&RunVal::Index(ref n) => write!(f, "{}", n),
&RunVal::String(ref s) => write!(f, "{:?}", s),
&RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]),
&RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")),
&RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty),
&RunVal::Macro(ref mc) => write!(f, "{:?}", mc),
&RunVal::State(ref state, ref ty) => if ty != &Type::Any {
write!(f, "{}: {}", StateView(state), ty)
} else {
write!(f, "{}", StateView(state))
},
&RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")),
}
} | identifier_body | |
avx.rs | 1 * Cb + 23 * Cr) / 32
let g4 = _mm256_srai_epi16::<5>(g3);
// Y - (11 * Cb + 23 * Cr) / 32 ;
let g = YmmRegister {
mm256: clamp_avx(_mm256_sub_epi16(y_c, g4))
};
// b = Y + 113 * Cb / 64
// 113 * cb
let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r);
//113 * Cb / 64
let b2 = _mm256_srai_epi16::<6>(b1);
// b = Y + 113 * Cb / 64 ;
let b = YmmRegister {
mm256: clamp_avx(_mm256_add_epi16(b2, y_c))
};
return (r, g, b);
}
#[inline]
#[target_feature(enable = "avx2")]
/// A baseline implementation of YCbCr to RGB conversion which does not carry
/// out clamping
///
/// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion
/// routines
unsafe fn ycbcr_to_rgb_baseline_no_clamp(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16]
) -> (__m256i, __m256i, __m256i) {
// Load values into a register
//
let y_c = _mm256_loadu_si256(y.as_ptr().cast());
let cb_c = _mm256_loadu_si256(cb.as_ptr().cast());
let cr_c = _mm256_loadu_si256(cr.as_ptr().cast());
// AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb
// Cb = Cb-128;
let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128));
// cr = Cb -128;
let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128));
// Calculate Y->R
// r = Y + 45 * Cr / 32
// 45*cr
let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r);
// r1>>5
let r2 = _mm256_srai_epi16::<5>(r1);
//y+r2
let r = _mm256_add_epi16(y_c, r2);
// g = Y - (11 * Cb + 23 * Cr) / 32 ;
// 11*cb
let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r);
// 23*cr
let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r);
//(11
//(11 * Cb + 23 * Cr)
let g3 = _mm256_add_epi16(g1, g2);
// (11 * Cb + 23 * Cr) / 32
let g4 = _mm256_srai_epi16::<5>(g3);
// Y - (11 * Cb + 23 * Cr) / 32 ;
let g = _mm256_sub_epi16(y_c, g4);
// b = Y + 113 * Cb / 64
// 113 * cb
let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r);
//113 * Cb / 64
let b2 = _mm256_srai_epi16::<6>(b1);
// b = Y + 113 * Cb / 64 ;
let b = _mm256_add_epi16(b2, y_c);
return (r, g, b);
}
#[inline(always)]
pub fn ycbcr_to_rgba_avx2(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize
) {
unsafe {
ycbcr_to_rgba_unsafe(y, cb, cr, out, offset);
}
}
#[inline]
#[target_feature(enable = "avx2")]
#[rustfmt::skip]
unsafe fn ycbcr_to_rgba_unsafe(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16],
out: &mut [u8],
offset: &mut usize,
)
{
// check if we have enough space to write.
let tmp:& mut [u8; 64] = out.get_mut(*offset..*offset + 64).expect("Slice to small cannot write").try_into().unwrap();
let (r, g, b) = ycbcr_to_rgb_baseline_no_clamp(y, cb, cr);
// set alpha channel to 255 for opaque
// And no these comments were not from me pressing the keyboard
// Pack the integers into u8's using signed saturation.
let c = _mm256_packus_epi16(r, g); //aaaaa_bbbbb_aaaaa_bbbbbb
let d = _mm256_packus_epi16(b, _mm256_set1_epi16(255)); // cccccc_dddddd_ccccccc_ddddd
// transpose_u16 and interleave channels
let e = _mm256_unpacklo_epi8(c, d); //ab_ab_ab_ab_ab_ab_ab_ab
let f = _mm256_unpackhi_epi8(c, d); //cd_cd_cd_cd_cd_cd_cd_cd
// final transpose_u16
let g = _mm256_unpacklo_epi8(e, f); //abcd_abcd_abcd_abcd_abcd
let h = _mm256_unpackhi_epi8(e, f);
// undo packus shuffling...
let i = _mm256_permute2x128_si256::<{ shuffle(3, 2, 1, 0) }>(g, h);
let j = _mm256_permute2x128_si256::<{ shuffle(1, 2, 3, 0) }>(g, h);
let k = _mm256_permute2x128_si256::<{ shuffle(3, 2, 0, 1) }>(g, h);
let l = _mm256_permute2x128_si256::<{ shuffle(0, 3, 2, 1) }>(g, h);
let m = _mm256_blend_epi32::<0b1111_0000>(i, j);
let n = _mm256_blend_epi32::<0b1111_0000>(k, l);
// Store
// Use streaming instructions to prevent polluting the cache?
_mm256_storeu_si256(tmp.as_mut_ptr().cast(), m);
_mm256_storeu_si256(tmp[32..].as_mut_ptr().cast(), n);
*offset += 64;
}
/// Clamp values between 0 and 255
///
/// This function clamps all values in `reg` to be between 0 and 255
///( the accepted values for RGB)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
unsafe fn clamp_avx(reg: __m256i) -> __m256i {
// the lowest value
let min_s = _mm256_set1_epi16(0);
// Highest value
let max_s = _mm256_set1_epi16(255);
| let max_v = _mm256_max_epi16(reg, min_s); //max(a,0)
let min_v = _mm256_min_epi16(max_v, max_s); //min(max(a,0),255) | random_line_split | |
avx.rs | offset: &mut usize
) {
// Load output buffer
let tmp: &mut [u8; 48] = out
.get_mut(*offset..*offset + 48)
.expect("Slice to small cannot write")
.try_into()
.unwrap();
let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr);
let mut j = 0;
let mut i = 0;
while i < 48 {
tmp[i] = r.array[j] as u8;
tmp[i + 1] = g.array[j] as u8;
tmp[i + 2] = b.array[j] as u8;
i += 3;
j += 1;
}
*offset += 48;
}
/// Baseline implementation of YCBCR to RGB for avx,
///
/// It uses integer operations as opposed to floats, the approximation is
/// difficult for the eye to see, but this means that it may produce different
/// values with libjpeg_turbo. if accuracy is of utmost importance, use that.
///
/// this function should be called for most implementations, including
/// - ycbcr->rgb
/// - ycbcr->rgba
/// - ycbcr->brga
/// - ycbcr->rgbx
#[inline]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "avx")]
unsafe fn ycbcr_to_rgb_baseline(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16]
) -> (YmmRegister, YmmRegister, YmmRegister) {
// Load values into a register
//
// dst[127:0] := MEM[loaddr+127:loaddr]
// dst[255:128] := MEM[hiaddr+127:hiaddr]
let y_c = _mm256_loadu_si256(y.as_ptr().cast());
let cb_c = _mm256_loadu_si256(cb.as_ptr().cast());
let cr_c = _mm256_loadu_si256(cr.as_ptr().cast());
// AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb
// Cb = Cb-128;
let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128));
// cr = Cb -128;
let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128));
// Calculate Y->R
// r = Y + 45 * Cr / 32
// 45*cr
let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r);
// r1>>5
let r2 = _mm256_srai_epi16::<5>(r1);
//y+r2
let r = YmmRegister {
mm256: clamp_avx(_mm256_add_epi16(y_c, r2))
};
// g = Y - (11 * Cb + 23 * Cr) / 32 ;
// 11*cb
let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r);
// 23*cr
let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r);
//(11
//(11 * Cb + 23 * Cr)
let g3 = _mm256_add_epi16(g1, g2);
// (11 * Cb + 23 * Cr) / 32
let g4 = _mm256_srai_epi16::<5>(g3);
// Y - (11 * Cb + 23 * Cr) / 32 ;
let g = YmmRegister {
mm256: clamp_avx(_mm256_sub_epi16(y_c, g4))
};
// b = Y + 113 * Cb / 64
// 113 * cb
let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r);
//113 * Cb / 64
let b2 = _mm256_srai_epi16::<6>(b1);
// b = Y + 113 * Cb / 64 ;
let b = YmmRegister {
mm256: clamp_avx(_mm256_add_epi16(b2, y_c))
};
return (r, g, b);
}
#[inline]
#[target_feature(enable = "avx2")]
/// A baseline implementation of YCbCr to RGB conversion which does not carry
/// out clamping
///
/// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion
/// routines
unsafe fn ycbcr_to_rgb_baseline_no_clamp(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16]
) -> (__m256i, __m256i, __m256i) {
// Load values into a register
//
let y_c = _mm256_loadu_si256(y.as_ptr().cast());
let cb_c = _mm256_loadu_si256(cb.as_ptr().cast());
let cr_c = _mm256_loadu_si256(cr.as_ptr().cast());
// AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb
// Cb = Cb-128;
let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128));
// cr = Cb -128;
let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128));
// Calculate Y->R
// r = Y + 45 * Cr / 32
// 45*cr
let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r);
// r1>>5
let r2 = _mm256_srai_epi16::<5>(r1);
//y+r2
let r = _mm256_add_epi16(y_c, r2);
// g = Y - (11 * Cb + 23 * Cr) / 32 ;
// 11*cb
let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r);
// 23*cr
let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r);
//(11
//(11 * Cb + 23 * Cr)
let g3 = _mm256_add_epi16(g1, g2);
// (11 * Cb + 23 * Cr) / 32
let g4 = _mm256_srai_epi16::<5>(g3);
// Y - (11 * Cb + 23 * Cr) / 32 ;
let g = _mm256_sub_epi16(y_c, g4);
// b = Y + 113 * Cb / 64
// 113 * cb
let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r);
//113 * Cb / 64
let b2 = _mm256_srai_epi16::<6>(b1);
// b = Y + 113 * Cb / 64 ;
let b = _mm256_add_epi16(b2, y_c);
return (r, g, b);
}
#[inline(always)]
pub fn ycbcr_to_rgba_avx2(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize
) | {
unsafe {
ycbcr_to_rgba_unsafe(y, cb, cr, out, offset);
}
} | identifier_body | |
avx.rs | = out
.get_mut(*offset..*offset + 48)
.expect("Slice to small cannot write")
.try_into()
.unwrap();
let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr);
let mut j = 0;
let mut i = 0;
while i < 48 {
tmp[i] = r.array[j] as u8;
tmp[i + 1] = g.array[j] as u8;
tmp[i + 2] = b.array[j] as u8;
i += 3;
j += 1;
}
*offset += 48;
}
/// Baseline implementation of YCBCR to RGB for avx,
///
/// It uses integer operations as opposed to floats, the approximation is
/// difficult for the eye to see, but this means that it may produce different
/// values with libjpeg_turbo. if accuracy is of utmost importance, use that.
///
/// this function should be called for most implementations, including
/// - ycbcr->rgb
/// - ycbcr->rgba
/// - ycbcr->brga
/// - ycbcr->rgbx
#[inline]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "avx")]
unsafe fn ycbcr_to_rgb_baseline(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16]
) -> (YmmRegister, YmmRegister, YmmRegister) {
// Load values into a register
//
// dst[127:0] := MEM[loaddr+127:loaddr]
// dst[255:128] := MEM[hiaddr+127:hiaddr]
let y_c = _mm256_loadu_si256(y.as_ptr().cast());
let cb_c = _mm256_loadu_si256(cb.as_ptr().cast());
let cr_c = _mm256_loadu_si256(cr.as_ptr().cast());
// AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb
// Cb = Cb-128;
let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128));
// cr = Cb -128;
let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128));
// Calculate Y->R
// r = Y + 45 * Cr / 32
// 45*cr
let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r);
// r1>>5
let r2 = _mm256_srai_epi16::<5>(r1);
//y+r2
let r = YmmRegister {
mm256: clamp_avx(_mm256_add_epi16(y_c, r2))
};
// g = Y - (11 * Cb + 23 * Cr) / 32 ;
// 11*cb
let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r);
// 23*cr
let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r);
//(11
//(11 * Cb + 23 * Cr)
let g3 = _mm256_add_epi16(g1, g2);
// (11 * Cb + 23 * Cr) / 32
let g4 = _mm256_srai_epi16::<5>(g3);
// Y - (11 * Cb + 23 * Cr) / 32 ;
let g = YmmRegister {
mm256: clamp_avx(_mm256_sub_epi16(y_c, g4))
};
// b = Y + 113 * Cb / 64
// 113 * cb
let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r);
//113 * Cb / 64
let b2 = _mm256_srai_epi16::<6>(b1);
// b = Y + 113 * Cb / 64 ;
let b = YmmRegister {
mm256: clamp_avx(_mm256_add_epi16(b2, y_c))
};
return (r, g, b);
}
#[inline]
#[target_feature(enable = "avx2")]
/// A baseline implementation of YCbCr to RGB conversion which does not carry
/// out clamping
///
/// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion
/// routines
unsafe fn ycbcr_to_rgb_baseline_no_clamp(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16]
) -> (__m256i, __m256i, __m256i) {
// Load values into a register
//
let y_c = _mm256_loadu_si256(y.as_ptr().cast());
let cb_c = _mm256_loadu_si256(cb.as_ptr().cast());
let cr_c = _mm256_loadu_si256(cr.as_ptr().cast());
// AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb
// Cb = Cb-128;
let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128));
// cr = Cb -128;
let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128));
// Calculate Y->R
// r = Y + 45 * Cr / 32
// 45*cr
let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r);
// r1>>5
let r2 = _mm256_srai_epi16::<5>(r1);
//y+r2
let r = _mm256_add_epi16(y_c, r2);
// g = Y - (11 * Cb + 23 * Cr) / 32 ;
// 11*cb
let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r);
// 23*cr
let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r);
//(11
//(11 * Cb + 23 * Cr)
let g3 = _mm256_add_epi16(g1, g2);
// (11 * Cb + 23 * Cr) / 32
let g4 = _mm256_srai_epi16::<5>(g3);
// Y - (11 * Cb + 23 * Cr) / 32 ;
let g = _mm256_sub_epi16(y_c, g4);
// b = Y + 113 * Cb / 64
// 113 * cb
let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r);
//113 * Cb / 64
let b2 = _mm256_srai_epi16::<6>(b1);
// b = Y + 113 * Cb / 64 ;
let b = _mm256_add_epi16(b2, y_c);
return (r, g, b);
}
#[inline(always)]
pub fn ycbcr_to_rgba_avx2(
y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize
) {
unsafe {
ycbcr_to_rgba_unsafe(y, cb, cr, out, offset);
}
}
#[inline]
#[target_feature(enable = "avx2")]
#[rustfmt::skip]
unsafe fn | ycbcr_to_rgba_unsafe | identifier_name | |
run.js | (scam) {
let abusereport = stripIndents`I would like to inform you of suspicious activities at the domain ${url.parse(scam.url).hostname}
${'ip' in scam ? `located at IP address ${scam['ip']}`: ''}.
${'subcategory' in scam && scam.subcategory == "NanoWallet" ?
`The domain is impersonating NanoWallet.io, a website where people can create
Nano wallets (a cryptocurrency like Bitcoin).` : ''}
${'category' in scam && scam.category == "Fake ICO" ?
`The domain is impersonating a website where an ICO is being held (initial coin offering, like
an initial public offering but it's for cryptocurrencies)` : ''}
${'category' in scam && scam.category == "Phishing" ?
`The attackers wish to steal funds by using phishing to get the victim's private keys (passwords to a wallet)
and using them to send funds to their own wallets.` : ''}
${'category' in scam && scam.category == "Fake ICO" ?
`The attackers wish to steal funds by cloning the real website and changing the XRB address so
people will send funds to the attackers' address instead of the real address.` : ''}
Please shut down this domain so further attacks will be prevented.`
return abusereport
}
/* Start the web server */
function startWebServer() {
app.use(express.static('_static')); // Serve all static pages first
app.use('/screenshot', express.static('_cache/screenshots/')); // Serve all screenshots
app.use(bodyParser.json({ strict: true })) // to support JSON-encoded bodies
app.get('/(/|index.html)?', async function (_req, res) { // Serve index.html
res.send(await helpers.layout('index', {}))
})
app.get('/search/', async function (_req, res) { // Serve /search/
const verified = [].concat((await getCache()).verified)
const sorted = verified.sort(function (a, b) {
return a.name.localeCompare(b.name)
})
const table = sorted.map((url) => {
if ('featured' in url && url.featured) {
// TODO: put the verified images here
/*if (
await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".png") ||
await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".svg")
) {
table += "<tr><td><img class='project icon' src='/img/" + url.name.toLowerCase().replace(' ', '') + ".png'>" + url.name + "</td><td><a target='_blank' href='" + url.url + "'>" + url.url + "</a></td></tr>";
} else {*/
//helpers.rollbar.warn(`Warning: No verified icon was found for ${url.name}`);
return `<tr>
<td>${url.name}</td>
<td><a target="_blank" href="${url.url}">${url.url}</a></td>
</tr>`
//}
}
return null
}).filter((s) => s).join('')
res.send(await helpers.layout('search', {
'trusted.table': table,
'page.title': 'Search for scam sites, scammers addresses and scam ips'
}))
})
app.get('/faq/', async function (_req, res) { // Serve /faq/
res.send(await helpers.layout('faq', {
'page.title': 'FAQ'
}))
})
// Serve /report/, /report/domain/, and /report/address/ or /report/domain/fake-mycrypto.com
app.get('/report/:type?/:value?', async function (req, res, next) {
let value = ''
if (req.params.value) {
value = safeHtml`${req.params.value}`
}
switch (`${req.params.type}`) {
case 'address':
res.send(await helpers.layout('reportaddress', { 'page.placeholder': value }))
break
case 'domain':
res.send(await helpers.layout('reportdomain', { 'page.placeholder': value }))
break
default:
if (!req.params.type) {
res.send(await helpers.layout('report', {}))
} else {
return next(new Error(`Request type ${req.params.type}`))
}
}
})
// Serve /scams/
app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) {
const MAX_RESULTS_PER_PAGE = 30
const scams = [].concat((await getCache()).scams)
const currentDirection = `${req.params.direction}` === 'ascending' ? 'ascending' : 'descending'
let direction = {
category: '',
subcategory: '',
status: '',
title: '',
}
let sorting = {
category: '',
subcategory: '',
status: '',
title: ''
}
switch (`${req.params.sorting}`) {
case 'category':
sorting.category = 'sorted'
direction.category = currentDirection
scams.sort(function (a, b) {
if ('category' in a && 'category' in b && a.category && b.category) {
return a.category.localeCompare(b.category)
} else {
return -1
}
})
break
case 'subcategory':
sorting.subcategory = 'sorted'
direction.subcategory = currentDirection
scams.sort(function (a, b) {
if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) {
return a.subcategory.localeCompare(b.subcategory)
} else {
return -1
}
})
break
case 'title':
sorting.title = 'sorted'
direction.title = currentDirection
scams.sort(function (a, b) {
return a.name.localeCompare(b.name)
})
break
case 'status':
sorting.status = 'sorted'
direction.status = currentDirection
scams.sort(function (a, b) {
if ('status' in a && 'status' in b) {
if ((a.status == 'Active' && b.status != 'Active') || (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) || (a.status == 'Suspended' && b.status == 'Offline')) {
return -1
} else if (a.status == b.status) {
return 0
} else {
return 1
}
} else {
return 1
}
})
break
default:
if (!req.params.sorting) {
scams.sort(function (a, b) {
return b.id - a.id
})
} else {
return next(new Error(`Invalid sorting "${req.params.sorting}"`))
}
}
if (currentDirection === 'descending') {
scams.reverse()
}
let addresses = {}
var intActiveScams = 0
var intInactiveScams = 0
scams.forEach(function (scam) {
if ('addresses' in scam) {
scam.addresses.forEach(function (address) {
addresses[address] = true
})
}
if ('status' in scam) {
if (scam.status === 'Active') {
++intActiveScams
} else {
++intInactiveScams
}
}
})
let max = MAX_RESULTS_PER_PAGE
let start = 0
let pagination = []
const page = +req.params.page || 1
if (req.params.page == "all") {
max = scams.length
} else if (page) {
max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE
start = (page - 1) * MAX_RESULTS_PER_PAGE
}
const paginate = req.params.sorting ? `/${req.params.sorting}/${currentDirection}` : ''
const table = scams.slice(start, max).map((scam) => {
let status = '<td>None</td>'
let category = scam.category || '<i class="remove icon"></i> None'
let subcategory = scam.subcategory || '<i class="remove icon"></i> None'
if ('status' in scam) {
switch (scam.status) {
case 'Active':
status = "<td class='offline'><i class='warning sign icon'></i> Active</td>"
break
case 'Inactive':
status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>"
break
case 'Offline':
status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>"
break
case 'Suspended':
status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>"
break
}
}
if ('category' in scam) {
switch (scam.category) {
case "Phishing":
category = '<i class="address book icon"></i> Phishing'
break
case "Scamming":
category = '<i class="payment icon"></i> Scamming'
break
case "Fake ICO":
category = '<i class="dollar icon"></i> Fake ICO'
break
}
}
if ('subcategory' in scam && scam | generateAbuseReport | identifier_name | |
run.js | /report/address/ or /report/domain/fake-mycrypto.com
app.get('/report/:type?/:value?', async function (req, res, next) {
let value = ''
if (req.params.value) {
value = safeHtml`${req.params.value}`
}
switch (`${req.params.type}`) {
case 'address':
res.send(await helpers.layout('reportaddress', { 'page.placeholder': value }))
break
case 'domain':
res.send(await helpers.layout('reportdomain', { 'page.placeholder': value }))
break
default:
if (!req.params.type) {
res.send(await helpers.layout('report', {}))
} else {
return next(new Error(`Request type ${req.params.type}`))
}
}
})
// Serve /scams/
app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) {
const MAX_RESULTS_PER_PAGE = 30
const scams = [].concat((await getCache()).scams)
const currentDirection = `${req.params.direction}` === 'ascending' ? 'ascending' : 'descending'
let direction = {
category: '',
subcategory: '',
status: '',
title: '',
}
let sorting = {
category: '',
subcategory: '',
status: '',
title: ''
}
switch (`${req.params.sorting}`) {
case 'category':
sorting.category = 'sorted'
direction.category = currentDirection
scams.sort(function (a, b) {
if ('category' in a && 'category' in b && a.category && b.category) {
return a.category.localeCompare(b.category)
} else {
return -1
}
})
break
case 'subcategory':
sorting.subcategory = 'sorted'
direction.subcategory = currentDirection
scams.sort(function (a, b) {
if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) {
return a.subcategory.localeCompare(b.subcategory)
} else {
return -1
}
})
break
case 'title':
sorting.title = 'sorted'
direction.title = currentDirection
scams.sort(function (a, b) {
return a.name.localeCompare(b.name)
})
break
case 'status':
sorting.status = 'sorted'
direction.status = currentDirection
scams.sort(function (a, b) {
if ('status' in a && 'status' in b) {
if ((a.status == 'Active' && b.status != 'Active') || (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) || (a.status == 'Suspended' && b.status == 'Offline')) {
return -1
} else if (a.status == b.status) {
return 0
} else {
return 1
}
} else {
return 1
}
})
break
default:
if (!req.params.sorting) {
scams.sort(function (a, b) {
return b.id - a.id
})
} else {
return next(new Error(`Invalid sorting "${req.params.sorting}"`))
}
}
if (currentDirection === 'descending') {
scams.reverse()
}
| var intActiveScams = 0
var intInactiveScams = 0
scams.forEach(function (scam) {
if ('addresses' in scam) {
scam.addresses.forEach(function (address) {
addresses[address] = true
})
}
if ('status' in scam) {
if (scam.status === 'Active') {
++intActiveScams
} else {
++intInactiveScams
}
}
})
let max = MAX_RESULTS_PER_PAGE
let start = 0
let pagination = []
const page = +req.params.page || 1
if (req.params.page == "all") {
max = scams.length
} else if (page) {
max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE
start = (page - 1) * MAX_RESULTS_PER_PAGE
}
const paginate = req.params.sorting ? `/${req.params.sorting}/${currentDirection}` : ''
const table = scams.slice(start, max).map((scam) => {
let status = '<td>None</td>'
let category = scam.category || '<i class="remove icon"></i> None'
let subcategory = scam.subcategory || '<i class="remove icon"></i> None'
if ('status' in scam) {
switch (scam.status) {
case 'Active':
status = "<td class='offline'><i class='warning sign icon'></i> Active</td>"
break
case 'Inactive':
status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>"
break
case 'Offline':
status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>"
break
case 'Suspended':
status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>"
break
}
}
if ('category' in scam) {
switch (scam.category) {
case "Phishing":
category = '<i class="address book icon"></i> Phishing'
break
case "Scamming":
category = '<i class="payment icon"></i> Scamming'
break
case "Fake ICO":
category = '<i class="dollar icon"></i> Fake ICO'
break
}
}
if ('subcategory' in scam && scam.subcategory) {
const sub = scam.subcategory.toLowerCase().replace(/\s/g, '')
if (sub == "wallets") {
subcategory = `<i class="credit card alternative icon"></i> ${scam.subcategory}`
}
// TODO: put icons here
/*else if (fs.existsSync(`_static/img/${sub}.png`)) {
subcategory = `<img
src="/img/${scams[i].subcategory.toLowerCase().replace(/\s/g, '')}.png"
class="subcategoryicon"> ${scams[i].subcategory}`;
} else {
subcategory = scams[i].subcategory
if (!(icon_warnings.includes(subcategory))) {
icon_warnings.push(subcategory)
}
}*/
}
let name = scam.name
if (name.length > 40) {
name = name.substring(0, 40) + '...'
}
return `<tr>
<td>${category}</td>
<td>${subcategory}</td>
${status}
<td>${name}</td>
<td class="center">
<a href='/scam/${scam.id}'><i class='search icon'></i></a>
</td>
</tr>`
}).join('')
if (req.params.page !== "all") {
let arrLoop = [-2, 3]
if (page == 0) {
arrLoop = [1, 6]
} else if (page == 1) {
arrLoop = [0, 5]
} else if (page == 2) {
arrLoop = [-1, 4]
}
for (let i = arrLoop[0]; i < arrLoop[1]; i++) {
let intPageNumber = (page + Number(i))
let strItemClass = "item"
let strHref = `/scams/${intPageNumber}${paginate}`
if ((intPageNumber > (scams.length) / MAX_RESULTS_PER_PAGE) || (intPageNumber < 1)) {
strItemClass = "disabled item"
strHref = "#"
} else if (page == intPageNumber) {
strItemClass = "active item"
}
pagination.push(`<a
href="${strHref}"
class="${strItemClass}">${intPageNumber}</a>`)
}
if (page > 3) {
pagination.unshift(`<a
class="item"
href="/scams/1${paginate}">
<i class="angle double left icon"></i>
</a>`)
}
if (page < Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 3) {
pagination.push(`<a
class="item"
href="/scams/${(Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 1)}${paginate}">
<i class='angle double right icon'></i>
</a>`
)
}
}
res.send(await helpers.layout('scams', {
'sorting.category.direction': direction.category,
'sorting.subcategory.direction': direction.subcategory,
'sorting.status.direction': direction.status,
'sorting.title.direction': direction.title,
'sorting.category': sorting.category,
'sorting.subcategory': sorting.subcategory,
'sorting.status': sorting.status,
'sorting.title': sorting.title,
'scams.total': scams.length.toLocaleString('en-US'),
'scams.active': intActiveScams.toLocaleString('en-US'),
'addresses.total': Object.keys(addresses).length.toLocaleString('en-US'),
'scams.inactive': intInactiveScams.toLocaleString('en-US'),
| let addresses = {}
| random_line_split |
run.js | /report/address/ or /report/domain/fake-mycrypto.com
app.get('/report/:type?/:value?', async function (req, res, next) {
let value = ''
if (req.params.value) {
value = safeHtml`${req.params.value}`
}
switch (`${req.params.type}`) {
case 'address':
res.send(await helpers.layout('reportaddress', { 'page.placeholder': value }))
break
case 'domain':
res.send(await helpers.layout('reportdomain', { 'page.placeholder': value }))
break
default:
if (!req.params.type) {
res.send(await helpers.layout('report', {}))
} else {
return next(new Error(`Request type ${req.params.type}`))
}
}
})
// Serve /scams/
app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) {
const MAX_RESULTS_PER_PAGE = 30
const scams = [].concat((await getCache()).scams)
const currentDirection = `${req.params.direction}` === 'ascending' ? 'ascending' : 'descending'
let direction = {
category: '',
subcategory: '',
status: '',
title: '',
}
let sorting = {
category: '',
subcategory: '',
status: '',
title: ''
}
switch (`${req.params.sorting}`) {
case 'category':
sorting.category = 'sorted'
direction.category = currentDirection
scams.sort(function (a, b) {
if ('category' in a && 'category' in b && a.category && b.category) | else {
return -1
}
})
break
case 'subcategory':
sorting.subcategory = 'sorted'
direction.subcategory = currentDirection
scams.sort(function (a, b) {
if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) {
return a.subcategory.localeCompare(b.subcategory)
} else {
return -1
}
})
break
case 'title':
sorting.title = 'sorted'
direction.title = currentDirection
scams.sort(function (a, b) {
return a.name.localeCompare(b.name)
})
break
case 'status':
sorting.status = 'sorted'
direction.status = currentDirection
scams.sort(function (a, b) {
if ('status' in a && 'status' in b) {
if ((a.status == 'Active' && b.status != 'Active') || (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) || (a.status == 'Suspended' && b.status == 'Offline')) {
return -1
} else if (a.status == b.status) {
return 0
} else {
return 1
}
} else {
return 1
}
})
break
default:
if (!req.params.sorting) {
scams.sort(function (a, b) {
return b.id - a.id
})
} else {
return next(new Error(`Invalid sorting "${req.params.sorting}"`))
}
}
if (currentDirection === 'descending') {
scams.reverse()
}
let addresses = {}
var intActiveScams = 0
var intInactiveScams = 0
scams.forEach(function (scam) {
if ('addresses' in scam) {
scam.addresses.forEach(function (address) {
addresses[address] = true
})
}
if ('status' in scam) {
if (scam.status === 'Active') {
++intActiveScams
} else {
++intInactiveScams
}
}
})
let max = MAX_RESULTS_PER_PAGE
let start = 0
let pagination = []
const page = +req.params.page || 1
if (req.params.page == "all") {
max = scams.length
} else if (page) {
max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE
start = (page - 1) * MAX_RESULTS_PER_PAGE
}
const paginate = req.params.sorting ? `/${req.params.sorting}/${currentDirection}` : ''
const table = scams.slice(start, max).map((scam) => {
let status = '<td>None</td>'
let category = scam.category || '<i class="remove icon"></i> None'
let subcategory = scam.subcategory || '<i class="remove icon"></i> None'
if ('status' in scam) {
switch (scam.status) {
case 'Active':
status = "<td class='offline'><i class='warning sign icon'></i> Active</td>"
break
case 'Inactive':
status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>"
break
case 'Offline':
status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>"
break
case 'Suspended':
status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>"
break
}
}
if ('category' in scam) {
switch (scam.category) {
case "Phishing":
category = '<i class="address book icon"></i> Phishing'
break
case "Scamming":
category = '<i class="payment icon"></i> Scamming'
break
case "Fake ICO":
category = '<i class="dollar icon"></i> Fake ICO'
break
}
}
if ('subcategory' in scam && scam.subcategory) {
const sub = scam.subcategory.toLowerCase().replace(/\s/g, '')
if (sub == "wallets") {
subcategory = `<i class="credit card alternative icon"></i> ${scam.subcategory}`
}
// TODO: put icons here
/*else if (fs.existsSync(`_static/img/${sub}.png`)) {
subcategory = `<img
src="/img/${scams[i].subcategory.toLowerCase().replace(/\s/g, '')}.png"
class="subcategoryicon"> ${scams[i].subcategory}`;
} else {
subcategory = scams[i].subcategory
if (!(icon_warnings.includes(subcategory))) {
icon_warnings.push(subcategory)
}
}*/
}
let name = scam.name
if (name.length > 40) {
name = name.substring(0, 40) + '...'
}
return `<tr>
<td>${category}</td>
<td>${subcategory}</td>
${status}
<td>${name}</td>
<td class="center">
<a href='/scam/${scam.id}'><i class='search icon'></i></a>
</td>
</tr>`
}).join('')
if (req.params.page !== "all") {
let arrLoop = [-2, 3]
if (page == 0) {
arrLoop = [1, 6]
} else if (page == 1) {
arrLoop = [0, 5]
} else if (page == 2) {
arrLoop = [-1, 4]
}
for (let i = arrLoop[0]; i < arrLoop[1]; i++) {
let intPageNumber = (page + Number(i))
let strItemClass = "item"
let strHref = `/scams/${intPageNumber}${paginate}`
if ((intPageNumber > (scams.length) / MAX_RESULTS_PER_PAGE) || (intPageNumber < 1)) {
strItemClass = "disabled item"
strHref = "#"
} else if (page == intPageNumber) {
strItemClass = "active item"
}
pagination.push(`<a
href="${strHref}"
class="${strItemClass}">${intPageNumber}</a>`)
}
if (page > 3) {
pagination.unshift(`<a
class="item"
href="/scams/1${paginate}">
<i class="angle double left icon"></i>
</a>`)
}
if (page < Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 3) {
pagination.push(`<a
class="item"
href="/scams/${(Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 1)}${paginate}">
<i class='angle double right icon'></i>
</a>`
)
}
}
res.send(await helpers.layout('scams', {
'sorting.category.direction': direction.category,
'sorting.subcategory.direction': direction.subcategory,
'sorting.status.direction': direction.status,
'sorting.title.direction': direction.title,
'sorting.category': sorting.category,
'sorting.subcategory': sorting.subcategory,
'sorting.status': sorting.status,
'sorting.title': sorting.title,
'scams.total': scams.length.toLocaleString('en-US'),
'scams.active': intActiveScams.toLocaleString('en-US'),
'addresses.total': Object.keys(addresses).length.toLocaleString('en-US'),
'scams.inactive': intInactiveScams.toLocaleString('en-US | {
return a.category.localeCompare(b.category)
} | conditional_block |
run.js | Please shut down this domain so further attacks will be prevented.`
return abusereport
}
/* Start the web server */
function startWebServer() {
app.use(express.static('_static')); // Serve all static pages first
app.use('/screenshot', express.static('_cache/screenshots/')); // Serve all screenshots
app.use(bodyParser.json({ strict: true })) // to support JSON-encoded bodies
app.get('/(/|index.html)?', async function (_req, res) { // Serve index.html
res.send(await helpers.layout('index', {}))
})
app.get('/search/', async function (_req, res) { // Serve /search/
const verified = [].concat((await getCache()).verified)
const sorted = verified.sort(function (a, b) {
return a.name.localeCompare(b.name)
})
const table = sorted.map((url) => {
if ('featured' in url && url.featured) {
// TODO: put the verified images here
/*if (
await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".png") ||
await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".svg")
) {
table += "<tr><td><img class='project icon' src='/img/" + url.name.toLowerCase().replace(' ', '') + ".png'>" + url.name + "</td><td><a target='_blank' href='" + url.url + "'>" + url.url + "</a></td></tr>";
} else {*/
//helpers.rollbar.warn(`Warning: No verified icon was found for ${url.name}`);
return `<tr>
<td>${url.name}</td>
<td><a target="_blank" href="${url.url}">${url.url}</a></td>
</tr>`
//}
}
return null
}).filter((s) => s).join('')
res.send(await helpers.layout('search', {
'trusted.table': table,
'page.title': 'Search for scam sites, scammers addresses and scam ips'
}))
})
app.get('/faq/', async function (_req, res) { // Serve /faq/
res.send(await helpers.layout('faq', {
'page.title': 'FAQ'
}))
})
// Serve /report/, /report/domain/, and /report/address/ or /report/domain/fake-mycrypto.com
app.get('/report/:type?/:value?', async function (req, res, next) {
let value = ''
if (req.params.value) {
value = safeHtml`${req.params.value}`
}
switch (`${req.params.type}`) {
case 'address':
res.send(await helpers.layout('reportaddress', { 'page.placeholder': value }))
break
case 'domain':
res.send(await helpers.layout('reportdomain', { 'page.placeholder': value }))
break
default:
if (!req.params.type) {
res.send(await helpers.layout('report', {}))
} else {
return next(new Error(`Request type ${req.params.type}`))
}
}
})
// Serve /scams/
app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) {
const MAX_RESULTS_PER_PAGE = 30
const scams = [].concat((await getCache()).scams)
const currentDirection = `${req.params.direction}` === 'ascending' ? 'ascending' : 'descending'
let direction = {
category: '',
subcategory: '',
status: '',
title: '',
}
let sorting = {
category: '',
subcategory: '',
status: '',
title: ''
}
switch (`${req.params.sorting}`) {
case 'category':
sorting.category = 'sorted'
direction.category = currentDirection
scams.sort(function (a, b) {
if ('category' in a && 'category' in b && a.category && b.category) {
return a.category.localeCompare(b.category)
} else {
return -1
}
})
break
case 'subcategory':
sorting.subcategory = 'sorted'
direction.subcategory = currentDirection
scams.sort(function (a, b) {
if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) {
return a.subcategory.localeCompare(b.subcategory)
} else {
return -1
}
})
break
case 'title':
sorting.title = 'sorted'
direction.title = currentDirection
scams.sort(function (a, b) {
return a.name.localeCompare(b.name)
})
break
case 'status':
sorting.status = 'sorted'
direction.status = currentDirection
scams.sort(function (a, b) {
if ('status' in a && 'status' in b) {
if ((a.status == 'Active' && b.status != 'Active') || (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) || (a.status == 'Suspended' && b.status == 'Offline')) {
return -1
} else if (a.status == b.status) {
return 0
} else {
return 1
}
} else {
return 1
}
})
break
default:
if (!req.params.sorting) {
scams.sort(function (a, b) {
return b.id - a.id
})
} else {
return next(new Error(`Invalid sorting "${req.params.sorting}"`))
}
}
if (currentDirection === 'descending') {
scams.reverse()
}
let addresses = {}
var intActiveScams = 0
var intInactiveScams = 0
scams.forEach(function (scam) {
if ('addresses' in scam) {
scam.addresses.forEach(function (address) {
addresses[address] = true
})
}
if ('status' in scam) {
if (scam.status === 'Active') {
++intActiveScams
} else {
++intInactiveScams
}
}
})
let max = MAX_RESULTS_PER_PAGE
let start = 0
let pagination = []
const page = +req.params.page || 1
if (req.params.page == "all") {
max = scams.length
} else if (page) {
max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE
start = (page - 1) * MAX_RESULTS_PER_PAGE
}
const paginate = req.params.sorting ? `/${req.params.sorting}/${currentDirection}` : ''
const table = scams.slice(start, max).map((scam) => {
let status = '<td>None</td>'
let category = scam.category || '<i class="remove icon"></i> None'
let subcategory = scam.subcategory || '<i class="remove icon"></i> None'
if ('status' in scam) {
switch (scam.status) {
case 'Active':
status = "<td class='offline'><i class='warning sign icon'></i> Active</td>"
break
case 'Inactive':
status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>"
break
case 'Offline':
status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>"
break
case 'Suspended':
status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>"
break
}
}
if ('category' in scam) {
switch (scam.category) {
case "Phishing":
category = '<i class="address book icon"></i> Phishing'
break
case "Scamming":
category = '<i class="payment icon"></i> Scamming'
break
case "Fake ICO":
category = '<i class="dollar icon"></i> Fake ICO'
break
}
}
if ('subcategory' in scam && scam.subcategory) {
const sub = | {
let abusereport = stripIndents`I would like to inform you of suspicious activities at the domain ${url.parse(scam.url).hostname}
${'ip' in scam ? `located at IP address ${scam['ip']}`: ''}.
${'subcategory' in scam && scam.subcategory == "NanoWallet" ?
`The domain is impersonating NanoWallet.io, a website where people can create
Nano wallets (a cryptocurrency like Bitcoin).` : ''}
${'category' in scam && scam.category == "Fake ICO" ?
`The domain is impersonating a website where an ICO is being held (initial coin offering, like
an initial public offering but it's for cryptocurrencies)` : ''}
${'category' in scam && scam.category == "Phishing" ?
`The attackers wish to steal funds by using phishing to get the victim's private keys (passwords to a wallet)
and using them to send funds to their own wallets.` : ''}
${'category' in scam && scam.category == "Fake ICO" ?
`The attackers wish to steal funds by cloning the real website and changing the XRB address so
people will send funds to the attackers' address instead of the real address.` : ''}
| identifier_body | |
Index.js | status: '2'
},
filterValue: {
status: [2]
},
searchData: {},
downloadStatus: 0,
agencyTree: null,
}
}
async componentDidMount() {
this.getRechargeDetails();
this.getAgencyTree();
}
getAgencyTree() {
DataAgencys.getTreeData(this.props.match.params.id, (data) => {
this.setState({ agencyTree: data });
}, true);
}
getRechargeDetails() {
const { filterInfo, searchData } = this.state;
const data = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
limit: this.state.pagination.pageSize,
page: this.state.pagination.current,
...filterInfo,
...searchData,
};
this.setState({
loading: true,
})
NetOperation.getRechargeDetails(data).then((res) => {
this.setState({
loading: false,
dataSource: res.data.rows,
pagination: res.data.pagination
});
}).catch((e) => {
message.error(e.msg);
});
}
onCallBack = (filterData) => {
this.state.pagination.current = 1;
this.setState({
// filterValue: null,
// filterInfo: null,
searchData: filterData,
}, () => {
this.getRechargeDetails();
});
}
onClose = () => {
this.props.history.push(this.props.match.url);
}
open(id) {
this.props.history.push(`${this.props.match.url}/${id}`);
}
handleTableChange = (pagination, filters, sorter) => {
const state = this.state;
const _page = state.pagination;
if (pagination.current != _page.current) {
_page.current = pagination.current;
}
let objInfo = state.filterInfo || {};
if (filters.pay_channel && filters.pay_channel.length) {
objInfo.pay_channel = filters.pay_channel.join(',');
}
if (filters.status && filters.status.length) {
objInfo.status = filters.status.join(',');
}
if (filters.is_internal_staff && filters.is_internal_staff.length == 1) {
objInfo.is_internal_staff = filters.is_internal_staff.join(',');
} else {
objInfo.is_internal_staff = '';
}
this.setState({
loading: true,
filterValue: filters,
filterInfo: objInfo
}, () => {
this.getRechargeDetails();
});
}
exportAlert = () => {
this.setState({ downloadStatus: 1 });
Modal.confirm({
title: '确认提示',
content: '确定导出当前筛选数据的Excel表格吗?',
width: '450px',
centered: true,
onOk: () => {
this.exportDetails();
},
onCancel: () => {
this.setState({ downloadStatus: 0 });
},
});
}
exportDetails() {
const state = this.state;
| = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
...state.filterInfo,
...state.searchData
};
this.setState({ downloadStatus: 2 });
NetOperation.exportRecharge(data).then((res) => {
const items = res.data;
if (items && items.id) {
this.downloadExcelFile(items.id);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
downloadExcelFile = (id) => {
NetOperation.downloadExcelFile(id).then((res) => {
const items = res.data;
if (items && items.path) {
this.setState({ downloadStatus: 0 });
window.location.href = '/' + items.path;
} else {
window.setTimeout((e) => {
this.downloadExcelFile(id);
}, 500);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
creatColumns(state) {
const columns = [
{
title: '流水号',
dataIndex: 'order_number',
fixed: 'left',
width: 210
}, {
title: '三方单号',
dataIndex: 'serial_number',
fixed: 'left',
width: 320,
render: data => {
if (data.trim()) {
return data;
}
return '-';
}
}, {
title: '交易金额',
width: 100,
align: 'right',
render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100)
}, {
title: '交易时间',
dataIndex: 'create_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '支付时间',
dataIndex: 'update_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '客户',
key: 'is_internal_staff',
width: 150,
filteredValue: (state.filterValue ? state.filterValue.is_internal_staff : []),
filters: [
{ text: '正式客户', value: '0' },
{ text: '测试客户', value: '1' }
],
render: data => {
const customer_name = data.customer_name || '-';
let is_internal_staff = '';
if (data.is_internal_staff) {
is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>;
}
return <Fragment>
<a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a>
<div>{is_internal_staff}</div>
</Fragment>
}
}, {
title: '所属机构',
dataIndex: 'agency_id',
width: 150,
render: data => {
return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) });
}
}, {
title: '支付方式',
dataIndex: 'pay_channel',
width: 140,
filteredValue: (state.filterValue ? state.filterValue.pay_channel : []),
filters: [
{ text: '银行转账', value: 1 },
{ text: '微信支付', value: 2 },
{ text: '支付宝支付', value: 3 },
{ text: '易宝支付', value: 4 },
{ text: '苹果支付', value: 5 },
{ text: '连连支付', value: 6 },
{ text: '汇潮支付', value: 7 },
{ text: '双乾-支付宝', value: 10 },
{ text: '易票联支付', value: 15 },
{ text: '优畅-支付宝', value: 18 },
{ text: '优畅-微信', value: 19 },
{ text: '乾易付-支付宝', value: 30 },
{ text: '乾易付-微信', value: 31 },
{ text: '汇付支付', value: 35 },
{ text: '汇德汇付-支付宝', value: 36 },
{ text: '汇德汇付-微信', value: 37 }
],
render: (data) => {
switch(data) {
case 1: return '银行转账';
case 2: return '微信支付';
case 3: return '支付宝支付';
case 4: return '易宝支付';
case 5: return '苹果支付';
case 6: return '连连支付';
case 7: return '汇潮支付';
case 10: return '双乾-支付宝';
case 15: return '易票联支付';
case 18: return '优畅-支付宝';
case 19: return '优畅-微信';
case 30: return '乾易付-支付宝';
case 31: return '乾易付-微信';
case 35: return '汇付支付';
case 36: return '汇德汇付-支付宝';
case 37: return '汇德汇付-微信';
default: return '-';
}
}
}, {
title: '支付状态',
dataIndex: 'status',
width: 110,
filteredValue: (state.filterValue ? state | const data | identifier_name |
Index.js | status: '2'
},
filterValue: {
status: [2]
},
searchData: {},
downloadStatus: 0,
agencyTree: null,
}
}
async componentDidMount() {
this | encyTree() {
DataAgencys.getTreeData(this.props.match.params.id, (data) => {
this.setState({ agencyTree: data });
}, true);
}
getRechargeDetails() {
const { filterInfo, searchData } = this.state;
const data = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
limit: this.state.pagination.pageSize,
page: this.state.pagination.current,
...filterInfo,
...searchData,
};
this.setState({
loading: true,
})
NetOperation.getRechargeDetails(data).then((res) => {
this.setState({
loading: false,
dataSource: res.data.rows,
pagination: res.data.pagination
});
}).catch((e) => {
message.error(e.msg);
});
}
onCallBack = (filterData) => {
this.state.pagination.current = 1;
this.setState({
// filterValue: null,
// filterInfo: null,
searchData: filterData,
}, () => {
this.getRechargeDetails();
});
}
onClose = () => {
this.props.history.push(this.props.match.url);
}
open(id) {
this.props.history.push(`${this.props.match.url}/${id}`);
}
handleTableChange = (pagination, filters, sorter) => {
const state = this.state;
const _page = state.pagination;
if (pagination.current != _page.current) {
_page.current = pagination.current;
}
let objInfo = state.filterInfo || {};
if (filters.pay_channel && filters.pay_channel.length) {
objInfo.pay_channel = filters.pay_channel.join(',');
}
if (filters.status && filters.status.length) {
objInfo.status = filters.status.join(',');
}
if (filters.is_internal_staff && filters.is_internal_staff.length == 1) {
objInfo.is_internal_staff = filters.is_internal_staff.join(',');
} else {
objInfo.is_internal_staff = '';
}
this.setState({
loading: true,
filterValue: filters,
filterInfo: objInfo
}, () => {
this.getRechargeDetails();
});
}
exportAlert = () => {
this.setState({ downloadStatus: 1 });
Modal.confirm({
title: '确认提示',
content: '确定导出当前筛选数据的Excel表格吗?',
width: '450px',
centered: true,
onOk: () => {
this.exportDetails();
},
onCancel: () => {
this.setState({ downloadStatus: 0 });
},
});
}
exportDetails() {
const state = this.state;
const data = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
...state.filterInfo,
...state.searchData
};
this.setState({ downloadStatus: 2 });
NetOperation.exportRecharge(data).then((res) => {
const items = res.data;
if (items && items.id) {
this.downloadExcelFile(items.id);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
downloadExcelFile = (id) => {
NetOperation.downloadExcelFile(id).then((res) => {
const items = res.data;
if (items && items.path) {
this.setState({ downloadStatus: 0 });
window.location.href = '/' + items.path;
} else {
window.setTimeout((e) => {
this.downloadExcelFile(id);
}, 500);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
creatColumns(state) {
const columns = [
{
title: '流水号',
dataIndex: 'order_number',
fixed: 'left',
width: 210
}, {
title: '三方单号',
dataIndex: 'serial_number',
fixed: 'left',
width: 320,
render: data => {
if (data.trim()) {
return data;
}
return '-';
}
}, {
title: '交易金额',
width: 100,
align: 'right',
render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100)
}, {
title: '交易时间',
dataIndex: 'create_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '支付时间',
dataIndex: 'update_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '客户',
key: 'is_internal_staff',
width: 150,
filteredValue: (state.filterValue ? state.filterValue.is_internal_staff : []),
filters: [
{ text: '正式客户', value: '0' },
{ text: '测试客户', value: '1' }
],
render: data => {
const customer_name = data.customer_name || '-';
let is_internal_staff = '';
if (data.is_internal_staff) {
is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>;
}
return <Fragment>
<a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a>
<div>{is_internal_staff}</div>
</Fragment>
}
}, {
title: '所属机构',
dataIndex: 'agency_id',
width: 150,
render: data => {
return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) });
}
}, {
title: '支付方式',
dataIndex: 'pay_channel',
width: 140,
filteredValue: (state.filterValue ? state.filterValue.pay_channel : []),
filters: [
{ text: '银行转账', value: 1 },
{ text: '微信支付', value: 2 },
{ text: '支付宝支付', value: 3 },
{ text: '易宝支付', value: 4 },
{ text: '苹果支付', value: 5 },
{ text: '连连支付', value: 6 },
{ text: '汇潮支付', value: 7 },
{ text: '双乾-支付宝', value: 10 },
{ text: '易票联支付', value: 15 },
{ text: '优畅-支付宝', value: 18 },
{ text: '优畅-微信', value: 19 },
{ text: '乾易付-支付宝', value: 30 },
{ text: '乾易付-微信', value: 31 },
{ text: '汇付支付', value: 35 },
{ text: '汇德汇付-支付宝', value: 36 },
{ text: '汇德汇付-微信', value: 37 }
],
render: (data) => {
switch(data) {
case 1: return '银行转账';
case 2: return '微信支付';
case 3: return '支付宝支付';
case 4: return '易宝支付';
case 5: return '苹果支付';
case 6: return '连连支付';
case 7: return '汇潮支付';
case 10: return '双乾-支付宝';
case 15: return '易票联支付';
case 18: return '优畅-支付宝';
case 19: return '优畅-微信';
case 30: return '乾易付-支付宝';
case 31: return '乾易付-微信';
case 35: return '汇付支付';
case 36: return '汇德汇付-支付宝';
case 37: return '汇德汇付-微信';
default: return '-';
}
}
}, {
title: '支付状态',
dataIndex: 'status',
width: 110,
filteredValue: (state.filterValue ? state | .getRechargeDetails();
this.getAgencyTree();
}
getAg | identifier_body |
Index.js | status: '2'
},
filterValue: {
status: [2]
},
searchData: {},
downloadStatus: 0,
agencyTree: null,
}
}
async componentDidMount() {
this.getRechargeDetails();
this.getAgencyTree();
}
getAgencyTree() {
DataAgencys.getTreeData(this.props.match.params.id, (data) => {
this.setState({ agencyTree: data });
}, true);
}
getRechargeDetails() {
const { filterInfo, searchData } = this.state;
const data = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
limit: this.state.pagination.pageSize,
page: this.state.pagination.current,
...filterInfo,
...searchData,
};
this.setState({
loading: true,
})
NetOperation.getRechargeDetails(data).then((res) => {
this.setState({
loading: false,
dataSource: res.data.rows,
pagination: res.data.pagination
});
}).catch((e) => {
message.error(e.msg);
});
}
onCallBack = (filterData) => {
this.state.pagination.current = 1;
this.setState({
// filterValue: null,
// filterInfo: null,
searchData: filterData,
}, () => {
this.getRechargeDetails();
});
}
onClose = () => {
this.props.history.push(this.props.match.url);
}
open(id) {
this.props.history.push(`${this.props.match.url}/${id}`);
}
handleTableChange = (pagination, filters, sorter) => {
const state = this.state;
const _page = state.pagination;
if (pagination.current != _page.current) {
_page.current = pagination.current;
}
let objInfo = state.filterInfo || {};
if (filters.pay_channel && filters.pay_channel.length) {
objInfo.pay_channel = filters.pay_channel.join(',');
}
if (filters.status && filters.status.length) {
objInfo.status = filters.status.join(',');
}
if (filters.is_internal_staff && filters.is_internal_staff.length == 1) {
objInfo.is_internal_staff = filters.is_internal_staff.join(',');
} else {
objInfo.is_internal_staff = '';
}
this.setState({
loading: true,
filterValue: filters,
filterInfo: objInfo
}, () => {
this.getRechargeDetails();
});
}
exportAlert = () => {
this.setState({ downloadStatus: 1 });
Modal.confirm({
title: '确认提示',
content: '确定导出当前筛选数据的Excel表格吗?',
width: '450px',
centered: true,
onOk: () => {
this.exportDetails();
},
onCancel: () => {
this.setState({ downloadStatus: 0 });
},
});
}
exportDetails() {
const state = this.state;
const data = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
...state.filterInfo,
...state.searchData
};
this.setState({ downloadStatus: 2 });
NetOperation.exportRecharge(data).then((res) => {
const items = res.data;
if (items && items.id) {
this.downloadExcelFile(items.id);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
downloadExcelFile = (id) => {
NetOperation.downloadExcelFile(id).then((res) => {
const items = res.data;
if (items && items.path) {
this.setState({ downloadStatus: 0 });
window.location.href = '/' + items.path;
} else {
window.setTimeout((e) => {
this.downloadExcelFile(id);
}, 500);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
creatColumns(state) {
const columns = [
{
title: '流水号',
dataIndex: 'order_number',
fixed: 'left',
width: 210
}, {
title: '三方单号',
dataIndex: 'serial_number',
fixed: 'left',
width: 320, | }
}, {
title: '交易金额',
width: 100,
align: 'right',
render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100)
}, {
title: '交易时间',
dataIndex: 'create_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '支付时间',
dataIndex: 'update_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '客户',
key: 'is_internal_staff',
width: 150,
filteredValue: (state.filterValue ? state.filterValue.is_internal_staff : []),
filters: [
{ text: '正式客户', value: '0' },
{ text: '测试客户', value: '1' }
],
render: data => {
const customer_name = data.customer_name || '-';
let is_internal_staff = '';
if (data.is_internal_staff) {
is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>;
}
return <Fragment>
<a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a>
<div>{is_internal_staff}</div>
</Fragment>
}
}, {
title: '所属机构',
dataIndex: 'agency_id',
width: 150,
render: data => {
return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) });
}
}, {
title: '支付方式',
dataIndex: 'pay_channel',
width: 140,
filteredValue: (state.filterValue ? state.filterValue.pay_channel : []),
filters: [
{ text: '银行转账', value: 1 },
{ text: '微信支付', value: 2 },
{ text: '支付宝支付', value: 3 },
{ text: '易宝支付', value: 4 },
{ text: '苹果支付', value: 5 },
{ text: '连连支付', value: 6 },
{ text: '汇潮支付', value: 7 },
{ text: '双乾-支付宝', value: 10 },
{ text: '易票联支付', value: 15 },
{ text: '优畅-支付宝', value: 18 },
{ text: '优畅-微信', value: 19 },
{ text: '乾易付-支付宝', value: 30 },
{ text: '乾易付-微信', value: 31 },
{ text: '汇付支付', value: 35 },
{ text: '汇德汇付-支付宝', value: 36 },
{ text: '汇德汇付-微信', value: 37 }
],
render: (data) => {
switch(data) {
case 1: return '银行转账';
case 2: return '微信支付';
case 3: return '支付宝支付';
case 4: return '易宝支付';
case 5: return '苹果支付';
case 6: return '连连支付';
case 7: return '汇潮支付';
case 10: return '双乾-支付宝';
case 15: return '易票联支付';
case 18: return '优畅-支付宝';
case 19: return '优畅-微信';
case 30: return '乾易付-支付宝';
case 31: return '乾易付-微信';
case 35: return '汇付支付';
case 36: return '汇德汇付-支付宝';
case 37: return '汇德汇付-微信';
default: return '-';
}
}
}, {
title: '支付状态',
dataIndex: 'status',
width: 110,
filteredValue: (state.filterValue ? state | render: data => {
if (data.trim()) {
return data;
}
return '-'; | random_line_split |
Index.js | status: '2'
},
filterValue: {
status: [2]
},
searchData: {},
downloadStatus: 0,
agencyTree: null,
}
}
async componentDidMount() {
this.getRechargeDetails();
this.getAgencyTree();
}
getAgencyTree() {
DataAgencys.getTreeData(this.props.match.params.id, (data) => {
this.setState({ agencyTree: data });
}, true);
}
getRechargeDetails() {
const { filterInfo, searchData } = this.state;
const data = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
limit: this.state.pagination.pageSize,
page: this.state.pagination.current,
...filterInfo,
...searchData,
};
this.setState({
loading: true,
})
NetOperation.getRechargeDetails(data).then((res) => {
this.setState({
loading: false,
dataSource: res.data.rows,
pagination: res.data.pagination
});
}).catch((e) => {
message.error(e.msg);
});
}
onCallBack = (filterData) => {
this.state.pagination.current = 1;
this.setState({
// filterValue: null,
// filterInfo: null,
searchData: filterData,
}, () => {
this.getRechargeDetails();
});
}
onClose = () => {
this.props.history.push(this.props.match.url);
}
open(id) {
this.props.history.push(`${this.props.match.url}/${id}`);
}
handleTableChange = (pagination, filters, sorter) => {
const state = this.state;
const _page = state.pagination;
if (pagination.current != _page.current) {
_page.current = pagination.current;
}
let objInfo = state.filterInfo || {};
if (filters.pay_channel && filters.pay_channel.length) {
objInfo.pay_channel = filters.pay_channel.join(',');
}
if (filters.status && filters.status.length) {
objInfo.status = filters.status.join(',');
}
if (filters.is_internal_staff && filters.is_internal_staff.length == 1) {
objInfo.is_internal_staff = filters.is_internal_staff.join(',');
} else {
objInfo.is_internal_staff = '';
}
this.setState({
loading: true,
filterValue: filters,
filterInfo: objInfo
}, () => {
this.getRechargeDetails();
});
}
exportAlert = () => {
this.setState({ downloadStatus: 1 });
Modal.confirm({
title: '确认提示',
content: '确定导出当前筛选数据的Excel表格吗?',
width: '450px',
centered: true,
onOk: () => {
this.exportDetails();
},
onCancel: () => {
this.setState({ downloadStatus: 0 });
},
});
}
exportDetails() {
const state = this.state;
const data = {
time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`,
...state.filterInfo,
...state.searchData
};
this.setState({ downloadStatus: 2 });
NetOperation.exportRecharge(data).then((res) => {
const items = res.data;
if (items && items.id) {
this.downloadExcelFile(items.id);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
downloadExcelFile = (id) => {
NetOperation.downloadExcelFile(id).then((res) => {
const items = res.data;
if (items && items.path) {
this.setState({ downloadStatus: 0 });
window.location.href = '/' + items.path;
} else {
window.setTimeout((e) => {
this.downloadExcelFile(id);
}, 500);
}
}).catch((err) => {
this.setState({ downloadStatus: 0 });
if (err.msg) {
message.error(err.msg);
}
});
}
| mns = [
{
title: '流水号',
dataIndex: 'order_number',
fixed: 'left',
width: 210
}, {
title: '三方单号',
dataIndex: 'serial_number',
fixed: 'left',
width: 320,
render: data => {
if (data.trim()) {
return data;
}
return '-';
}
}, {
title: '交易金额',
width: 100,
align: 'right',
render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100)
}, {
title: '交易时间',
dataIndex: 'create_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '支付时间',
dataIndex: 'update_time',
width: 130,
render: data => {
if (data) {
return moment.unix(data).format('YYYY-MM-DD HH:mm');
}
return '-';
}
}, {
title: '客户',
key: 'is_internal_staff',
width: 150,
filteredValue: (state.filterValue ? state.filterValue.is_internal_staff : []),
filters: [
{ text: '正式客户', value: '0' },
{ text: '测试客户', value: '1' }
],
render: data => {
const customer_name = data.customer_name || '-';
let is_internal_staff = '';
if (data.is_internal_staff) {
is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>;
}
return <Fragment>
<a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a>
<div>{is_internal_staff}</div>
</Fragment>
}
}, {
title: '所属机构',
dataIndex: 'agency_id',
width: 150,
render: data => {
return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) });
}
}, {
title: '支付方式',
dataIndex: 'pay_channel',
width: 140,
filteredValue: (state.filterValue ? state.filterValue.pay_channel : []),
filters: [
{ text: '银行转账', value: 1 },
{ text: '微信支付', value: 2 },
{ text: '支付宝支付', value: 3 },
{ text: '易宝支付', value: 4 },
{ text: '苹果支付', value: 5 },
{ text: '连连支付', value: 6 },
{ text: '汇潮支付', value: 7 },
{ text: '双乾-支付宝', value: 10 },
{ text: '易票联支付', value: 15 },
{ text: '优畅-支付宝', value: 18 },
{ text: '优畅-微信', value: 19 },
{ text: '乾易付-支付宝', value: 30 },
{ text: '乾易付-微信', value: 31 },
{ text: '汇付支付', value: 35 },
{ text: '汇德汇付-支付宝', value: 36 },
{ text: '汇德汇付-微信', value: 37 }
],
render: (data) => {
switch(data) {
case 1: return '银行转账';
case 2: return '微信支付';
case 3: return '支付宝支付';
case 4: return '易宝支付';
case 5: return '苹果支付';
case 6: return '连连支付';
case 7: return '汇潮支付';
case 10: return '双乾-支付宝';
case 15: return '易票联支付';
case 18: return '优畅-支付宝';
case 19: return '优畅-微信';
case 30: return '乾易付-支付宝';
case 31: return '乾易付-微信';
case 35: return '汇付支付';
case 36: return '汇德汇付-支付宝';
case 37: return '汇德汇付-微信';
default: return '-';
}
}
}, {
title: '支付状态',
dataIndex: 'status',
width: 110,
filteredValue: (state.filterValue ? | creatColumns(state) {
const colu | conditional_block |
day_06.rs | ///
/// This view is partial - the actual grid extends infinitely in all directions.
/// Using the Manhattan distance, each location's closest coordinate can be
/// determined, shown here in lowercase:
///
/// aaaaa.cccc
/// aAaaa.cccc
/// aaaddecccc
/// aadddeccCc
/// ..dDdeeccc
/// bb.deEeecc
/// bBb.eeee..
/// bbb.eeefff
/// bbb.eeffff
/// bbb.ffffFf
///
/// Locations shown as . are equally far from two or more coordinates, and so
/// they don't count as being closest to any.
///
/// In this example, the areas of coordinates A, B, C, and F are infinite -
/// while not shown here, their areas extend forever outside the visible grid.
/// However, the areas of coordinates D and E are finite: D is closest to 9
/// locations, and E is closest to 17 (both including the coordinate's location
/// itself). Therefore, in this example, the size of the largest area is 17.
///
/// What is the size of the largest area that isn't infinite?
///
/// --- Part Two ---
///
/// On the other hand, if the coordinates are safe, maybe the best you can do is
/// try to find a region near as many coordinates as possible.
///
/// For example, suppose you want the sum of the Manhattan distance to all of
/// the coordinates to be less than 32. For each location, add up the distances
/// to all of the given coordinates; if the total of those distances is less
/// than 32, that location is within the desired region. Using the same
/// coordinates as above, the resulting region looks like this:
///
/// ..........
/// .A........
/// ..........
/// ...###..C.
/// ..#D###...
/// ..###E#...
/// .B.###....
/// ..........
/// ..........
/// ........F.
///
/// In particular, consider the highlighted location 4,3 located at the top
/// middle of the region. Its calculation is as follows, where abs() is the
/// absolute value function:
///
/// Distance to coordinate A: abs(4-1) + abs(3-1) = 5
/// Distance to coordinate B: abs(4-1) + abs(3-6) = 6
/// Distance to coordinate C: abs(4-8) + abs(3-3) = 4
/// Distance to coordinate D: abs(4-3) + abs(3-4) = 2
/// Distance to coordinate E: abs(4-5) + abs(3-5) = 3
/// Distance to coordinate F: abs(4-8) + abs(3-9) = 10
/// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30
///
/// Because the total distance to all coordinates (30) is less than 32, the
/// location is within the region.
///
/// This region, which also includes coordinates D and E, has a total size of
/// 16.
///
/// Your actual region will need to be much larger than this example, though,
/// instead including all locations with a total distance of less than 10000.
///
/// What is the size of the region containing all locations which have a total
/// distance to all given coordinates of less than 10000?
use regex::Regex;
use std::collections::{HashMap, HashSet};
use std::cmp::Ordering;
use std::i32;
type Point = (i32, i32);
type Grid = HashMap<Point, usize>;
#[derive(Debug, PartialEq, Eq)]
struct Range {
min: i32,
max: i32,
}
#[derive(Debug, PartialEq, Eq)]
struct Bounds {
x: Range,
y: Range,
}
pub fn run() | .unwrap();
println!("The biggest non-infinite area size is: {}", biggest_area_size);
let concentrated_area = count_points_below(&points, &bounds, 10_000);
println!("The size of the area that have a total distance less than \
10.000 is: {}", concentrated_area);
}
fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid {
let mut grid = HashMap::new();
for x in bounds.x.min..=bounds.x.max {
for y in bounds.y.min..=bounds.y.max {
let point = (x, y);
match closest_point(&point, points) {
Some(area_number) => grid.insert(point, area_number),
None => None,
};
}
}
grid
}
fn count_points_below(points: &Vec<Point>, bounds: &Bounds, treshold: i32) -> i32 {
let mut count = 0;
for x in bounds.x.min..=bounds.x.max {
for y in bounds.y.min..=bounds.y.max {
let point = (x, y);
if total_distance(&point, points) < treshold {
count += 1;
};
}
}
count
}
fn create_bounds(points: &Vec<Point>) -> Bounds {
let x_min = points.iter()
.map(|(x, _)| x)
.min()
.unwrap();
let x_max = points.iter()
.map(|(x, _)| x)
.max()
.unwrap();
let x_range = Range {min:*x_min, max:*x_max};
let y_min = points.iter()
.map(|(_, y)| y)
.min()
.unwrap();
let y_max = points.iter()
.map(|(_, y)| y)
.max()
.unwrap();
let y_range = Range {min:*y_min, max:*y_max};
Bounds {x:x_range, y:y_range}
}
fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 {
(x1 - x2).abs() + (y1 - y2).abs()
}
fn total_distance(reference_point: &Point, points: &Vec<Point>) -> i32 {
points.iter()
.map(|point| distance(reference_point, point))
.sum()
}
fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> {
let (index, _) = points.iter()
.map(|point| distance(reference_point, point))
.enumerate()
.fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| {
match minimum.cmp(&new_value) {
Ordering::Greater => (Some(new_index), new_value),
Ordering::Less => (some_index, minimum),
Ordering::Equal => (None, minimum),
}
});
index
}
fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool {
if (bounds.x.min == x || bounds.x.max == x) &&
(bounds.y.min <= y && bounds.y.max >= y) {
return true
}
if (bounds.y.min == y || bounds.y.max == y) &&
(bounds.x.min <= x && bounds.x.max >= x) {
return true
}
false
}
fn parse_input(input: &str) -> Vec<Point> {
input.lines()
.filter_map(|line| convert_line(line))
.collect()
}
fn convert_line(line: &str) -> Option<Point> {
lazy_static! {
static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap();
}
let captures = RE.captures(line).unwrap();
match (captures.get(1), captures.get(2)) {
(Some(x), Some(y)) =>
Some((x.as_str().parse().unwrap(),
y.as_str().parse().unwrap())),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_input() {
let input =
"1, 1\n\
1, 6\n\
8, 3\n\
3, 4\n\
5, 5\n\
8, 9";
let output: Vec<Point> = vec![
(1, 1),
(1, 6),
(8, 3),
(3, 4),
(5, 5),
(8, 9)];
assert_eq!(parse_input(input), output);
}
#[test]
fn test_create_bounds() {
let input: Vec<Point> = vec![
(0, | {
let points = parse_input(include_str!("../input/day_06.txt"));
let bounds = create_bounds(&points);
let grid = create_grid(&points, &bounds);
let mut areas = HashMap::new();
let mut infinite_areas = HashSet::new();
for (point, area_number) in grid.iter() {
if on_bounds(point,&bounds) {
infinite_areas.insert(*area_number);
areas.remove(area_number);
}
if !infinite_areas.contains(area_number) {
*areas.entry(area_number).or_insert(0) += 1;
}
}
let biggest_area_size = areas.values()
.max() | identifier_body |
day_06.rs | For example, suppose you want the sum of the Manhattan distance to all of
/// the coordinates to be less than 32. For each location, add up the distances
/// to all of the given coordinates; if the total of those distances is less
/// than 32, that location is within the desired region. Using the same
/// coordinates as above, the resulting region looks like this:
///
/// ..........
/// .A........
/// ..........
/// ...###..C.
/// ..#D###...
/// ..###E#...
/// .B.###....
/// ..........
/// ..........
/// ........F.
///
/// In particular, consider the highlighted location 4,3 located at the top
/// middle of the region. Its calculation is as follows, where abs() is the
/// absolute value function:
///
/// Distance to coordinate A: abs(4-1) + abs(3-1) = 5
/// Distance to coordinate B: abs(4-1) + abs(3-6) = 6
/// Distance to coordinate C: abs(4-8) + abs(3-3) = 4
/// Distance to coordinate D: abs(4-3) + abs(3-4) = 2
/// Distance to coordinate E: abs(4-5) + abs(3-5) = 3
/// Distance to coordinate F: abs(4-8) + abs(3-9) = 10
/// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30
///
/// Because the total distance to all coordinates (30) is less than 32, the
/// location is within the region.
///
/// This region, which also includes coordinates D and E, has a total size of
/// 16.
///
/// Your actual region will need to be much larger than this example, though,
/// instead including all locations with a total distance of less than 10000.
///
/// What is the size of the region containing all locations which have a total
/// distance to all given coordinates of less than 10000?
use regex::Regex;
use std::collections::{HashMap, HashSet};
use std::cmp::Ordering;
use std::i32;
type Point = (i32, i32);
type Grid = HashMap<Point, usize>;
#[derive(Debug, PartialEq, Eq)]
struct Range {
min: i32,
max: i32,
}
#[derive(Debug, PartialEq, Eq)]
struct Bounds {
x: Range,
y: Range,
}
pub fn run() {
let points = parse_input(include_str!("../input/day_06.txt"));
let bounds = create_bounds(&points);
let grid = create_grid(&points, &bounds);
let mut areas = HashMap::new();
let mut infinite_areas = HashSet::new();
for (point, area_number) in grid.iter() {
if on_bounds(point,&bounds) {
infinite_areas.insert(*area_number);
areas.remove(area_number);
}
if !infinite_areas.contains(area_number) {
*areas.entry(area_number).or_insert(0) += 1;
}
}
let biggest_area_size = areas.values()
.max()
.unwrap();
println!("The biggest non-infinite area size is: {}", biggest_area_size);
let concentrated_area = count_points_below(&points, &bounds, 10_000);
println!("The size of the area that have a total distance less than \
10.000 is: {}", concentrated_area);
}
fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid {
let mut grid = HashMap::new();
for x in bounds.x.min..=bounds.x.max {
for y in bounds.y.min..=bounds.y.max {
let point = (x, y);
match closest_point(&point, points) {
Some(area_number) => grid.insert(point, area_number),
None => None,
};
}
}
grid
}
fn count_points_below(points: &Vec<Point>, bounds: &Bounds, treshold: i32) -> i32 {
let mut count = 0;
for x in bounds.x.min..=bounds.x.max {
for y in bounds.y.min..=bounds.y.max {
let point = (x, y);
if total_distance(&point, points) < treshold {
count += 1;
};
}
}
count
}
fn create_bounds(points: &Vec<Point>) -> Bounds {
let x_min = points.iter()
.map(|(x, _)| x)
.min()
.unwrap();
let x_max = points.iter()
.map(|(x, _)| x)
.max()
.unwrap();
let x_range = Range {min:*x_min, max:*x_max};
let y_min = points.iter()
.map(|(_, y)| y)
.min()
.unwrap();
let y_max = points.iter()
.map(|(_, y)| y)
.max()
.unwrap();
let y_range = Range {min:*y_min, max:*y_max};
Bounds {x:x_range, y:y_range}
}
fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 {
(x1 - x2).abs() + (y1 - y2).abs()
}
fn total_distance(reference_point: &Point, points: &Vec<Point>) -> i32 {
points.iter()
.map(|point| distance(reference_point, point))
.sum()
}
fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> {
let (index, _) = points.iter()
.map(|point| distance(reference_point, point))
.enumerate()
.fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| {
match minimum.cmp(&new_value) {
Ordering::Greater => (Some(new_index), new_value),
Ordering::Less => (some_index, minimum),
Ordering::Equal => (None, minimum),
}
});
index
}
fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool {
if (bounds.x.min == x || bounds.x.max == x) &&
(bounds.y.min <= y && bounds.y.max >= y) {
return true
}
if (bounds.y.min == y || bounds.y.max == y) &&
(bounds.x.min <= x && bounds.x.max >= x) {
return true
}
false
}
fn parse_input(input: &str) -> Vec<Point> {
input.lines()
.filter_map(|line| convert_line(line))
.collect()
}
fn convert_line(line: &str) -> Option<Point> {
lazy_static! {
static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap();
}
let captures = RE.captures(line).unwrap();
match (captures.get(1), captures.get(2)) {
(Some(x), Some(y)) =>
Some((x.as_str().parse().unwrap(),
y.as_str().parse().unwrap())),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_input() {
let input =
"1, 1\n\
1, 6\n\
8, 3\n\
3, 4\n\
5, 5\n\
8, 9";
let output: Vec<Point> = vec![
(1, 1),
(1, 6),
(8, 3),
(3, 4),
(5, 5),
(8, 9)];
assert_eq!(parse_input(input), output);
}
#[test]
fn test_create_bounds() {
let input: Vec<Point> = vec![
(0, 0),
(1, 1),
(3, 2)];
let x_range = Range {min:0, max:3};
let y_range = Range {min:0, max:2};
let output = Bounds {x:x_range, y:y_range};
assert_eq!(create_bounds(&input), output);
}
#[test]
fn test_create_grid() {
let input: Vec<Point> = vec![
(0, 0),
(2, 2)];
let x_range = Range {min:0, max:2};
let y_range = Range {min:0, max:2};
let bounds = Bounds {x:x_range, y:y_range};
let mut output = HashMap::new();
output.insert((0, 0), 0);
output.insert((1, 0), 0);
output.insert((0, 1), 0);
output.insert((2, 1), 1);
output.insert((1, 2), 1);
output.insert((2, 2), 1);
assert_eq!(create_grid(&input, &bounds), output);
}
#[test] | fn test_on_bounds() {
let x_range = Range {min:0, max:3};
let y_range = Range {min:2, max:6}; | random_line_split | |
day_06.rs | ///
/// This view is partial - the actual grid extends infinitely in all directions.
/// Using the Manhattan distance, each location's closest coordinate can be
/// determined, shown here in lowercase:
///
/// aaaaa.cccc
/// aAaaa.cccc
/// aaaddecccc
/// aadddeccCc
/// ..dDdeeccc
/// bb.deEeecc
/// bBb.eeee..
/// bbb.eeefff
/// bbb.eeffff
/// bbb.ffffFf
///
/// Locations shown as . are equally far from two or more coordinates, and so
/// they don't count as being closest to any.
///
/// In this example, the areas of coordinates A, B, C, and F are infinite -
/// while not shown here, their areas extend forever outside the visible grid.
/// However, the areas of coordinates D and E are finite: D is closest to 9
/// locations, and E is closest to 17 (both including the coordinate's location
/// itself). Therefore, in this example, the size of the largest area is 17.
///
/// What is the size of the largest area that isn't infinite?
///
/// --- Part Two ---
///
/// On the other hand, if the coordinates are safe, maybe the best you can do is
/// try to find a region near as many coordinates as possible.
///
/// For example, suppose you want the sum of the Manhattan distance to all of
/// the coordinates to be less than 32. For each location, add up the distances
/// to all of the given coordinates; if the total of those distances is less
/// than 32, that location is within the desired region. Using the same
/// coordinates as above, the resulting region looks like this:
///
/// ..........
/// .A........
/// ..........
/// ...###..C.
/// ..#D###...
/// ..###E#...
/// .B.###....
/// ..........
/// ..........
/// ........F.
///
/// In particular, consider the highlighted location 4,3 located at the top
/// middle of the region. Its calculation is as follows, where abs() is the
/// absolute value function:
///
/// Distance to coordinate A: abs(4-1) + abs(3-1) = 5
/// Distance to coordinate B: abs(4-1) + abs(3-6) = 6
/// Distance to coordinate C: abs(4-8) + abs(3-3) = 4
/// Distance to coordinate D: abs(4-3) + abs(3-4) = 2
/// Distance to coordinate E: abs(4-5) + abs(3-5) = 3
/// Distance to coordinate F: abs(4-8) + abs(3-9) = 10
/// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30
///
/// Because the total distance to all coordinates (30) is less than 32, the
/// location is within the region.
///
/// This region, which also includes coordinates D and E, has a total size of
/// 16.
///
/// Your actual region will need to be much larger than this example, though,
/// instead including all locations with a total distance of less than 10000.
///
/// What is the size of the region containing all locations which have a total
/// distance to all given coordinates of less than 10000?
use regex::Regex;
use std::collections::{HashMap, HashSet};
use std::cmp::Ordering;
use std::i32;
type Point = (i32, i32);
type Grid = HashMap<Point, usize>;
#[derive(Debug, PartialEq, Eq)]
struct Range {
min: i32,
max: i32,
}
#[derive(Debug, PartialEq, Eq)]
struct Bounds {
x: Range,
y: Range,
}
pub fn run() {
let points = parse_input(include_str!("../input/day_06.txt"));
let bounds = create_bounds(&points);
let grid = create_grid(&points, &bounds);
let mut areas = HashMap::new();
let mut infinite_areas = HashSet::new();
for (point, area_number) in grid.iter() {
if on_bounds(point,&bounds) {
infinite_areas.insert(*area_number);
areas.remove(area_number);
}
if !infinite_areas.contains(area_number) {
*areas.entry(area_number).or_insert(0) += 1;
}
}
let biggest_area_size = areas.values()
.max()
.unwrap();
println!("The biggest non-infinite area size is: {}", biggest_area_size);
let concentrated_area = count_points_below(&points, &bounds, 10_000);
println!("The size of the area that have a total distance less than \
10.000 is: {}", concentrated_area);
}
fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid {
let mut grid = HashMap::new();
for x in bounds.x.min..=bounds.x.max {
for y in bounds.y.min..=bounds.y.max {
let point = (x, y);
match closest_point(&point, points) {
Some(area_number) => grid.insert(point, area_number),
None => None,
};
}
}
grid
}
fn count_points_below(points: &Vec<Point>, bounds: &Bounds, treshold: i32) -> i32 {
let mut count = 0;
for x in bounds.x.min..=bounds.x.max {
for y in bounds.y.min..=bounds.y.max {
let point = (x, y);
if total_distance(&point, points) < treshold {
count += 1;
};
}
}
count
}
fn create_bounds(points: &Vec<Point>) -> Bounds {
let x_min = points.iter()
.map(|(x, _)| x)
.min()
.unwrap();
let x_max = points.iter()
.map(|(x, _)| x)
.max()
.unwrap();
let x_range = Range {min:*x_min, max:*x_max};
let y_min = points.iter()
.map(|(_, y)| y)
.min()
.unwrap();
let y_max = points.iter()
.map(|(_, y)| y)
.max()
.unwrap();
let y_range = Range {min:*y_min, max:*y_max};
Bounds {x:x_range, y:y_range}
}
fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 {
(x1 - x2).abs() + (y1 - y2).abs()
}
fn | (reference_point: &Point, points: &Vec<Point>) -> i32 {
points.iter()
.map(|point| distance(reference_point, point))
.sum()
}
fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> {
let (index, _) = points.iter()
.map(|point| distance(reference_point, point))
.enumerate()
.fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| {
match minimum.cmp(&new_value) {
Ordering::Greater => (Some(new_index), new_value),
Ordering::Less => (some_index, minimum),
Ordering::Equal => (None, minimum),
}
});
index
}
fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool {
if (bounds.x.min == x || bounds.x.max == x) &&
(bounds.y.min <= y && bounds.y.max >= y) {
return true
}
if (bounds.y.min == y || bounds.y.max == y) &&
(bounds.x.min <= x && bounds.x.max >= x) {
return true
}
false
}
fn parse_input(input: &str) -> Vec<Point> {
input.lines()
.filter_map(|line| convert_line(line))
.collect()
}
fn convert_line(line: &str) -> Option<Point> {
lazy_static! {
static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap();
}
let captures = RE.captures(line).unwrap();
match (captures.get(1), captures.get(2)) {
(Some(x), Some(y)) =>
Some((x.as_str().parse().unwrap(),
y.as_str().parse().unwrap())),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_input() {
let input =
"1, 1\n\
1, 6\n\
8, 3\n\
3, 4\n\
5, 5\n\
8, 9";
let output: Vec<Point> = vec![
(1, 1),
(1, 6),
(8, 3),
(3, 4),
(5, 5),
(8, 9)];
assert_eq!(parse_input(input), output);
}
#[test]
fn test_create_bounds() {
let input: Vec<Point> = vec![
(0, | total_distance | identifier_name |
lib.rs | the License.
// We take the "low road" here when returning the structs - we expose the
// items (and arrays of items) as strings, which are JSON. The rust side of
// the world gets serialization and deserialization for free and it makes
// memory management that little bit simpler.
extern crate failure;
extern crate serde_json;
extern crate url;
extern crate reqwest;
#[macro_use] extern crate ffi_toolkit;
extern crate mentat;
extern crate sync15_passwords;
extern crate sync15_adapter as sync;
#[macro_use] extern crate log;
mod error;
use error::{
ExternError,
with_translated_result,
with_translated_value_result,
with_translated_void_result,
with_translated_string_result,
with_translated_opt_string_result,
};
use std::os::raw::{
c_char,
};
use std::sync::{Once, ONCE_INIT};
use ffi_toolkit::string::{
c_char_to_string,
};
pub use ffi_toolkit::memory::{
destroy_c_char,
};
use sync::{
Sync15StorageClient,
Sync15StorageClientInit,
GlobalState,
};
use sync15_passwords::{
passwords,
PasswordEngine,
ServerPassword,
};
/// Cached Sync 1.5 connection state, kept alive between calls to the sync
/// entry point so repeated syncs can reuse the storage client and the
/// downloaded global state instead of rebuilding them each time.
pub struct SyncInfo {
    state: GlobalState,
    client: Sync15StorageClient,
    // Used so that we know whether or not we need to re-initialize `client`
    last_client_init: Sync15StorageClientInit,
}
/// Opaque state object handed across the FFI boundary: the passwords engine
/// plus lazily-created sync state (`None` until the first successful sync
/// setup).
pub struct PasswordState {
    engine: PasswordEngine,
    sync: Option<SyncInfo>,
}
// Raw binding to Android's liblog `__android_log_write`, used by `DevLogger`
// to forward log records to logcat. Only declared on Android builds.
#[cfg(target_os = "android")]
extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> ::std::os::raw::c_int; }
struct DevLogger;
// Development logger: every record is printed to stdout, and on Android it is
// additionally forwarded to logcat via `__android_log_write`.
impl log::Log for DevLogger {
    // Accept everything; verbosity is controlled globally via
    // `log::set_max_level` in `init_logger`.
    fn enabled(&self, _metadata: &log::Metadata) -> bool {
        true
    }

    fn log(&self, record: &log::Record) {
        let message = format!("{}:{} -- {}", record.level(), record.target(), record.args());
        println!("{}", message);
        #[cfg(target_os = "android")]
        {
            // Android log priorities: VERBOSE=2, DEBUG=3, INFO=4, WARN=5, ERROR=6.
            let priority = match record.level() {
                log::Level::Trace => 2,
                log::Level::Debug => 3,
                log::Level::Info => 4,
                log::Level::Warn => 5,
                log::Level::Error => 6,
            };
            let c_message = ::std::ffi::CString::new(message).unwrap();
            let tag = b"RustInternal\0";
            unsafe {
                __android_log_write(priority, tag.as_ptr() as *const c_char, c_message.as_ptr());
            }
        }
        // TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"), ...), maybe windows? (OutputDebugStringA)
    }

    fn flush(&self) {}
}
// Ensures the global logger is installed at most once, even if several FFI
// entry points race on first use.
static INIT_LOGGER: Once = ONCE_INIT;
// Process-wide trait-object handle to the single `DevLogger` instance.
static DEV_LOGGER: &'static log::Log = &DevLogger;
/// Install `DEV_LOGGER` as the global logger at `Trace` verbosity and enable
/// Rust backtraces. Must run at most once — `log::set_logger` fails (and we
/// `unwrap`) on a second call — which callers guarantee via `INIT_LOGGER`.
fn init_logger() {
    log::set_logger(DEV_LOGGER).unwrap();
    log::set_max_level(log::LevelFilter::Trace);
    std::env::set_var("RUST_BACKTRACE", "1");
    info!("Hooked up rust logger!");
}
define_destructor!(sync15_passwords_state_destroy, PasswordState);
// This is probably too many string arguments...
/// FFI constructor: opens the encrypted mentat store at `mentat_db_path` and
/// wraps it in a heap-allocated `PasswordState`. Failures are reported through
/// `error`; the returned pointer must eventually be released with
/// `sync15_passwords_state_destroy`.
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_state_new(
    mentat_db_path: *const c_char,
    encryption_key: *const c_char,
    error: *mut ExternError
) -> *mut PasswordState {
    // Safe to run on every entry: `Once` makes logger installation idempotent.
    INIT_LOGGER.call_once(init_logger);
    with_translated_result(error, || {
        let db_path = c_char_to_string(mentat_db_path);
        let key = c_char_to_string(encryption_key);
        let store = mentat::Store::open_with_key(db_path, key)?;
        Ok(PasswordState {
            engine: PasswordEngine::new(store)?,
            // Sync state is built lazily on the first sync call.
            sync: None,
        })
    })
}
// indirection to help `?` figure out the target error type
fn parse_url(url: &str) -> sync::Result<url::Url> {
Ok(url::Url::parse(url)?)
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_sync(
state: *mut PasswordState,
key_id: *const c_char,
access_token: *const c_char,
sync_key: *const c_char,
tokenserver_url: *const c_char,
error: *mut ExternError
) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
let root_sync_key = sync::KeyBundle::from_ksync_base64(
c_char_to_string(sync_key).into())?;
let requested_init = Sync15StorageClientInit {
key_id: c_char_to_string(key_id).into(),
access_token: c_char_to_string(access_token).into(),
tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?,
};
// TODO: If `to_ready` (or anything else with a ?) fails below, this
// `take()` means we end up with `state.sync.is_none()`, which means the
// next sync will redownload meta/global, crypto/keys, etc. without
// needing to. (AFAICT fixing this requires a change in sync15-adapter,
// since to_ready takes GlobalState as a move, and it's not clear if
// that change even is a good idea).
let mut sync_info = state.sync.take().map(Ok)
.unwrap_or_else(|| -> sync::Result<SyncInfo> {
let state = GlobalState::default();
let client = Sync15StorageClient::new(requested_init.clone())?;
Ok(SyncInfo {
state,
client,
last_client_init: requested_init.clone(),
})
})?;
// If the options passed for initialization of the storage client aren't
// the same as the ones we used last time, reinitialize it. (Note that
// we could avoid the comparison in the case where we had `None` in
// `state.sync` before, but this probably doesn't matter).
if requested_init != sync_info.last_client_init {
sync_info.client = Sync15StorageClient::new(requested_init.clone())?;
sync_info.last_client_init = requested_init;
}
{ // Scope borrow of `sync_info.client`
let mut state_machine =
sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key);
let next_sync_state = state_machine.to_ready(sync_info.state)?;
sync_info.state = next_sync_state;
}
// We don't use a ? on the next line so that even if `state.engine.sync`
// fails, we don't forget the sync_state.
let result = state.engine.sync(&sync_info.client, &sync_info.state);
state.sync = Some(sync_info);
result
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.touch_credential(c_char_to_string(id).into())?;
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool {
with_translated_value_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
let deleted = state.engine.delete_credential(c_char_to_string(id).into())?;
Ok(deleted)
})
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.wipe()?;
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.reset()?;
// XXX We probably need to clear out some things from `state.service`!
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char {
with_translated_string_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
// Type declaration is just to make sure we have the right type (and for documentation)
let passwords: Vec<ServerPassword> = {
let mut in_progress_read = state.engine.store.begin_read()?;
passwords::get_all_sync_passwords(&mut in_progress_read)?
};
let result = serde_json::to_string(&passwords)?;
Ok(result)
})
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut Password | log | identifier_name |
lib.rs | // We take the "low road" here when returning the structs - we expose the
// items (and arrays of items) as strings, which are JSON. The rust side of
// the world gets serialization and deserialization for free and it makes
// memory management that little bit simpler.
extern crate failure;
extern crate serde_json;
extern crate url;
extern crate reqwest;
#[macro_use] extern crate ffi_toolkit;
extern crate mentat;
extern crate sync15_passwords;
extern crate sync15_adapter as sync;
#[macro_use] extern crate log;
mod error;
use error::{
ExternError,
with_translated_result,
with_translated_value_result,
with_translated_void_result,
with_translated_string_result,
with_translated_opt_string_result,
};
use std::os::raw::{
c_char,
};
use std::sync::{Once, ONCE_INIT};
use ffi_toolkit::string::{
c_char_to_string,
};
pub use ffi_toolkit::memory::{
destroy_c_char,
};
use sync::{
Sync15StorageClient,
Sync15StorageClientInit,
GlobalState,
};
use sync15_passwords::{
passwords,
PasswordEngine,
ServerPassword,
};
pub struct SyncInfo {
state: GlobalState,
client: Sync15StorageClient,
// Used so that we know whether or not we need to re-initialize `client`
last_client_init: Sync15StorageClientInit,
}
pub struct PasswordState {
engine: PasswordEngine,
sync: Option<SyncInfo>,
}
#[cfg(target_os = "android")]
extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> ::std::os::raw::c_int; }
struct DevLogger;
impl log::Log for DevLogger {
fn enabled(&self, _: &log::Metadata) -> bool { true }
fn log(&self, record: &log::Record) {
let message = format!("{}:{} -- {}", record.level(), record.target(), record.args());
println!("{}", message);
#[cfg(target_os = "android")]
{
unsafe {
let message = ::std::ffi::CString::new(message).unwrap();
let level_int = match record.level() {
log::Level::Trace => 2,
log::Level::Debug => 3,
log::Level::Info => 4,
log::Level::Warn => 5,
log::Level::Error => 6,
};
let message = message.as_ptr();
let tag = b"RustInternal\0";
__android_log_write(level_int, tag.as_ptr() as *const c_char, message);
}
}
// TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"), ...), maybe windows? (OutputDebugStringA)
}
fn flush(&self) {}
}
static INIT_LOGGER: Once = ONCE_INIT;
static DEV_LOGGER: &'static log::Log = &DevLogger;
fn init_logger() {
log::set_logger(DEV_LOGGER).unwrap();
log::set_max_level(log::LevelFilter::Trace);
std::env::set_var("RUST_BACKTRACE", "1");
info!("Hooked up rust logger!");
}
define_destructor!(sync15_passwords_state_destroy, PasswordState);
// This is probably too many string arguments...
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_state_new(
mentat_db_path: *const c_char,
encryption_key: *const c_char,
error: *mut ExternError
) -> *mut PasswordState {
INIT_LOGGER.call_once(init_logger);
with_translated_result(error, || {
let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path),
c_char_to_string(encryption_key))?;
let engine = PasswordEngine::new(store)?;
Ok(PasswordState {
engine,
sync: None,
})
})
}
// indirection to help `?` figure out the target error type
fn parse_url(url: &str) -> sync::Result<url::Url> {
Ok(url::Url::parse(url)?)
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_sync(
state: *mut PasswordState,
key_id: *const c_char,
access_token: *const c_char,
sync_key: *const c_char,
tokenserver_url: *const c_char,
error: *mut ExternError
) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
let root_sync_key = sync::KeyBundle::from_ksync_base64(
c_char_to_string(sync_key).into())?;
let requested_init = Sync15StorageClientInit {
key_id: c_char_to_string(key_id).into(),
access_token: c_char_to_string(access_token).into(),
tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?,
};
// TODO: If `to_ready` (or anything else with a ?) fails below, this
// `take()` means we end up with `state.sync.is_none()`, which means the
// next sync will redownload meta/global, crypto/keys, etc. without
// needing to. (AFAICT fixing this requires a change in sync15-adapter,
// since to_ready takes GlobalState as a move, and it's not clear if
// that change even is a good idea).
let mut sync_info = state.sync.take().map(Ok)
.unwrap_or_else(|| -> sync::Result<SyncInfo> {
let state = GlobalState::default();
let client = Sync15StorageClient::new(requested_init.clone())?;
Ok(SyncInfo {
state,
client,
last_client_init: requested_init.clone(),
})
})?;
// If the options passed for initialization of the storage client aren't
// the same as the ones we used last time, reinitialize it. (Note that
// we could avoid the comparison in the case where we had `None` in
// `state.sync` before, but this probably doesn't matter).
if requested_init != sync_info.last_client_init {
sync_info.client = Sync15StorageClient::new(requested_init.clone())?;
sync_info.last_client_init = requested_init;
}
{ // Scope borrow of `sync_info.client`
let mut state_machine =
sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key);
let next_sync_state = state_machine.to_ready(sync_info.state)?;
sync_info.state = next_sync_state;
}
// We don't use a ? on the next line so that even if `state.engine.sync`
// fails, we don't forget the sync_state.
let result = state.engine.sync(&sync_info.client, &sync_info.state);
state.sync = Some(sync_info);
result
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.touch_credential(c_char_to_string(id).into())?;
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool {
with_translated_value_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
let deleted = state.engine.delete_credential(c_char_to_string(id).into())?;
Ok(deleted)
})
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.wipe()?;
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.reset()?;
// XXX We probably need to clear out some things from `state.service`!
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char {
with_translated_string_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
// Type declaration is just to make sure we have the right type (and for documentation)
let passwords: Vec<ServerPassword> = {
let mut in_progress_read = state.engine.store.begin_read()?;
passwords::get_all_sync_passwords(&mut in_progress_read)?
};
let result | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
| random_line_split | |
lib.rs | the License.
// We take the "low road" here when returning the structs - we expose the
// items (and arrays of items) as strings, which are JSON. The rust side of
// the world gets serialization and deserialization for free and it makes
// memory management that little bit simpler.
extern crate failure;
extern crate serde_json;
extern crate url;
extern crate reqwest;
#[macro_use] extern crate ffi_toolkit;
extern crate mentat;
extern crate sync15_passwords;
extern crate sync15_adapter as sync;
#[macro_use] extern crate log;
mod error;
use error::{
ExternError,
with_translated_result,
with_translated_value_result,
with_translated_void_result,
with_translated_string_result,
with_translated_opt_string_result,
};
use std::os::raw::{
c_char,
};
use std::sync::{Once, ONCE_INIT};
use ffi_toolkit::string::{
c_char_to_string,
};
pub use ffi_toolkit::memory::{
destroy_c_char,
};
use sync::{
Sync15StorageClient,
Sync15StorageClientInit,
GlobalState,
};
use sync15_passwords::{
passwords,
PasswordEngine,
ServerPassword,
};
pub struct SyncInfo {
state: GlobalState,
client: Sync15StorageClient,
// Used so that we know whether or not we need to re-initialize `client`
last_client_init: Sync15StorageClientInit,
}
pub struct PasswordState {
engine: PasswordEngine,
sync: Option<SyncInfo>,
}
#[cfg(target_os = "android")]
extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> ::std::os::raw::c_int; }
struct DevLogger;
impl log::Log for DevLogger {
fn enabled(&self, _: &log::Metadata) -> bool { true }
fn log(&self, record: &log::Record) {
let message = format!("{}:{} -- {}", record.level(), record.target(), record.args());
println!("{}", message);
#[cfg(target_os = "android")]
{
unsafe {
let message = ::std::ffi::CString::new(message).unwrap();
let level_int = match record.level() {
log::Level::Trace => 2,
log::Level::Debug => 3,
log::Level::Info => 4,
log::Level::Warn => 5,
log::Level::Error => 6,
};
let message = message.as_ptr();
let tag = b"RustInternal\0";
__android_log_write(level_int, tag.as_ptr() as *const c_char, message);
}
}
// TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"), ...), maybe windows? (OutputDebugStringA)
}
fn flush(&self) {}
}
static INIT_LOGGER: Once = ONCE_INIT;
static DEV_LOGGER: &'static log::Log = &DevLogger;
fn init_logger() {
log::set_logger(DEV_LOGGER).unwrap();
log::set_max_level(log::LevelFilter::Trace);
std::env::set_var("RUST_BACKTRACE", "1");
info!("Hooked up rust logger!");
}
define_destructor!(sync15_passwords_state_destroy, PasswordState);
// This is probably too many string arguments...
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_state_new(
mentat_db_path: *const c_char,
encryption_key: *const c_char,
error: *mut ExternError
) -> *mut PasswordState {
INIT_LOGGER.call_once(init_logger);
with_translated_result(error, || {
let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path),
c_char_to_string(encryption_key))?;
let engine = PasswordEngine::new(store)?;
Ok(PasswordState {
engine,
sync: None,
})
})
}
// indirection to help `?` figure out the target error type
fn parse_url(url: &str) -> sync::Result<url::Url> { |
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_sync(
state: *mut PasswordState,
key_id: *const c_char,
access_token: *const c_char,
sync_key: *const c_char,
tokenserver_url: *const c_char,
error: *mut ExternError
) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
let root_sync_key = sync::KeyBundle::from_ksync_base64(
c_char_to_string(sync_key).into())?;
let requested_init = Sync15StorageClientInit {
key_id: c_char_to_string(key_id).into(),
access_token: c_char_to_string(access_token).into(),
tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?,
};
// TODO: If `to_ready` (or anything else with a ?) fails below, this
// `take()` means we end up with `state.sync.is_none()`, which means the
// next sync will redownload meta/global, crypto/keys, etc. without
// needing to. (AFAICT fixing this requires a change in sync15-adapter,
// since to_ready takes GlobalState as a move, and it's not clear if
// that change even is a good idea).
let mut sync_info = state.sync.take().map(Ok)
.unwrap_or_else(|| -> sync::Result<SyncInfo> {
let state = GlobalState::default();
let client = Sync15StorageClient::new(requested_init.clone())?;
Ok(SyncInfo {
state,
client,
last_client_init: requested_init.clone(),
})
})?;
// If the options passed for initialization of the storage client aren't
// the same as the ones we used last time, reinitialize it. (Note that
// we could avoid the comparison in the case where we had `None` in
// `state.sync` before, but this probably doesn't matter).
if requested_init != sync_info.last_client_init {
sync_info.client = Sync15StorageClient::new(requested_init.clone())?;
sync_info.last_client_init = requested_init;
}
{ // Scope borrow of `sync_info.client`
let mut state_machine =
sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key);
let next_sync_state = state_machine.to_ready(sync_info.state)?;
sync_info.state = next_sync_state;
}
// We don't use a ? on the next line so that even if `state.engine.sync`
// fails, we don't forget the sync_state.
let result = state.engine.sync(&sync_info.client, &sync_info.state);
state.sync = Some(sync_info);
result
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.touch_credential(c_char_to_string(id).into())?;
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool {
with_translated_value_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
let deleted = state.engine.delete_credential(c_char_to_string(id).into())?;
Ok(deleted)
})
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.wipe()?;
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) {
with_translated_void_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
state.engine.reset()?;
// XXX We probably need to clear out some things from `state.service`!
Ok(())
});
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char {
with_translated_string_result(error, || {
assert_pointer_not_null!(state);
let state = &mut *state;
// Type declaration is just to make sure we have the right type (and for documentation)
let passwords: Vec<ServerPassword> = {
let mut in_progress_read = state.engine.store.begin_read()?;
passwords::get_all_sync_passwords(&mut in_progress_read)?
};
let result = serde_json::to_string(&passwords)?;
Ok(result)
})
}
#[no_mangle]
pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut |
Ok(url::Url::parse(url)?)
}
| identifier_body |
lucy.go | );
extern cfish_Vector*
GOLUCY_Doc_Field_Names(lucy_Doc *self);
extern cfish_Vector*
(*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self);
extern bool
GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other);
extern bool
(*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other);
extern void
GOLUCY_Doc_Destroy(lucy_Doc *self);
extern void
(*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self);
extern lucy_HitDoc*
GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id);
extern lucy_HitDoc*
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id);
extern void
GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc);
extern void
(*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc);
// C symbols linked into a Go-built package archive are not visible to
// external C code -- but internal code *can* see symbols from outside.
// This allows us to fake up symbol export by assigning values only known
// interally to external symbols during Go package initialization.
static CFISH_INLINE void
GOLUCY_glue_exported_symbols() {
GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init;
GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy;
GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE
= (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8;
GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init;
GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields;
GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size;
GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store;
GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize;
GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize;
GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract;
GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names;
GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals;
GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy;
GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc;
GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc;
}
static uint32_t
S_count_code_points(const char *string, size_t len) {
uint32_t num_code_points = 0;
size_t i = 0;
while (i < len) {
i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])];
++num_code_points;
}
if (i != len) {
CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string);
}
return num_code_points;
}
// Returns the number of code points through the end of the match.
static int
push_token(const char *str, int start, int end, int last_end,
int cp_count, lucy_Inversion *inversion) {
const char *match = str + start;
int match_len = end - start;
int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end);
int cp_end = cp_start + S_count_code_points(match, match_len);
lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1);
LUCY_Inversion_Append(inversion, token);
return cp_end;
}
static void
null_terminate_string(char *string, size_t len) {
string[len] = '\0';
}
*/
import "C"
import "unsafe"
import "fmt"
import "regexp"
import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish"
var registry *objRegistry
func init() {
C.GOLUCY_glue_exported_symbols()
C.lucy_bootstrap_parcel()
registry = newObjRegistry(16)
}
//export GOLUCY_RegexTokenizer_init
func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer {
C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt))))
ivars := C.lucy_RegexTokenizer_IVARS(rt)
ivars.pattern = C.CFISH_Str_Clone(pattern)
var patternGo string
if pattern == nil {
patternGo = "\\w+(?:['\\x{2019}]\\w+)*"
} else {
patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern))
}
rx, err := regexp.Compile(patternGo)
if err != nil {
panic(err)
}
rxID := registry.store(rx)
ivars.token_re = unsafe.Pointer(rxID)
return rt
}
//export GOLUCY_RegexTokenizer_Destroy
func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re)
registry.delete(rxID)
C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER)
}
//export GOLUCY_RegexTokenizer_Tokenize_Utf8
func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char,
stringLen C.size_t, inversion *C.lucy_Inversion) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re)
rx, ok := registry.fetch(rxID).(*regexp.Regexp)
if !ok {
mess := fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s",
rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern)))
panic(clownfish.NewErr(mess))
}
buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen))
found := rx.FindAllIndex(buf, int(stringLen))
lastEnd := 0
cpCount := 0
for _, startEnd := range found {
cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]),
C.int(lastEnd), C.int(cpCount), inversion))
lastEnd = startEnd[1]
}
}
func | (docID int32) Doc {
retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID))
return WRAPDoc(unsafe.Pointer(retvalCF))
}
//export GOLUCY_Doc_init
func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc {
ivars := C.lucy_Doc_IVARS(d)
if fields != nil {
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
} else {
ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0))
}
ivars.doc_id = docID
return d
}
//export GOLUCY_Doc_Set_Fields
func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) {
ivars := C.lucy_Doc_IVARS(d)
temp := ivars.fields
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
C.cfish_decref(temp)
}
//export GOLUCY_Doc_Get_Size
func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) C.uint32_t {
ivars := C.lucy_Doc_IVARS(d)
hash := ((*C.cfish_Hash)(ivars.fields))
return C.uint32_t(C.CFISH_Hash_Get_Size(hash))
}
//export GOLUCY_Doc_Store
func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) {
ivars := C.lucy_Doc_IVARS(d)
hash := (*C.cfish_Hash)(ivars.fields)
C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value)))
}
//export GOLUCY_Doc_Serialize
func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) {
ivars := C.lucy_Doc_IVARS(d)
| NewDoc | identifier_name |
lucy.go | field);
extern cfish_Vector*
GOLUCY_Doc_Field_Names(lucy_Doc *self);
extern cfish_Vector*
(*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self);
extern bool
GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other);
extern bool
(*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other);
extern void
GOLUCY_Doc_Destroy(lucy_Doc *self);
extern void
(*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self);
extern lucy_HitDoc*
GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id);
extern lucy_HitDoc*
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id);
extern void
GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc);
extern void
(*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc);
// C symbols linked into a Go-built package archive are not visible to
// external C code -- but internal code *can* see symbols from outside.
// This allows us to fake up symbol export by assigning values only known
// interally to external symbols during Go package initialization.
static CFISH_INLINE void
GOLUCY_glue_exported_symbols() {
GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init;
GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy;
GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE
= (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8;
GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init;
GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields;
GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size;
GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store;
GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize;
GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize;
GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract;
GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names;
GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals;
GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy;
GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc;
GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc;
}
static uint32_t
S_count_code_points(const char *string, size_t len) {
uint32_t num_code_points = 0;
size_t i = 0;
while (i < len) {
i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])];
++num_code_points;
}
if (i != len) {
CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string);
}
return num_code_points;
}
// Returns the number of code points through the end of the match.
static int
push_token(const char *str, int start, int end, int last_end,
int cp_count, lucy_Inversion *inversion) {
const char *match = str + start;
int match_len = end - start;
int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end);
int cp_end = cp_start + S_count_code_points(match, match_len);
lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1);
LUCY_Inversion_Append(inversion, token);
return cp_end;
}
static void
null_terminate_string(char *string, size_t len) {
string[len] = '\0';
}
*/
import "C"
import "unsafe"
import "fmt"
import "regexp"
import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish"
var registry *objRegistry
func init() {
C.GOLUCY_glue_exported_symbols()
C.lucy_bootstrap_parcel()
registry = newObjRegistry(16)
}
//export GOLUCY_RegexTokenizer_init
func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer {
C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt))))
ivars := C.lucy_RegexTokenizer_IVARS(rt)
ivars.pattern = C.CFISH_Str_Clone(pattern)
var patternGo string
if pattern == nil {
patternGo = "\\w+(?:['\\x{2019}]\\w+)*"
} else {
patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern))
}
rx, err := regexp.Compile(patternGo)
if err != nil {
panic(err)
}
rxID := registry.store(rx)
ivars.token_re = unsafe.Pointer(rxID)
return rt
}
//export GOLUCY_RegexTokenizer_Destroy
func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re) | registry.delete(rxID)
C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER)
}
//export GOLUCY_RegexTokenizer_Tokenize_Utf8
func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char,
stringLen C.size_t, inversion *C.lucy_Inversion) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re)
rx, ok := registry.fetch(rxID).(*regexp.Regexp)
if !ok {
mess := fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s",
rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern)))
panic(clownfish.NewErr(mess))
}
buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen))
found := rx.FindAllIndex(buf, int(stringLen))
lastEnd := 0
cpCount := 0
for _, startEnd := range found {
cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]),
C.int(lastEnd), C.int(cpCount), inversion))
lastEnd = startEnd[1]
}
}
func NewDoc(docID int32) Doc {
retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID))
return WRAPDoc(unsafe.Pointer(retvalCF))
}
//export GOLUCY_Doc_init
func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc {
ivars := C.lucy_Doc_IVARS(d)
if fields != nil {
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
} else {
ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0))
}
ivars.doc_id = docID
return d
}
//export GOLUCY_Doc_Set_Fields
func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) {
ivars := C.lucy_Doc_IVARS(d)
temp := ivars.fields
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
C.cfish_decref(temp)
}
//export GOLUCY_Doc_Get_Size
func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) C.uint32_t {
ivars := C.lucy_Doc_IVARS(d)
hash := ((*C.cfish_Hash)(ivars.fields))
return C.uint32_t(C.CFISH_Hash_Get_Size(hash))
}
//export GOLUCY_Doc_Store
func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) {
ivars := C.lucy_Doc_IVARS(d)
hash := (*C.cfish_Hash)(ivars.fields)
C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value)))
}
//export GOLUCY_Doc_Serialize
func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) {
ivars := C.lucy_Doc_IVARS(d)
| random_line_split | |
lucy.go | );
extern cfish_Vector*
GOLUCY_Doc_Field_Names(lucy_Doc *self);
extern cfish_Vector*
(*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self);
extern bool
GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other);
extern bool
(*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other);
extern void
GOLUCY_Doc_Destroy(lucy_Doc *self);
extern void
(*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self);
extern lucy_HitDoc*
GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id);
extern lucy_HitDoc*
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id);
extern void
GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc);
extern void
(*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc);
// C symbols linked into a Go-built package archive are not visible to
// external C code -- but internal code *can* see symbols from outside.
// This allows us to fake up symbol export by assigning values only known
// interally to external symbols during Go package initialization.
static CFISH_INLINE void
GOLUCY_glue_exported_symbols() {
GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init;
GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy;
GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE
= (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8;
GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init;
GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields;
GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size;
GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store;
GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize;
GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize;
GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract;
GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names;
GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals;
GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy;
GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc;
GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc;
}
static uint32_t
S_count_code_points(const char *string, size_t len) {
uint32_t num_code_points = 0;
size_t i = 0;
while (i < len) {
i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])];
++num_code_points;
}
if (i != len) {
CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string);
}
return num_code_points;
}
// Returns the number of code points through the end of the match.
static int
push_token(const char *str, int start, int end, int last_end,
int cp_count, lucy_Inversion *inversion) {
const char *match = str + start;
int match_len = end - start;
int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end);
int cp_end = cp_start + S_count_code_points(match, match_len);
lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1);
LUCY_Inversion_Append(inversion, token);
return cp_end;
}
static void
null_terminate_string(char *string, size_t len) {
string[len] = '\0';
}
*/
import "C"
import "unsafe"
import "fmt"
import "regexp"
import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish"
var registry *objRegistry
func init() {
C.GOLUCY_glue_exported_symbols()
C.lucy_bootstrap_parcel()
registry = newObjRegistry(16)
}
//export GOLUCY_RegexTokenizer_init
func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer {
C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt))))
ivars := C.lucy_RegexTokenizer_IVARS(rt)
ivars.pattern = C.CFISH_Str_Clone(pattern)
var patternGo string
if pattern == nil | else {
patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern))
}
rx, err := regexp.Compile(patternGo)
if err != nil {
panic(err)
}
rxID := registry.store(rx)
ivars.token_re = unsafe.Pointer(rxID)
return rt
}
//export GOLUCY_RegexTokenizer_Destroy
func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re)
registry.delete(rxID)
C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER)
}
//export GOLUCY_RegexTokenizer_Tokenize_Utf8
func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char,
stringLen C.size_t, inversion *C.lucy_Inversion) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re)
rx, ok := registry.fetch(rxID).(*regexp.Regexp)
if !ok {
mess := fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s",
rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern)))
panic(clownfish.NewErr(mess))
}
buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen))
found := rx.FindAllIndex(buf, int(stringLen))
lastEnd := 0
cpCount := 0
for _, startEnd := range found {
cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]),
C.int(lastEnd), C.int(cpCount), inversion))
lastEnd = startEnd[1]
}
}
func NewDoc(docID int32) Doc {
retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID))
return WRAPDoc(unsafe.Pointer(retvalCF))
}
//export GOLUCY_Doc_init
func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc {
ivars := C.lucy_Doc_IVARS(d)
if fields != nil {
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
} else {
ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0))
}
ivars.doc_id = docID
return d
}
//export GOLUCY_Doc_Set_Fields
func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) {
ivars := C.lucy_Doc_IVARS(d)
temp := ivars.fields
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
C.cfish_decref(temp)
}
//export GOLUCY_Doc_Get_Size
func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) C.uint32_t {
ivars := C.lucy_Doc_IVARS(d)
hash := ((*C.cfish_Hash)(ivars.fields))
return C.uint32_t(C.CFISH_Hash_Get_Size(hash))
}
//export GOLUCY_Doc_Store
func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) {
ivars := C.lucy_Doc_IVARS(d)
hash := (*C.cfish_Hash)(ivars.fields)
C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value)))
}
//export GOLUCY_Doc_Serialize
func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) {
ivars := C.lucy_Doc_IVARS(d)
| {
patternGo = "\\w+(?:['\\x{2019}]\\w+)*"
} | conditional_block |
lucy.go | );
extern cfish_Vector*
GOLUCY_Doc_Field_Names(lucy_Doc *self);
extern cfish_Vector*
(*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self);
extern bool
GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other);
extern bool
(*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other);
extern void
GOLUCY_Doc_Destroy(lucy_Doc *self);
extern void
(*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self);
extern lucy_HitDoc*
GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id);
extern lucy_HitDoc*
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id);
extern void
GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc);
extern void
(*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc);
// C symbols linked into a Go-built package archive are not visible to
// external C code -- but internal code *can* see symbols from outside.
// This allows us to fake up symbol export by assigning values only known
// interally to external symbols during Go package initialization.
static CFISH_INLINE void
GOLUCY_glue_exported_symbols() {
GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init;
GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy;
GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE
= (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8;
GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init;
GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields;
GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size;
GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store;
GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize;
GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize;
GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract;
GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names;
GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals;
GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy;
GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc;
GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc;
}
static uint32_t
S_count_code_points(const char *string, size_t len) {
uint32_t num_code_points = 0;
size_t i = 0;
while (i < len) {
i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])];
++num_code_points;
}
if (i != len) {
CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string);
}
return num_code_points;
}
// Returns the number of code points through the end of the match.
static int
push_token(const char *str, int start, int end, int last_end,
int cp_count, lucy_Inversion *inversion) {
const char *match = str + start;
int match_len = end - start;
int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end);
int cp_end = cp_start + S_count_code_points(match, match_len);
lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1);
LUCY_Inversion_Append(inversion, token);
return cp_end;
}
static void
null_terminate_string(char *string, size_t len) {
string[len] = '\0';
}
*/
import "C"
import "unsafe"
import "fmt"
import "regexp"
import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish"
var registry *objRegistry
func init() {
C.GOLUCY_glue_exported_symbols()
C.lucy_bootstrap_parcel()
registry = newObjRegistry(16)
}
//export GOLUCY_RegexTokenizer_init
func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer {
C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt))))
ivars := C.lucy_RegexTokenizer_IVARS(rt)
ivars.pattern = C.CFISH_Str_Clone(pattern)
var patternGo string
if pattern == nil {
patternGo = "\\w+(?:['\\x{2019}]\\w+)*"
} else {
patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern))
}
rx, err := regexp.Compile(patternGo)
if err != nil {
panic(err)
}
rxID := registry.store(rx)
ivars.token_re = unsafe.Pointer(rxID)
return rt
}
//export GOLUCY_RegexTokenizer_Destroy
func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re)
registry.delete(rxID)
C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER)
}
//export GOLUCY_RegexTokenizer_Tokenize_Utf8
func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char,
stringLen C.size_t, inversion *C.lucy_Inversion) {
ivars := C.lucy_RegexTokenizer_IVARS(rt)
rxID := uintptr(ivars.token_re)
rx, ok := registry.fetch(rxID).(*regexp.Regexp)
if !ok {
mess := fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s",
rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern)))
panic(clownfish.NewErr(mess))
}
buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen))
found := rx.FindAllIndex(buf, int(stringLen))
lastEnd := 0
cpCount := 0
for _, startEnd := range found {
cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]),
C.int(lastEnd), C.int(cpCount), inversion))
lastEnd = startEnd[1]
}
}
func NewDoc(docID int32) Doc {
retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID))
return WRAPDoc(unsafe.Pointer(retvalCF))
}
//export GOLUCY_Doc_init
func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc |
//export GOLUCY_Doc_Set_Fields
func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) {
ivars := C.lucy_Doc_IVARS(d)
temp := ivars.fields
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
C.cfish_decref(temp)
}
//export GOLUCY_Doc_Get_Size
func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) C.uint32_t {
ivars := C.lucy_Doc_IVARS(d)
hash := ((*C.cfish_Hash)(ivars.fields))
return C.uint32_t(C.CFISH_Hash_Get_Size(hash))
}
//export GOLUCY_Doc_Store
func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) {
ivars := C.lucy_Doc_IVARS(d)
hash := (*C.cfish_Hash)(ivars.fields)
C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value)))
}
//export GOLUCY_Doc_Serialize
func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) {
ivars := C.lucy_Doc_IVARS(d | {
ivars := C.lucy_Doc_IVARS(d)
if fields != nil {
ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields))
} else {
ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0))
}
ivars.doc_id = docID
return d
} | identifier_body |
settings.py | (section, option):
return config.get(section, option) if config.has_option(section, option) else None
return get(section, option) if section else get
mailman_cfg = read_cfg('/etc/mailman.cfg')
BASE_DIR = '/usr/lib/bundles/mailman-webui'
CONF_DIR = '/etc/mailman-webui'
DATA_DIR = '/var/lib/mailman-webui'
LOG_DIR = '/var/log/mailman-webui'
# Hosts/domain names that are valid for this site.
# NOTE: You MUST add domain name of your instance of this application here!
# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# Mailman API credentials
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001)
MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'
MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')
MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key')
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1')
# REST API
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
}
# Only display mailing-lists in HyperKitty from the same virtual host
# as the webserver.
FILTER_VHOST = False
#
# Application definition
#
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'hyperkitty',
'rest_framework',
'django_gravatar',
'paintstore',
'compressor',
'haystack',
'django_extensions',
'postorius',
'django_mailman3',
'stronghold',
# Uncomment the next line to enable integration with Sentry
# and set DSN in RAVEN_CONFIG.
#'raven.contrib.django.raven_compat',
'allauth',
'allauth.account',
'allauth.socialaccount',
# Uncomment providers that you want to use, if any.
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django_mailman3.middleware.TimezoneMiddleware',
'postorius.middleware.PostoriusMiddleware',
# Uncomment to require a user to be authenticated to view any page.
#'stronghold.middleware.LoginRequiredMiddleware',
)
# A string representing the full Python import path to your root URLconf.
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Directory for templates override.
joinpath(DATA_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_mailman3.context_processors.common',
'hyperkitty.context_processors.common',
'postorius.context_processors.postorius',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Using the cache infrastructure can significantly improve performance on a
# production setup. This is an example with a local Memcached server.
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
#
# Databases
# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': joinpath(DATA_DIR, 'db.sqlite3'),
}
# Remove the above lines and uncomment the below to use PostgreSQL.
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'mailman_webui',
# 'USER': 'mailman_webui',
# 'PASSWORD': 'change-me',
# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'HOST': '127.0.0.1',
# 'PORT': '',
# }
}
# Full-text search engine
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': joinpath(DATA_DIR, 'fulltext_index'),
},
}
#
# Outgoing mails
#
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
# The host and port of the SMTP server to use for sending email.
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'
EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)
# Username and password to use for the SMTP server defined above.
EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''
EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''
# Whether to use a explicit TLS connection when talking to the SMTP server.
EMAIL_USE_TLS = False
# Whether to use an implicit TLS connection when talking to the SMTP server.
EMAIL_USE_SSL = False
# A tuple that lists people who get code error notifications. When DEBUG=False
# and a view raises an exception, Django will email these people with the full
# exception information. Each member of the tuple should be a tuple of (Full
# name, email address).
ADMINS = (
('Mailman Admin', 'root@localhost'),
)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL
#SERVER_EMAIL = 'root@your-domain.org'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email
#DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org'
#
# Security settings
#
# A secret key used for signing sessions, cookies, password reset tokens etc.
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
#SECURE_REDIRECT_EXEMPT = [
# 'archives/api/mailman/.*', # Request from Mailman.
#]
#
# Authentication
#
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
# Uncomment to next line to enable LDAP authentication.
#'custom.LDAPBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_URL = 'account_login'
LOGIN_REDIRECT_URL = 'hk_root'
LOGOUT_URL = 'account_logout'
# Whether registration of new accounts is currently permitted.
REGISTRATION_OPEN = True
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },
{ 'NAME': ' | get | identifier_name | |
settings.py |
return get(section, option) if section else get
mailman_cfg = read_cfg('/etc/mailman.cfg')
BASE_DIR = '/usr/lib/bundles/mailman-webui'
CONF_DIR = '/etc/mailman-webui'
DATA_DIR = '/var/lib/mailman-webui'
LOG_DIR = '/var/log/mailman-webui'
# Hosts/domain names that are valid for this site.
# NOTE: You MUST add domain name of your instance of this application here!
# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# Mailman API credentials
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001)
MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'
MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')
MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key')
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1')
# REST API
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
}
# Only display mailing-lists in HyperKitty from the same virtual host
# as the webserver.
FILTER_VHOST = False
#
# Application definition
#
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'hyperkitty',
'rest_framework',
'django_gravatar',
'paintstore',
'compressor',
'haystack',
'django_extensions',
'postorius',
'django_mailman3',
'stronghold',
# Uncomment the next line to enable integration with Sentry
# and set DSN in RAVEN_CONFIG.
#'raven.contrib.django.raven_compat',
'allauth',
'allauth.account',
'allauth.socialaccount',
# Uncomment providers that you want to use, if any.
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django_mailman3.middleware.TimezoneMiddleware',
'postorius.middleware.PostoriusMiddleware',
# Uncomment to require a user to be authenticated to view any page.
#'stronghold.middleware.LoginRequiredMiddleware',
)
# A string representing the full Python import path to your root URLconf.
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Directory for templates override.
joinpath(DATA_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_mailman3.context_processors.common',
'hyperkitty.context_processors.common',
'postorius.context_processors.postorius',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Using the cache infrastructure can significantly improve performance on a
# production setup. This is an example with a local Memcached server.
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
#
# Databases
# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': joinpath(DATA_DIR, 'db.sqlite3'),
}
# Remove the above lines and uncomment the below to use PostgreSQL.
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'mailman_webui',
# 'USER': 'mailman_webui',
# 'PASSWORD': 'change-me',
# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'HOST': '127.0.0.1',
# 'PORT': '',
# }
}
# Full-text search engine
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': joinpath(DATA_DIR, 'fulltext_index'),
},
}
#
# Outgoing mails
#
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
# The host and port of the SMTP server to use for sending email.
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'
EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)
# Username and password to use for the SMTP server defined above.
EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''
EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''
# Whether to use a explicit TLS connection when talking to the SMTP server.
EMAIL_USE_TLS = False
# Whether to use an implicit TLS connection when talking to the SMTP server.
EMAIL_USE_SSL = False
# A tuple that lists people who get code error notifications. When DEBUG=False
# and a view raises an exception, Django will email these people with the full
# exception information. Each member of the tuple should be a tuple of (Full
# name, email address).
ADMINS = (
('Mailman Admin', 'root@localhost'),
)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL
#SERVER_EMAIL = 'root@your-domain.org'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email
#DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org'
#
# Security settings
#
# A secret key used for signing sessions, cookies, password reset tokens etc.
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
#SECURE_REDIRECT_EXEMPT = [
# 'archives/api/mailman/.*', # Request from Mailman.
#]
#
# Authentication
#
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
# Uncomment to next line to enable LDAP authentication.
#'custom.LDAPBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_URL = 'account_login'
LOGIN_REDIRECT_URL = 'hk_root'
LOGOUT_URL = 'account_logout'
# Whether registration of new accounts is currently permitted.
REGISTRATION_OPEN = True
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.Common | return config.get(section, option) if config.has_option(section, option) else None | identifier_body | |
settings.py | Django settings for HyperKitty + Postorius
Pay attention to settings ALLOWED_HOSTS and DATABASES!
"""
from os.path import abspath, dirname, join as joinpath
from ConfigParser import SafeConfigParser
def read_cfg(path, section=None, option=None):
config = SafeConfigParser()
config.read(path)
def get(section, option):
return config.get(section, option) if config.has_option(section, option) else None
return get(section, option) if section else get
mailman_cfg = read_cfg('/etc/mailman.cfg')
BASE_DIR = '/usr/lib/bundles/mailman-webui'
CONF_DIR = '/etc/mailman-webui'
DATA_DIR = '/var/lib/mailman-webui'
LOG_DIR = '/var/log/mailman-webui'
# Hosts/domain names that are valid for this site.
# NOTE: You MUST add domain name of your instance of this application here!
# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# Mailman API credentials
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001)
MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'
MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')
MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key')
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1')
# REST API
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
}
# Only display mailing-lists in HyperKitty from the same virtual host
# as the webserver.
FILTER_VHOST = False
#
# Application definition
#
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'hyperkitty',
'rest_framework',
'django_gravatar',
'paintstore',
'compressor',
'haystack',
'django_extensions',
'postorius',
'django_mailman3',
'stronghold',
# Uncomment the next line to enable integration with Sentry
# and set DSN in RAVEN_CONFIG.
#'raven.contrib.django.raven_compat',
'allauth',
'allauth.account',
'allauth.socialaccount',
# Uncomment providers that you want to use, if any.
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django_mailman3.middleware.TimezoneMiddleware',
'postorius.middleware.PostoriusMiddleware',
# Uncomment to require a user to be authenticated to view any page.
#'stronghold.middleware.LoginRequiredMiddleware',
)
# A string representing the full Python import path to your root URLconf.
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Directory for templates override.
joinpath(DATA_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_mailman3.context_processors.common',
'hyperkitty.context_processors.common',
'postorius.context_processors.postorius',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Using the cache infrastructure can significantly improve performance on a
# production setup. This is an example with a local Memcached server.
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
#
# Databases
# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': joinpath(DATA_DIR, 'db.sqlite3'),
}
# Remove the above lines and uncomment the below to use PostgreSQL.
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'mailman_webui',
# 'USER': 'mailman_webui',
# 'PASSWORD': 'change-me',
# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'HOST': '127.0.0.1',
# 'PORT': '',
# }
}
# Full-text search engine
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': joinpath(DATA_DIR, 'fulltext_index'),
},
}
#
# Outgoing mails
#
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
# The host and port of the SMTP server to use for sending email.
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'
EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)
# Username and password to use for the SMTP server defined above.
EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''
EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''
# Whether to use a explicit TLS connection when talking to the SMTP server.
EMAIL_USE_TLS = False
# Whether to use an implicit TLS connection when talking to the SMTP server.
EMAIL_USE_SSL = False
# A tuple that lists people who get code error notifications. When DEBUG=False
# and a view raises an exception, Django will email these people with the full
# exception information. Each member of the tuple should be a tuple of (Full
# name, email address).
ADMINS = (
('Mailman Admin', 'root@localhost'),
)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL
#SERVER_EMAIL = 'root@your-domain.org'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email
#DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org'
#
# Security settings
#
# A secret key used for signing sessions, cookies, password reset tokens etc.
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
#SECURE_REDIRECT_EXEMPT = [
# 'archives/api/mailman/.*', # Request from Mailman.
#]
#
# Authentication
#
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
# Uncomment to next line to enable LDAP authentication.
#'custom.LDAPBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_URL = 'account_login'
LOGIN_REDIRECT_URL = 'hk_root'
LOGOUT_URL = 'account_logout'
# Whether registration of new accounts is currently permitted.
REGISTRATION | """ | random_line_split | |
settings.py | allauth',
'allauth.account',
'allauth.socialaccount',
# Uncomment providers that you want to use, if any.
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django_mailman3.middleware.TimezoneMiddleware',
'postorius.middleware.PostoriusMiddleware',
# Uncomment to require a user to be authenticated to view any page.
#'stronghold.middleware.LoginRequiredMiddleware',
)
# A string representing the full Python import path to your root URLconf.
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Directory for templates override.
joinpath(DATA_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_mailman3.context_processors.common',
'hyperkitty.context_processors.common',
'postorius.context_processors.postorius',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Using the cache infrastructure can significantly improve performance on a
# production setup. This is an example with a local Memcached server.
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
#
# Databases
# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': joinpath(DATA_DIR, 'db.sqlite3'),
}
# Remove the above lines and uncomment the below to use PostgreSQL.
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'mailman_webui',
# 'USER': 'mailman_webui',
# 'PASSWORD': 'change-me',
# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'HOST': '127.0.0.1',
# 'PORT': '',
# }
}
# Full-text search engine
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': joinpath(DATA_DIR, 'fulltext_index'),
},
}
#
# Outgoing mails
#
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
# The host and port of the SMTP server to use for sending email.
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'
EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)
# Username and password to use for the SMTP server defined above.
EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''
EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''
# Whether to use a explicit TLS connection when talking to the SMTP server.
EMAIL_USE_TLS = False
# Whether to use an implicit TLS connection when talking to the SMTP server.
EMAIL_USE_SSL = False
# A tuple that lists people who get code error notifications. When DEBUG=False
# and a view raises an exception, Django will email these people with the full
# exception information. Each member of the tuple should be a tuple of (Full
# name, email address).
ADMINS = (
('Mailman Admin', 'root@localhost'),
)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL
#SERVER_EMAIL = 'root@your-domain.org'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email
#DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org'
#
# Security settings
#
# A secret key used for signing sessions, cookies, password reset tokens etc.
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
#SECURE_REDIRECT_EXEMPT = [
# 'archives/api/mailman/.*', # Request from Mailman.
#]
#
# Authentication
#
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
# Uncomment to next line to enable LDAP authentication.
#'custom.LDAPBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_URL = 'account_login'
LOGIN_REDIRECT_URL = 'hk_root'
LOGOUT_URL = 'account_logout'
# Whether registration of new accounts is currently permitted.
REGISTRATION_OPEN = True
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' },
]
# URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware
# does not *force* them to require authentication.
STRONGHOLD_PUBLIC_URLS = (
r'^/accounts/.*',
r'^/archives/api/mailman/.*',
)
## Django Allauth
# Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable.
ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
ACCOUNT_UNIQUE_EMAIL = True
# Whether to disable intermediate logout page.
ACCOUNT_LOGOUT_ON_GET = False
SOCIALACCOUNT_PROVIDERS = {}
#SOCIALACCOUNT_PROVIDERS = {
# 'openid': {
# 'SERVERS': [
# {
# 'id': 'yahoo',
# 'name': 'Yahoo',
# 'openid_url': 'http://me.yahoo.com'
# }
# ],
# },
# 'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
# },
# 'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
# },
#}
## Django LDAP
if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:
| import ldap
from django_auth_ldap.config import LDAPSearch
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')
AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'
AUTH_LDAP_USER_SEARCH = LDAPSearch(
'ou=People,dc=example,dc=org',
ldap.SCOPE_SUBTREE,
'(&(mail=*)(uid=%(user)s))'
)
AUTH_LDAP_USER_ATTR_MAP = {
'first_name': 'givenName',
'last_name': 'sn',
'email': 'mail',
} | conditional_block | |
document-data.ts | DefinitionFor(modeId: string, wordDefinition: RegExp | null): void {
_modeId2WordDefinition.set(modeId, wordDefinition);
}
export function getWordDefinitionFor(modeId: string): RegExp {
return _modeId2WordDefinition.get(modeId)!;
}
export class DocumentDataExt {
private disposed = false;
private dirty: boolean;
private _document: theia.TextDocument;
private textLines = new Array<theia.TextLine>();
private lineStarts: PrefixSumComputer | undefined;
constructor(private proxy: DocumentsMain, private uri: URI, private lines: string[], private eol: string,
private languageId: string, private versionId: number, isDirty: boolean) {
this.dirty = isDirty;
}
dispose(): void {
ok(!this.disposed);
this.dirty = false;
this.disposed = true;
}
onEvents(e: ModelChangedEvent): void {
if (e.eol && e.eol !== this.eol) {
this.eol = e.eol;
this.lineStarts = undefined;
}
// Update my lines
const changes = e.changes;
// tslint:disable-next-line:one-variable-per-declaration
for (let i = 0, len = changes.length; i < len; i++) {
const change = changes[i];
this.acceptDeleteRange(change.range);
this.acceptInsertText(new Position(change.range.startLineNumber, change.range.startColumn), change.text);
}
this.versionId = e.versionId;
}
acceptIsDirty(isDirty: boolean): void {
ok(!this.disposed);
this.dirty = isDirty;
}
acceptLanguageId(langId: string): void {
ok(!this.disposed);
this.languageId = langId;
}
get document(): theia.TextDocument {
if (!this._document) {
const that = this;
this._document = {
get uri() { return that.uri; },
get fileName() { return that.uri.fsPath; },
get isUntitled() { return that.uri.scheme === 'untitled'; },
get languageId() { return that.languageId; },
get version() { return that.versionId; },
get isClosed() { return that.disposed; },
get isDirty() { return that.dirty; },
save() { return that.save(); },
getText(range?) { return range ? that.getTextInRange(range) : that.getText(); },
get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; },
get lineCount() { return that.lines.length; },
lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); },
offsetAt(pos) { return that.offsetAt(pos); },
positionAt(offset) { return that.positionAt(offset); },
validateRange(ran) { return that.validateRange(ran); },
validatePosition(pos) { return that.validatePosition(pos); },
getWordRangeAtPosition(pos, regexp?) { return that.getWordRangeAtPosition(pos, regexp); }
};
}
return Object.freeze(this._document);
}
private acceptInsertText(position: Position, insertText: string): void {
if (insertText.length === 0) {
// Nothing to insert
return;
}
const insertLines = insertText.split(/\r\n|\r|\n/);
if (insertLines.length === 1) {
// Inserting text on one line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
+ this.lines[position.line - 1].substring(position.character - 1)
);
return;
}
// Append overflowing text from first line to the end of text to insert
insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1);
// Delete overflowing text from first line and insert text on first line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
);
// Insert new lines & store lengths
const newLengths = new Uint32Array(insertLines.length - 1);
for (let i = 1; i < insertLines.length; i++) {
this.lines.splice(position.line + i - 1, 0, insertLines[i]);
newLengths[i - 1] = insertLines[i].length + this.eol.length;
}
if (this.lineStarts) {
// update prefix sum
this.lineStarts.insertValues(position.line, newLengths);
}
}
private acceptDeleteRange(range: ARange): void {
if (range.startLineNumber === range.endLineNumber) {
if (range.startColumn === range.endColumn) {
// Nothing to delete
return;
}
// Delete text on the affected line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.startLineNumber - 1].substring(range.endColumn - 1)
);
return;
}
// Take remaining text on last line and append it to remaining text on first line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.endLineNumber - 1].substring(range.endColumn - 1)
);
// Delete middle lines
this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber);
if (this.lineStarts) {
this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber);
}
}
private setLineText(lineIndex: number, newValue: string): void {
this.lines[lineIndex] = newValue;
if (this.lineStarts) {
this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length);
}
}
private save(): Promise<boolean> {
if (this.disposed) {
return Promise.reject(new Error('Document is closed'));
}
return this.proxy.$trySaveDocument(this.uri);
}
private getTextInRange(_range: theia.Range): string {
const range = this.validateRange(_range);
if (range.isEmpty) {
return '';
}
if (range.isSingleLine) {
return this.lines[range.start.line].substring(range.start.character, range.end.character);
}
const lineEnding = this.eol;
const startLineIndex = range.start.line;
const endLineIndex = range.end.line;
const resultLines: string[] = [];
resultLines.push(this.lines[startLineIndex].substring(range.start.character));
for (let i = startLineIndex + 1; i < endLineIndex; i++) {
resultLines.push(this.lines[i]);
}
resultLines.push(this.lines[endLineIndex].substring(0, range.end.character));
return resultLines.join(lineEnding);
}
private validateRange(range: theia.Range): theia.Range {
if (!(range instanceof Range)) {
throw new Error('Invalid argument');
}
const start = this.validatePosition(range.start);
const end = this.validatePosition(range.end);
if (start === range.start && end === range.end) { | private getText(): string {
return this.lines.join(this.eol);
}
private validatePosition(position: theia.Position): theia.Position {
if (!(position instanceof Position)) {
throw new Error('Invalid argument');
}
let { line, character } = position;
let hasChanged = false;
if (line < 0) {
line = 0;
character = 0;
hasChanged = true;
} else if (line >= this.lines.length) {
line = this.lines.length - 1;
character = this.lines[line].length;
hasChanged = true;
} else {
const maxCharacter = this.lines[line].length;
if (character < 0) {
character = 0;
hasChanged = true;
} else if (character > maxCharacter) {
character = maxCharacter;
hasChanged = true;
}
}
if (!hasChanged) {
return position;
}
return new Position(line, character);
}
private lineAt(lineOrPosition: number | theia.Position): theia.TextLine {
let line: number = -1;
if (lineOrPosition instanceof Position) {
line = lineOrPosition.line;
} else if (typeof lineOrPosition === 'number') {
line = lineOrPosition;
}
if (line < 0 || line >= this.lines.length) {
throw new Error('Illegal value for `line`');
}
let result = this.textLines[line];
if (!result || result.lineNumber !== line || result.text !== this.lines[line]) {
const text = this.lines[line];
const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length;
const range = new Range(line, 0, line, text.length);
const rangeIncludingLineBreak = line < this.lines.length - | return range;
}
return new Range(start.line, start.character, end.line, end.character);
}
| random_line_split |
document-data.ts | DefinitionFor(modeId: string, wordDefinition: RegExp | null): void {
_modeId2WordDefinition.set(modeId, wordDefinition);
}
export function getWordDefinitionFor(modeId: string): RegExp {
return _modeId2WordDefinition.get(modeId)!;
}
export class DocumentDataExt {
private disposed = false;
private dirty: boolean;
private _document: theia.TextDocument;
private textLines = new Array<theia.TextLine>();
private lineStarts: PrefixSumComputer | undefined;
constructor(private proxy: DocumentsMain, private uri: URI, private lines: string[], private eol: string,
private languageId: string, private versionId: number, isDirty: boolean) {
this.dirty = isDirty;
}
dispose(): void {
ok(!this.disposed);
this.dirty = false;
this.disposed = true;
}
onEvents(e: ModelChangedEvent): void {
if (e.eol && e.eol !== this.eol) {
this.eol = e.eol;
this.lineStarts = undefined;
}
// Update my lines
const changes = e.changes;
// tslint:disable-next-line:one-variable-per-declaration
for (let i = 0, len = changes.length; i < len; i++) {
const change = changes[i];
this.acceptDeleteRange(change.range);
this.acceptInsertText(new Position(change.range.startLineNumber, change.range.startColumn), change.text);
}
this.versionId = e.versionId;
}
acceptIsDirty(isDirty: boolean): void {
ok(!this.disposed);
this.dirty = isDirty;
}
acceptLanguageId(langId: string): void {
ok(!this.disposed);
this.languageId = langId;
}
get document(): theia.TextDocument {
if (!this._document) {
const that = this;
this._document = {
get uri() { return that.uri; },
get fileName() { return that.uri.fsPath; },
get isUntitled() { return that.uri.scheme === 'untitled'; },
get languageId() { return that.languageId; },
get version() { return that.versionId; },
get isClosed() { return that.disposed; },
get isDirty() { return that.dirty; },
save() { return that.save(); },
getText(range?) { return range ? that.getTextInRange(range) : that.getText(); },
get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; },
get | () { return that.lines.length; },
lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); },
offsetAt(pos) { return that.offsetAt(pos); },
positionAt(offset) { return that.positionAt(offset); },
validateRange(ran) { return that.validateRange(ran); },
validatePosition(pos) { return that.validatePosition(pos); },
getWordRangeAtPosition(pos, regexp?) { return that.getWordRangeAtPosition(pos, regexp); }
};
}
return Object.freeze(this._document);
}
private acceptInsertText(position: Position, insertText: string): void {
if (insertText.length === 0) {
// Nothing to insert
return;
}
const insertLines = insertText.split(/\r\n|\r|\n/);
if (insertLines.length === 1) {
// Inserting text on one line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
+ this.lines[position.line - 1].substring(position.character - 1)
);
return;
}
// Append overflowing text from first line to the end of text to insert
insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1);
// Delete overflowing text from first line and insert text on first line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
);
// Insert new lines & store lengths
const newLengths = new Uint32Array(insertLines.length - 1);
for (let i = 1; i < insertLines.length; i++) {
this.lines.splice(position.line + i - 1, 0, insertLines[i]);
newLengths[i - 1] = insertLines[i].length + this.eol.length;
}
if (this.lineStarts) {
// update prefix sum
this.lineStarts.insertValues(position.line, newLengths);
}
}
private acceptDeleteRange(range: ARange): void {
if (range.startLineNumber === range.endLineNumber) {
if (range.startColumn === range.endColumn) {
// Nothing to delete
return;
}
// Delete text on the affected line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.startLineNumber - 1].substring(range.endColumn - 1)
);
return;
}
// Take remaining text on last line and append it to remaining text on first line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.endLineNumber - 1].substring(range.endColumn - 1)
);
// Delete middle lines
this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber);
if (this.lineStarts) {
this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber);
}
}
private setLineText(lineIndex: number, newValue: string): void {
this.lines[lineIndex] = newValue;
if (this.lineStarts) {
this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length);
}
}
private save(): Promise<boolean> {
if (this.disposed) {
return Promise.reject(new Error('Document is closed'));
}
return this.proxy.$trySaveDocument(this.uri);
}
private getTextInRange(_range: theia.Range): string {
const range = this.validateRange(_range);
if (range.isEmpty) {
return '';
}
if (range.isSingleLine) {
return this.lines[range.start.line].substring(range.start.character, range.end.character);
}
const lineEnding = this.eol;
const startLineIndex = range.start.line;
const endLineIndex = range.end.line;
const resultLines: string[] = [];
resultLines.push(this.lines[startLineIndex].substring(range.start.character));
for (let i = startLineIndex + 1; i < endLineIndex; i++) {
resultLines.push(this.lines[i]);
}
resultLines.push(this.lines[endLineIndex].substring(0, range.end.character));
return resultLines.join(lineEnding);
}
private validateRange(range: theia.Range): theia.Range {
if (!(range instanceof Range)) {
throw new Error('Invalid argument');
}
const start = this.validatePosition(range.start);
const end = this.validatePosition(range.end);
if (start === range.start && end === range.end) {
return range;
}
return new Range(start.line, start.character, end.line, end.character);
}
private getText(): string {
return this.lines.join(this.eol);
}
private validatePosition(position: theia.Position): theia.Position {
if (!(position instanceof Position)) {
throw new Error('Invalid argument');
}
let { line, character } = position;
let hasChanged = false;
if (line < 0) {
line = 0;
character = 0;
hasChanged = true;
} else if (line >= this.lines.length) {
line = this.lines.length - 1;
character = this.lines[line].length;
hasChanged = true;
} else {
const maxCharacter = this.lines[line].length;
if (character < 0) {
character = 0;
hasChanged = true;
} else if (character > maxCharacter) {
character = maxCharacter;
hasChanged = true;
}
}
if (!hasChanged) {
return position;
}
return new Position(line, character);
}
private lineAt(lineOrPosition: number | theia.Position): theia.TextLine {
let line: number = -1;
if (lineOrPosition instanceof Position) {
line = lineOrPosition.line;
} else if (typeof lineOrPosition === 'number') {
line = lineOrPosition;
}
if (line < 0 || line >= this.lines.length) {
throw new Error('Illegal value for `line`');
}
let result = this.textLines[line];
if (!result || result.lineNumber !== line || result.text !== this.lines[line]) {
const text = this.lines[line];
const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length;
const range = new Range(line, 0, line, text.length);
const rangeIncludingLineBreak = line < this.lines.length | lineCount | identifier_name |
document-data.ts | get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; },
get lineCount() { return that.lines.length; },
lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); },
offsetAt(pos) { return that.offsetAt(pos); },
positionAt(offset) { return that.positionAt(offset); },
validateRange(ran) { return that.validateRange(ran); },
validatePosition(pos) { return that.validatePosition(pos); },
getWordRangeAtPosition(pos, regexp?) { return that.getWordRangeAtPosition(pos, regexp); }
};
}
return Object.freeze(this._document);
}
private acceptInsertText(position: Position, insertText: string): void {
if (insertText.length === 0) {
// Nothing to insert
return;
}
const insertLines = insertText.split(/\r\n|\r|\n/);
if (insertLines.length === 1) {
// Inserting text on one line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
+ this.lines[position.line - 1].substring(position.character - 1)
);
return;
}
// Append overflowing text from first line to the end of text to insert
insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1);
// Delete overflowing text from first line and insert text on first line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
);
// Insert new lines & store lengths
const newLengths = new Uint32Array(insertLines.length - 1);
for (let i = 1; i < insertLines.length; i++) {
this.lines.splice(position.line + i - 1, 0, insertLines[i]);
newLengths[i - 1] = insertLines[i].length + this.eol.length;
}
if (this.lineStarts) {
// update prefix sum
this.lineStarts.insertValues(position.line, newLengths);
}
}
private acceptDeleteRange(range: ARange): void {
if (range.startLineNumber === range.endLineNumber) {
if (range.startColumn === range.endColumn) {
// Nothing to delete
return;
}
// Delete text on the affected line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.startLineNumber - 1].substring(range.endColumn - 1)
);
return;
}
// Take remaining text on last line and append it to remaining text on first line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.endLineNumber - 1].substring(range.endColumn - 1)
);
// Delete middle lines
this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber);
if (this.lineStarts) {
this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber);
}
}
private setLineText(lineIndex: number, newValue: string): void {
this.lines[lineIndex] = newValue;
if (this.lineStarts) {
this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length);
}
}
private save(): Promise<boolean> {
if (this.disposed) {
return Promise.reject(new Error('Document is closed'));
}
return this.proxy.$trySaveDocument(this.uri);
}
private getTextInRange(_range: theia.Range): string {
const range = this.validateRange(_range);
if (range.isEmpty) {
return '';
}
if (range.isSingleLine) {
return this.lines[range.start.line].substring(range.start.character, range.end.character);
}
const lineEnding = this.eol;
const startLineIndex = range.start.line;
const endLineIndex = range.end.line;
const resultLines: string[] = [];
resultLines.push(this.lines[startLineIndex].substring(range.start.character));
for (let i = startLineIndex + 1; i < endLineIndex; i++) {
resultLines.push(this.lines[i]);
}
resultLines.push(this.lines[endLineIndex].substring(0, range.end.character));
return resultLines.join(lineEnding);
}
private validateRange(range: theia.Range): theia.Range {
if (!(range instanceof Range)) {
throw new Error('Invalid argument');
}
const start = this.validatePosition(range.start);
const end = this.validatePosition(range.end);
if (start === range.start && end === range.end) {
return range;
}
return new Range(start.line, start.character, end.line, end.character);
}
private getText(): string {
return this.lines.join(this.eol);
}
private validatePosition(position: theia.Position): theia.Position {
if (!(position instanceof Position)) {
throw new Error('Invalid argument');
}
let { line, character } = position;
let hasChanged = false;
if (line < 0) {
line = 0;
character = 0;
hasChanged = true;
} else if (line >= this.lines.length) {
line = this.lines.length - 1;
character = this.lines[line].length;
hasChanged = true;
} else {
const maxCharacter = this.lines[line].length;
if (character < 0) {
character = 0;
hasChanged = true;
} else if (character > maxCharacter) {
character = maxCharacter;
hasChanged = true;
}
}
if (!hasChanged) {
return position;
}
return new Position(line, character);
}
private lineAt(lineOrPosition: number | theia.Position): theia.TextLine {
let line: number = -1;
if (lineOrPosition instanceof Position) {
line = lineOrPosition.line;
} else if (typeof lineOrPosition === 'number') {
line = lineOrPosition;
}
if (line < 0 || line >= this.lines.length) {
throw new Error('Illegal value for `line`');
}
let result = this.textLines[line];
if (!result || result.lineNumber !== line || result.text !== this.lines[line]) {
const text = this.lines[line];
const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length;
const range = new Range(line, 0, line, text.length);
const rangeIncludingLineBreak = line < this.lines.length - 1
? new Range(line, 0, line + 1, 0)
: range;
result = Object.freeze({
lineNumber: line,
range,
rangeIncludingLineBreak,
text,
firstNonWhitespaceCharacterIndex,
isEmptyOrWhitespace: firstNonWhitespaceCharacterIndex === text.length
});
this.textLines[line] = result;
}
return result;
}
private offsetAt(position: theia.Position): number {
position = this.validatePosition(position);
this.ensureLineStarts();
return this.lineStarts!.getAccumulatedValue(position.line - 1) + position.character;
}
private ensureLineStarts(): void {
if (!this.lineStarts) {
const eolLength = this.eol.length;
const linesLength = this.lines.length;
const lineStartValues = new Uint32Array(linesLength);
for (let i = 0; i < linesLength; i++) {
lineStartValues[i] = this.lines[i].length + eolLength;
}
this.lineStarts = new PrefixSumComputer(lineStartValues);
}
}
private positionAt(offset: number): theia.Position {
offset = Math.floor(offset);
offset = Math.max(0, offset);
this.ensureLineStarts();
const out = this.lineStarts!.getIndexOf(offset);
const lineLength = this.lines[out.index].length;
return new Position(out.index, Math.min(out.remainder, lineLength));
}
private getWordRangeAtPosition(_position: theia.Position, regexp?: RegExp): theia.Range | undefined {
const position = this.validatePosition(_position);
if (!regexp) {
// use default when custom-regexp isn't provided
regexp = getWordDefinitionFor(this.languageId);
} else if (regExpLeadsToEndlessLoop(regexp)) {
// use default when custom-regexp is bad
console.warn(`[getWordRangeAtPosition]: ignoring custom regexp '${regexp.source}' because it matches the empty string.`);
regexp = getWordDefinitionFor(this.languageId);
}
const wordAtText = getWordAtText(
position.character + 1,
ensureValidWordDefinition(regexp),
this.lines[position.line],
0
);
if (wordAtText) | {
return new Range(position.line, wordAtText.startColumn - 1, position.line, wordAtText.endColumn - 1);
} | conditional_block | |
document-data.ts | DefinitionFor(modeId: string, wordDefinition: RegExp | null): void {
_modeId2WordDefinition.set(modeId, wordDefinition);
}
export function getWordDefinitionFor(modeId: string): RegExp {
return _modeId2WordDefinition.get(modeId)!;
}
export class DocumentDataExt {
private disposed = false;
private dirty: boolean;
private _document: theia.TextDocument;
private textLines = new Array<theia.TextLine>();
private lineStarts: PrefixSumComputer | undefined;
constructor(private proxy: DocumentsMain, private uri: URI, private lines: string[], private eol: string,
private languageId: string, private versionId: number, isDirty: boolean) {
this.dirty = isDirty;
}
dispose(): void {
ok(!this.disposed);
this.dirty = false;
this.disposed = true;
}
onEvents(e: ModelChangedEvent): void {
if (e.eol && e.eol !== this.eol) {
this.eol = e.eol;
this.lineStarts = undefined;
}
// Update my lines
const changes = e.changes;
// tslint:disable-next-line:one-variable-per-declaration
for (let i = 0, len = changes.length; i < len; i++) {
const change = changes[i];
this.acceptDeleteRange(change.range);
this.acceptInsertText(new Position(change.range.startLineNumber, change.range.startColumn), change.text);
}
this.versionId = e.versionId;
}
acceptIsDirty(isDirty: boolean): void {
ok(!this.disposed);
this.dirty = isDirty;
}
acceptLanguageId(langId: string): void {
ok(!this.disposed);
this.languageId = langId;
}
get document(): theia.TextDocument {
if (!this._document) {
const that = this;
this._document = {
get uri() { return that.uri; },
get fileName() { return that.uri.fsPath; },
get isUntitled() { return that.uri.scheme === 'untitled'; },
get languageId() { return that.languageId; },
get version() { return that.versionId; },
get isClosed() { return that.disposed; },
get isDirty() { return that.dirty; },
save() { return that.save(); },
getText(range?) { return range ? that.getTextInRange(range) : that.getText(); },
get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; },
get lineCount() { return that.lines.length; },
lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); },
offsetAt(pos) { return that.offsetAt(pos); },
positionAt(offset) { return that.positionAt(offset); },
validateRange(ran) { return that.validateRange(ran); },
validatePosition(pos) { return that.validatePosition(pos); },
getWordRangeAtPosition(pos, regexp?) { return that.getWordRangeAtPosition(pos, regexp); }
};
}
return Object.freeze(this._document);
}
private acceptInsertText(position: Position, insertText: string): void {
if (insertText.length === 0) {
// Nothing to insert
return;
}
const insertLines = insertText.split(/\r\n|\r|\n/);
if (insertLines.length === 1) {
// Inserting text on one line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
+ this.lines[position.line - 1].substring(position.character - 1)
);
return;
}
// Append overflowing text from first line to the end of text to insert
insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1);
// Delete overflowing text from first line and insert text on first line
this.setLineText(position.line - 1,
this.lines[position.line - 1].substring(0, position.character - 1)
+ insertLines[0]
);
// Insert new lines & store lengths
const newLengths = new Uint32Array(insertLines.length - 1);
for (let i = 1; i < insertLines.length; i++) {
this.lines.splice(position.line + i - 1, 0, insertLines[i]);
newLengths[i - 1] = insertLines[i].length + this.eol.length;
}
if (this.lineStarts) {
// update prefix sum
this.lineStarts.insertValues(position.line, newLengths);
}
}
private acceptDeleteRange(range: ARange): void {
if (range.startLineNumber === range.endLineNumber) {
if (range.startColumn === range.endColumn) {
// Nothing to delete
return;
}
// Delete text on the affected line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.startLineNumber - 1].substring(range.endColumn - 1)
);
return;
}
// Take remaining text on last line and append it to remaining text on first line
this.setLineText(range.startLineNumber - 1,
this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1)
+ this.lines[range.endLineNumber - 1].substring(range.endColumn - 1)
);
// Delete middle lines
this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber);
if (this.lineStarts) {
this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber);
}
}
private setLineText(lineIndex: number, newValue: string): void {
this.lines[lineIndex] = newValue;
if (this.lineStarts) {
this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length);
}
}
private save(): Promise<boolean> {
if (this.disposed) {
return Promise.reject(new Error('Document is closed'));
}
return this.proxy.$trySaveDocument(this.uri);
}
private getTextInRange(_range: theia.Range): string {
const range = this.validateRange(_range);
if (range.isEmpty) {
return '';
}
if (range.isSingleLine) {
return this.lines[range.start.line].substring(range.start.character, range.end.character);
}
const lineEnding = this.eol;
const startLineIndex = range.start.line;
const endLineIndex = range.end.line;
const resultLines: string[] = [];
resultLines.push(this.lines[startLineIndex].substring(range.start.character));
for (let i = startLineIndex + 1; i < endLineIndex; i++) {
resultLines.push(this.lines[i]);
}
resultLines.push(this.lines[endLineIndex].substring(0, range.end.character));
return resultLines.join(lineEnding);
}
private validateRange(range: theia.Range): theia.Range {
if (!(range instanceof Range)) {
throw new Error('Invalid argument');
}
const start = this.validatePosition(range.start);
const end = this.validatePosition(range.end);
if (start === range.start && end === range.end) {
return range;
}
return new Range(start.line, start.character, end.line, end.character);
}
private getText(): string |
private validatePosition(position: theia.Position): theia.Position {
if (!(position instanceof Position)) {
throw new Error('Invalid argument');
}
let { line, character } = position;
let hasChanged = false;
if (line < 0) {
line = 0;
character = 0;
hasChanged = true;
} else if (line >= this.lines.length) {
line = this.lines.length - 1;
character = this.lines[line].length;
hasChanged = true;
} else {
const maxCharacter = this.lines[line].length;
if (character < 0) {
character = 0;
hasChanged = true;
} else if (character > maxCharacter) {
character = maxCharacter;
hasChanged = true;
}
}
if (!hasChanged) {
return position;
}
return new Position(line, character);
}
private lineAt(lineOrPosition: number | theia.Position): theia.TextLine {
let line: number = -1;
if (lineOrPosition instanceof Position) {
line = lineOrPosition.line;
} else if (typeof lineOrPosition === 'number') {
line = lineOrPosition;
}
if (line < 0 || line >= this.lines.length) {
throw new Error('Illegal value for `line`');
}
let result = this.textLines[line];
if (!result || result.lineNumber !== line || result.text !== this.lines[line]) {
const text = this.lines[line];
const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length;
const range = new Range(line, 0, line, text.length);
const rangeIncludingLineBreak = line < this.lines | {
return this.lines.join(this.eol);
} | identifier_body |
run_hook.go | /tools/clientcmd"
"kmodules.xyz/client-go/meta"
appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned"
)
type hookOptions struct {
masterURL string
kubeConfigPath string
namespace string
hookType string
backupSessionName string
restoreSessionName string
targetKind string
targetName string
invokerType string
invokerName string
hostname string
config *rest.Config
kubeClient kubernetes.Interface
stashClient cs.Interface
appClient appcatalog_cs.Interface
metricOpts restic.MetricsOptions
outputDir string
}
func NewCmdRunHook() *cobra.Command {
opt := hookOptions{
masterURL: "",
kubeConfigPath: "",
namespace: meta.Namespace(),
hostname: apis.DefaultHost,
}
cmd := &cobra.Command{
Use: "run-hook",
Short: "Execute Backup or Restore Hooks",
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, opt.kubeConfigPath)
if err != nil {
glog.Fatalf("Could not get Kubernetes config: %s", err)
return err
}
opt.config = config
opt.kubeClient = kubernetes.NewForConfigOrDie(config)
opt.stashClient = cs.NewForConfigOrDie(config)
opt.appClient = appcatalog_cs.NewForConfigOrDie(config)
err = opt.executeHook()
if err != nil {
// For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step.
// We will also update the BackupSession/RestoreSession status as the update-status Function will not execute.
if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook {
return opt.handlePreTaskHookFailure(err)
}
// For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory.
// The update-status Function will update the status of the BackupSession/RestoreSession
return opt.handlePostTaskHookFailure(err)
}
return nil
},
}
cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object")
cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession")
cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker")
cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker")
cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target")
cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target")
cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, "Type of hook to execute")
cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored")
cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics")
cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed")
cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics")
cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name")
cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)")
return cmd
}
func (opt *hookOptions) executeHook() error {
var hook interface{}
var executorPodName string
if opt.backupSessionName != "" {
// For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker.
invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace)
if err != nil {
return err
}
// We need to extract the hook only for the current target
for _, targetInfo := range invoker.TargetsInfo {
if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName {
hook = targetInfo.Hooks
executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref)
if err != nil {
return err
}
break
}
}
} else if opt.restoreSessionName != "" {
// For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession.
restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{})
if err != nil {
return err
}
hook = restoreSession.Spec.Hooks
if restoreSession.Spec.Target != nil {
executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref)
if err != nil {
return err
}
} else {
executorPodName = os.Getenv(apis.KeyPodName)
}
} else {
return fmt.Errorf("can not execute hooks. Reason: Respective BackupSession or RestoreSession has not been specified")
}
// Execute the hooks
return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace)
}
func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) {
switch targetRef.Kind {
case apis.KindAppBinding:
// For AppBinding, we will execute the hooks in the respective app pod
return opt.getAppPodName(targetRef.Name)
default:
// For other types of target, hook will be executed where this process is running.
return os.Getenv(apis.KeyPodName), nil
}
}
func (opt *hookOptions) getAppPodName(appbindingName string) (string, error) {
// get the AppBinding
appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{})
if err != nil {
return "", err
}
// AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod.
if appbinding.Spec.ClientConfig.Service != nil {
// there should be an endpoint with same name as the service which contains the name of the selected pods.
endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, subSets := range endPoint.Subsets {
// get pod from the ready addresses
for _, readyAddrs := range subSets.Addresses {
if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod {
return readyAddrs.TargetRef.Name, nil
}
}
// no pod found in ready addresses. now try in not ready addresses.
for _, notReadyAddrs := range subSets.NotReadyAddresses {
if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod {
return notReadyAddrs.TargetRef.Name, nil
}
}
}
}
return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName)
}
func (opt *hookOptions) | (hookErr error) error {
statusOpt := status.UpdateStatusOptions{
Config: opt.config,
KubeClient: opt.kubeClient,
StashClient: opt.stashClient,
Namespace: opt.namespace,
Metrics: opt.metricOpts,
TargetRef: v1beta1.TargetRef{
Kind: opt.targetKind,
Name: opt.targetName,
},
}
if opt.hookType == apis.PreBackupHook {
backupOutput := &restic.BackupOutput{
HostBackupStats: []v1beta1.HostBackupStats{
{
Hostname: opt.hostname,
Phase: v1beta1.HostBackupFailed,
Error: hookErr.Error(),
},
},
}
statusOpt.BackupSession = opt.backupSessionName | handlePreTaskHookFailure | identifier_name |
run_hook.go | /clientcmd"
"kmodules.xyz/client-go/meta"
appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned"
)
type hookOptions struct {
masterURL string
kubeConfigPath string
namespace string
hookType string
backupSessionName string
restoreSessionName string
targetKind string
targetName string
invokerType string
invokerName string
hostname string
config *rest.Config
kubeClient kubernetes.Interface
stashClient cs.Interface
appClient appcatalog_cs.Interface
metricOpts restic.MetricsOptions
outputDir string
}
func NewCmdRunHook() *cobra.Command {
opt := hookOptions{
masterURL: "",
kubeConfigPath: "",
namespace: meta.Namespace(),
hostname: apis.DefaultHost,
}
cmd := &cobra.Command{
Use: "run-hook",
Short: "Execute Backup or Restore Hooks",
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, opt.kubeConfigPath)
if err != nil {
glog.Fatalf("Could not get Kubernetes config: %s", err)
return err
}
opt.config = config
opt.kubeClient = kubernetes.NewForConfigOrDie(config)
opt.stashClient = cs.NewForConfigOrDie(config)
opt.appClient = appcatalog_cs.NewForConfigOrDie(config)
err = opt.executeHook()
if err != nil {
// For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step.
// We will also update the BackupSession/RestoreSession status as the update-status Function will not execute.
if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook {
return opt.handlePreTaskHookFailure(err)
}
// For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory.
// The update-status Function will update the status of the BackupSession/RestoreSession
return opt.handlePostTaskHookFailure(err)
}
return nil
},
}
cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object")
cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession")
cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker")
cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker")
cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target")
cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target")
cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, "Type of hook to execute")
cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored")
cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics")
cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed")
cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics")
cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name")
cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)")
return cmd
}
func (opt *hookOptions) executeHook() error {
var hook interface{}
var executorPodName string
if opt.backupSessionName != "" {
// For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker.
invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace)
if err != nil {
return err
}
// We need to extract the hook only for the current target
for _, targetInfo := range invoker.TargetsInfo {
if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName {
hook = targetInfo.Hooks
executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref)
if err != nil {
return err
}
break
}
}
} else if opt.restoreSessionName != "" {
// For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession.
restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{})
if err != nil {
return err
}
hook = restoreSession.Spec.Hooks
if restoreSession.Spec.Target != nil {
executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref)
if err != nil {
return err
}
} else {
executorPodName = os.Getenv(apis.KeyPodName)
}
} else {
return fmt.Errorf("can not execute hooks. Reason: Respective BackupSession or RestoreSession has not been specified")
}
// Execute the hooks
return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace)
}
func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) {
switch targetRef.Kind {
case apis.KindAppBinding:
// For AppBinding, we will execute the hooks in the respective app pod
return opt.getAppPodName(targetRef.Name)
default:
// For other types of target, hook will be executed where this process is running.
return os.Getenv(apis.KeyPodName), nil
}
}
func (opt *hookOptions) getAppPodName(appbindingName string) (string, error) {
// get the AppBinding
appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{})
if err != nil {
return "", err
}
// AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod.
if appbinding.Spec.ClientConfig.Service != nil {
// there should be an endpoint with same name as the service which contains the name of the selected pods.
endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, subSets := range endPoint.Subsets {
// get pod from the ready addresses
for _, readyAddrs := range subSets.Addresses {
if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod {
return readyAddrs.TargetRef.Name, nil
}
}
// no pod found in ready addresses. now try in not ready addresses.
for _, notReadyAddrs := range subSets.NotReadyAddresses {
if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod |
}
}
}
return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName)
}
func (opt *hookOptions) handlePreTaskHookFailure(hookErr error) error {
statusOpt := status.UpdateStatusOptions{
Config: opt.config,
KubeClient: opt.kubeClient,
StashClient: opt.stashClient,
Namespace: opt.namespace,
Metrics: opt.metricOpts,
TargetRef: v1beta1.TargetRef{
Kind: opt.targetKind,
Name: opt.targetName,
},
}
if opt.hookType == apis.PreBackupHook {
backupOutput := &restic.BackupOutput{
HostBackupStats: []v1beta1.HostBackupStats{
{
Hostname: opt.hostname,
Phase: v1beta1.HostBackupFailed,
Error: hookErr.Error(),
},
},
}
statusOpt.BackupSession = opt.backupSessionName | {
return notReadyAddrs.TargetRef.Name, nil
} | conditional_block |
run_hook.go | /tools/clientcmd"
"kmodules.xyz/client-go/meta"
appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned"
)
type hookOptions struct {
masterURL string
kubeConfigPath string
namespace string
hookType string
backupSessionName string
restoreSessionName string
targetKind string
targetName string
invokerType string
invokerName string
hostname string
config *rest.Config
kubeClient kubernetes.Interface
stashClient cs.Interface
appClient appcatalog_cs.Interface
metricOpts restic.MetricsOptions
outputDir string
}
func NewCmdRunHook() *cobra.Command {
opt := hookOptions{
masterURL: "",
kubeConfigPath: "",
namespace: meta.Namespace(),
hostname: apis.DefaultHost,
}
cmd := &cobra.Command{
Use: "run-hook",
Short: "Execute Backup or Restore Hooks",
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, opt.kubeConfigPath)
if err != nil {
glog.Fatalf("Could not get Kubernetes config: %s", err)
return err
}
opt.config = config
opt.kubeClient = kubernetes.NewForConfigOrDie(config)
opt.stashClient = cs.NewForConfigOrDie(config)
opt.appClient = appcatalog_cs.NewForConfigOrDie(config)
err = opt.executeHook()
if err != nil {
// For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step.
// We will also update the BackupSession/RestoreSession status as the update-status Function will not execute.
if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook {
return opt.handlePreTaskHookFailure(err)
}
// For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory.
// The update-status Function will update the status of the BackupSession/RestoreSession
return opt.handlePostTaskHookFailure(err)
}
return nil
},
}
cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object")
cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession")
cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker")
cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker")
cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target")
cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target")
cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, "Type of hook to execute")
cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored")
cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics")
cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed")
cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics")
cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name")
cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)")
return cmd
}
func (opt *hookOptions) executeHook() error {
var hook interface{}
var executorPodName string
if opt.backupSessionName != "" {
// For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker.
invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace)
if err != nil {
return err
}
// We need to extract the hook only for the current target
for _, targetInfo := range invoker.TargetsInfo {
if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName {
hook = targetInfo.Hooks
executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref)
if err != nil {
return err
}
break
}
}
} else if opt.restoreSessionName != "" {
// For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession.
restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{})
if err != nil {
return err
}
hook = restoreSession.Spec.Hooks
if restoreSession.Spec.Target != nil {
executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref)
if err != nil {
return err
}
} else {
executorPodName = os.Getenv(apis.KeyPodName)
}
} else {
return fmt.Errorf("can not execute hooks. Reason: Respective BackupSession or RestoreSession has not been specified")
}
// Execute the hooks
return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace)
}
func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) {
switch targetRef.Kind {
case apis.KindAppBinding:
// For AppBinding, we will execute the hooks in the respective app pod
return opt.getAppPodName(targetRef.Name)
default:
// For other types of target, hook will be executed where this process is running.
return os.Getenv(apis.KeyPodName), nil
}
}
func (opt *hookOptions) getAppPodName(appbindingName string) (string, error) | }
}
// no pod found in ready addresses. now try in not ready addresses.
for _, notReadyAddrs := range subSets.NotReadyAddresses {
if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod {
return notReadyAddrs.TargetRef.Name, nil
}
}
}
}
return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName)
}
func (opt *hookOptions) handlePreTaskHookFailure(hookErr error) error {
statusOpt := status.UpdateStatusOptions{
Config: opt.config,
KubeClient: opt.kubeClient,
StashClient: opt.stashClient,
Namespace: opt.namespace,
Metrics: opt.metricOpts,
TargetRef: v1beta1.TargetRef{
Kind: opt.targetKind,
Name: opt.targetName,
},
}
if opt.hookType == apis.PreBackupHook {
backupOutput := &restic.BackupOutput{
HostBackupStats: []v1beta1.HostBackupStats{
{
Hostname: opt.hostname,
Phase: v1beta1.HostBackupFailed,
Error: hookErr.Error(),
},
},
}
statusOpt.BackupSession = opt.backupSessionName
| {
// get the AppBinding
appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{})
if err != nil {
return "", err
}
// AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod.
if appbinding.Spec.ClientConfig.Service != nil {
// there should be an endpoint with same name as the service which contains the name of the selected pods.
endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, subSets := range endPoint.Subsets {
// get pod from the ready addresses
for _, readyAddrs := range subSets.Addresses {
if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod {
return readyAddrs.TargetRef.Name, nil | identifier_body |
run_hook.go | /tools/clientcmd"
"kmodules.xyz/client-go/meta"
appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned"
)
type hookOptions struct {
masterURL string
kubeConfigPath string
namespace string
hookType string
backupSessionName string
restoreSessionName string
targetKind string
targetName string
invokerType string
invokerName string
hostname string
config *rest.Config
kubeClient kubernetes.Interface
stashClient cs.Interface
appClient appcatalog_cs.Interface
metricOpts restic.MetricsOptions
outputDir string
}
func NewCmdRunHook() *cobra.Command {
opt := hookOptions{
masterURL: "",
kubeConfigPath: "",
namespace: meta.Namespace(),
hostname: apis.DefaultHost,
}
cmd := &cobra.Command{
Use: "run-hook",
Short: "Execute Backup or Restore Hooks",
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, opt.kubeConfigPath)
if err != nil {
glog.Fatalf("Could not get Kubernetes config: %s", err)
return err
}
opt.config = config
opt.kubeClient = kubernetes.NewForConfigOrDie(config)
opt.stashClient = cs.NewForConfigOrDie(config)
opt.appClient = appcatalog_cs.NewForConfigOrDie(config)
err = opt.executeHook() | if err != nil {
// For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step.
// We will also update the BackupSession/RestoreSession status as the update-status Function will not execute.
if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook {
return opt.handlePreTaskHookFailure(err)
}
// For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory.
// The update-status Function will update the status of the BackupSession/RestoreSession
return opt.handlePostTaskHookFailure(err)
}
return nil
},
}
cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object")
cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession")
cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker")
cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker")
cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target")
cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target")
cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, "Type of hook to execute")
cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored")
cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics")
cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed")
cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics")
cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name")
cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)")
return cmd
}
func (opt *hookOptions) executeHook() error {
var hook interface{}
var executorPodName string
if opt.backupSessionName != "" {
// For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker.
invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace)
if err != nil {
return err
}
// We need to extract the hook only for the current target
for _, targetInfo := range invoker.TargetsInfo {
if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName {
hook = targetInfo.Hooks
executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref)
if err != nil {
return err
}
break
}
}
} else if opt.restoreSessionName != "" {
// For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession.
restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{})
if err != nil {
return err
}
hook = restoreSession.Spec.Hooks
if restoreSession.Spec.Target != nil {
executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref)
if err != nil {
return err
}
} else {
executorPodName = os.Getenv(apis.KeyPodName)
}
} else {
return fmt.Errorf("can not execute hooks. Reason: Respective BackupSession or RestoreSession has not been specified")
}
// Execute the hooks
return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace)
}
func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) {
switch targetRef.Kind {
case apis.KindAppBinding:
// For AppBinding, we will execute the hooks in the respective app pod
return opt.getAppPodName(targetRef.Name)
default:
// For other types of target, hook will be executed where this process is running.
return os.Getenv(apis.KeyPodName), nil
}
}
func (opt *hookOptions) getAppPodName(appbindingName string) (string, error) {
// get the AppBinding
appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{})
if err != nil {
return "", err
}
// AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod.
if appbinding.Spec.ClientConfig.Service != nil {
// there should be an endpoint with same name as the service which contains the name of the selected pods.
endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, subSets := range endPoint.Subsets {
// get pod from the ready addresses
for _, readyAddrs := range subSets.Addresses {
if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod {
return readyAddrs.TargetRef.Name, nil
}
}
// no pod found in ready addresses. now try in not ready addresses.
for _, notReadyAddrs := range subSets.NotReadyAddresses {
if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod {
return notReadyAddrs.TargetRef.Name, nil
}
}
}
}
return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName)
}
func (opt *hookOptions) handlePreTaskHookFailure(hookErr error) error {
statusOpt := status.UpdateStatusOptions{
Config: opt.config,
KubeClient: opt.kubeClient,
StashClient: opt.stashClient,
Namespace: opt.namespace,
Metrics: opt.metricOpts,
TargetRef: v1beta1.TargetRef{
Kind: opt.targetKind,
Name: opt.targetName,
},
}
if opt.hookType == apis.PreBackupHook {
backupOutput := &restic.BackupOutput{
HostBackupStats: []v1beta1.HostBackupStats{
{
Hostname: opt.hostname,
Phase: v1beta1.HostBackupFailed,
Error: hookErr.Error(),
},
},
}
statusOpt.BackupSession = opt.backupSessionName
| random_line_split | |
console.rs | and remove all containers matching PATTERN\n\
update: Run update with provided ressources\n\
versions: Version list of installed applications";
pub async fn init(tx: &EventTx) -> Result<()> {
let rx = serve().await?;
let tx = tx.clone();
// Spawn a task that handles lines received on the debug port.
task::spawn(async move {
while let Ok((line, tx_reply)) = rx.recv().await {
tx.send(Event::Console(line, tx_reply)).await;
}
});
Ok(())
}
pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> {
info!("Running \'{}\'", command);
let mut commands = command.split_whitespace();
if let Some(cmd) = commands.next() {
let args = commands.collect::<Vec<&str>>();
let start_timestamp = time::Instant::now();
match match cmd {
"help" => help(),
"list" => list(state).await,
"ps" => ps(state).await,
"settings" => settings(),
"shutdown" => shutdown(state).await,
"start" => start(state, &args).await,
"stop" => stop(state, &args).await,
"uninstall" => uninstall(state, &args).await,
"update" => update(state, &args).await,
"versions" => versions(state),
_ => Err(anyhow!("Unknown command: {}", command)),
} {
Ok(mut r) => {
r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed()));
reply.send(r).await
}
Err(e) => {
let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e);
reply.send(msg).await
}
}
} else {
reply.send("Invalid command".into()).await
}
Ok(())
}
/// Return the help text
fn help() -> Result<String> {
Ok(HELP.into())
}
/// List all known containers instances and their state.
async fn list(state: &State) -> Result<String> | .unwrap_or_else(|| "No".to_string()),
if app.container().is_resource_container() {
"resource"
} else {
"app"
}
.to_owned(),
]
}),
),
)
}
/// List all running applications.
#[cfg(all(not(target_os = "android"), not(target_os = "linux")))]
async fn ps(state: &State) -> Result<String> {
to_table(
vec![vec![
"Name".to_string(),
"Version".to_string(),
"Uptime".to_string(),
]]
.iter()
.cloned()
.chain(
state
.applications()
.filter_map(|app| app.process_context().map(|p| (app, p)))
.sorted_by_key(|(app, _)| app.name())
.map(|(app, context)| {
vec![
app.name().to_string(),
app.version().to_string(),
format!("{:?}", context.uptime()),
]
}),
),
)
}
/// List all running applications.
#[cfg(any(target_os = "android", target_os = "linux"))]
async fn ps(state: &State) -> Result<String> {
use pretty_bytes::converter::convert;
const PAGE_SIZE: usize = 4096;
let mut result = vec![[
"Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime",
]
.iter()
.map(ToString::to_string)
.collect()];
for app in state.applications().sorted_by_key(|app| app.name()) {
if let Some(ref context) = app.process_context() {
let pid = context.process().pid();
let statm = procinfo::pid::statm(pid as i32)?;
result.push(vec![
app.name().to_string(),
app.version().to_string(),
pid.to_string(),
convert((statm.size * PAGE_SIZE) as f64),
convert((statm.resident * PAGE_SIZE) as f64),
convert((statm.share * PAGE_SIZE) as f64),
convert((statm.text * PAGE_SIZE) as f64),
convert((statm.data * PAGE_SIZE) as f64),
format!("{:?}", context.uptime()),
]);
}
}
to_table(result)
}
/// Start applications. If `args` is empty *all* known applications that
/// are not in a running state are started. If a argument is supplied it
/// is used to construct a Regex and all container (names) matching that
/// Regex are attempted to be started.
async fn start(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec![
"Name".to_string(),
"Result".to_string(),
"Duration".to_string(),
]];
let apps = state
.applications()
// Filter for not already running containers
.filter(|app| app.process_context().is_none())
// Filter ressource container that are not startable
.filter(|app| !app.container().is_resource_container())
// Filter matching container
.filter(|app| re.is_match(app.name()))
// Sort container by name
.sorted_by_key(|app| app.name().clone())
.map(|app| app.name().clone())
.collect::<Vec<Name>>();
for app in &apps {
let start = time::Instant::now();
match state.start(&app, 0).await {
Ok(_) => result.push(vec![
app.to_string(),
"Ok".to_string(),
format!("{:?}", start.elapsed()),
]),
Err(e) => result.push(vec![
app.to_string(),
format!("Failed: {:?}", e),
format!("{:?}", start.elapsed()),
]),
}
}
to_table(result)
}
/// Dump settings
fn settings() -> Result<String> {
Ok(format!("{}", *SETTINGS))
}
/// Stop one, some or all containers. See start for the argument handling.
async fn stop(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec![
"Name".to_string(),
"Result".to_string(),
"Duration".to_string(),
]];
let apps = state
.applications()
.filter(|app| app.process_context().is_some())
.filter(|app| re.is_match(app.name()))
.map(|app| app.name().clone())
.collect::<Vec<Name>>();
for app in &apps {
let timeout = time::Duration::from_secs(10);
let reason = TerminationReason::Stopped;
let start = time::Instant::now();
match state.stop(&app, timeout, reason).await {
Ok(()) => result.push(vec![
app.to_string(),
"Ok".to_string(),
format!("{:?}", start.elapsed()),
]),
Err(e) => result.push(vec![
app.to_string(),
e.to_string(),
format!("{:?}", start.elapsed()),
]),
}
}
to_table(result)
}
/// Umount and remove a containers. See `start` for the argument handling.
/// The data directory is not removed. This needs discussion.
async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec!["Name".to_string(), "Result".to_string()]];
let to_uninstall = state
.applications
.values()
.filter(|app| app.process_context().is_none())
.filter(|app| re.is_match(app.name()))
.map(|app| app.name())
.cloned()
.collect::<Vec<Name>>();
for app in &to_uninstall {
match state.uninstall(&app).await {
Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]),
Err(e) => result.push(vec![app.to_string(), e.to_string()]),
}
}
to_table(result)
}
/// Trigger the update module.
async fn update(state: &mut State, args: &[&str]) -> Result<String> {
if args.len() != 1 {
return Err(anyhow!("Invalid arguments for update command"));
}
let dir = PathBuf::from(args[0]);
if !dir.exists().await {
let err = anyhow!("Update directory {} does not exists", dir.display());
Err(err)
} else {
let updates = crate::update::update(state, &dir).await?;
let mut result = vec![vec![
"Name".to_string(),
"From".to_string(),
"To".to | {
to_table(
vec![vec![
"Name".to_string(),
"Version".to_string(),
"Running".to_string(),
"Type".to_string(),
]]
.iter()
.cloned()
.chain(
state
.applications()
.sorted_by_key(|app| app.name())
.map(|app| {
vec![
app.name().to_string(),
app.version().to_string(),
app.process_context()
.map(|c| format!("Yes (pid: {})", c.process().pid())) | identifier_body |
console.rs | and remove all containers matching PATTERN\n\
update: Run update with provided ressources\n\
versions: Version list of installed applications";
pub async fn | (tx: &EventTx) -> Result<()> {
let rx = serve().await?;
let tx = tx.clone();
// Spawn a task that handles lines received on the debug port.
task::spawn(async move {
while let Ok((line, tx_reply)) = rx.recv().await {
tx.send(Event::Console(line, tx_reply)).await;
}
});
Ok(())
}
pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> {
info!("Running \'{}\'", command);
let mut commands = command.split_whitespace();
if let Some(cmd) = commands.next() {
let args = commands.collect::<Vec<&str>>();
let start_timestamp = time::Instant::now();
match match cmd {
"help" => help(),
"list" => list(state).await,
"ps" => ps(state).await,
"settings" => settings(),
"shutdown" => shutdown(state).await,
"start" => start(state, &args).await,
"stop" => stop(state, &args).await,
"uninstall" => uninstall(state, &args).await,
"update" => update(state, &args).await,
"versions" => versions(state),
_ => Err(anyhow!("Unknown command: {}", command)),
} {
Ok(mut r) => {
r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed()));
reply.send(r).await
}
Err(e) => {
let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e);
reply.send(msg).await
}
}
} else {
reply.send("Invalid command".into()).await
}
Ok(())
}
/// Return the help text
fn help() -> Result<String> {
Ok(HELP.into())
}
/// List all known containers instances and their state.
async fn list(state: &State) -> Result<String> {
to_table(
vec![vec![
"Name".to_string(),
"Version".to_string(),
"Running".to_string(),
"Type".to_string(),
]]
.iter()
.cloned()
.chain(
state
.applications()
.sorted_by_key(|app| app.name())
.map(|app| {
vec![
app.name().to_string(),
app.version().to_string(),
app.process_context()
.map(|c| format!("Yes (pid: {})", c.process().pid()))
.unwrap_or_else(|| "No".to_string()),
if app.container().is_resource_container() {
"resource"
} else {
"app"
}
.to_owned(),
]
}),
),
)
}
/// List all running applications.
#[cfg(all(not(target_os = "android"), not(target_os = "linux")))]
async fn ps(state: &State) -> Result<String> {
to_table(
vec![vec![
"Name".to_string(),
"Version".to_string(),
"Uptime".to_string(),
]]
.iter()
.cloned()
.chain(
state
.applications()
.filter_map(|app| app.process_context().map(|p| (app, p)))
.sorted_by_key(|(app, _)| app.name())
.map(|(app, context)| {
vec![
app.name().to_string(),
app.version().to_string(),
format!("{:?}", context.uptime()),
]
}),
),
)
}
/// List all running applications.
#[cfg(any(target_os = "android", target_os = "linux"))]
async fn ps(state: &State) -> Result<String> {
use pretty_bytes::converter::convert;
const PAGE_SIZE: usize = 4096;
let mut result = vec![[
"Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime",
]
.iter()
.map(ToString::to_string)
.collect()];
for app in state.applications().sorted_by_key(|app| app.name()) {
if let Some(ref context) = app.process_context() {
let pid = context.process().pid();
let statm = procinfo::pid::statm(pid as i32)?;
result.push(vec![
app.name().to_string(),
app.version().to_string(),
pid.to_string(),
convert((statm.size * PAGE_SIZE) as f64),
convert((statm.resident * PAGE_SIZE) as f64),
convert((statm.share * PAGE_SIZE) as f64),
convert((statm.text * PAGE_SIZE) as f64),
convert((statm.data * PAGE_SIZE) as f64),
format!("{:?}", context.uptime()),
]);
}
}
to_table(result)
}
/// Start applications. If `args` is empty *all* known applications that
/// are not in a running state are started. If a argument is supplied it
/// is used to construct a Regex and all container (names) matching that
/// Regex are attempted to be started.
async fn start(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec![
"Name".to_string(),
"Result".to_string(),
"Duration".to_string(),
]];
let apps = state
.applications()
// Filter for not already running containers
.filter(|app| app.process_context().is_none())
// Filter ressource container that are not startable
.filter(|app| !app.container().is_resource_container())
// Filter matching container
.filter(|app| re.is_match(app.name()))
// Sort container by name
.sorted_by_key(|app| app.name().clone())
.map(|app| app.name().clone())
.collect::<Vec<Name>>();
for app in &apps {
let start = time::Instant::now();
match state.start(&app, 0).await {
Ok(_) => result.push(vec![
app.to_string(),
"Ok".to_string(),
format!("{:?}", start.elapsed()),
]),
Err(e) => result.push(vec![
app.to_string(),
format!("Failed: {:?}", e),
format!("{:?}", start.elapsed()),
]),
}
}
to_table(result)
}
/// Dump settings
fn settings() -> Result<String> {
Ok(format!("{}", *SETTINGS))
}
/// Stop one, some or all containers. See start for the argument handling.
async fn stop(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec![
"Name".to_string(),
"Result".to_string(),
"Duration".to_string(),
]];
let apps = state
.applications()
.filter(|app| app.process_context().is_some())
.filter(|app| re.is_match(app.name()))
.map(|app| app.name().clone())
.collect::<Vec<Name>>();
for app in &apps {
let timeout = time::Duration::from_secs(10);
let reason = TerminationReason::Stopped;
let start = time::Instant::now();
match state.stop(&app, timeout, reason).await {
Ok(()) => result.push(vec![
app.to_string(),
"Ok".to_string(),
format!("{:?}", start.elapsed()),
]),
Err(e) => result.push(vec![
app.to_string(),
e.to_string(),
format!("{:?}", start.elapsed()),
]),
}
}
to_table(result)
}
/// Umount and remove a containers. See `start` for the argument handling.
/// The data directory is not removed. This needs discussion.
async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec!["Name".to_string(), "Result".to_string()]];
let to_uninstall = state
.applications
.values()
.filter(|app| app.process_context().is_none())
.filter(|app| re.is_match(app.name()))
.map(|app| app.name())
.cloned()
.collect::<Vec<Name>>();
for app in &to_uninstall {
match state.uninstall(&app).await {
Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]),
Err(e) => result.push(vec![app.to_string(), e.to_string()]),
}
}
to_table(result)
}
/// Trigger the update module.
async fn update(state: &mut State, args: &[&str]) -> Result<String> {
if args.len() != 1 {
return Err(anyhow!("Invalid arguments for update command"));
}
let dir = PathBuf::from(args[0]);
if !dir.exists().await {
let err = anyhow!("Update directory {} does not exists", dir.display());
Err(err)
} else {
let updates = crate::update::update(state, &dir).await?;
let mut result = vec![vec![
"Name".to_string(),
"From".to_string(),
"To".to | init | identifier_name |
console.rs | and remove all containers matching PATTERN\n\
update: Run update with provided ressources\n\
versions: Version list of installed applications";
pub async fn init(tx: &EventTx) -> Result<()> {
let rx = serve().await?;
let tx = tx.clone();
// Spawn a task that handles lines received on the debug port.
task::spawn(async move {
while let Ok((line, tx_reply)) = rx.recv().await {
tx.send(Event::Console(line, tx_reply)).await;
}
});
Ok(())
}
pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> {
info!("Running \'{}\'", command);
let mut commands = command.split_whitespace();
if let Some(cmd) = commands.next() {
let args = commands.collect::<Vec<&str>>();
let start_timestamp = time::Instant::now();
match match cmd {
"help" => help(),
"list" => list(state).await,
"ps" => ps(state).await,
"settings" => settings(),
"shutdown" => shutdown(state).await,
"start" => start(state, &args).await,
"stop" => stop(state, &args).await,
"uninstall" => uninstall(state, &args).await,
"update" => update(state, &args).await,
"versions" => versions(state),
_ => Err(anyhow!("Unknown command: {}", command)),
} {
Ok(mut r) => {
r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed()));
reply.send(r).await
}
Err(e) => {
let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e);
reply.send(msg).await
}
}
} else {
reply.send("Invalid command".into()).await
}
Ok(())
}
/// Return the help text
fn help() -> Result<String> {
Ok(HELP.into())
}
/// List all known containers instances and their state.
async fn list(state: &State) -> Result<String> {
to_table(
vec![vec![
"Name".to_string(),
"Version".to_string(),
"Running".to_string(),
"Type".to_string(),
]]
.iter()
.cloned()
.chain(
state
.applications()
.sorted_by_key(|app| app.name())
.map(|app| {
vec![
app.name().to_string(),
app.version().to_string(),
app.process_context()
.map(|c| format!("Yes (pid: {})", c.process().pid()))
.unwrap_or_else(|| "No".to_string()),
if app.container().is_resource_container() {
"resource"
} else {
"app"
}
.to_owned(),
]
}),
),
)
}
/// List all running applications.
#[cfg(all(not(target_os = "android"), not(target_os = "linux")))]
async fn ps(state: &State) -> Result<String> {
to_table(
vec![vec![
"Name".to_string(),
"Version".to_string(),
"Uptime".to_string(),
]]
.iter()
.cloned()
.chain(
state
.applications()
.filter_map(|app| app.process_context().map(|p| (app, p)))
.sorted_by_key(|(app, _)| app.name())
.map(|(app, context)| {
vec![
app.name().to_string(),
app.version().to_string(),
format!("{:?}", context.uptime()),
]
}),
),
)
}
/// List all running applications.
#[cfg(any(target_os = "android", target_os = "linux"))]
async fn ps(state: &State) -> Result<String> {
use pretty_bytes::converter::convert;
const PAGE_SIZE: usize = 4096;
let mut result = vec![[
"Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime",
]
.iter()
.map(ToString::to_string)
.collect()];
for app in state.applications().sorted_by_key(|app| app.name()) {
if let Some(ref context) = app.process_context() {
let pid = context.process().pid();
let statm = procinfo::pid::statm(pid as i32)?;
result.push(vec![
app.name().to_string(),
app.version().to_string(),
pid.to_string(),
convert((statm.size * PAGE_SIZE) as f64),
convert((statm.resident * PAGE_SIZE) as f64),
convert((statm.share * PAGE_SIZE) as f64),
convert((statm.text * PAGE_SIZE) as f64),
convert((statm.data * PAGE_SIZE) as f64),
format!("{:?}", context.uptime()),
]);
}
}
to_table(result) | /// Start applications. If `args` is empty *all* known applications that
/// are not in a running state are started. If a argument is supplied it
/// is used to construct a Regex and all container (names) matching that
/// Regex are attempted to be started.
async fn start(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec![
"Name".to_string(),
"Result".to_string(),
"Duration".to_string(),
]];
let apps = state
.applications()
// Filter for not already running containers
.filter(|app| app.process_context().is_none())
// Filter ressource container that are not startable
.filter(|app| !app.container().is_resource_container())
// Filter matching container
.filter(|app| re.is_match(app.name()))
// Sort container by name
.sorted_by_key(|app| app.name().clone())
.map(|app| app.name().clone())
.collect::<Vec<Name>>();
for app in &apps {
let start = time::Instant::now();
match state.start(&app, 0).await {
Ok(_) => result.push(vec![
app.to_string(),
"Ok".to_string(),
format!("{:?}", start.elapsed()),
]),
Err(e) => result.push(vec![
app.to_string(),
format!("Failed: {:?}", e),
format!("{:?}", start.elapsed()),
]),
}
}
to_table(result)
}
/// Dump settings
fn settings() -> Result<String> {
Ok(format!("{}", *SETTINGS))
}
/// Stop one, some or all containers. See start for the argument handling.
async fn stop(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec![
"Name".to_string(),
"Result".to_string(),
"Duration".to_string(),
]];
let apps = state
.applications()
.filter(|app| app.process_context().is_some())
.filter(|app| re.is_match(app.name()))
.map(|app| app.name().clone())
.collect::<Vec<Name>>();
for app in &apps {
let timeout = time::Duration::from_secs(10);
let reason = TerminationReason::Stopped;
let start = time::Instant::now();
match state.stop(&app, timeout, reason).await {
Ok(()) => result.push(vec![
app.to_string(),
"Ok".to_string(),
format!("{:?}", start.elapsed()),
]),
Err(e) => result.push(vec![
app.to_string(),
e.to_string(),
format!("{:?}", start.elapsed()),
]),
}
}
to_table(result)
}
/// Umount and remove a containers. See `start` for the argument handling.
/// The data directory is not removed. This needs discussion.
async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> {
let re = arg_regex(args)?;
let mut result = vec![vec!["Name".to_string(), "Result".to_string()]];
let to_uninstall = state
.applications
.values()
.filter(|app| app.process_context().is_none())
.filter(|app| re.is_match(app.name()))
.map(|app| app.name())
.cloned()
.collect::<Vec<Name>>();
for app in &to_uninstall {
match state.uninstall(&app).await {
Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]),
Err(e) => result.push(vec![app.to_string(), e.to_string()]),
}
}
to_table(result)
}
/// Trigger the update module.
async fn update(state: &mut State, args: &[&str]) -> Result<String> {
if args.len() != 1 {
return Err(anyhow!("Invalid arguments for update command"));
}
let dir = PathBuf::from(args[0]);
if !dir.exists().await {
let err = anyhow!("Update directory {} does not exists", dir.display());
Err(err)
} else {
let updates = crate::update::update(state, &dir).await?;
let mut result = vec![vec![
"Name".to_string(),
"From".to_string(),
"To".to_string | }
| random_line_split |
basic.rs | U> Point2<T, U> {
fn mixup<V, W>(self, other: Point2<V, W>) -> Point2<T, W> {
Point2 {
x: self.x,
y: other.y,
}
}
}
// traits
trait Summarizable {
fn summarize_author(&self) -> String;
fn summarize(&self) -> String {
format!("(Read more from {}...)", self.summarize_author())
}
}
struct NewsArticle {
headline: String,
location: String,
author: String,
content: String,
}
impl Summarizable for NewsArticle {
fn summarize_author(&self) -> String {
format!("{}", self.author)
}
fn summarize(&self) -> String {
format!(
"{}, by {} ({})",
self.headline,
self.summarize_author(),
self.location
)
}
}
struct Tweet {
username: String,
content: String,
reply: bool,
retweet: bool,
}
impl Summarizable for Tweet {
fn summarize_author(&self) -> String {
format!("@{}", self.username)
}
fn summarize(&self) -> String {
format!("{}: {}", self.summarize_author(), self.content)
}
}
// traits as parameters/ Trait bounds
fn notify(item: impl Summarizable) {
println!("Breaking news! {}", item.summarize());
}
// ^ syntactic sugar for:
// fn notify<T: Summarizable>(item: T) {
// println!("Breaking news! {}", item.summarize());
// }
fn notfiy<T: Summarizable + std::fmt::Display>(item1: T) {}
// when many traits are used -> prefer 'where'-clauses to not clutter the funciton definition
fn some_function<T, U>(t: T, u: U) -> i32
where
T: std::fmt::Display + Clone,
U: Clone + std::fmt::Debug,
{
4
}
fn returns_summarizable() -> impl Summarizable {
Tweet {
username: String::from("horse_ebooks"),
content: String::from("of cource, as you probablay already know people"),
reply: false,
retweet: false,
}
}
fn largest<T: std::cmp::PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
// Trait bounds to conditionally implement Methods
struct Pair<T> {
x: T,
y: T,
}
impl<T> Pair<T> {
fn new(x: T, y: T) -> Self {
Self { x, y }
}
}
// conditional implementation (only if traits are Display + PartialOrd)
impl<T: std::fmt::Display + std::cmp::PartialOrd> Pair<T> {
fn cmp_disply(&self) {
if self.x >= self.y {
println!("The largest member is x = {}", self.x);
} else {
println!("The largest member is y = {}", self.y);
}
}
}
// implement a trait if the type implements another train --- alias blanket implementations
// impl<T: std::fmt::Display> ToString for T { // if T already implements Display, than it also implements ToString
// }
// lifetimes
// lifetimes gurantee, that references are still valid, when used.
// Most of the time they are implicitly inferred. If they can't, they have to be explicitly specified
// &i32; a reference
// &'a i32; a reference with the explicit lifetime "'a"
// &'a mut i32; a mutable reference with the explicit lifetime "'a"
fn longest<'a>(x: &'a str, y: &'a str) -> &'a str {
// now the compiler knows, how long the return value can live. (as long as the smaller lifetime of x or y)
if x.len() > y.len() {
x
} else {
y
}
}
struct ImportantExcerpt<'a> {
part: &'a str, // if struct holds reference, a explicit lifetime is required
}
impl<'a> ImportantExcerpt<'a> {
fn level(&self) -> i32 {
3
}
}
// static lifetimes (references live for entire duration of program)... applies to all string ltierals
fn code_holder_10() {
let s: &'static str = "I have a static lifetime.";
}
// all generics together
fn longest_with_an_announcement<'a, T>(x: &'a str, y: &'a str, ann: T) -> &'a str
where
T: std::fmt::Display,
{
println!("Announcement! {}", ann);
if x.len() > y.len() {
x
} else {
y
}
}
// closures
fn code_holder_11() {
// types are automatically inferred (but can be explicitly specified)
let some_closure = |arg| {
println!("this is the argument: {}", arg);
};
let minimalist_closure = |x| x; // returns itself
some_closure(5);
minimalist_closure("lel");
// pattern: memorization / lazy evaluation
struct NoArgsCacher<T>
where
T: Fn() -> u32,
{
calculation: T,
value: Option<u32>,
}
impl<T> NoArgsCacher<T>
where
T: Fn() -> u32,
{
fn new(calculation: T) -> NoArgsCacher<T> {
NoArgsCacher {
calculation,
value: None,
}
}
fn value(&mut self) -> u32 {
match self.value {
Some(v) => v,
None => {
let v = (self.calculation)();
self.value = Some(v);
v
}
}
}
}
use std::thread;
use std::time::Duration;
let mut expensive_result = NoArgsCacher::new(|| {
println!("performing expensive calculation...");
thread::sleep(Duration::from_secs(2));
420
});
// TODO: create better Cacher with generics and a hash-table (args-result pairs)
}
// iterators
// zero-cost abstraction -> are very fast USE THEM!
fn code_holder_12() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
for val in v1_iter {
println!("Got: {}", val);
}
pub trait CustomIteratorTrait {
type Item; // associated type
fn next(&mut self) -> Option<Self::Item>;
}
#[test]
fn iterator_demonstration() {
let v1 = vec![1, 2, 3];
let mut v1_iter = v1.iter();
assert_eq!(v1_iter.next(), Some(&1));
assert_eq!(v1_iter.next(), Some(&2));
assert_eq!(v1_iter.next(), Some(&3));
assert_eq!(v1_iter.next(), None);
}
#[test]
fn iterator_sum() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum(); // iter has been consumed (moved) -> cannot be moved any more
}
#[test]
fn iterator_map() {
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect(); // collect() must be called because iterators are lazy
assert_eq!(v2, vec![2, 3, 4]);
}
struct Shoe {
size: u32,
style: String,
}
fn shoes_in_my_size(shoes: Vec<Shoe>, shoe_size: u32) -> Vec<Shoe> {
shoes.into_iter().filter(|s| s.size == shoe_size).collect()
}
#[test]
fn filter_by_size() {
let shoes = vec![
Shoe {
size: 10,
style: String::from("sneaker"),
},
Shoe {
size: 13,
style: String::from("sandal"),
},
Shoe {
size: 10,
style: String::from("boot"),
},
];
let in_my_size = shoes_in_my_size(shoes, 10);
assert_eq!(
in_my_size,
vec![
Shoe {
size: 10,
style: String::from("sneaker")
},
Shoe {
size: 10,
style: String::from("boot")
},
]
);
}
// own iterator
struct Counter {
count: u32,
}
impl Counter {
fn new() -> Counter {
Counter { count: 0 }
}
}
impl Iterator for Counter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
self.count += 1;
if self.count < 6 {
Some(self.count)
} else { | None
} | random_line_split | |
basic.rs | V6(String),
}
fn route(ip_kind: IpAddrKind) {}
fn code_holder_4() {
let four = IpAddrKind::V4; // are of same type
let six = IpAddrKind::V6;
route(IpAddrKind::V4);
route(IpAddrKind::V6);
let home = IpAddr::V4(127, 0, 0, 1);
let loopback = IpAddr::V6(String::from("::1"));
}
enum Message {
Quit,
Move { x: i32, y: i32 }, // struct
Write(String), // tuple struct
ChangeColor(i32, i32, i32), // tuple struct
}
impl Message {
fn call(&self) {
// code
}
}
// option
enum CustomOption<T> {
// replaces 'null'-value
Some(T),
None,
}
fn code_block_5() {
let some_number = Some(5); // option
let some_string = Some("a string");
let absent_number: Option<i32> = None;
}
// match: control flow operator
#[derive(Debug)]
enum UsState {
Alabama,
Alaska,
}
enum Coin {
Penny,
Nickel,
Dime,
Quarter(UsState),
}
fn value_in_cents(coin: Coin) -> u8 {
match coin {
Coin::Penny => 1,
Coin::Nickel => 5,
Coin::Dime => 10,
Coin::Quarter(state) => {
println!("State quarter from {:?}!", state);
25
}
}
}
fn plus_one(x: Option<i32>) -> Option<i32> {
match x {
None => None,
Some(i) => Some(i + 1),
}
}
fn matches_are_exhaustive(val: u8) {
match val {
1 => println!("one"),
2 => println!("two"),
5 => println!("five"),
7 => println!("seven"),
_ => (),
}
}
// if let
fn if_let() {
let some_u8_value = Some(0u8);
match some_u8_value {
Some(3) => println!("three"),
_ => (),
}
// equivalent to
if let Some(3) = some_u8_value {
println!("three");
}
}
// collections
fn code_holder_6() {
let v: Vec<i32> = Vec::new();
let v = vec![1, 2, 3];
let mut v = Vec::new();
v.push(5);
v.push(6);
let v = vec![1, 2, 3, 4, 5];
// two ways to access vector
let third: &i32 = &v[2]; // panics if fails
match v.get(2) {
// doesn't panic
Some(third) => (),
None => (),
}
// iterating
let mut v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
for i in &mut v {
*i += 50;
}
// multiple type vector
enum | {
Int(i32),
Float(f64),
Text(String),
}
let row = vec![
SpreadsheetCell::Int(3),
SpreadsheetCell::Text(String::from("blue")),
SpreadsheetCell::Float(10.12),
];
}
// strings
// str is implemented in the core language and String is in the standard library
fn code_holder_7() {
let mut s = String::new();
let data = "inital contents"; // implements 'Display' trait
let mut s = data.to_string();
s.push_str("bar");
s.push('a');
let s1 = String::from("Hello ");
let s2 = String::from("World");
let s3 = s1 + &s2; // s1 was moved! (fn add(self, s: &str) -> String)
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = format!("{}-{}-{}", s1, s2, s3);
// you can't index into string, because of ambigueties and other reasons -> be more percise
// slices... not so appropriate
let hello = "Здравствуйте";
let s = &hello[0..4]; // 4 bytes -> "Зд"
// best way: chars
for c in "नमस्ते".chars() {
println!("{}", c);
}
}
// Hash Maps
fn code_holder_8() {
use std::collections::HashMap;
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
// morphing collections
let teams = vec![String::from("Blue"), String::from("Yellow")];
let inital_scores = vec![10, 50];
let scores: HashMap<_, _> = teams.iter().zip(inital_scores.iter()).collect();
}
// errors
fn code_holder_9() {
// panicing!
// If rust panics before it quite it's starts unwinding (stack is cleared up), which takes a lot of time -> alternative abort (in Cargo.toml: panic = 'abort')
panic!("crash and burn");
// Result
use std::fs::File;
use std::io::ErrorKind;
use std::io::Read;
let f = File::open("hello.txt");
let f = match f {
Ok(file) => file,
Err(error) => match error.kind() {
ErrorKind::NotFound => match File::create("hello.txt") {
Ok(fc) => fc,
Err(e) => panic!("Problem creating the file: {:?}", e),
},
other_error => panic!("Problem opening the file: {:?}", other_error),
},
};
let f = File::open("hello.txt").unwrap(); // returns value if okay, panics otherwise
let f = File::open("hello.txt").expect_err("Own error message"); // same as unwrap() just with custom error message
// propagating error
fn read_username_from_file_verbose() -> Result<String, std::io::Error> {
// verbose way
let f = File::open("hello.txt");
let mut f = match f {
Ok(file) => file,
Err(e) => return Err(e),
};
let mut s = String::new();
match f.read_to_string(&mut s) {
Ok(_) => Ok(s),
Err(e) => Err(e),
}
}
fn read_username_from_file() -> Result<String, std::io::Error> {
// better way with ? operator
let mut f = File::open("hello.txt")?;
let mut s = String::new();
f.read_to_string(&mut s)?; // if ok expression has value, if Err then function returns with error
Ok(s)
}
}
// generics (similar to C++ typenames/templates)
enum own_Result<T, E> {
Ok(T),
Err(E),
}
struct Point1<T> {
x: T,
y: T,
}
impl<T> Point1<T> {
fn x(&self) -> &T {
&self.x
}
}
impl Point1<f32> {
fn distance_from_origin(&self) -> f32 {
(self.x.powi(2) + self.y.powi(2)).sqrt()
}
}
struct Point2<T, U> {
x: T,
y: U,
}
impl<T, U> Point2<T, U> {
fn mixup<V, W>(self, other: Point2<V, W>) -> Point2<T, W> {
Point2 {
x: self.x,
y: other.y,
}
}
}
// traits
trait Summarizable {
fn summarize_author(&self) -> String;
fn summarize(&self) -> String {
format!("(Read more from {}...)", self.summarize_author())
}
}
struct NewsArticle {
headline: String,
location: String,
author: String,
content: String,
}
impl Summarizable for NewsArticle {
fn summarize_author(&self) -> String {
format!("{}", self.author)
}
fn summarize(&self) -> String {
format!(
"{}, by {} ({})",
self.headline,
self.summarize_author(),
self.location
)
}
}
struct Tweet {
username: String,
content: String,
reply: bool,
retweet: bool,
}
impl Summarizable for Tweet {
fn summarize_author(&self) -> String {
format!("@{}", self.username)
}
fn summarize(&self) -> String {
format!("{}: {}", self.summarize_author(), self.content)
}
}
// traits as parameters/ Trait bounds
fn notify(item: impl Summarizable) {
println!("Breaking news! {}", item.summar | SpreadsheetCell | identifier_name |
vechain.go | .Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(str))))
return
}
//区块链浏览器浏览地址
func BlockChainExploreLink(transactionId string, config *VechainConfig) string {
return fmt.Sprintf(config.ExploreLink, transactionId)
}
//=========================Token======================
//返回Token结构
type Token struct {
Token string `json:"token"`
Expire int64 `json:"expire"`
}
var lock int32 = 0
var refreshError = fmt.Errorf("token refreshing")
func GetToken(config *VechainConfig) (token *Token, err error) {
if atomic.LoadInt32(&lock) == 1 {
err = refreshError
return
}
atomic.StoreInt32(&lock, 1)
defer atomic.StoreInt32(&lock, 0)
timestamp := time.Now().Unix()
form := new(Form)
form.AppId = config.DeveloperId
form.AppKey = config.DeveloperKey
form.Nonce = config.Nonce
form.Timestamp = strconv.FormatInt(timestamp, 10)
form.Signature = sign(timestamp, config)
requestUrl := config.SiteUrl + "v1/tokens"
formByte, err := json.Marshal(form)
if err != nil {
log.Error("%s", err.Error())
return
}
log.Debug("%+v", *form)
data := bytes.NewReader(formByte)
retryTimes := 0
Retry:
retryTimes++
if retryTimes > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
request, err := http.NewRequest("POST", requestUrl, data)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer request.Body.Close()
request.Header.Set("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer response.Body.Close()
if response.StatusCode != 200 {
log.Error("%s", err.Error())
goto Retry
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Error("%s", err.Error()) | respData := new(ResponseData)
respData.Data = new(Token)
err = json.Unmarshal(body, respData)
if respData.Code != 1 {
err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message)
log.Error(err.Error())
goto Retry
}
token = respData.Data.(*Token)
return
}
//======================Occupy=============
//抢占请求表单
type OccupyVidRequest struct {
RequestNo string `json:"requestNo"`
VidList []string `json:"vidList"`
}
//抢占响应结构
type OccupyVidResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Url string `json:"url,omitempty"` // 扫码 url
Quantity int `json:"quantity,omitempty"` //请求的vid个数
Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功)
SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表
FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表
}
// 抢占vid
func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) {
url := "v1/vid/occupy"
request := (ctx.Value("request")).(*OccupyVidRequest)
data, err := json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
//log.Debug("request: %s \n",data)
var justReturn bool
go func() {
<-ctx.Done()
err = ctx.Err()
justReturn = true
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Hour)
} else if retryTimes > 10 {
time.Sleep(1 * time.Minute)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data))
if err != nil {
log.Error(err.Error())
goto Retry
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
goto Retry
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody)
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(OccupyVidResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
return
}
if respData.Code == 1 {
response = respData.Data.(*OccupyVidResponse)
log.Debug("response %+v \n", *response)
if response.Status == "GENERATING" {
time.Sleep(1 * time.Minute)
goto Retry
}
return
} else if respData.Code == 100004 {
goto RetryWithNewToken
} else {
err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message)
log.Error(err.Error())
}
return
}
//================================Post========
type PostArtifactResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Uid string `json:"uid,omitempty"` // 上链子账户id
Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足)
TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果
}
type PostArtifactResponseData struct {
TxId string `json:"txid"` //上链事务id
ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause
Vid string `json:"vid"` //商品ID
DataHash string `json:"dataHash"` //?
}
type PostArtifactRequest struct {
RequestNo string `json:"requestNo"` //请求编号
Uid string `json:"uid"` //用户 Id
Data []*PostArtifactRequestData `json:"data,omitempty"`
}
type PostArtifactRequestData struct {
DataHash string `json:"dataHash"`
Vid string `json:"vid"`
}
// 异步上链
//
func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) {
url := "v1/artifacts/hashinfo/create"
request := (ctx.Value("request")).(*PostArtifactRequest)
var data []byte
data, err = json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
justReturn := false
go func() {
<-ctx.Done()
justReturn = true
err = ctx.Err()
log.Debug("ctx deadLine: %s", err.Error())
return
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data))
if err != nil {
log.Error(err.Error())
return
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode)
log.Error(err.Error())
goto Retry
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(PostArtifactResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
goto Retry
}
if respData.Code == 1 {
log.Debug("postArtifact response %s,%+v \n", respData.Message, resp | goto Retry
}
log.Debug("toke response :%s \n", body) | random_line_split |
vechain.go | > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
request, err := http.NewRequest("POST", requestUrl, data)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer request.Body.Close()
request.Header.Set("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer response.Body.Close()
if response.StatusCode != 200 {
log.Error("%s", err.Error())
goto Retry
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
log.Debug("toke response :%s \n", body)
respData := new(ResponseData)
respData.Data = new(Token)
err = json.Unmarshal(body, respData)
if respData.Code != 1 {
err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message)
log.Error(err.Error())
goto Retry
}
token = respData.Data.(*Token)
return
}
//======================Occupy=============
//抢占请求表单
type OccupyVidRequest struct {
RequestNo string `json:"requestNo"`
VidList []string `json:"vidList"`
}
//抢占响应结构
type OccupyVidResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Url string `json:"url,omitempty"` // 扫码 url
Quantity int `json:"quantity,omitempty"` //请求的vid个数
Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功)
SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表
FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表
}
// 抢占vid
func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) {
url := "v1/vid/occupy"
request := (ctx.Value("request")).(*OccupyVidRequest)
data, err := json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
//log.Debug("request: %s \n",data)
var justReturn bool
go func() {
<-ctx.Done()
err = ctx.Err()
justReturn = true
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Hour)
} else if retryTimes > 10 {
time.Sleep(1 * time.Minute)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data))
if err != nil {
log.Error(err.Error())
goto Retry
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
goto Retry
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody)
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(OccupyVidResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
return
}
if respData.Code == 1 {
response = respData.Data.(*OccupyVidResponse)
log.Debug("response %+v \n", *response)
if response.Status == "GENERATING" {
time.Sleep(1 * time.Minute)
goto Retry
}
return
} else if respData.Code == 100004 {
goto RetryWithNewToken
} else {
err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message)
log.Error(err.Error())
}
return
}
//================================Post========
type PostArtifactResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Uid string `json:"uid,omitempty"` // 上链子账户id
Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足)
TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果
}
type PostArtifactResponseData struct {
TxId string `json:"txid"` //上链事务id
ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause
Vid string `json:"vid"` //商品ID
DataHash string `json:"dataHash"` //?
}
type PostArtifactRequest struct {
RequestNo string `json:"requestNo"` //请求编号
Uid string `json:"uid"` //用户 Id
Data []*PostArtifactRequestData `json:"data,omitempty"`
}
type PostArtifactRequestData struct {
DataHash string `json:"dataHash"`
Vid string `json:"vid"`
}
// 异步上链
//
func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) {
url := "v1/artifacts/hashinfo/create"
request := (ctx.Value("request")).(*PostArtifactRequest)
var data []byte
data, err = json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
justReturn := false
go func() {
<-ctx.Done()
justReturn = true
err = ctx.Err()
log.Debug("ctx deadLine: %s", err.Error())
return
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data))
if err != nil {
log.Error(err.Error())
return
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode)
log.Error(err.Error())
goto Retry
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(PostArtifactResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
goto Retry
}
if respData.Code == 1 {
log.Debug("postArtifact response %s,%+v \n", respData.Message, respData.Data)
response = respData.Data.(*PostArtifactResponse)
if response.Status == "PROCESSING" {
time.Sleep(1 * time.Minute)
goto Retry
}
} else if respData.Code == 100004 {
goto RetryWithNewToken
} else {
err = fmt.Errorf("PostArtifactResponseerror, remote response Code:%d, MSG: %s.", respData.Code, respData.Message)
log.Error(err.Error())
return
}
return
}
//================CreateAccount============
type CreateUser struct {
RequestNo string `json:"requestNo"` //请求编号
Uid string `json:"uid"` //用户 Id(已分配时返回)
Status string `json:"status"` //状态(PROCESSING:处理中,SUCCESS:成功,FAILURE:失败)
}
// 创建账号
// 在此系统只只需创建一个账号,无多账户的需求。
func GenerateSubAccount(requestNo, accountName string, config *VechainConfig, tokenServer IToken) (uid string, err error) {
url := "v1/artifacts/user/create"
postData := `
{
"requestNo":"%s",
"name":"%s"
}
`
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBufferString(fmt.Sprintf(postData, requestNo, accountName)))
if err != nil {
log.Err | or(err.Error())
| identifier_name | |
vechain.go | .Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(str))))
return
}
//区块链浏览器浏览地址
func BlockChainExploreLink(transactionId string, config *VechainConfig) string {
return fmt.Sprintf(config.ExploreLink, transactionId)
}
//=========================Token======================
//返回Token结构
type Token struct {
Token string `json:"token"`
Expire int64 `json:"expire"`
}
var lock int32 = 0
var refreshError = fmt.Errorf("token refreshing")
func GetToken(config *VechainConfig) (token *Token, err error) {
if atomic.LoadInt32(&lock) == 1 {
err = refreshError
return
}
atomic.StoreInt32(&lock, 1)
defer atomic.StoreInt32(&lock, 0)
timestamp := time.Now().Unix()
form := new(Form)
form.AppId = config.DeveloperId
form.AppKey = config.DeveloperKey
form.Nonce = config.Nonce
form.Timestamp = strconv.FormatInt(timestamp, 10)
form.Signature = sign(timestamp, config)
requestUrl := config.SiteUrl + "v1/tokens"
formByte, err := json.Marshal(form)
if err != nil {
log.Error("%s", err.Error())
return
}
log.Debug("%+v", *form)
data := bytes.NewReader(formByte)
retryTimes := 0
Retry:
retryTimes++
if retryTimes > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
request, err := http.NewRequest("POST", requestUrl, data)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer request.Body.Close()
request.Header.Set("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer response.Body.Close()
if response.StatusCode != 200 {
log.Error("%s", err.Error())
goto Retry
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
log.Debug("toke response :%s \n", body)
respData := new(ResponseData)
respData.Data = new(Token)
err = json.Unmarshal(body, respData)
if respData.Code != 1 {
err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message)
log.Error(err.Error())
goto Retry
}
token = respData.Data.(*Token)
return
}
//======================Occupy=============
//抢占请求表单
type OccupyVidRequest struct {
RequestNo string `json:"requestNo"`
VidList []string `json:"vidList"`
}
//抢占响应结构
type OccupyVidResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Url string `json:"url,omitempty"` // 扫码 url
Quantity int `json:"quantity,omitempty"` //请求的vid个数
Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功)
SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表
FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表
}
// 抢占vid
func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) {
url := "v1/vid/occupy"
request := (ctx.Value("request")).(*OccupyVidRequest)
data, err := json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
//log.Debug("request: %s \n",data)
var justReturn bool
go func() {
<-ctx.Done()
err = ctx.Err()
justReturn = true
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Hour)
} else if retryTimes > 10 {
time.Sleep(1 * time.Minute)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, | != nil {
log.Error(err.Error())
goto Retry
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
goto Retry
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody)
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(OccupyVidResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
return
}
if respData.Code == 1 {
response = respData.Data.(*OccupyVidResponse)
log.Debug("response %+v \n", *response)
if response.Status == "GENERATING" {
time.Sleep(1 * time.Minute)
goto Retry
}
return
} else if respData.Code == 100004 {
goto RetryWithNewToken
} else {
err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message)
log.Error(err.Error())
}
return
}
//================================Post========
type PostArtifactResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Uid string `json:"uid,omitempty"` // 上链子账户id
Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足)
TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果
}
type PostArtifactResponseData struct {
TxId string `json:"txid"` //上链事务id
ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause
Vid string `json:"vid"` //商品ID
DataHash string `json:"dataHash"` //?
}
type PostArtifactRequest struct {
RequestNo string `json:"requestNo"` //请求编号
Uid string `json:"uid"` //用户 Id
Data []*PostArtifactRequestData `json:"data,omitempty"`
}
type PostArtifactRequestData struct {
DataHash string `json:"dataHash"`
Vid string `json:"vid"`
}
// 异步上链
//
func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) {
url := "v1/artifacts/hashinfo/create"
request := (ctx.Value("request")).(*PostArtifactRequest)
var data []byte
data, err = json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
justReturn := false
go func() {
<-ctx.Done()
justReturn = true
err = ctx.Err()
log.Debug("ctx deadLine: %s", err.Error())
return
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data))
if err != nil {
log.Error(err.Error())
return
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode)
log.Error(err.Error())
goto Retry
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(PostArtifactResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
goto Retry
}
if respData.Code == 1 {
log.Debug("postArtifact response %s,%+v \n", respData.Message, | bytes.NewBuffer(data))
if err | conditional_block |
vechain.go | .Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(str))))
return
}
//区块链浏览器浏览地址
func BlockChainExploreLink(transactionId string, config *VechainConfig) string {
return fmt.Sprintf(config.ExploreLink, tr | ============
//返回Token结构
type Token struct {
Token string `json:"token"`
Expire int64 `json:"expire"`
}
var lock int32 = 0
var refreshError = fmt.Errorf("token refreshing")
func GetToken(config *VechainConfig) (token *Token, err error) {
if atomic.LoadInt32(&lock) == 1 {
err = refreshError
return
}
atomic.StoreInt32(&lock, 1)
defer atomic.StoreInt32(&lock, 0)
timestamp := time.Now().Unix()
form := new(Form)
form.AppId = config.DeveloperId
form.AppKey = config.DeveloperKey
form.Nonce = config.Nonce
form.Timestamp = strconv.FormatInt(timestamp, 10)
form.Signature = sign(timestamp, config)
requestUrl := config.SiteUrl + "v1/tokens"
formByte, err := json.Marshal(form)
if err != nil {
log.Error("%s", err.Error())
return
}
log.Debug("%+v", *form)
data := bytes.NewReader(formByte)
retryTimes := 0
Retry:
retryTimes++
if retryTimes > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
request, err := http.NewRequest("POST", requestUrl, data)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer request.Body.Close()
request.Header.Set("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
defer response.Body.Close()
if response.StatusCode != 200 {
log.Error("%s", err.Error())
goto Retry
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Error("%s", err.Error())
goto Retry
}
log.Debug("toke response :%s \n", body)
respData := new(ResponseData)
respData.Data = new(Token)
err = json.Unmarshal(body, respData)
if respData.Code != 1 {
err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message)
log.Error(err.Error())
goto Retry
}
token = respData.Data.(*Token)
return
}
//======================Occupy=============
//抢占请求表单
type OccupyVidRequest struct {
RequestNo string `json:"requestNo"`
VidList []string `json:"vidList"`
}
//抢占响应结构
type OccupyVidResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Url string `json:"url,omitempty"` // 扫码 url
Quantity int `json:"quantity,omitempty"` //请求的vid个数
Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功)
SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表
FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表
}
// 抢占vid
func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) {
url := "v1/vid/occupy"
request := (ctx.Value("request")).(*OccupyVidRequest)
data, err := json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
//log.Debug("request: %s \n",data)
var justReturn bool
go func() {
<-ctx.Done()
err = ctx.Err()
justReturn = true
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Hour)
} else if retryTimes > 10 {
time.Sleep(1 * time.Minute)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data))
if err != nil {
log.Error(err.Error())
goto Retry
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
goto Retry
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody)
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(OccupyVidResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
return
}
if respData.Code == 1 {
response = respData.Data.(*OccupyVidResponse)
log.Debug("response %+v \n", *response)
if response.Status == "GENERATING" {
time.Sleep(1 * time.Minute)
goto Retry
}
return
} else if respData.Code == 100004 {
goto RetryWithNewToken
} else {
err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message)
log.Error(err.Error())
}
return
}
//================================Post========
type PostArtifactResponse struct {
RequestNo string `json:"requestNo,omitempty"` // 请求编号
Uid string `json:"uid,omitempty"` // 上链子账户id
Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足)
TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果
}
type PostArtifactResponseData struct {
TxId string `json:"txid"` //上链事务id
ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause
Vid string `json:"vid"` //商品ID
DataHash string `json:"dataHash"` //?
}
type PostArtifactRequest struct {
RequestNo string `json:"requestNo"` //请求编号
Uid string `json:"uid"` //用户 Id
Data []*PostArtifactRequestData `json:"data,omitempty"`
}
type PostArtifactRequestData struct {
DataHash string `json:"dataHash"`
Vid string `json:"vid"`
}
// 异步上链
//
func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) {
url := "v1/artifacts/hashinfo/create"
request := (ctx.Value("request")).(*PostArtifactRequest)
var data []byte
data, err = json.Marshal(request)
if err != nil {
log.Error(err.Error())
return
}
justReturn := false
go func() {
<-ctx.Done()
justReturn = true
err = ctx.Err()
log.Debug("ctx deadLine: %s", err.Error())
return
}()
retryTimes := 0
RetryWithNewToken:
token := tokenServer.GetToken()
Retry:
retryTimes++
if justReturn {
return
}
if retryTimes > 100 {
time.Sleep(1 * time.Minute)
} else if retryTimes > 1000 {
time.Sleep(1 * time.Hour)
}
req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data))
if err != nil {
log.Error(err.Error())
return
}
req.Header.Add("Content-Type", "application/json;charset=utf-8")
req.Header.Add("language", "zh_hans")
req.Header.Add("x-api-token", token)
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
log.Error(err.Error())
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode)
log.Error(err.Error())
goto Retry
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err.Error())
goto Retry
}
respData := new(ResponseData)
respData.Data = new(PostArtifactResponse)
err = json.Unmarshal(respBody, respData)
if err != nil {
log.Error(err.Error())
goto Retry
}
if respData.Code == 1 {
log.Debug("postArtifact response %s,%+v \n", respData.Message | ansactionId)
}
//=========================Token========== | identifier_body |
pyvideo_scrape.py | )
raise ValueError("{} can't be null".format(mandatory_field))
self.issue = event_data['issue']
if isinstance(event_data['youtube_list'], str):
self.youtube_lists = [event_data['youtube_list']]
elif isinstance(event_data['youtube_list'], list):
self.youtube_lists = event_data['youtube_list']
else:
raise TypeError(
"youtube_list must be a string or a list of strings")
self.related_urls = event_data.get('related_urls', [])
self.language = event_data.get('language', None)
self.tags = event_data.get('tags', [])
if not self.tags:
self.tags = []
if 'dates' in event_data and event_data['dates']:
self.know_date = True
self.date_begin = event_data['dates']['begin']
self.date_end = event_data['dates'].get('end', self.date_begin)
self.date_default = event_data['dates'].get(
'default', self.date_begin)
else:
self.know_date = False
self.minimal_download = event_data.get('minimal_download', False)
if self.minimal_download:
self.branch = "{}--minimal-download".format(self.branch)
self.overwrite, self.add_new_files, self.wipe = False, False, False
self.overwrite_fields = []
if 'overwrite' in event_data and event_data['overwrite']:
overwrite = event_data['overwrite']
self.overwrite = True
if 'all' in overwrite and overwrite['all']:
self.wipe = True
else:
if 'add_new_files' in overwrite and overwrite['add_new_files']:
self.add_new_files = True
if ('existing_files_fields' in overwrite
and overwrite['existing_files_fields']):
self.overwrite_fields = overwrite['existing_files_fields']
def create_branch(self):
"""Create a new branch in pyvideo repository to add a new event"""
os.chdir(str(self.repository_path))
sh.git.checkout('master')
sh.git.checkout('-b', self.branch)
logger.debug('Branch {} created', self.branch)
def create_dirs(self):
"""Create new directories and conference file in pyvideo repository to
add a new event"""
for new_directory in [self.event_dir, self.event_dir / 'videos']:
new_directory.mkdir(exist_ok=self.overwrite)
logger.debug('Dir {} created', new_directory)
def create_category(self): # , conf_dir, title):
"""Create category.json for the conference"""
category_file_path = self.event_dir / 'category.json'
category_data = {
'title': self.title,
}
category_data_text = json.dumps(category_data, **
JSON_FORMAT_KWARGS) + '\n'
save_file(category_file_path, category_data_text)
logger.debug('File {} created', category_file_path)
def download_video_data(self):
"""Download youtube metadata corresponding to this event youtube
lists"""
def scrape_url(url):
"""Scrape the video list, youtube_dl does all the heavy lifting"""
ydl_opts = {
"ignoreerrors": True, # Skip private and unavaliable videos
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
with ydl:
result_ydl = ydl.extract_info(
url,
download=False # No download needed, only the info
)
logger.debug('Url scraped {}', url)
if 'entries' in result_ydl:
# It's a playlist or a list of videos
return result_ydl['entries']
# Just a video
return [result_ydl]
| if youtube_video_data: # Valid video
self.youtube_videos.append(
Video.from_youtube(
video_data=youtube_video_data, event=self))
else:
logger.warning('Null youtube video')
def load_video_data(self):
"""Load video data form existing event video files"""
self.file_videos = [
Video.from_file(path, self)
for path in self.video_dir.glob('*.json')
]
def merge_video_data(self):
"""Merge old video data when configured so"""
if self.overwrite:
if self.wipe:
self.videos = self.youtube_videos
elif self.add_new_files or self.overwrite_fields:
old_videos = {
video.filename: video
for video in self.file_videos
}
old_videos_url = {
video.metadata['videos'][0]['url']: video
for video in self.file_videos
}
new_videos = {}
for video in self.youtube_videos:
new_video_url = video.metadata['videos'][0]['url']
if new_video_url in old_videos_url:
new_video_filename = old_videos_url[new_video_url].filename
else:
new_video_filename = video.filename
new_videos[new_video_filename] = video
if self.overwrite_fields:
forgotten = set(old_videos) - set(new_videos)
for name in forgotten:
logger.warning('Missing video: {} {}',
old_videos[name].filename,
old_videos[name].metadata['videos'][0]['url'],
)
changes = set(new_videos).intersection(set(old_videos))
for path in changes:
merged_video = old_videos[path].merge(
new_videos[path], self.overwrite_fields)
self.videos.append(merged_video)
else:
self.videos = self.file_videos
if self.add_new_files:
adds = set(new_videos) - set(old_videos)
self.videos.extend([new_videos[path] for path in adds])
else: # not self.overwrite
self.videos = self.youtube_videos
def save_video_data(self):
"""Save all event videos in PyVideo format"""
if self.overwrite:
# Erase old event videos
for path in self.video_dir.glob('*.json'):
path.unlink()
for video in self.videos:
video.save()
def create_commit(self, event_data_yaml):
"""Create a new commit in pyvideo repository with the new event data"""
os.chdir(str(self.repository_path))
sh.git.checkout(self.branch)
sh.git.add(self.event_dir)
message_body = (
'\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml)
+ '\nScraped with [pyvideo_scrape]'
+ '(https://github.com/pyvideo/pyvideo_scrape)')
if self.minimal_download:
message = ('Minimal download: '
+ '{}\n\nMinimal download executed for #{}'.format(
self.title, self.issue)
+ '\n\nOnly data that needs [no review](https://'
+ 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'
+ '\nThis event needs further scraping and human '
+ 'reviewing for the description and other data to show.'
+ message_body)
sh.git.commit('-m', message)
sh.git.push('--set-upstream', 'origin', self.branch)
# ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch)
sh.git.checkout('master')
else:
message = (
'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue)
+ message_body)
sh.git.commit('-m', message)
sh.git.checkout('master')
logger.debug('Conference {} commited', self.branch)
class Video:
"""PyVideo Video metadata"""
@staticmethod
def __calculate_title(video_data):
"""Calculate title from youtube fields"""
title = 'Unknown'
if 'fulltitle' in video_data.keys():
title = video_data['fulltitle']
elif 'title' in video_data.keys():
title = video_data['title']
elif '_filename' in video_data.keys():
title = video_data['_filename']
return title
def __calculate_slug(self):
"""Calculate slug from title"""
return slugify.slugify(self.metadata['title'])
def __calculate_date_recorded(self, upload_date_str):
"""Calculate record date from youtube field and event dates"""
upload_date = datetime.date(
int(upload_date_str[0:4]),
int(upload_date_str[4:6]), int(upload_date_str[6:8]))
if self.event.know_date:
if not (self.event.date_begin <= upload_date <=
self.event.date_end):
return self.event.date_default.isoformat()
return upload_date.isoformat()
def __init__(self, event):
self.event = event
self.filename = None
self.metadata = {}
@classmethod
def from_file(cls, path, event):
"""Contructor. Retrieves video metadata from file"""
self = cls(event)
self.filename = path.stem # Name without .json
try:
with path.open() as f_path:
self.metadata = json.load(f_path)
except ValueError:
print('Json syntax error in file {}'.format(path))
raise
return self
@classmethod
def from_youtube(cls, video_data, event):
"""Contructor. Retrieves video metadata with youtube-dl"""
self = cls(event)
metadata = self.metadata
metadata['title'] = self.__calculate_title(video_data)
self.filename = self.__calculate_slug()
metadata['speakers'] = ['TODO'] # Needs human intervention later
# youtube_id = video_data['display_id']
# metadata[' | youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])
for youtube_video_data in youtube_list: | random_line_split |
pyvideo_scrape.py | )
raise ValueError("{} can't be null".format(mandatory_field))
self.issue = event_data['issue']
if isinstance(event_data['youtube_list'], str):
self.youtube_lists = [event_data['youtube_list']]
elif isinstance(event_data['youtube_list'], list):
self.youtube_lists = event_data['youtube_list']
else:
raise TypeError(
"youtube_list must be a string or a list of strings")
self.related_urls = event_data.get('related_urls', [])
self.language = event_data.get('language', None)
self.tags = event_data.get('tags', [])
if not self.tags:
self.tags = []
if 'dates' in event_data and event_data['dates']:
self.know_date = True
self.date_begin = event_data['dates']['begin']
self.date_end = event_data['dates'].get('end', self.date_begin)
self.date_default = event_data['dates'].get(
'default', self.date_begin)
else:
self.know_date = False
self.minimal_download = event_data.get('minimal_download', False)
if self.minimal_download:
self.branch = "{}--minimal-download".format(self.branch)
self.overwrite, self.add_new_files, self.wipe = False, False, False
self.overwrite_fields = []
if 'overwrite' in event_data and event_data['overwrite']:
overwrite = event_data['overwrite']
self.overwrite = True
if 'all' in overwrite and overwrite['all']:
self.wipe = True
else:
if 'add_new_files' in overwrite and overwrite['add_new_files']:
self.add_new_files = True
if ('existing_files_fields' in overwrite
and overwrite['existing_files_fields']):
self.overwrite_fields = overwrite['existing_files_fields']
def create_branch(self):
"""Create a new branch in pyvideo repository to add a new event"""
os.chdir(str(self.repository_path))
sh.git.checkout('master')
sh.git.checkout('-b', self.branch)
logger.debug('Branch {} created', self.branch)
def create_dirs(self):
"""Create new directories and conference file in pyvideo repository to
add a new event"""
for new_directory in [self.event_dir, self.event_dir / 'videos']:
new_directory.mkdir(exist_ok=self.overwrite)
logger.debug('Dir {} created', new_directory)
def create_category(self): # , conf_dir, title):
"""Create category.json for the conference"""
category_file_path = self.event_dir / 'category.json'
category_data = {
'title': self.title,
}
category_data_text = json.dumps(category_data, **
JSON_FORMAT_KWARGS) + '\n'
save_file(category_file_path, category_data_text)
logger.debug('File {} created', category_file_path)
def download_video_data(self):
"""Download youtube metadata corresponding to this event youtube
lists"""
def scrape_url(url):
"""Scrape the video list, youtube_dl does all the heavy lifting"""
ydl_opts = {
"ignoreerrors": True, # Skip private and unavaliable videos
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
with ydl:
result_ydl = ydl.extract_info(
url,
download=False # No download needed, only the info
)
logger.debug('Url scraped {}', url)
if 'entries' in result_ydl:
# It's a playlist or a list of videos
return result_ydl['entries']
# Just a video
return [result_ydl]
youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])
for youtube_video_data in youtube_list:
if youtube_video_data: # Valid video
self.youtube_videos.append(
Video.from_youtube(
video_data=youtube_video_data, event=self))
else:
logger.warning('Null youtube video')
def load_video_data(self):
"""Load video data form existing event video files"""
self.file_videos = [
Video.from_file(path, self)
for path in self.video_dir.glob('*.json')
]
def merge_video_data(self):
"""Merge old video data when configured so"""
if self.overwrite:
if self.wipe:
self.videos = self.youtube_videos
elif self.add_new_files or self.overwrite_fields:
old_videos = {
video.filename: video
for video in self.file_videos
}
old_videos_url = {
video.metadata['videos'][0]['url']: video
for video in self.file_videos
}
new_videos = {}
for video in self.youtube_videos:
new_video_url = video.metadata['videos'][0]['url']
if new_video_url in old_videos_url:
new_video_filename = old_videos_url[new_video_url].filename
else:
new_video_filename = video.filename
new_videos[new_video_filename] = video
if self.overwrite_fields:
forgotten = set(old_videos) - set(new_videos)
for name in forgotten:
logger.warning('Missing video: {} {}',
old_videos[name].filename,
old_videos[name].metadata['videos'][0]['url'],
)
changes = set(new_videos).intersection(set(old_videos))
for path in changes:
merged_video = old_videos[path].merge(
new_videos[path], self.overwrite_fields)
self.videos.append(merged_video)
else:
self.videos = self.file_videos
if self.add_new_files:
adds = set(new_videos) - set(old_videos)
self.videos.extend([new_videos[path] for path in adds])
else: # not self.overwrite
|
def save_video_data(self):
"""Save all event videos in PyVideo format"""
if self.overwrite:
# Erase old event videos
for path in self.video_dir.glob('*.json'):
path.unlink()
for video in self.videos:
video.save()
def create_commit(self, event_data_yaml):
"""Create a new commit in pyvideo repository with the new event data"""
os.chdir(str(self.repository_path))
sh.git.checkout(self.branch)
sh.git.add(self.event_dir)
message_body = (
'\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml)
+ '\nScraped with [pyvideo_scrape]'
+ '(https://github.com/pyvideo/pyvideo_scrape)')
if self.minimal_download:
message = ('Minimal download: '
+ '{}\n\nMinimal download executed for #{}'.format(
self.title, self.issue)
+ '\n\nOnly data that needs [no review](https://'
+ 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'
+ '\nThis event needs further scraping and human '
+ 'reviewing for the description and other data to show.'
+ message_body)
sh.git.commit('-m', message)
sh.git.push('--set-upstream', 'origin', self.branch)
# ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch)
sh.git.checkout('master')
else:
message = (
'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue)
+ message_body)
sh.git.commit('-m', message)
sh.git.checkout('master')
logger.debug('Conference {} commited', self.branch)
class Video:
"""PyVideo Video metadata"""
@staticmethod
def __calculate_title(video_data):
"""Calculate title from youtube fields"""
title = 'Unknown'
if 'fulltitle' in video_data.keys():
title = video_data['fulltitle']
elif 'title' in video_data.keys():
title = video_data['title']
elif '_filename' in video_data.keys():
title = video_data['_filename']
return title
def __calculate_slug(self):
"""Calculate slug from title"""
return slugify.slugify(self.metadata['title'])
def __calculate_date_recorded(self, upload_date_str):
"""Calculate record date from youtube field and event dates"""
upload_date = datetime.date(
int(upload_date_str[0:4]),
int(upload_date_str[4:6]), int(upload_date_str[6:8]))
if self.event.know_date:
if not (self.event.date_begin <= upload_date <=
self.event.date_end):
return self.event.date_default.isoformat()
return upload_date.isoformat()
def __init__(self, event):
self.event = event
self.filename = None
self.metadata = {}
@classmethod
def from_file(cls, path, event):
"""Contructor. Retrieves video metadata from file"""
self = cls(event)
self.filename = path.stem # Name without .json
try:
with path.open() as f_path:
self.metadata = json.load(f_path)
except ValueError:
print('Json syntax error in file {}'.format(path))
raise
return self
@classmethod
def from_youtube(cls, video_data, event):
"""Contructor. Retrieves video metadata with youtube-dl"""
self = cls(event)
metadata = self.metadata
metadata['title'] = self.__calculate_title(video_data)
self.filename = self.__calculate_slug()
metadata['speakers'] = ['TODO'] # Needs human intervention later
# youtube_id = video_data['display_id']
# metadata[' | self.videos = self.youtube_videos | conditional_block |
pyvideo_scrape.py | ']
def create_branch(self):
"""Create a new branch in pyvideo repository to add a new event"""
os.chdir(str(self.repository_path))
sh.git.checkout('master')
sh.git.checkout('-b', self.branch)
logger.debug('Branch {} created', self.branch)
def create_dirs(self):
"""Create new directories and conference file in pyvideo repository to
add a new event"""
for new_directory in [self.event_dir, self.event_dir / 'videos']:
new_directory.mkdir(exist_ok=self.overwrite)
logger.debug('Dir {} created', new_directory)
def create_category(self): # , conf_dir, title):
"""Create category.json for the conference"""
category_file_path = self.event_dir / 'category.json'
category_data = {
'title': self.title,
}
category_data_text = json.dumps(category_data, **
JSON_FORMAT_KWARGS) + '\n'
save_file(category_file_path, category_data_text)
logger.debug('File {} created', category_file_path)
def download_video_data(self):
"""Download youtube metadata corresponding to this event youtube
lists"""
def scrape_url(url):
"""Scrape the video list, youtube_dl does all the heavy lifting"""
ydl_opts = {
"ignoreerrors": True, # Skip private and unavaliable videos
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
with ydl:
result_ydl = ydl.extract_info(
url,
download=False # No download needed, only the info
)
logger.debug('Url scraped {}', url)
if 'entries' in result_ydl:
# It's a playlist or a list of videos
return result_ydl['entries']
# Just a video
return [result_ydl]
youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])
for youtube_video_data in youtube_list:
if youtube_video_data: # Valid video
self.youtube_videos.append(
Video.from_youtube(
video_data=youtube_video_data, event=self))
else:
logger.warning('Null youtube video')
def load_video_data(self):
"""Load video data form existing event video files"""
self.file_videos = [
Video.from_file(path, self)
for path in self.video_dir.glob('*.json')
]
def merge_video_data(self):
"""Merge old video data when configured so"""
if self.overwrite:
if self.wipe:
self.videos = self.youtube_videos
elif self.add_new_files or self.overwrite_fields:
old_videos = {
video.filename: video
for video in self.file_videos
}
old_videos_url = {
video.metadata['videos'][0]['url']: video
for video in self.file_videos
}
new_videos = {}
for video in self.youtube_videos:
new_video_url = video.metadata['videos'][0]['url']
if new_video_url in old_videos_url:
new_video_filename = old_videos_url[new_video_url].filename
else:
new_video_filename = video.filename
new_videos[new_video_filename] = video
if self.overwrite_fields:
forgotten = set(old_videos) - set(new_videos)
for name in forgotten:
logger.warning('Missing video: {} {}',
old_videos[name].filename,
old_videos[name].metadata['videos'][0]['url'],
)
changes = set(new_videos).intersection(set(old_videos))
for path in changes:
merged_video = old_videos[path].merge(
new_videos[path], self.overwrite_fields)
self.videos.append(merged_video)
else:
self.videos = self.file_videos
if self.add_new_files:
adds = set(new_videos) - set(old_videos)
self.videos.extend([new_videos[path] for path in adds])
else: # not self.overwrite
self.videos = self.youtube_videos
def save_video_data(self):
"""Save all event videos in PyVideo format"""
if self.overwrite:
# Erase old event videos
for path in self.video_dir.glob('*.json'):
path.unlink()
for video in self.videos:
video.save()
def create_commit(self, event_data_yaml):
"""Create a new commit in pyvideo repository with the new event data"""
os.chdir(str(self.repository_path))
sh.git.checkout(self.branch)
sh.git.add(self.event_dir)
message_body = (
'\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml)
+ '\nScraped with [pyvideo_scrape]'
+ '(https://github.com/pyvideo/pyvideo_scrape)')
if self.minimal_download:
message = ('Minimal download: '
+ '{}\n\nMinimal download executed for #{}'.format(
self.title, self.issue)
+ '\n\nOnly data that needs [no review](https://'
+ 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'
+ '\nThis event needs further scraping and human '
+ 'reviewing for the description and other data to show.'
+ message_body)
sh.git.commit('-m', message)
sh.git.push('--set-upstream', 'origin', self.branch)
# ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch)
sh.git.checkout('master')
else:
message = (
'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue)
+ message_body)
sh.git.commit('-m', message)
sh.git.checkout('master')
logger.debug('Conference {} commited', self.branch)
class Video:
"""PyVideo Video metadata"""
@staticmethod
def __calculate_title(video_data):
"""Calculate title from youtube fields"""
title = 'Unknown'
if 'fulltitle' in video_data.keys():
title = video_data['fulltitle']
elif 'title' in video_data.keys():
title = video_data['title']
elif '_filename' in video_data.keys():
title = video_data['_filename']
return title
def __calculate_slug(self):
"""Calculate slug from title"""
return slugify.slugify(self.metadata['title'])
def __calculate_date_recorded(self, upload_date_str):
"""Calculate record date from youtube field and event dates"""
upload_date = datetime.date(
int(upload_date_str[0:4]),
int(upload_date_str[4:6]), int(upload_date_str[6:8]))
if self.event.know_date:
if not (self.event.date_begin <= upload_date <=
self.event.date_end):
return self.event.date_default.isoformat()
return upload_date.isoformat()
def __init__(self, event):
self.event = event
self.filename = None
self.metadata = {}
@classmethod
def from_file(cls, path, event):
"""Contructor. Retrieves video metadata from file"""
self = cls(event)
self.filename = path.stem # Name without .json
try:
with path.open() as f_path:
self.metadata = json.load(f_path)
except ValueError:
print('Json syntax error in file {}'.format(path))
raise
return self
@classmethod
def from_youtube(cls, video_data, event):
"""Contructor. Retrieves video metadata with youtube-dl"""
self = cls(event)
metadata = self.metadata
metadata['title'] = self.__calculate_title(video_data)
self.filename = self.__calculate_slug()
metadata['speakers'] = ['TODO'] # Needs human intervention later
# youtube_id = video_data['display_id']
# metadata['thumbnail_url'] =
# 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(youtube_id)
metadata['thumbnail_url'] = video_data['thumbnail']
metadata['videos'] = [{
'type': 'youtube',
'url': video_data['webpage_url']
}]
metadata['recorded'] = self.__calculate_date_recorded(
video_data['upload_date'])
# optional values
metadata['copyright_text'] = video_data['license']
metadata['duration'] = video_data['duration'] # In seconds
metadata['language'] = video_data['formats'][0].get(
'language', event.language)
if not metadata['language']:
metadata['language'] = event.language
metadata['related_urls'] = copy.deepcopy(event.related_urls)
if event.minimal_download:
metadata['speakers'] = []
metadata['tags'] = event.tags
metadata['description'] = ''
else:
metadata['tags'] = sorted(
set(video_data['tags']).union(set(event.tags)))
metadata['description'] = video_data['description']
description_urls = list(
set(
re.findall(r'http[s]?://[^ \\\n\t()[\]"`´\']+', video_data[
'description'])))
for url in description_urls:
metadata['related_urls'].append({'label': url, 'url': url})
return self
def merge(self, new_video, fields):
" | ""Create video copy overwriting fields """
merged_video = Video(self.event)
merged_video.filename = self.filename
for field in self.metadata:
if field in set(fields):
merged_video.metadata[field] = new_video.metadata.get(field)
else:
merged_video.metadata[field] = self.metadata.get(field)
return merged_video
| identifier_body | |
pyvideo_scrape.py | )
raise ValueError("{} can't be null".format(mandatory_field))
self.issue = event_data['issue']
if isinstance(event_data['youtube_list'], str):
self.youtube_lists = [event_data['youtube_list']]
elif isinstance(event_data['youtube_list'], list):
self.youtube_lists = event_data['youtube_list']
else:
raise TypeError(
"youtube_list must be a string or a list of strings")
self.related_urls = event_data.get('related_urls', [])
self.language = event_data.get('language', None)
self.tags = event_data.get('tags', [])
if not self.tags:
self.tags = []
if 'dates' in event_data and event_data['dates']:
self.know_date = True
self.date_begin = event_data['dates']['begin']
self.date_end = event_data['dates'].get('end', self.date_begin)
self.date_default = event_data['dates'].get(
'default', self.date_begin)
else:
self.know_date = False
self.minimal_download = event_data.get('minimal_download', False)
if self.minimal_download:
self.branch = "{}--minimal-download".format(self.branch)
self.overwrite, self.add_new_files, self.wipe = False, False, False
self.overwrite_fields = []
if 'overwrite' in event_data and event_data['overwrite']:
overwrite = event_data['overwrite']
self.overwrite = True
if 'all' in overwrite and overwrite['all']:
self.wipe = True
else:
if 'add_new_files' in overwrite and overwrite['add_new_files']:
self.add_new_files = True
if ('existing_files_fields' in overwrite
and overwrite['existing_files_fields']):
self.overwrite_fields = overwrite['existing_files_fields']
def create_branch(self):
"""Create a new branch in pyvideo repository to add a new event"""
os.chdir(str(self.repository_path))
sh.git.checkout('master')
sh.git.checkout('-b', self.branch)
logger.debug('Branch {} created', self.branch)
def create_dirs(self):
"""Create new directories and conference file in pyvideo repository to
add a new event"""
for new_directory in [self.event_dir, self.event_dir / 'videos']:
new_directory.mkdir(exist_ok=self.overwrite)
logger.debug('Dir {} created', new_directory)
def create_category(self): # , conf_dir, title):
"""Create category.json for the conference"""
category_file_path = self.event_dir / 'category.json'
category_data = {
'title': self.title,
}
category_data_text = json.dumps(category_data, **
JSON_FORMAT_KWARGS) + '\n'
save_file(category_file_path, category_data_text)
logger.debug('File {} created', category_file_path)
def download_video_data(self):
"""Download youtube metadata corresponding to this event youtube
lists"""
def scrape_url(url):
"""Scrape the video list, youtube_dl does all the heavy lifting"""
ydl_opts = {
"ignoreerrors": True, # Skip private and unavaliable videos
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
with ydl:
result_ydl = ydl.extract_info(
url,
download=False # No download needed, only the info
)
logger.debug('Url scraped {}', url)
if 'entries' in result_ydl:
# It's a playlist or a list of videos
return result_ydl['entries']
# Just a video
return [result_ydl]
youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])
for youtube_video_data in youtube_list:
if youtube_video_data: # Valid video
self.youtube_videos.append(
Video.from_youtube(
video_data=youtube_video_data, event=self))
else:
logger.warning('Null youtube video')
def load_video_data(self):
"""Load video data form existing event video files"""
self.file_videos = [
Video.from_file(path, self)
for path in self.video_dir.glob('*.json')
]
def merge_video_data(self):
"""Merge old video data when configured so"""
if self.overwrite:
if self.wipe:
self.videos = self.youtube_videos
elif self.add_new_files or self.overwrite_fields:
old_videos = {
video.filename: video
for video in self.file_videos
}
old_videos_url = {
video.metadata['videos'][0]['url']: video
for video in self.file_videos
}
new_videos = {}
for video in self.youtube_videos:
new_video_url = video.metadata['videos'][0]['url']
if new_video_url in old_videos_url:
new_video_filename = old_videos_url[new_video_url].filename
else:
new_video_filename = video.filename
new_videos[new_video_filename] = video
if self.overwrite_fields:
forgotten = set(old_videos) - set(new_videos)
for name in forgotten:
logger.warning('Missing video: {} {}',
old_videos[name].filename,
old_videos[name].metadata['videos'][0]['url'],
)
changes = set(new_videos).intersection(set(old_videos))
for path in changes:
merged_video = old_videos[path].merge(
new_videos[path], self.overwrite_fields)
self.videos.append(merged_video)
else:
self.videos = self.file_videos
if self.add_new_files:
adds = set(new_videos) - set(old_videos)
self.videos.extend([new_videos[path] for path in adds])
else: # not self.overwrite
self.videos = self.youtube_videos
def save_video_data(self):
"""Save all event videos in PyVideo format"""
if self.overwrite:
# Erase old event videos
for path in self.video_dir.glob('*.json'):
path.unlink()
for video in self.videos:
video.save()
def create_commit(self, event_data_yaml):
"""Create a new commit in pyvideo repository with the new event data"""
os.chdir(str(self.repository_path))
sh.git.checkout(self.branch)
sh.git.add(self.event_dir)
message_body = (
'\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml)
+ '\nScraped with [pyvideo_scrape]'
+ '(https://github.com/pyvideo/pyvideo_scrape)')
if self.minimal_download:
message = ('Minimal download: '
+ '{}\n\nMinimal download executed for #{}'.format(
self.title, self.issue)
+ '\n\nOnly data that needs [no review](https://'
+ 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'
+ '\nThis event needs further scraping and human '
+ 'reviewing for the description and other data to show.'
+ message_body)
sh.git.commit('-m', message)
sh.git.push('--set-upstream', 'origin', self.branch)
# ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch)
sh.git.checkout('master')
else:
message = (
'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue)
+ message_body)
sh.git.commit('-m', message)
sh.git.checkout('master')
logger.debug('Conference {} commited', self.branch)
class Video:
"""PyVideo Video metadata"""
@staticmethod
def __calculate_title(video_data):
"""Calculate title from youtube fields"""
title = 'Unknown'
if 'fulltitle' in video_data.keys():
title = video_data['fulltitle']
elif 'title' in video_data.keys():
title = video_data['title']
elif '_filename' in video_data.keys():
title = video_data['_filename']
return title
def __calculate_slug(self):
"""Calculate slug from title"""
return slugify.slugify(self.metadata['title'])
def | (self, upload_date_str):
"""Calculate record date from youtube field and event dates"""
upload_date = datetime.date(
int(upload_date_str[0:4]),
int(upload_date_str[4:6]), int(upload_date_str[6:8]))
if self.event.know_date:
if not (self.event.date_begin <= upload_date <=
self.event.date_end):
return self.event.date_default.isoformat()
return upload_date.isoformat()
def __init__(self, event):
self.event = event
self.filename = None
self.metadata = {}
@classmethod
def from_file(cls, path, event):
"""Contructor. Retrieves video metadata from file"""
self = cls(event)
self.filename = path.stem # Name without .json
try:
with path.open() as f_path:
self.metadata = json.load(f_path)
except ValueError:
print('Json syntax error in file {}'.format(path))
raise
return self
@classmethod
def from_youtube(cls, video_data, event):
"""Contructor. Retrieves video metadata with youtube-dl"""
self = cls(event)
metadata = self.metadata
metadata['title'] = self.__calculate_title(video_data)
self.filename = self.__calculate_slug()
metadata['speakers'] = ['TODO'] # Needs human intervention later
# youtube_id = video_data['display_id']
# metadata | __calculate_date_recorded | identifier_name |
verbose.py | end':[]}
for start_end in ('start', 'end'):
for tmptime in lout.iter(start_end + '-valid-time'):
moment = parse_noaa_time_string(tmptime.text)
layouts[name][start_end].append(moment)
return layouts
def combine_days(action, pdata, debug=False):
"""
Perform <action> for all the values within each day, where <action> is either sum or mean.
"""
assert action == 'sum' or action == 'mean'
starts, ends, values, weight_sum = [], [], [], []
def get_time_delta_in_hours(start, end):
""" NOTE assumes no overflows or wraps or nothing """
dhour = end.hour - start.hour
dmin = end.minute - start.minute
dsec = end.second - start.second
dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second
# print start, end, dtime
return float(dtime.seconds) / (60*60)
def add_new_day(dstart, dend, dval):
weight = '-'
starts.append(dstart)
ends.append(dend)
if action == 'sum':
values.append(dval)
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values.append(weight*dval)
weight_sum.append(weight)
else:
raise Exception('invalid action'+action)
if debug:
print ' new day', dstart, dend, weight, dval
def increment_day(dstart, dend, dval):
ends[-1] = dend
weight = '-'
if action == 'sum':
values[-1] += dval
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values[-1] += weight * dval
weight_sum[-1] += weight
else:
raise Exception('invalid action'+action)
if debug:
print ' increment', starts[-1], dend, weight, dval, ' ', values[-1]
def incorporate_value(istart, iend, ival):
# if debug:
# print ' incorporate', istart, iend, ival
if len(values) == 0 or ends[-1].day != istart.day:
add_new_day(istart, iend, ival)
else:
increment_day(istart, iend, ival)
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
if len(pdata['time-layout']['end']) > 0: # some of them only have start times
end = pdata['time-layout']['end'][ival]
elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can
end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1)
else:
end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours
if debug:
print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival])
# skip null values (probably from cloud cover)
if pdata['values'][ival] == None:
if debug:
print ' skipping null value'
continue
val = float(pdata['values'][ival])
if start.day == end.day:
incorporate_value(start, end, val)
else:
if debug:
print ' start (%s) and end (%s) days differ' % (start, end)
assert start.day + 1 == end.day # for now only handle the case where they differ by one day
midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0)
if action == 'sum':
hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour
hours_after = get_time_delta_in_hours(midnight, end) #end.hour
val_before = val * float(hours_before) / (hours_before + hours_after)
val_after = val * float(hours_after) / (hours_before + hours_after)
if debug:
print ' apportioning between',
print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before),
print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after)
else:
val_before, val_after = val, val
incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before)
incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after)
dailyvals = {}
for ival in range(len(values)):
dailyvals[int(starts[ival].day)] = values[ival]
if action == 'mean':
# if debug:
# print 'total', get_time_delta_in_hours(starts[ival], ends[ival])
dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival])
if debug:
print ' final:'
for key in sorted(dailyvals.keys()):
print ' ', key, dailyvals[key]
return dailyvals
def parse_data(root, time_layouts, debug=False):
pars = root.find('data').find('parameters')
data = {}
for vardata in pars:
# first figure out the name
all_names = list(vardata.iter('name'))
if len(all_names) != 1:
raise Exception('ERROR too many names for %s: %s' % (vardata.tag, ', '.join(all_names)))
name = all_names[0].text
if name in data:
raise Exception('ERROR %s already in data' % key)
# then get the data
data[name] = {}
if vardata.get('time-layout') is None: # single-point data
if debug:
print ' no layout %s' % name
continue
else: # time series data
data[name]['time-layout'] = time_layouts[vardata.get('time-layout')]
data[name]['values'] = [ val.text for val in vardata.findall('value') ]
if debug:
print 'added %s (%s)' % (name, vardata.get('time-layout'))
if len(data[name]['time-layout']['start']) != len(data[name]['values']):
if debug:
print ' time layout different length for %s' % name
else:
pass
return data
def find_min_temp(pdata, prev_day, next_day):
|
def find_max_temp(pdata, day):
""" find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == day and end.day == day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout']))
return None
def prettify_values(data, ndays=5, debug=False):
mintemps = data['Daily Minimum Temperature']
maxtemps = data['Daily Maximum Temperature']
liquid = combine_days('sum', data['Liquid Precipitation Amount'])
snow = combine_days('sum', data['Snow Amount'])
wind_speed = combine_days('mean', data['Wind Speed'])
cloud = combine_days('mean', data['Cloud Cover Amount'])
percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation'])
txtvals = {'days':[], 'tmax':[], 'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]}
if debug:
print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud')
rowlist = []
for iday in range(ndays):
day = datetime.now() + timedelta(days=iday)
tmax = find_max_temp(maxtemps, day | """ find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == prev_day and end.day == next_day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout']))
return None | identifier_body |
verbose.py | end':[]}
for start_end in ('start', 'end'):
for tmptime in lout.iter(start_end + '-valid-time'):
moment = parse_noaa_time_string(tmptime.text)
layouts[name][start_end].append(moment)
return layouts
def combine_days(action, pdata, debug=False):
"""
Perform <action> for all the values within each day, where <action> is either sum or mean.
"""
assert action == 'sum' or action == 'mean'
starts, ends, values, weight_sum = [], [], [], []
def get_time_delta_in_hours(start, end):
""" NOTE assumes no overflows or wraps or nothing """
dhour = end.hour - start.hour
dmin = end.minute - start.minute
dsec = end.second - start.second
dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second
# print start, end, dtime
return float(dtime.seconds) / (60*60)
def add_new_day(dstart, dend, dval):
weight = '-'
starts.append(dstart)
ends.append(dend)
if action == 'sum':
values.append(dval)
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values.append(weight*dval)
weight_sum.append(weight)
else:
raise Exception('invalid action'+action)
if debug:
print ' new day', dstart, dend, weight, dval
def increment_day(dstart, dend, dval):
ends[-1] = dend
weight = '-'
if action == 'sum':
values[-1] += dval
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values[-1] += weight * dval
weight_sum[-1] += weight
else:
raise Exception('invalid action'+action)
if debug:
print ' increment', starts[-1], dend, weight, dval, ' ', values[-1]
def incorporate_value(istart, iend, ival):
# if debug:
# print ' incorporate', istart, iend, ival
if len(values) == 0 or ends[-1].day != istart.day:
add_new_day(istart, iend, ival)
else:
increment_day(istart, iend, ival)
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
if len(pdata['time-layout']['end']) > 0: # some of them only have start times
end = pdata['time-layout']['end'][ival]
elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can
end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1)
else:
end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours
if debug:
print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival])
# skip null values (probably from cloud cover)
if pdata['values'][ival] == None:
if debug:
print ' skipping null value'
continue
val = float(pdata['values'][ival])
if start.day == end.day:
incorporate_value(start, end, val)
else:
if debug:
print ' start (%s) and end (%s) days differ' % (start, end)
assert start.day + 1 == end.day # for now only handle the case where they differ by one day
midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0)
if action == 'sum':
hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour
hours_after = get_time_delta_in_hours(midnight, end) #end.hour
val_before = val * float(hours_before) / (hours_before + hours_after)
val_after = val * float(hours_after) / (hours_before + hours_after)
if debug:
print ' apportioning between',
print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before),
print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after)
else:
val_before, val_after = val, val
incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before)
incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after)
dailyvals = {}
for ival in range(len(values)):
dailyvals[int(starts[ival].day)] = values[ival]
if action == 'mean':
# if debug:
# print 'total', get_time_delta_in_hours(starts[ival], ends[ival])
dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival])
if debug:
print ' final:'
for key in sorted(dailyvals.keys()):
print ' ', key, dailyvals[key]
return dailyvals
def parse_data(root, time_layouts, debug=False):
pars = root.find('data').find('parameters')
data = {}
for vardata in pars:
# first figure out the name
all_names = list(vardata.iter('name'))
if len(all_names) != 1:
raise Exception('ERROR too many names for %s: %s' % (vardata.tag, ', '.join(all_names)))
name = all_names[0].text
if name in data:
raise Exception('ERROR %s already in data' % key)
# then get the data
data[name] = {}
if vardata.get('time-layout') is None: # single-point data
if debug:
print ' no layout %s' % name
continue
else: # time series data
data[name]['time-layout'] = time_layouts[vardata.get('time-layout')]
data[name]['values'] = [ val.text for val in vardata.findall('value') ]
if debug:
print 'added %s (%s)' % (name, vardata.get('time-layout'))
if len(data[name]['time-layout']['start']) != len(data[name]['values']):
if debug:
print ' time layout different length for %s' % name
else:
pass
return data
def find_min_temp(pdata, prev_day, next_day):
""" find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == prev_day and end.day == next_day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout']))
return None
def | (pdata, day):
""" find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == day and end.day == day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout']))
return None
def prettify_values(data, ndays=5, debug=False):
mintemps = data['Daily Minimum Temperature']
maxtemps = data['Daily Maximum Temperature']
liquid = combine_days('sum', data['Liquid Precipitation Amount'])
snow = combine_days('sum', data['Snow Amount'])
wind_speed = combine_days('mean', data['Wind Speed'])
cloud = combine_days('mean', data['Cloud Cover Amount'])
percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation'])
txtvals = {'days':[], 'tmax':[], 'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]}
if debug:
print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud')
rowlist = []
for iday in range(ndays):
day = datetime.now() + timedelta(days=iday)
tmax = find_max_temp(maxtemps, | find_max_temp | identifier_name |
verbose.py | end':[]}
for start_end in ('start', 'end'):
for tmptime in lout.iter(start_end + '-valid-time'):
moment = parse_noaa_time_string(tmptime.text)
layouts[name][start_end].append(moment)
return layouts
def combine_days(action, pdata, debug=False):
"""
Perform <action> for all the values within each day, where <action> is either sum or mean.
"""
assert action == 'sum' or action == 'mean'
starts, ends, values, weight_sum = [], [], [], []
def get_time_delta_in_hours(start, end):
""" NOTE assumes no overflows or wraps or nothing """
dhour = end.hour - start.hour
dmin = end.minute - start.minute
dsec = end.second - start.second
dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second
# print start, end, dtime
return float(dtime.seconds) / (60*60)
def add_new_day(dstart, dend, dval):
weight = '-'
starts.append(dstart)
ends.append(dend)
if action == 'sum':
values.append(dval)
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values.append(weight*dval)
weight_sum.append(weight)
else:
raise Exception('invalid action'+action)
if debug:
print ' new day', dstart, dend, weight, dval
def increment_day(dstart, dend, dval):
ends[-1] = dend
weight = '-'
if action == 'sum':
values[-1] += dval
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values[-1] += weight * dval
weight_sum[-1] += weight
else:
raise Exception('invalid action'+action)
if debug:
print ' increment', starts[-1], dend, weight, dval, ' ', values[-1]
def incorporate_value(istart, iend, ival):
# if debug:
# print ' incorporate', istart, iend, ival
if len(values) == 0 or ends[-1].day != istart.day:
add_new_day(istart, iend, ival)
else:
increment_day(istart, iend, ival)
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
if len(pdata['time-layout']['end']) > 0: # some of them only have start times
end = pdata['time-layout']['end'][ival]
elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can
end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1)
else:
end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours
if debug:
print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival])
# skip null values (probably from cloud cover)
if pdata['values'][ival] == None:
if debug:
print ' skipping null value'
continue
val = float(pdata['values'][ival])
if start.day == end.day:
incorporate_value(start, end, val)
else:
if debug:
print ' start (%s) and end (%s) days differ' % (start, end)
assert start.day + 1 == end.day # for now only handle the case where they differ by one day
midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0)
if action == 'sum':
hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour
hours_after = get_time_delta_in_hours(midnight, end) #end.hour
val_before = val * float(hours_before) / (hours_before + hours_after)
val_after = val * float(hours_after) / (hours_before + hours_after)
if debug:
print ' apportioning between',
print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before),
print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after)
else:
val_before, val_after = val, val
incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before)
incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after)
dailyvals = {}
for ival in range(len(values)):
|
if debug:
print ' final:'
for key in sorted(dailyvals.keys()):
print ' ', key, dailyvals[key]
return dailyvals
def parse_data(root, time_layouts, debug=False):
pars = root.find('data').find('parameters')
data = {}
for vardata in pars:
# first figure out the name
all_names = list(vardata.iter('name'))
if len(all_names) != 1:
raise Exception('ERROR too many names for %s: %s' % (vardata.tag, ', '.join(all_names)))
name = all_names[0].text
if name in data:
raise Exception('ERROR %s already in data' % key)
# then get the data
data[name] = {}
if vardata.get('time-layout') is None: # single-point data
if debug:
print ' no layout %s' % name
continue
else: # time series data
data[name]['time-layout'] = time_layouts[vardata.get('time-layout')]
data[name]['values'] = [ val.text for val in vardata.findall('value') ]
if debug:
print 'added %s (%s)' % (name, vardata.get('time-layout'))
if len(data[name]['time-layout']['start']) != len(data[name]['values']):
if debug:
print ' time layout different length for %s' % name
else:
pass
return data
def find_min_temp(pdata, prev_day, next_day):
""" find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == prev_day and end.day == next_day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout']))
return None
def find_max_temp(pdata, day):
""" find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == day and end.day == day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout']))
return None
def prettify_values(data, ndays=5, debug=False):
mintemps = data['Daily Minimum Temperature']
maxtemps = data['Daily Maximum Temperature']
liquid = combine_days('sum', data['Liquid Precipitation Amount'])
snow = combine_days('sum', data['Snow Amount'])
wind_speed = combine_days('mean', data['Wind Speed'])
cloud = combine_days('mean', data['Cloud Cover Amount'])
percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation'])
txtvals = {'days':[], 'tmax':[], 'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]}
if debug:
print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud')
rowlist = []
for iday in range(ndays):
day = datetime.now() + timedelta(days=iday)
tmax = find_max_temp(maxtemps, | dailyvals[int(starts[ival].day)] = values[ival]
if action == 'mean':
# if debug:
# print 'total', get_time_delta_in_hours(starts[ival], ends[ival])
dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival]) | conditional_block |
verbose.py | 'sum' or action == 'mean'
starts, ends, values, weight_sum = [], [], [], []
def get_time_delta_in_hours(start, end):
""" NOTE assumes no overflows or wraps or nothing """
dhour = end.hour - start.hour
dmin = end.minute - start.minute
dsec = end.second - start.second
dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second
# print start, end, dtime
return float(dtime.seconds) / (60*60)
def add_new_day(dstart, dend, dval):
weight = '-'
starts.append(dstart)
ends.append(dend)
if action == 'sum':
values.append(dval)
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values.append(weight*dval)
weight_sum.append(weight)
else:
raise Exception('invalid action'+action)
if debug:
print ' new day', dstart, dend, weight, dval
def increment_day(dstart, dend, dval):
ends[-1] = dend
weight = '-'
if action == 'sum':
values[-1] += dval
elif action == 'mean':
weight = float(get_time_delta_in_hours(dstart, dend))
values[-1] += weight * dval
weight_sum[-1] += weight
else:
raise Exception('invalid action'+action)
if debug:
print ' increment', starts[-1], dend, weight, dval, ' ', values[-1]
def incorporate_value(istart, iend, ival):
# if debug:
# print ' incorporate', istart, iend, ival
if len(values) == 0 or ends[-1].day != istart.day:
add_new_day(istart, iend, ival)
else:
increment_day(istart, iend, ival)
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
if len(pdata['time-layout']['end']) > 0: # some of them only have start times
end = pdata['time-layout']['end'][ival]
elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can
end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1)
else:
end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours
if debug:
print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival])
# skip null values (probably from cloud cover)
if pdata['values'][ival] == None:
if debug:
print ' skipping null value'
continue
val = float(pdata['values'][ival])
if start.day == end.day:
incorporate_value(start, end, val)
else:
if debug:
print ' start (%s) and end (%s) days differ' % (start, end)
assert start.day + 1 == end.day # for now only handle the case where they differ by one day
midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0)
if action == 'sum':
hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour
hours_after = get_time_delta_in_hours(midnight, end) #end.hour
val_before = val * float(hours_before) / (hours_before + hours_after)
val_after = val * float(hours_after) / (hours_before + hours_after)
if debug:
print ' apportioning between',
print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before),
print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after)
else:
val_before, val_after = val, val
incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before)
incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after)
dailyvals = {}
for ival in range(len(values)):
dailyvals[int(starts[ival].day)] = values[ival]
if action == 'mean':
# if debug:
# print 'total', get_time_delta_in_hours(starts[ival], ends[ival])
dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival])
if debug:
print ' final:'
for key in sorted(dailyvals.keys()):
print ' ', key, dailyvals[key]
return dailyvals
def parse_data(root, time_layouts, debug=False):
pars = root.find('data').find('parameters')
data = {}
for vardata in pars:
# first figure out the name
all_names = list(vardata.iter('name'))
if len(all_names) != 1:
raise Exception('ERROR too many names for %s: %s' % (vardata.tag, ', '.join(all_names)))
name = all_names[0].text
if name in data:
raise Exception('ERROR %s already in data' % key)
# then get the data
data[name] = {}
if vardata.get('time-layout') is None: # single-point data
if debug:
print ' no layout %s' % name
continue
else: # time series data
data[name]['time-layout'] = time_layouts[vardata.get('time-layout')]
data[name]['values'] = [ val.text for val in vardata.findall('value') ]
if debug:
print 'added %s (%s)' % (name, vardata.get('time-layout'))
if len(data[name]['time-layout']['start']) != len(data[name]['values']):
if debug:
print ' time layout different length for %s' % name
else:
pass
return data
def find_min_temp(pdata, prev_day, next_day):
""" find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == prev_day and end.day == next_day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout']))
return None
def find_max_temp(pdata, day):
""" find min temp for the night of <prev_day> to <next_day> """
for ival in range(len(pdata['values'])):
start = pdata['time-layout']['start'][ival]
end = pdata['time-layout']['end'][ival]
if start.day == day and end.day == day:
return int(pdata['values'][ival])
# raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout']))
return None
def prettify_values(data, ndays=5, debug=False):
mintemps = data['Daily Minimum Temperature']
maxtemps = data['Daily Maximum Temperature']
liquid = combine_days('sum', data['Liquid Precipitation Amount'])
snow = combine_days('sum', data['Snow Amount'])
wind_speed = combine_days('mean', data['Wind Speed'])
cloud = combine_days('mean', data['Cloud Cover Amount'])
percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation'])
txtvals = {'days':[], 'tmax':[], 'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]}
if debug:
print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud')
rowlist = []
for iday in range(ndays):
day = datetime.now() + timedelta(days=iday)
tmax = find_max_temp(maxtemps, day.day)
tmin = find_min_temp(mintemps, day.day, day.day+1)
row = ''
if tmax is not None:
row += ' %d' % tmax
if tmin is not None:
row += ' %d<br>' % tmin | if day.day in percent_precip:
row += ' %.0f<font size=1>%%</font>' % percent_precip[day.day]
| random_line_split | |
ethereum-block.ts | this block. This
* can be calculated from the previous block’s difficulty level and the
* timestamp.
*/
difficulty: bigint;
/**
* A scalar value equal to the number of ancestor blocks. The genesis block
* has a number of zero.
*/
blockNumber: bigint;
/**
* A scalar value equal to the current limit of gas expenditure per block.
*/
gasLimit: bigint;
/**
* A scalar value equal to the total gas used in transactions in this block.
*/
gasUsed: bigint;
/**
* A scalar value equal to the reasonable output of Unix’s time() at this
* block’s inception.
*/
timestamp: bigint;
/**
* An arbitrary byte array containing data relevant to this block. This must
* be 32 bytes or fewer.
*/
extraData: Buffer;
/**
* A 256-bit hash which proves combined with the nonce that a sufficient
* amount of computation has been carried out on this block.
*/
mixHash: bigint;
/**
* A 64-bit hash which proves combined with the mix-hash that a sufficient
* amount of computation has been carried out on this block.
*/
nonce: bigint;
}
/**
 * The data stored in a block for a signed Ethereum transaction.
 *
 * All scalar fields are decoded to `bigint`; variable-length payloads stay
 * as raw `Buffer`s.
 */
export interface EthereumTransaction {
  /**
   * A scalar value equal to the number of transactions sent from this address
   * or, in the case of accounts with associated code, the number of
   * contract-creations made by this account.
   */
  nonce: bigint;
  /**
   * A scalar value equal to the number of Wei to be paid per unit of gas for
   * all computation costs incurred as a result of the execution of this
   * transaction.
   */
  gasPrice: bigint;
  /**
   * A scalar value equal to the maximum amount of gas that should be used in
   * executing this transaction.
   */
  gasLimit: bigint;
  /**
   * A scalar value equal to the number of Wei to be transferred to the message
   * call’s recipient or, in the case of contract creation, as an endowment to
   * the newly created account.
   */
  value: bigint;
  /**
   * The 160-bit address of the message call’s recipient or, for a contract
   * creation transaction, CONTRACT_CREATION (-1), to distinguish against
   * account 0x0000000000000000000000000000000000000000.
   */
  to: bigint;
  /**
   * An unlimited size byte array specifying the EVM-code for the account
   * initialisation procedure, for a contract transaction, or an unlimited size
   * byte array specifying the input data of the message call, for a message
   * call.
   */
  data: Buffer;
  /** The 160-bit address of the message caller. */
  from: bigint;
}
/** Error thrown when an Ethereum block or header cannot be decoded. */
export class EthereumBlockDecoderError extends Error {
  /**
   * @param message Human-readable description of the decoding failure.
   */
  constructor(message: string) {
    super(message);
    // Restore the prototype chain so `instanceof EthereumBlockDecoderError`
    // works even when the compiler targets ES5, where extending built-ins
    // like Error is otherwise broken.
    Object.setPrototypeOf(this, EthereumBlockDecoderError.prototype);
    // Report the subclass name instead of the generic "Error" in stack
    // traces and error messages.
    this.name = 'EthereumBlockDecoderError';
  }
}
// Indices of the fields within an RLP-decoded block header list. The order
// must match the header tuple consumed by decodeHeader and produced by
// encodeHeaderAsRLP.
const HEADER_PARENT_HASH = 0;
const HEADER_UNCLE_HASH = 1;
const HEADER_BENEFICIARY = 2;
const HEADER_STATE_ROOT = 3;
const HEADER_TRANSACTIONS_ROOT = 4;
const HEADER_RECEIPTS_ROOT = 5;
const HEADER_LOGSBLOOM = 6;
const HEADER_DIFFICULTY = 7;
const HEADER_BLOCK_NUMBER = 8;
const HEADER_GAS_LIMIT = 9;
const HEADER_GAS_USED = 10;
const HEADER_TIMESTAMP = 11;
const HEADER_EXTRADATA = 12;
const HEADER_MIXHASH = 13;
const HEADER_NONCE = 14;
/**
 * Given a RLP-serialized list with an Ethereum header, decodes the list and
 * validates the Ethereum header.
 *
 * @param header The RLP-encoded list with the header to decode.
 *
 * @returns A validated and decoded EthereumHeader.
 *
 * @throws EthereumBlockDecoderError if the input is not a list or is missing
 *     header fields.
 */
export function decodeHeader(header: RlpList): EthereumHeader {
  if (!Array.isArray(header)) {
    throw new EthereumBlockDecoderError(
        `Expected block header as RLP-encoded list!`);
  }
  // All 15 header fields must be present; indexing past the end of a short
  // list would otherwise surface as an opaque TypeError inside toBigIntBE
  // instead of a descriptive decoder error.
  if (header.length <= HEADER_NONCE) {
    throw new EthereumBlockDecoderError(
        `Expected ${HEADER_NONCE + 1} header fields, got ${header.length}!`);
  }
  return {
    parentHash: toBigIntBE(header[HEADER_PARENT_HASH] as Buffer),
    uncleHash: toBigIntBE(header[HEADER_UNCLE_HASH] as Buffer),
    beneficiary: toBigIntBE(header[HEADER_BENEFICIARY] as Buffer),
    stateRoot: toBigIntBE(header[HEADER_STATE_ROOT] as Buffer),
    transactionsRoot: toBigIntBE(header[HEADER_TRANSACTIONS_ROOT] as Buffer),
    receiptsRoot: toBigIntBE(header[HEADER_RECEIPTS_ROOT] as Buffer),
    // logsBloom and extraData are kept as raw byte arrays; all other fields
    // are interpreted as big-endian unsigned integers.
    logsBloom: header[HEADER_LOGSBLOOM] as Buffer,
    difficulty: toBigIntBE(header[HEADER_DIFFICULTY] as Buffer),
    blockNumber: toBigIntBE(header[HEADER_BLOCK_NUMBER] as Buffer),
    gasLimit: toBigIntBE(header[HEADER_GAS_LIMIT] as Buffer),
    gasUsed: toBigIntBE(header[HEADER_GAS_USED] as Buffer),
    timestamp: toBigIntBE(header[HEADER_TIMESTAMP] as Buffer),
    extraData: (header[HEADER_EXTRADATA] as Buffer),
    mixHash: toBigIntBE(header[HEADER_MIXHASH] as Buffer),
    nonce: toBigIntBE(header[HEADER_NONCE] as Buffer)
  };
}
// Indices of the fields within an RLP-decoded transaction list. The final
// three entries (v, r, s) carry the ECDSA signature.
const TRANSACTION_NONCE = 0;
const TRANSACTION_GASPRICE = 1;
const TRANSACTION_STARTGAS = 2;
const TRANSACTION_TO = 3;
const TRANSACTION_VALUE = 4;
const TRANSACTION_DATA = 5;
const TRANSACTION_V = 6;
const TRANSACTION_R = 7;
const TRANSACTION_S = 8;
/**
* Given a RLP-serialized list with an Ethereum transaction, decodes the list
* and validates the Ethereum transaction.
*
 * @param transaction The RLP-encoded list with the transaction to decode.
*
* @returns A validated and decoded EthereumTransaction.
*/
export async function decodeTransaction(
transaction: RlpList,
options: EthereumBlockDecoderOptions =
defaultOptions): Promise<EthereumTransaction> {
const v = transaction[TRANSACTION_V] as Buffer;
const r = transaction[TRANSACTION_R] as Buffer;
const s = transaction[TRANSACTION_S] as Buffer;
if (r.length > 32) {
throw | gth > 32) {
throw new Error(`s > 32 bytes!`);
}
const signature = Buffer.alloc(64, 0);
r.copy(signature, 32 - r.length);
s.copy(signature, 64 - s.length);
const chainV = options.chainId * 2 + 35;
const verifySignature =
options.eip155 ? v[0] === chainV || v[0] === chainV + 1 : false;
const recovery =
verifySignature ? v[0] - (options.chainId * 2 + 8) - 27 : v[0] - 27;
if (recovery !== 0 && recovery !== 1) {
throw new EthereumBlockDecoderError(
`Invalid infinite recovery = ${recovery}`);
}
// TODO: Get existing buffer from stream instead of regenerating it.
const toHash = verifySignature ?
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer),
Buffer.from([options.chainId]),
Buffer.from([]),
Buffer.from([]),
]) :
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer)
]);
let from: bigint;
if (process.browser || native === undefined || !options.native) {
const hash = keccak('keccak256').update(toHash).digest();
// Recover and decompress the public key
const pubKey = secp256k1.recover(hash, signature, recovery, false).slice(1);
if (pubKey.length !== 64) {
throw new EthereumBlockDecoderError(
`Incorrect public key length ${pubKey.length}`);
}
from = toBigIntBE(keccak('keccak256').update(pubKey).digest().slice(-20));
if (from === undefined) {
throw new EthereumBlockDecoderError(`Failed to get from account`);
}
} else {
from = await native.recoverFromAddress(toHash, signature, recovery === 1);
}
const toBuffer = transaction[TRANSACTION_TO] as Buffer;
return {
nonce: toBigIntBE(transaction[TRANSACTION_NONCE] as Buffer),
gasPrice: toBigIntBE(transaction[TRANSACTION_GASPRICE] as Buffer),
gasLimit: toBigIntBE(transaction[TRANSACTION_STARTGAS] as Buffer),
to: toBuffer.length === 0 ? BigInt(-1) : toBigIntBE(toBuffer | new Error(`r > 32 bytes!`);
}
if (s.len | conditional_block |
ethereum-block.ts | [HEADER_UNCLE_HASH] as Buffer),
beneficiary: toBigIntBE(header[HEADER_BENEFICIARY] as Buffer),
stateRoot: toBigIntBE(header[HEADER_STATE_ROOT] as Buffer),
transactionsRoot: toBigIntBE(header[HEADER_TRANSACTIONS_ROOT] as Buffer),
receiptsRoot: toBigIntBE(header[HEADER_RECEIPTS_ROOT] as Buffer),
logsBloom: header[HEADER_LOGSBLOOM] as Buffer,
difficulty: toBigIntBE(header[HEADER_DIFFICULTY] as Buffer),
blockNumber: toBigIntBE(header[HEADER_BLOCK_NUMBER] as Buffer),
gasLimit: toBigIntBE(header[HEADER_GAS_LIMIT] as Buffer),
gasUsed: toBigIntBE(header[HEADER_GAS_USED] as Buffer),
timestamp: toBigIntBE(header[HEADER_TIMESTAMP] as Buffer),
extraData: (header[HEADER_EXTRADATA] as Buffer),
mixHash: toBigIntBE(header[HEADER_MIXHASH] as Buffer),
nonce: toBigIntBE(header[HEADER_NONCE] as Buffer)
};
}
// Indices of the fields within an RLP-decoded transaction list. The final
// three entries (v, r, s) carry the ECDSA signature.
const TRANSACTION_NONCE = 0;
const TRANSACTION_GASPRICE = 1;
const TRANSACTION_STARTGAS = 2;
const TRANSACTION_TO = 3;
const TRANSACTION_VALUE = 4;
const TRANSACTION_DATA = 5;
const TRANSACTION_V = 6;
const TRANSACTION_R = 7;
const TRANSACTION_S = 8;
/**
 * Given a RLP-serialized list with an Ethereum transaction, decodes the list
 * and validates the Ethereum transaction, recovering the sender address from
 * the ECDSA signature.
 *
 * @param transaction The RLP-encoded list with the transaction to decode.
 * @param options Decoder options controlling the chain id, EIP-155 handling
 *     and whether a native signature-recovery implementation is used.
 *
 * @returns A validated and decoded EthereumTransaction.
 */
export async function decodeTransaction(
    transaction: RlpList,
    options: EthereumBlockDecoderOptions =
        defaultOptions): Promise<EthereumTransaction> {
  // The last three fields of a signed transaction hold the ECDSA signature.
  const v = transaction[TRANSACTION_V] as Buffer;
  const r = transaction[TRANSACTION_R] as Buffer;
  const s = transaction[TRANSACTION_S] as Buffer;
  if (r.length > 32) {
    throw new Error(`r > 32 bytes!`);
  }
  if (s.length > 32) {
    throw new Error(`s > 32 bytes!`);
  }
  // Pack r and s right-aligned (zero-padded) into a fixed 64-byte buffer, the
  // layout secp256k1.recover expects.
  const signature = Buffer.alloc(64, 0);
  r.copy(signature, 32 - r.length);
  s.copy(signature, 64 - s.length);
  // Under EIP-155, v encodes the chain id: v = chainId * 2 + 35 + recovery.
  // NOTE(review): only v[0] is inspected, which assumes v fits in one byte —
  // true for small chain ids; verify for large chain ids.
  const chainV = options.chainId * 2 + 35;
  const verifySignature =
      options.eip155 ? v[0] === chainV || v[0] === chainV + 1 : false;
  // Normalize v down to a 0/1 recovery id: EIP-155 values reduce via
  // v - (chainId * 2 + 35); legacy (pre-EIP-155) signatures use 27/28.
  const recovery =
      verifySignature ? v[0] - (options.chainId * 2 + 8) - 27 : v[0] - 27;
  if (recovery !== 0 && recovery !== 1) {
    throw new EthereumBlockDecoderError(
        `Invalid infinite recovery = ${recovery}`);
  }
  // TODO: Get existing buffer from stream instead of regenerating it.
  // Rebuild the payload that was originally signed: EIP-155 signatures cover
  // the six transaction fields plus (chainId, 0, 0); legacy signatures cover
  // only the six fields.
  const toHash = verifySignature ?
      RlpEncode([
        (transaction[TRANSACTION_NONCE] as Buffer),
        (transaction[TRANSACTION_GASPRICE] as Buffer),
        (transaction[TRANSACTION_STARTGAS] as Buffer),
        (transaction[TRANSACTION_TO] as Buffer),
        (transaction[TRANSACTION_VALUE] as Buffer),
        (transaction[TRANSACTION_DATA] as Buffer),
        Buffer.from([options.chainId]),
        Buffer.from([]),
        Buffer.from([]),
      ]) :
      RlpEncode([
        (transaction[TRANSACTION_NONCE] as Buffer),
        (transaction[TRANSACTION_GASPRICE] as Buffer),
        (transaction[TRANSACTION_STARTGAS] as Buffer),
        (transaction[TRANSACTION_TO] as Buffer),
        (transaction[TRANSACTION_VALUE] as Buffer),
        (transaction[TRANSACTION_DATA] as Buffer)
      ]);
  let from: bigint;
  // Pure-JS path: used in browsers, when the native addon is unavailable, or
  // when the caller opted out of native recovery.
  if (process.browser || native === undefined || !options.native) {
    const hash = keccak('keccak256').update(toHash).digest();
    // Recover and decompress the public key; slice(1) drops the 0x04
    // uncompressed-point prefix, leaving the 64-byte X||Y coordinates.
    const pubKey = secp256k1.recover(hash, signature, recovery, false).slice(1);
    if (pubKey.length !== 64) {
      throw new EthereumBlockDecoderError(
          `Incorrect public key length ${pubKey.length}`);
    }
    // The sender address is the low 20 bytes of keccak256(publicKey).
    from = toBigIntBE(keccak('keccak256').update(pubKey).digest().slice(-20));
    if (from === undefined) {
      throw new EthereumBlockDecoderError(`Failed to get from account`);
    }
  } else {
    from = await native.recoverFromAddress(toHash, signature, recovery === 1);
  }
  const toBuffer = transaction[TRANSACTION_TO] as Buffer;
  return {
    nonce: toBigIntBE(transaction[TRANSACTION_NONCE] as Buffer),
    gasPrice: toBigIntBE(transaction[TRANSACTION_GASPRICE] as Buffer),
    gasLimit: toBigIntBE(transaction[TRANSACTION_STARTGAS] as Buffer),
    // An empty `to` field marks contract creation; -1 distinguishes it from
    // the all-zero address.
    to: toBuffer.length === 0 ? BigInt(-1) : toBigIntBE(toBuffer),
    value: toBigIntBE(transaction[TRANSACTION_VALUE] as Buffer),
    data: transaction[TRANSACTION_DATA] as Buffer,
    from
  };
}
/**
 * Given a RLP-serialized list with an Ethereum block, decodes the list and
 * validates the Ethereum block.
 *
 * @param rlp The RLP-encoded list with the block to decode.
 * @param options Decoder options threaded through to header and transaction
 *     decoding.
 *
 * @returns A validated and decoded EthereumBlock.
 */
export async function decodeBlock(
    rlp: RlpList, options: EthereumBlockDecoderOptions = defaultOptions):
    Promise<EthereumBlock> {
  // Each incoming block should be an RLP list.
  if (!Array.isArray(rlp)) {
    throw new EthereumBlockDecoderError(`Expected RLP-encoded list!`);
  }
  // The RlpList should have 3 parts: the header, the transaction list and the
  // uncle list.
  const header: EthereumHeader = decodeHeader(rlp[0] as RlpList);
  // BUG FIX: the original consulted and mutated the shared defaultOptions
  // here, so callers supplying custom options never had EIP-155 enabled at
  // their configured fork block. Use the options actually in effect.
  if (header.blockNumber >= options.eip155Block) {
    options.eip155 = true;
  }
  const transactionPromises: Array<Promise<EthereumTransaction>> =
      (rlp[1] as RlpList).map(tx => decodeTransaction(tx as RlpList, options));
  const transactions: EthereumTransaction[] =
      await Promise.all(transactionPromises);
  // Uncles are full headers, validated the same way as the block's own.
  const uncles: EthereumHeader[] =
      (rlp[2] as RlpList).map(buf => decodeHeader(buf as RlpList));
  return {header, transactions, uncles} as EthereumBlock;
}
/**
 * Remove leading null bytes from a buffer.
 *
 * @param buf Buffer to remove null bytes from
 *
 * @returns A slice of the buffer without the leading null bytes; empty if the
 *     buffer contains only null bytes.
 */
function removeNullPrefix(buf: Buffer): Buffer {
  // findIndex yields -1 when every byte is zero (or the buffer is empty),
  // in which case there is nothing left to keep.
  const firstNonNull = buf.findIndex((byte) => byte !== 0);
  return firstNonNull === -1 ? Buffer.from([]) : buf.slice(firstNonNull);
}
/**
 * Encodes an Ethereum header as a RLP list.
 *
 * @param header The Ethereum header to encode.
 *
 * @return A RlpList with the encoded Ethereum header.
 */
export function encodeHeaderAsRLP(header: EthereumHeader): RlpList {
  // Fixed-width big-endian encoding for hash, address and nonce fields.
  const fixed = (value: bigint, width: number) => toBufferBE(value, width);
  // Scalar fields are emitted without leading zero bytes (see
  // removeNullPrefix), matching how decodeHeader round-trips them.
  const scalar = (value: bigint) => removeNullPrefix(toBufferBE(value, 32));
  const encoded: RlpList = [];
  encoded[HEADER_PARENT_HASH] = fixed(header.parentHash, 32);
  encoded[HEADER_UNCLE_HASH] = fixed(header.uncleHash, 32);
  encoded[HEADER_BENEFICIARY] = fixed(header.beneficiary, 20);
  encoded[HEADER_STATE_ROOT] = fixed(header.stateRoot, 32);
  encoded[HEADER_TRANSACTIONS_ROOT] = fixed(header.transactionsRoot, 32);
  encoded[HEADER_RECEIPTS_ROOT] = fixed(header.receiptsRoot, 32);
  // logsBloom and extraData are already raw byte arrays; pass them through.
  encoded[HEADER_LOGSBLOOM] = header.logsBloom;
  encoded[HEADER_DIFFICULTY] = scalar(header.difficulty);
  encoded[HEADER_BLOCK_NUMBER] = scalar(header.blockNumber);
  encoded[HEADER_GAS_LIMIT] = scalar(header.gasLimit);
  encoded[HEADER_GAS_USED] = scalar(header.gasUsed);
  encoded[HEADER_TIMESTAMP] = scalar(header.timestamp);
  encoded[HEADER_EXTRADATA] = header.extraData;
  encoded[HEADER_MIXHASH] = fixed(header.mixHash, 32);
  encoded[HEADER_NONCE] = fixed(header.nonce, 8);
  return encoded;
}
/**
* Encodes a new block. Transactions must be encoded and signed as a RLPList
*
 * @param header The Ethereum header to encode.
* @param transactions Encoded, signed transactions to include
* @param uncleList A list of uncles to include
*
* @return A new RLP encoded Ethereum block.
*/
export function encodeBlock( |
header | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.