file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
SearchContainer.js | import React, { Component } from 'react';
import InfoSection from '../components/InfoSection';
import InfoSectionToggle from '../components/InfoSectionToggle';
class SearchContainer extends React.Component {
constructor(props) {
super(props); | showContent: true,
content: `The field below accepts search criteria for those on Odecee bench. Try searching by technologies like: 'node', or 'javascript' and you will see a list of candidates with those skill sets.`
};
}
onPressError() {
this.setState({
error: ''
});
}
onPressInfo() {
this.setState({
showContent: !this.state.showContent
});
}
renderError() {
if(this.state.error) {
return (
<li>
<div className="error">
<span className="message">{this.state.error}</span>
<span className="error-icon" onClick={this.onPressError.bind(this)}>
<i className="fa fa-times-circle"></i>
</span>
</div>
</li>
)
}
}
renderInfoSection() {
return (
<InfoSection
revealContent={this.state.showContent}
content={this.state.content}
title={'Add Skill'}
/>
);
}
render() {
return (
<div>
{this.renderInfoSection()}
<section>
<InfoSectionToggle onPressInfo={this.onPressInfo.bind(this)} />
<ul className="input-list style-4 clearfix">
{this.renderError()}
<li>
<label className="search" htmlFor="search">Search: </label>
<input
type="text"
style={{width: '100%'}}
placeholder="Search"
ref="email"
id="search"
/>
</li>
</ul>
</section>
</div>
)
}
}
export default SearchContainer; | this.state = {
message: '',
error: '', | random_line_split |
SearchContainer.js | import React, { Component } from 'react';
import InfoSection from '../components/InfoSection';
import InfoSectionToggle from '../components/InfoSectionToggle';
class SearchContainer extends React.Component {
constructor(props) {
super(props);
this.state = {
message: '',
error: '',
showContent: true,
content: `The field below accepts search criteria for those on Odecee bench. Try searching by technologies like: 'node', or 'javascript' and you will see a list of candidates with those skill sets.`
};
}
onPressError() {
this.setState({
error: ''
});
}
onPressInfo() {
this.setState({
showContent: !this.state.showContent
});
}
renderError() {
if(this.state.error) {
return (
<li>
<div className="error">
<span className="message">{this.state.error}</span>
<span className="error-icon" onClick={this.onPressError.bind(this)}>
<i className="fa fa-times-circle"></i>
</span>
</div>
</li>
)
}
}
renderInfoSection() |
render() {
return (
<div>
{this.renderInfoSection()}
<section>
<InfoSectionToggle onPressInfo={this.onPressInfo.bind(this)} />
<ul className="input-list style-4 clearfix">
{this.renderError()}
<li>
<label className="search" htmlFor="search">Search: </label>
<input
type="text"
style={{width: '100%'}}
placeholder="Search"
ref="email"
id="search"
/>
</li>
</ul>
</section>
</div>
)
}
}
export default SearchContainer;
| {
return (
<InfoSection
revealContent={this.state.showContent}
content={this.state.content}
title={'Add Skill'}
/>
);
} | identifier_body |
SearchContainer.js | import React, { Component } from 'react';
import InfoSection from '../components/InfoSection';
import InfoSectionToggle from '../components/InfoSectionToggle';
class SearchContainer extends React.Component {
constructor(props) {
super(props);
this.state = {
message: '',
error: '',
showContent: true,
content: `The field below accepts search criteria for those on Odecee bench. Try searching by technologies like: 'node', or 'javascript' and you will see a list of candidates with those skill sets.`
};
}
onPressError() {
this.setState({
error: ''
});
}
onPressInfo() {
this.setState({
showContent: !this.state.showContent
});
}
renderError() {
if(this.state.error) {
return (
<li>
<div className="error">
<span className="message">{this.state.error}</span>
<span className="error-icon" onClick={this.onPressError.bind(this)}>
<i className="fa fa-times-circle"></i>
</span>
</div>
</li>
)
}
}
renderInfoSection() {
return (
<InfoSection
revealContent={this.state.showContent}
content={this.state.content}
title={'Add Skill'}
/>
);
}
| () {
return (
<div>
{this.renderInfoSection()}
<section>
<InfoSectionToggle onPressInfo={this.onPressInfo.bind(this)} />
<ul className="input-list style-4 clearfix">
{this.renderError()}
<li>
<label className="search" htmlFor="search">Search: </label>
<input
type="text"
style={{width: '100%'}}
placeholder="Search"
ref="email"
id="search"
/>
</li>
</ul>
</section>
</div>
)
}
}
export default SearchContainer;
| render | identifier_name |
ofx_test.py | import os
import pytest
from . import ofx
from .source_test import check_source_example
testdata_dir = os.path.realpath(
os.path.join(
os.path.dirname(__file__), '..', '..', 'testdata', 'source', 'ofx'))
examples = [
('test_vanguard_basic', 'vanguard.ofx'),
('test_vanguard_matching', 'vanguard.ofx'),
('test_vanguard_invalid', 'vanguard.ofx'),
('test_vanguard_with_cash_account', 'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_transfer',
'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_primary',
'vanguard.ofx'),
('test_vanguard401k', 'vanguard401k.ofx'),
('test_vanguard_401k_matching', 'vanguard401k.ofx'),
('test_vanguard_xfer_in', 'vanguard_xfer_in.ofx'),
('test_fidelity_savings', 'fidelity-savings.ofx'),
('test_suncorp', 'suncorp.ofx'),
('test_checking', 'checking.ofx'),
('test_checking_emptyledgerbal', 'checking-emptyledgerbal.ofx'),
('test_td_ameritrade', 'td_ameritrade.ofx'),
('test_anzcc', 'anzcc.ofx'),
('test_multiple_accounts', 'multiple_accounts.ofx'),
('test_bank_medium', 'bank_medium.ofx'),
('test_investment_401k', 'investment_401k.ofx'),
('test_investment_buy_sell_income', 'investment_buy_sell_income.ofx'),
('test_vanguard_roth_ira', 'vanguard_roth_ira.ofx'),
('test_vanguard_roth_ira_matching', 'vanguard_roth_ira.ofx'),
('test_checking2', 'checking2.ofx'),
('test_checking2_matching', 'checking2.ofx'),
('test_amex', 'amex.ofx'),
('test_fidelity', 'fidelity.ofx'),
('test_non_default_capital_gains', 'vanguard401k.ofx'),
]
@pytest.mark.parametrize('name,ofx_filename', examples)
def test_source(name: str, ofx_filename: str):
check_source_example(
example_dir=os.path.join(testdata_dir, name),
source_spec={
'module': 'beancount_import.source.ofx',
'ofx_filenames': [os.path.join(testdata_dir, ofx_filename)],
},
replacements=[(testdata_dir, '<testdata>')])
def test_find_ofx_id_for_account():
| ofx_ids = {
'Assets:Vanguard:401k': 1,
}
for (account, want) in [
('Assets:Vanguard:401k:PreTax:VGI1', 1),
('Assets:Vanguard:401k:PreTax', 1),
('Assets:Vanguard:401k:VG1', 1),
('Assets:Vanguard:401k', 1),
('Assets:Vanguard:Unknown', None),
('Assets:Vanguard:401k:PreTax:Excessive:VGI1', None),
]:
assert ofx.find_ofx_id_for_account(account, ofx_ids) == want, account | identifier_body | |
ofx_test.py | import os
import pytest
from . import ofx
from .source_test import check_source_example
testdata_dir = os.path.realpath(
os.path.join(
os.path.dirname(__file__), '..', '..', 'testdata', 'source', 'ofx'))
examples = [
('test_vanguard_basic', 'vanguard.ofx'),
('test_vanguard_matching', 'vanguard.ofx'),
('test_vanguard_invalid', 'vanguard.ofx'),
('test_vanguard_with_cash_account', 'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_transfer',
'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_primary',
'vanguard.ofx'),
('test_vanguard401k', 'vanguard401k.ofx'),
('test_vanguard_401k_matching', 'vanguard401k.ofx'),
('test_vanguard_xfer_in', 'vanguard_xfer_in.ofx'),
('test_fidelity_savings', 'fidelity-savings.ofx'),
('test_suncorp', 'suncorp.ofx'),
('test_checking', 'checking.ofx'),
('test_checking_emptyledgerbal', 'checking-emptyledgerbal.ofx'),
('test_td_ameritrade', 'td_ameritrade.ofx'),
('test_anzcc', 'anzcc.ofx'),
('test_multiple_accounts', 'multiple_accounts.ofx'),
('test_bank_medium', 'bank_medium.ofx'),
('test_investment_401k', 'investment_401k.ofx'),
('test_investment_buy_sell_income', 'investment_buy_sell_income.ofx'),
('test_vanguard_roth_ira', 'vanguard_roth_ira.ofx'),
('test_vanguard_roth_ira_matching', 'vanguard_roth_ira.ofx'),
('test_checking2', 'checking2.ofx'),
('test_checking2_matching', 'checking2.ofx'),
('test_amex', 'amex.ofx'),
('test_fidelity', 'fidelity.ofx'),
('test_non_default_capital_gains', 'vanguard401k.ofx'),
]
@pytest.mark.parametrize('name,ofx_filename', examples)
def test_source(name: str, ofx_filename: str):
check_source_example(
example_dir=os.path.join(testdata_dir, name),
source_spec={
'module': 'beancount_import.source.ofx',
'ofx_filenames': [os.path.join(testdata_dir, ofx_filename)],
},
replacements=[(testdata_dir, '<testdata>')])
def test_find_ofx_id_for_account():
ofx_ids = {
'Assets:Vanguard:401k': 1,
}
for (account, want) in [
('Assets:Vanguard:401k:PreTax:VGI1', 1),
('Assets:Vanguard:401k:PreTax', 1),
('Assets:Vanguard:401k:VG1', 1),
('Assets:Vanguard:401k', 1),
('Assets:Vanguard:Unknown', None),
('Assets:Vanguard:401k:PreTax:Excessive:VGI1', None),
]:
| assert ofx.find_ofx_id_for_account(account, ofx_ids) == want, account | conditional_block | |
ofx_test.py | import os
import pytest
from . import ofx
from .source_test import check_source_example
testdata_dir = os.path.realpath(
os.path.join(
os.path.dirname(__file__), '..', '..', 'testdata', 'source', 'ofx'))
examples = [
('test_vanguard_basic', 'vanguard.ofx'),
('test_vanguard_matching', 'vanguard.ofx'),
('test_vanguard_invalid', 'vanguard.ofx'),
('test_vanguard_with_cash_account', 'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_transfer',
'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_primary',
'vanguard.ofx'),
('test_vanguard401k', 'vanguard401k.ofx'),
('test_vanguard_401k_matching', 'vanguard401k.ofx'),
('test_vanguard_xfer_in', 'vanguard_xfer_in.ofx'),
('test_fidelity_savings', 'fidelity-savings.ofx'),
('test_suncorp', 'suncorp.ofx'),
('test_checking', 'checking.ofx'), | ('test_td_ameritrade', 'td_ameritrade.ofx'),
('test_anzcc', 'anzcc.ofx'),
('test_multiple_accounts', 'multiple_accounts.ofx'),
('test_bank_medium', 'bank_medium.ofx'),
('test_investment_401k', 'investment_401k.ofx'),
('test_investment_buy_sell_income', 'investment_buy_sell_income.ofx'),
('test_vanguard_roth_ira', 'vanguard_roth_ira.ofx'),
('test_vanguard_roth_ira_matching', 'vanguard_roth_ira.ofx'),
('test_checking2', 'checking2.ofx'),
('test_checking2_matching', 'checking2.ofx'),
('test_amex', 'amex.ofx'),
('test_fidelity', 'fidelity.ofx'),
('test_non_default_capital_gains', 'vanguard401k.ofx'),
]
@pytest.mark.parametrize('name,ofx_filename', examples)
def test_source(name: str, ofx_filename: str):
check_source_example(
example_dir=os.path.join(testdata_dir, name),
source_spec={
'module': 'beancount_import.source.ofx',
'ofx_filenames': [os.path.join(testdata_dir, ofx_filename)],
},
replacements=[(testdata_dir, '<testdata>')])
def test_find_ofx_id_for_account():
ofx_ids = {
'Assets:Vanguard:401k': 1,
}
for (account, want) in [
('Assets:Vanguard:401k:PreTax:VGI1', 1),
('Assets:Vanguard:401k:PreTax', 1),
('Assets:Vanguard:401k:VG1', 1),
('Assets:Vanguard:401k', 1),
('Assets:Vanguard:Unknown', None),
('Assets:Vanguard:401k:PreTax:Excessive:VGI1', None),
]:
assert ofx.find_ofx_id_for_account(account, ofx_ids) == want, account | ('test_checking_emptyledgerbal', 'checking-emptyledgerbal.ofx'), | random_line_split |
ofx_test.py | import os
import pytest
from . import ofx
from .source_test import check_source_example
testdata_dir = os.path.realpath(
os.path.join(
os.path.dirname(__file__), '..', '..', 'testdata', 'source', 'ofx'))
examples = [
('test_vanguard_basic', 'vanguard.ofx'),
('test_vanguard_matching', 'vanguard.ofx'),
('test_vanguard_invalid', 'vanguard.ofx'),
('test_vanguard_with_cash_account', 'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_transfer',
'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_primary',
'vanguard.ofx'),
('test_vanguard401k', 'vanguard401k.ofx'),
('test_vanguard_401k_matching', 'vanguard401k.ofx'),
('test_vanguard_xfer_in', 'vanguard_xfer_in.ofx'),
('test_fidelity_savings', 'fidelity-savings.ofx'),
('test_suncorp', 'suncorp.ofx'),
('test_checking', 'checking.ofx'),
('test_checking_emptyledgerbal', 'checking-emptyledgerbal.ofx'),
('test_td_ameritrade', 'td_ameritrade.ofx'),
('test_anzcc', 'anzcc.ofx'),
('test_multiple_accounts', 'multiple_accounts.ofx'),
('test_bank_medium', 'bank_medium.ofx'),
('test_investment_401k', 'investment_401k.ofx'),
('test_investment_buy_sell_income', 'investment_buy_sell_income.ofx'),
('test_vanguard_roth_ira', 'vanguard_roth_ira.ofx'),
('test_vanguard_roth_ira_matching', 'vanguard_roth_ira.ofx'),
('test_checking2', 'checking2.ofx'),
('test_checking2_matching', 'checking2.ofx'),
('test_amex', 'amex.ofx'),
('test_fidelity', 'fidelity.ofx'),
('test_non_default_capital_gains', 'vanguard401k.ofx'),
]
@pytest.mark.parametrize('name,ofx_filename', examples)
def | (name: str, ofx_filename: str):
check_source_example(
example_dir=os.path.join(testdata_dir, name),
source_spec={
'module': 'beancount_import.source.ofx',
'ofx_filenames': [os.path.join(testdata_dir, ofx_filename)],
},
replacements=[(testdata_dir, '<testdata>')])
def test_find_ofx_id_for_account():
ofx_ids = {
'Assets:Vanguard:401k': 1,
}
for (account, want) in [
('Assets:Vanguard:401k:PreTax:VGI1', 1),
('Assets:Vanguard:401k:PreTax', 1),
('Assets:Vanguard:401k:VG1', 1),
('Assets:Vanguard:401k', 1),
('Assets:Vanguard:Unknown', None),
('Assets:Vanguard:401k:PreTax:Excessive:VGI1', None),
]:
assert ofx.find_ofx_id_for_account(account, ofx_ids) == want, account
| test_source | identifier_name |
RolesDialog.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from cairis.core.Role import Role
from RoleDialog import RoleDialog
from DialogClassParameters import DialogClassParameters
from cairis.core.ARM import *
from DimensionBaseDialog import DimensionBaseDialog
__author__ = 'Shamal Faily'
class RolesDialog(DimensionBaseDialog):
def __init__(self,parent):
DimensionBaseDialog.__init__(self,parent,ROLES_ID,'Roles',(800,300),'role.png')
idList = [ROLES_LISTROLES_ID,ROLES_BUTTONADD_ID,ROLES_BUTTONDELETE_ID]
columnList = ['Name','Short Code','Type']
self.buildControls(idList,columnList,self.dbProxy.getRoles,'role')
listCtrl = self.FindWindowById(ROLES_LISTROLES_ID)
listCtrl.SetColumnWidth(0,150)
listCtrl.SetColumnWidth(1,100)
listCtrl.SetColumnWidth(2,400)
def addObjectRow(self,listCtrl,listRow,role):
listCtrl.InsertStringItem(listRow,role.name())
listCtrl.SetStringItem(listRow,1,role.shortCode())
listCtrl.SetStringItem(listRow,2,role.type())
def onAdd(self,evt):
try:
addParameters = DialogClassParameters(ROLE_ID,'Add role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.addRole,True)
self.addObject(addParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
selectedObjt = self.objts[self.selectedLabel]
try:
updateParameters = DialogClassParameters(ROLE_ID,'Edit role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.updateRole,False) | dlg.ShowModal()
dlg.Destroy()
return
def onDelete(self,evt):
try:
self.deleteObject('No role','Delete role',self.dbProxy.deleteRole)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return | self.updateObject(selectedObjt,updateParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit role',wx.OK | wx.ICON_ERROR) | random_line_split |
RolesDialog.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from cairis.core.Role import Role
from RoleDialog import RoleDialog
from DialogClassParameters import DialogClassParameters
from cairis.core.ARM import *
from DimensionBaseDialog import DimensionBaseDialog
__author__ = 'Shamal Faily'
class RolesDialog(DimensionBaseDialog):
def __init__(self,parent):
DimensionBaseDialog.__init__(self,parent,ROLES_ID,'Roles',(800,300),'role.png')
idList = [ROLES_LISTROLES_ID,ROLES_BUTTONADD_ID,ROLES_BUTTONDELETE_ID]
columnList = ['Name','Short Code','Type']
self.buildControls(idList,columnList,self.dbProxy.getRoles,'role')
listCtrl = self.FindWindowById(ROLES_LISTROLES_ID)
listCtrl.SetColumnWidth(0,150)
listCtrl.SetColumnWidth(1,100)
listCtrl.SetColumnWidth(2,400)
def addObjectRow(self,listCtrl,listRow,role):
listCtrl.InsertStringItem(listRow,role.name())
listCtrl.SetStringItem(listRow,1,role.shortCode())
listCtrl.SetStringItem(listRow,2,role.type())
def onAdd(self,evt):
try:
addParameters = DialogClassParameters(ROLE_ID,'Add role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.addRole,True)
self.addObject(addParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
|
def onDelete(self,evt):
try:
self.deleteObject('No role','Delete role',self.dbProxy.deleteRole)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
| selectedObjt = self.objts[self.selectedLabel]
try:
updateParameters = DialogClassParameters(ROLE_ID,'Edit role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.updateRole,False)
self.updateObject(selectedObjt,updateParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return | identifier_body |
RolesDialog.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from cairis.core.Role import Role
from RoleDialog import RoleDialog
from DialogClassParameters import DialogClassParameters
from cairis.core.ARM import *
from DimensionBaseDialog import DimensionBaseDialog
__author__ = 'Shamal Faily'
class RolesDialog(DimensionBaseDialog):
def | (self,parent):
DimensionBaseDialog.__init__(self,parent,ROLES_ID,'Roles',(800,300),'role.png')
idList = [ROLES_LISTROLES_ID,ROLES_BUTTONADD_ID,ROLES_BUTTONDELETE_ID]
columnList = ['Name','Short Code','Type']
self.buildControls(idList,columnList,self.dbProxy.getRoles,'role')
listCtrl = self.FindWindowById(ROLES_LISTROLES_ID)
listCtrl.SetColumnWidth(0,150)
listCtrl.SetColumnWidth(1,100)
listCtrl.SetColumnWidth(2,400)
def addObjectRow(self,listCtrl,listRow,role):
listCtrl.InsertStringItem(listRow,role.name())
listCtrl.SetStringItem(listRow,1,role.shortCode())
listCtrl.SetStringItem(listRow,2,role.type())
def onAdd(self,evt):
try:
addParameters = DialogClassParameters(ROLE_ID,'Add role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.addRole,True)
self.addObject(addParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
selectedObjt = self.objts[self.selectedLabel]
try:
updateParameters = DialogClassParameters(ROLE_ID,'Edit role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.updateRole,False)
self.updateObject(selectedObjt,updateParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onDelete(self,evt):
try:
self.deleteObject('No role','Delete role',self.dbProxy.deleteRole)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
| __init__ | identifier_name |
translation.ts | <?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hr_HR">
<context> | </message>
<message>
<source>Empty</source>
<translation>Prazno</translation>
</message>
</context>
<context>
<name>extension/ezxmkinstaller</name>
<message>
<source>XML Publisher</source>
<translation>XML objavljivač</translation>
</message>
</context>
<context>
<name>ezxmlinstaller/datatypes</name>
<message>
<source>Feature Select</source>
<comment>Datatype name</comment>
<translation>Odabir svojstava</translation>
</message>
</context>
</TS> | <name>design/standard/class/datatype</name>
<message>
<source>Template Location</source>
<translation>Lokacija predloška</translation> | random_line_split |
instanceof.ts | class A {}
class B extends A {}
var a: A;
var b: B;
var i: i32;
var I: i64;
var f: f32;
var F: f64;
assert( a instanceof A );
assert( b instanceof A );
assert(!(i instanceof A));
assert(!(I instanceof A));
assert(!(f instanceof A));
assert(!(F instanceof A));
// assert(!(a instanceof B)); // dynamic upcast, checked in rt/instanceof
assert( b instanceof B );
assert(!(i instanceof B));
assert(!(I instanceof B));
assert(!(f instanceof B));
assert(!(F instanceof B));
assert(!(a instanceof i32));
assert(!(b instanceof i32));
assert( i instanceof i32 );
assert(!(I instanceof i32));
assert(!(f instanceof i32));
assert(!(F instanceof i32));
assert(!(a instanceof i64));
assert(!(b instanceof i64));
assert(!(i instanceof i64));
assert( I instanceof i64 );
assert(!(f instanceof i64));
assert(!(F instanceof i64));
assert(!(a instanceof f32));
assert(!(b instanceof f32));
assert(!(i instanceof f32));
assert(!(I instanceof f32));
assert( f instanceof f32 );
assert(!(F instanceof f32));
assert(!(a instanceof f64));
assert(!(b instanceof f64));
assert(!(i instanceof f64));
assert(!(I instanceof f64));
assert(!(f instanceof f64));
assert( F instanceof f64 );
function isI32<T>(v: T): bool {
// should eliminate non-applicable branches (see fixture)
if (v instanceof i32) {
return true;
} else { | assert(!isI32(0.0));
assert(!isI32(<u32>0)); // signedness is relevant
assert(!isI32(<u16>0)); // byte size is relevant
var an: A | null = null;
assert(!(an instanceof A)); // TS: ==null is not an instance of A
assert( an instanceof A | null); // AS: ==null is an instance of A | null
an = changetype<A | null>(1);
assert( an instanceof A); // TS: !=null is an instance of A
assert( an instanceof A | null); // AS: !=null is an instance of A | null
// TODO: keep track of nullability during flows, so this becomes precomputable:
// assert(an !== null && an instanceof A); | return false;
}
}
assert( isI32(0)); | random_line_split |
instanceof.ts | class | {}
class B extends A {}
var a: A;
var b: B;
var i: i32;
var I: i64;
var f: f32;
var F: f64;
assert( a instanceof A );
assert( b instanceof A );
assert(!(i instanceof A));
assert(!(I instanceof A));
assert(!(f instanceof A));
assert(!(F instanceof A));
// assert(!(a instanceof B)); // dynamic upcast, checked in rt/instanceof
assert( b instanceof B );
assert(!(i instanceof B));
assert(!(I instanceof B));
assert(!(f instanceof B));
assert(!(F instanceof B));
assert(!(a instanceof i32));
assert(!(b instanceof i32));
assert( i instanceof i32 );
assert(!(I instanceof i32));
assert(!(f instanceof i32));
assert(!(F instanceof i32));
assert(!(a instanceof i64));
assert(!(b instanceof i64));
assert(!(i instanceof i64));
assert( I instanceof i64 );
assert(!(f instanceof i64));
assert(!(F instanceof i64));
assert(!(a instanceof f32));
assert(!(b instanceof f32));
assert(!(i instanceof f32));
assert(!(I instanceof f32));
assert( f instanceof f32 );
assert(!(F instanceof f32));
assert(!(a instanceof f64));
assert(!(b instanceof f64));
assert(!(i instanceof f64));
assert(!(I instanceof f64));
assert(!(f instanceof f64));
assert( F instanceof f64 );
function isI32<T>(v: T): bool {
// should eliminate non-applicable branches (see fixture)
if (v instanceof i32) {
return true;
} else {
return false;
}
}
assert( isI32(0));
assert(!isI32(0.0));
assert(!isI32(<u32>0)); // signedness is relevant
assert(!isI32(<u16>0)); // byte size is relevant
var an: A | null = null;
assert(!(an instanceof A)); // TS: ==null is not an instance of A
assert( an instanceof A | null); // AS: ==null is an instance of A | null
an = changetype<A | null>(1);
assert( an instanceof A); // TS: !=null is an instance of A
assert( an instanceof A | null); // AS: !=null is an instance of A | null
// TODO: keep track of nullability during flows, so this becomes precomputable:
// assert(an !== null && an instanceof A);
| A | identifier_name |
instanceof.ts | class A {}
class B extends A {}
var a: A;
var b: B;
var i: i32;
var I: i64;
var f: f32;
var F: f64;
assert( a instanceof A );
assert( b instanceof A );
assert(!(i instanceof A));
assert(!(I instanceof A));
assert(!(f instanceof A));
assert(!(F instanceof A));
// assert(!(a instanceof B)); // dynamic upcast, checked in rt/instanceof
assert( b instanceof B );
assert(!(i instanceof B));
assert(!(I instanceof B));
assert(!(f instanceof B));
assert(!(F instanceof B));
assert(!(a instanceof i32));
assert(!(b instanceof i32));
assert( i instanceof i32 );
assert(!(I instanceof i32));
assert(!(f instanceof i32));
assert(!(F instanceof i32));
assert(!(a instanceof i64));
assert(!(b instanceof i64));
assert(!(i instanceof i64));
assert( I instanceof i64 );
assert(!(f instanceof i64));
assert(!(F instanceof i64));
assert(!(a instanceof f32));
assert(!(b instanceof f32));
assert(!(i instanceof f32));
assert(!(I instanceof f32));
assert( f instanceof f32 );
assert(!(F instanceof f32));
assert(!(a instanceof f64));
assert(!(b instanceof f64));
assert(!(i instanceof f64));
assert(!(I instanceof f64));
assert(!(f instanceof f64));
assert( F instanceof f64 );
function isI32<T>(v: T): bool {
// should eliminate non-applicable branches (see fixture)
if (v instanceof i32) {
return true;
} else |
}
assert( isI32(0));
assert(!isI32(0.0));
assert(!isI32(<u32>0)); // signedness is relevant
assert(!isI32(<u16>0)); // byte size is relevant
var an: A | null = null;
assert(!(an instanceof A)); // TS: ==null is not an instance of A
assert( an instanceof A | null); // AS: ==null is an instance of A | null
an = changetype<A | null>(1);
assert( an instanceof A); // TS: !=null is an instance of A
assert( an instanceof A | null); // AS: !=null is an instance of A | null
// TODO: keep track of nullability during flows, so this becomes precomputable:
// assert(an !== null && an instanceof A);
| {
return false;
} | conditional_block |
theme.ts | import { blue } from "@material-ui/core/colors";
import { createTheme } from "@material-ui/core";
import createPalette from "@material-ui/core/styles/createPalette";
const palette = createPalette({
primary: {
main: "#292",
light: "#3d3"
}
});
const theme = createTheme({
typography: {
fontSize: 24,
fontFamily: ["Roboto", "sans-serif"].join(",")
},
overrides: {
MuiPaper: {
root: {
width: "100%"
}
},
MuiToolbar: {
root: {
color: palette.text.primary,
backgroundColor: palette.background.default
}
},
MuiTabs: {
root: {
color: "white",
backgroundColor: palette.grey[800],
},
indicator: { | },
MuiTab: {
textColorInherit: { opacity: 1 },
root: {
textTransform: "none",
maxWidth: "none",
padding: 12,
transition: "background-color .2s",
"&$selected": { backgroundColor: palette.primary.main },
"&:hover": { backgroundColor: palette.primary.light }
}
},
MuiFormGroup: {
root: {
marginLeft: 48
}
},
MuiFormControlLabel: {
root: {
"&:not(.Mui-disabled):hover > .MuiTypography-root": { backgroundColor: palette.action.hover },
"&:not(.Mui-disabled):hover > .MuiIconButton-root": { color: palette.primary.light }
},
label: {
borderRadius: 12,
transition: "background-color .2s",
}
},
MuiFormControl: {
root: {
width: "100%",
boxSizing: "border-box",
padding: 24
}
},
MuiFormLabel: {
root: {
color: palette.text.primary,
marginBottom: 8,
"&$focused": { color: palette.text.primary }
},
},
MuiCheckbox: {
root: {
padding: 4
}
},
MuiRadio: {
root: {
padding: 4
}
},
MuiSvgIcon: {
root: {
width: "0.7em",
height: "0.7em"
}
},
MuiSlider: {
root: {
"&:hover .MuiSlider-thumb": { color: palette.primary.light },
"&:hover .MuiSlider-track": { color: palette.primary.light }
},
thumb: {
width: 24,
height: 24,
marginTop: -12,
marginLeft: -12,
color: palette.primary.main,
},
track: {
color: palette.primary.main,
height: 3
},
rail: {
color: palette.action.active
}
},
MuiLink: {
root: {
fontFamily: ["Roboto", "sans-serif"].join(","), // otherwise not set
fontSize: 16, // if not set, prod is different
color: blue[700],
margin: 8,
borderRadius: 12,
transition: "background-color .2s",
"&:hover": {
color: palette.text.primary,
backgroundColor: palette.action.hover
}
},
underlineHover: {
"&:hover": { textDecoration: "none" }
}
},
MuiTooltip: {
tooltip: {
maxWidth: 'none'
}
}
},
palette
});
export default theme; | display: "none"
} | random_line_split |
lib.rs | //! Parallel mutation of vectors via non-overlapping slices.
#![cfg_attr(feature = "bench", feature(test, step_by))]
use std::fmt::{Formatter, Debug};
use std::fmt::Error as FmtError;
use std::sync::{Arc, Condvar, Mutex};
use std::mem;
use std::ops;
/// Our inner `Vec` container.
struct VecBox<T> {
slice_count: usize,
data: Vec<T>,
}
impl<T> VecBox<T> {
fn new(slice_count: usize, data: Vec<T>) -> VecBox<T> {
VecBox {
slice_count: slice_count,
data: data,
}
}
/// Decrement the slice count
fn decrement(&mut self) {
self.slice_count -= 1;
}
/// Try to unwrap this box, replacing `data` with an empty vector if `slice_count == 0`
fn try_unwrap(&mut self) -> Option<Vec<T>> {
match self.slice_count {
0 => Some(mem::replace(&mut self.data, Vec::new())),
_ => None,
}
}
}
struct ParVecInner<T> {
inner: Mutex<VecBox<T>>,
cvar: Condvar,
}
impl<T: Send> ParVecInner<T> {
fn new(slice_count: usize, data: Vec<T>) -> ParVecInner<T> {
ParVecInner {
inner: Mutex::new(VecBox::new(slice_count, data)),
cvar: Condvar::new(),
}
}
fn decrement(&self) {
self.inner.lock().unwrap().decrement();
self.cvar.notify_one();
}
fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
let mut lock = self.inner.lock().unwrap();
if let Some(data) = lock.try_unwrap() {
return Some(data);
}
let (mut lock, _) = self.cvar.wait_timeout_ms(lock, timeout).unwrap();
lock.try_unwrap()
}
fn unwrap(&self) -> Vec<T> {
let mut lock = self.inner.lock().unwrap();
loop {
if let Some(data) = lock.try_unwrap() {
return data;
}
lock = self.cvar.wait(lock).unwrap();
}
}
}
/// A vector that can be mutated in-parallel via non-overlapping slices.
///
/// Get a `ParVec` and a vector of slices via `new()`, send the slices to other threads
/// and mutate them, then get the mutated vector with `.unwrap()` when finished.
pub struct ParVec<T> {
inner: Arc<ParVecInner<T>>,
}
impl<T: Send> ParVec<T> {
/// Create a new `ParVec`, returning it and a number of slices equal to
/// `slice_count`, that can be sent to other threads and mutated in-parallel.
///
/// The vector's length will be divided up amongst the slices as evenly as possible.
pub fn new(vec: Vec<T>, slice_count: usize) -> (ParVec<T>, Vec<ParSlice<T>>) {
let slices = sub_slices(&vec, slice_count);
let inner = Arc::new(ParVecInner::new(slice_count, vec));
let par_slices = slices.into_iter().map(|slice|
ParSlice {
inner: inner.clone(),
data: slice,
}
).collect();
let par_vec = ParVec {
inner: inner,
};
(par_vec, par_slices)
}
/// Attempt to take the inner `Vec` before `timeout` if there are no slices remaining.
/// Returns `None` if the timeout elapses and there are still slices remaining.
pub fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
self.inner.try_unwrap(timeout)
}
/// Take the inner `Vec`, waiting until all slices have been freed.
///
/// ###Deadlock Warning
/// Before calling this method, you should ensure that all `ParSlice` instances have either been:
///
/// - moved to other threads that will quit sometime in the future, or;
/// - dropped, implicitly (left in an inner scope) or explicitly (passed to `mem::drop()`)
///
/// Otherwise, a deadlock will likely occur.
pub fn unwrap(self) -> Vec<T> {
self.inner.unwrap()
}
}
/// Create a vector of raw subslices that are as close to each other in size as possible.
fn sub_slices<T>(parent: &[T], slice_count: usize) -> Vec<*mut [T]> {
use std::cmp;
let len = parent.len();
let mut start = 0;
// By iteratively dividing the length remaining in the vector by the number of slices
// remaining, we get a set of slices with a minimal deviation of lengths.
//
// For example, taking 8 slices of a vector of length 42 should yield 6 slices of length 5 and
// 2 slices of length 6. In contrast, taking 7 slices should yield 7 slices of length 6.
(1 .. slice_count + 1).rev().map(|curr| {
let slice_len = (len - start) / curr;
let end = cmp::min(start + slice_len, len);
let slice = &parent[start..end];
start += slice_len;
slice as *const [T] as *mut [T]
}).collect()
}
/// A slice of `ParVec` that can be sent to another task for processing.
/// Automatically releases the slice on drop.
pub struct ParSlice<T: Send> {
inner: Arc<ParVecInner<T>>,
data: *mut [T],
}
unsafe impl<T: Send> Send for ParSlice<T> {}
impl<T: Send> ops::Deref for ParSlice<T> {
type Target = [T];
fn deref<'a>(&'a self) -> &'a [T] {
unsafe { & *self.data }
}
}
impl<T: Send> ops::DerefMut for ParSlice<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut [T] {
unsafe { &mut *self.data }
}
}
impl<T: Send> Debug for ParSlice<T> where T: Debug {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "{:?}", &*self)
}
}
impl<T: Send> Drop for ParSlice<T> {
fn drop(&mut self) {
self.inner.decrement();
}
}
// place these constants here so both the `test` and `bench` modules can use them
const TEST_SLICES: usize = 8;
const TEST_MAX: u32 = 1000;
#[cfg(test)]
mod test {
use ::{ParVec, TEST_SLICES, TEST_MAX};
#[test]
fn test_unwrap_safely() {
let (vec, slices) = ParVec::new([5u32; TEST_MAX as usize].to_vec(), TEST_SLICES);
drop(slices);
let vec = vec.unwrap();
assert_eq!(&*vec, &[5u32; TEST_MAX as usize][..]);
}
#[test]
fn test_slices() {
let (_, slices) = ParVec::new((1u32 .. TEST_MAX).collect(), TEST_SLICES);
assert_eq!(slices.len(), TEST_SLICES);
}
#[test]
fn test_nonoverlapping_slices() {
fn are_nonoverlapping<T>(left: &[T], right: &[T]) -> bool {
let left_start = left.as_ptr() as usize;
let right_start = right.as_ptr() as usize;
let left_end = left_start + left.len();
let right_end = right_start + right.len();
// `left` starts and ends before `right`
left_end < right_start ||
// `right` ends before `left`
right_end < left_start
}
let data: Vec<u32> = (1 .. TEST_MAX).collect();
let start_ptr = data.as_ptr() as usize;
let (_, slices) = ParVec::new(data, TEST_SLICES);
// This can probably be done in O(n log n) instead of O(n^2).
// Suggestions are welcome.
for (left_idx, left) in slices.iter().enumerate() {
for (_, right) in slices.iter().enumerate()
.filter(|&(right_idx, _)| right_idx != left_idx)
{
let left_start = left.as_ptr() as usize - start_ptr;
let right_start = right.as_ptr() as usize - start_ptr;
assert!(
are_nonoverlapping(left, right),
"Slices overlapped! left: {left:?} right: {right:?}",
left = (left_start, left_start + left.len()),
right = (right_start, right_start + right.len())
)
}
}
}
}
#[cfg(feature = "bench")]
mod bench {
extern crate rand;
extern crate threadpool;
extern crate test;
use ::{ParVec, TEST_SLICES, TEST_MAX};
use self::rand::{thread_rng, Rng};
use self::test::Bencher;
use self::threadpool::ThreadPool;
#[bench]
fn seq_prime_factors_1000(b: &mut Bencher) {
let vec: Vec<u32> = (1 .. TEST_MAX).collect();
b.iter(|| {
let _: Vec<(u32, Vec<u32>)> = vec.iter()
.map(|&x| (x, get_prime_factors(x)))
.collect();
});
}
#[bench]
fn par_prime_factors_1000(b: &mut Bencher) {
let mut rng = thread_rng();
let pool = ThreadPool::new(TEST_SLICES);
b.iter(|| {
let mut vec: Vec<(u32, Vec<u32>)> = (1 .. TEST_MAX)
.map(|x| (x, Vec::new())).collect();
// Shuffle so each thread gets an even distribution of work.
// Otherwise, the threads with the lower numbers will quit early.
rng.shuffle(&mut *vec);
let (par_vec, par_slices) = ParVec::new(vec, TEST_SLICES);
for mut slice in par_slices {
pool.execute(move ||
for pair in &mut *slice {
let (x, ref mut x_primes) = *pair;
*x_primes = get_prime_factors(x);
}
);
}
let mut vec = par_vec.unwrap();
// Sort so they're in the same order as sequential.
vec.sort();
});
}
fn get_prime_factors(x: u32) -> Vec<u32> {
(1 .. x).filter(|&y| x % y == 0 && is_prime(y)).collect()
}
fn is_prime(x: u32) -> bool {
// 2 and 3 are prime, but 0 and 1 are not.
(x > 1 && x < 4) ||
// Fast check for even-ness.
x & 1 != 0 &&
// If `x mod i` for every odd number `i < x`, then x is prime.
// Intentionally naive for the sake of the benchmark.
(3 .. x).step_by(2).all(|i| x % i != 0)
}
#[test]
fn | () {
// Test a reasonable number of primes to make sure the function actually works
for &i in &[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] {
assert!(is_prime(i));
}
for i in (4..40).step_by(2) {
assert!(!is_prime(i));
}
}
}
| test_is_prime | identifier_name |
lib.rs | //! Parallel mutation of vectors via non-overlapping slices.
#![cfg_attr(feature = "bench", feature(test, step_by))]
use std::fmt::{Formatter, Debug};
use std::fmt::Error as FmtError;
use std::sync::{Arc, Condvar, Mutex};
use std::mem;
use std::ops;
/// Our inner `Vec` container.
struct VecBox<T> {
slice_count: usize,
data: Vec<T>,
}
impl<T> VecBox<T> {
fn new(slice_count: usize, data: Vec<T>) -> VecBox<T> {
VecBox {
slice_count: slice_count,
data: data,
}
}
/// Decrement the slice count
fn decrement(&mut self) {
self.slice_count -= 1;
}
/// Try to unwrap this box, replacing `data` with an empty vector if `slice_count == 0`
fn try_unwrap(&mut self) -> Option<Vec<T>> {
match self.slice_count {
0 => Some(mem::replace(&mut self.data, Vec::new())),
_ => None,
}
}
}
struct ParVecInner<T> {
inner: Mutex<VecBox<T>>,
cvar: Condvar,
}
impl<T: Send> ParVecInner<T> {
fn new(slice_count: usize, data: Vec<T>) -> ParVecInner<T> {
ParVecInner {
inner: Mutex::new(VecBox::new(slice_count, data)),
cvar: Condvar::new(),
}
}
fn decrement(&self) {
self.inner.lock().unwrap().decrement();
self.cvar.notify_one();
}
fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
let mut lock = self.inner.lock().unwrap();
if let Some(data) = lock.try_unwrap() {
return Some(data);
}
let (mut lock, _) = self.cvar.wait_timeout_ms(lock, timeout).unwrap();
lock.try_unwrap()
}
fn unwrap(&self) -> Vec<T> {
let mut lock = self.inner.lock().unwrap();
loop {
if let Some(data) = lock.try_unwrap() {
return data;
}
lock = self.cvar.wait(lock).unwrap();
}
}
}
/// A vector that can be mutated in-parallel via non-overlapping slices.
///
/// Get a `ParVec` and a vector of slices via `new()`, send the slices to other threads
/// and mutate them, then get the mutated vector with `.unwrap()` when finished.
pub struct ParVec<T> {
inner: Arc<ParVecInner<T>>,
}
impl<T: Send> ParVec<T> {
/// Create a new `ParVec`, returning it and a number of slices equal to
/// `slice_count`, that can be sent to other threads and mutated in-parallel.
///
/// The vector's length will be divided up amongst the slices as evenly as possible.
pub fn new(vec: Vec<T>, slice_count: usize) -> (ParVec<T>, Vec<ParSlice<T>>) {
let slices = sub_slices(&vec, slice_count);
let inner = Arc::new(ParVecInner::new(slice_count, vec));
let par_slices = slices.into_iter().map(|slice|
ParSlice {
inner: inner.clone(),
data: slice,
}
).collect();
let par_vec = ParVec {
inner: inner,
};
(par_vec, par_slices)
}
/// Attempt to take the inner `Vec` before `timeout` if there are no slices remaining.
/// Returns `None` if the timeout elapses and there are still slices remaining.
pub fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
self.inner.try_unwrap(timeout)
}
/// Take the inner `Vec`, waiting until all slices have been freed.
///
/// ###Deadlock Warning
/// Before calling this method, you should ensure that all `ParSlice` instances have either been:
///
/// - moved to other threads that will quit sometime in the future, or;
/// - dropped, implicitly (left in an inner scope) or explicitly (passed to `mem::drop()`)
///
/// Otherwise, a deadlock will likely occur.
pub fn unwrap(self) -> Vec<T> {
self.inner.unwrap()
}
}
/// Create a vector of raw subslices that are as close to each other in size as possible.
fn sub_slices<T>(parent: &[T], slice_count: usize) -> Vec<*mut [T]> {
use std::cmp;
let len = parent.len();
let mut start = 0;
// By iteratively dividing the length remaining in the vector by the number of slices
// remaining, we get a set of slices with a minimal deviation of lengths.
//
// For example, taking 8 slices of a vector of length 42 should yield 6 slices of length 5 and
// 2 slices of length 6. In contrast, taking 7 slices should yield 7 slices of length 6. | let slice = &parent[start..end];
start += slice_len;
slice as *const [T] as *mut [T]
}).collect()
}
/// A slice of `ParVec` that can be sent to another task for processing.
/// Automatically releases the slice on drop.
pub struct ParSlice<T: Send> {
inner: Arc<ParVecInner<T>>,
data: *mut [T],
}
unsafe impl<T: Send> Send for ParSlice<T> {}
impl<T: Send> ops::Deref for ParSlice<T> {
type Target = [T];
fn deref<'a>(&'a self) -> &'a [T] {
unsafe { & *self.data }
}
}
impl<T: Send> ops::DerefMut for ParSlice<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut [T] {
unsafe { &mut *self.data }
}
}
impl<T: Send> Debug for ParSlice<T> where T: Debug {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "{:?}", &*self)
}
}
impl<T: Send> Drop for ParSlice<T> {
fn drop(&mut self) {
self.inner.decrement();
}
}
// place these constants here so both the `test` and `bench` modules can use them
const TEST_SLICES: usize = 8;
const TEST_MAX: u32 = 1000;
#[cfg(test)]
mod test {
use ::{ParVec, TEST_SLICES, TEST_MAX};
#[test]
fn test_unwrap_safely() {
let (vec, slices) = ParVec::new([5u32; TEST_MAX as usize].to_vec(), TEST_SLICES);
drop(slices);
let vec = vec.unwrap();
assert_eq!(&*vec, &[5u32; TEST_MAX as usize][..]);
}
#[test]
fn test_slices() {
let (_, slices) = ParVec::new((1u32 .. TEST_MAX).collect(), TEST_SLICES);
assert_eq!(slices.len(), TEST_SLICES);
}
#[test]
fn test_nonoverlapping_slices() {
fn are_nonoverlapping<T>(left: &[T], right: &[T]) -> bool {
let left_start = left.as_ptr() as usize;
let right_start = right.as_ptr() as usize;
let left_end = left_start + left.len();
let right_end = right_start + right.len();
// `left` starts and ends before `right`
left_end < right_start ||
// `right` ends before `left`
right_end < left_start
}
let data: Vec<u32> = (1 .. TEST_MAX).collect();
let start_ptr = data.as_ptr() as usize;
let (_, slices) = ParVec::new(data, TEST_SLICES);
// This can probably be done in O(n log n) instead of O(n^2).
// Suggestions are welcome.
for (left_idx, left) in slices.iter().enumerate() {
for (_, right) in slices.iter().enumerate()
.filter(|&(right_idx, _)| right_idx != left_idx)
{
let left_start = left.as_ptr() as usize - start_ptr;
let right_start = right.as_ptr() as usize - start_ptr;
assert!(
are_nonoverlapping(left, right),
"Slices overlapped! left: {left:?} right: {right:?}",
left = (left_start, left_start + left.len()),
right = (right_start, right_start + right.len())
)
}
}
}
}
#[cfg(feature = "bench")]
mod bench {
extern crate rand;
extern crate threadpool;
extern crate test;
use ::{ParVec, TEST_SLICES, TEST_MAX};
use self::rand::{thread_rng, Rng};
use self::test::Bencher;
use self::threadpool::ThreadPool;
#[bench]
fn seq_prime_factors_1000(b: &mut Bencher) {
let vec: Vec<u32> = (1 .. TEST_MAX).collect();
b.iter(|| {
let _: Vec<(u32, Vec<u32>)> = vec.iter()
.map(|&x| (x, get_prime_factors(x)))
.collect();
});
}
#[bench]
fn par_prime_factors_1000(b: &mut Bencher) {
let mut rng = thread_rng();
let pool = ThreadPool::new(TEST_SLICES);
b.iter(|| {
let mut vec: Vec<(u32, Vec<u32>)> = (1 .. TEST_MAX)
.map(|x| (x, Vec::new())).collect();
// Shuffle so each thread gets an even distribution of work.
// Otherwise, the threads with the lower numbers will quit early.
rng.shuffle(&mut *vec);
let (par_vec, par_slices) = ParVec::new(vec, TEST_SLICES);
for mut slice in par_slices {
pool.execute(move ||
for pair in &mut *slice {
let (x, ref mut x_primes) = *pair;
*x_primes = get_prime_factors(x);
}
);
}
let mut vec = par_vec.unwrap();
// Sort so they're in the same order as sequential.
vec.sort();
});
}
fn get_prime_factors(x: u32) -> Vec<u32> {
(1 .. x).filter(|&y| x % y == 0 && is_prime(y)).collect()
}
fn is_prime(x: u32) -> bool {
// 2 and 3 are prime, but 0 and 1 are not.
(x > 1 && x < 4) ||
// Fast check for even-ness.
x & 1 != 0 &&
// If `x mod i` for every odd number `i < x`, then x is prime.
// Intentionally naive for the sake of the benchmark.
(3 .. x).step_by(2).all(|i| x % i != 0)
}
#[test]
fn test_is_prime() {
// Test a reasonable number of primes to make sure the function actually works
for &i in &[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] {
assert!(is_prime(i));
}
for i in (4..40).step_by(2) {
assert!(!is_prime(i));
}
}
} | (1 .. slice_count + 1).rev().map(|curr| {
let slice_len = (len - start) / curr;
let end = cmp::min(start + slice_len, len);
| random_line_split |
lib.rs | //! Parallel mutation of vectors via non-overlapping slices.
#![cfg_attr(feature = "bench", feature(test, step_by))]
use std::fmt::{Formatter, Debug};
use std::fmt::Error as FmtError;
use std::sync::{Arc, Condvar, Mutex};
use std::mem;
use std::ops;
/// Our inner `Vec` container.
struct VecBox<T> {
slice_count: usize,
data: Vec<T>,
}
impl<T> VecBox<T> {
fn new(slice_count: usize, data: Vec<T>) -> VecBox<T> {
VecBox {
slice_count: slice_count,
data: data,
}
}
/// Decrement the slice count
fn decrement(&mut self) {
self.slice_count -= 1;
}
/// Try to unwrap this box, replacing `data` with an empty vector if `slice_count == 0`
fn try_unwrap(&mut self) -> Option<Vec<T>> {
match self.slice_count {
0 => Some(mem::replace(&mut self.data, Vec::new())),
_ => None,
}
}
}
struct ParVecInner<T> {
inner: Mutex<VecBox<T>>,
cvar: Condvar,
}
impl<T: Send> ParVecInner<T> {
fn new(slice_count: usize, data: Vec<T>) -> ParVecInner<T> {
ParVecInner {
inner: Mutex::new(VecBox::new(slice_count, data)),
cvar: Condvar::new(),
}
}
fn decrement(&self) {
self.inner.lock().unwrap().decrement();
self.cvar.notify_one();
}
fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
let mut lock = self.inner.lock().unwrap();
if let Some(data) = lock.try_unwrap() {
return Some(data);
}
let (mut lock, _) = self.cvar.wait_timeout_ms(lock, timeout).unwrap();
lock.try_unwrap()
}
fn unwrap(&self) -> Vec<T> {
let mut lock = self.inner.lock().unwrap();
loop {
if let Some(data) = lock.try_unwrap() {
return data;
}
lock = self.cvar.wait(lock).unwrap();
}
}
}
/// A vector that can be mutated in-parallel via non-overlapping slices.
///
/// Get a `ParVec` and a vector of slices via `new()`, send the slices to other threads
/// and mutate them, then get the mutated vector with `.unwrap()` when finished.
pub struct ParVec<T> {
inner: Arc<ParVecInner<T>>,
}
impl<T: Send> ParVec<T> {
/// Create a new `ParVec`, returning it and a number of slices equal to
/// `slice_count`, that can be sent to other threads and mutated in-parallel.
///
/// The vector's length will be divided up amongst the slices as evenly as possible.
pub fn new(vec: Vec<T>, slice_count: usize) -> (ParVec<T>, Vec<ParSlice<T>>) {
let slices = sub_slices(&vec, slice_count);
let inner = Arc::new(ParVecInner::new(slice_count, vec));
let par_slices = slices.into_iter().map(|slice|
ParSlice {
inner: inner.clone(),
data: slice,
}
).collect();
let par_vec = ParVec {
inner: inner,
};
(par_vec, par_slices)
}
/// Attempt to take the inner `Vec` before `timeout` if there are no slices remaining.
/// Returns `None` if the timeout elapses and there are still slices remaining.
pub fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
self.inner.try_unwrap(timeout)
}
/// Take the inner `Vec`, waiting until all slices have been freed.
///
/// ###Deadlock Warning
/// Before calling this method, you should ensure that all `ParSlice` instances have either been:
///
/// - moved to other threads that will quit sometime in the future, or;
/// - dropped, implicitly (left in an inner scope) or explicitly (passed to `mem::drop()`)
///
/// Otherwise, a deadlock will likely occur.
pub fn unwrap(self) -> Vec<T> {
self.inner.unwrap()
}
}
/// Create a vector of raw subslices that are as close to each other in size as possible.
fn sub_slices<T>(parent: &[T], slice_count: usize) -> Vec<*mut [T]> {
use std::cmp;
let len = parent.len();
let mut start = 0;
// By iteratively dividing the length remaining in the vector by the number of slices
// remaining, we get a set of slices with a minimal deviation of lengths.
//
// For example, taking 8 slices of a vector of length 42 should yield 6 slices of length 5 and
// 2 slices of length 6. In contrast, taking 7 slices should yield 7 slices of length 6.
(1 .. slice_count + 1).rev().map(|curr| {
let slice_len = (len - start) / curr;
let end = cmp::min(start + slice_len, len);
let slice = &parent[start..end];
start += slice_len;
slice as *const [T] as *mut [T]
}).collect()
}
/// A slice of `ParVec` that can be sent to another task for processing.
/// Automatically releases the slice on drop.
pub struct ParSlice<T: Send> {
inner: Arc<ParVecInner<T>>,
data: *mut [T],
}
unsafe impl<T: Send> Send for ParSlice<T> {}
impl<T: Send> ops::Deref for ParSlice<T> {
type Target = [T];
fn deref<'a>(&'a self) -> &'a [T] {
unsafe { & *self.data }
}
}
impl<T: Send> ops::DerefMut for ParSlice<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut [T] {
unsafe { &mut *self.data }
}
}
impl<T: Send> Debug for ParSlice<T> where T: Debug {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "{:?}", &*self)
}
}
impl<T: Send> Drop for ParSlice<T> {
fn drop(&mut self) {
self.inner.decrement();
}
}
// place these constants here so both the `test` and `bench` modules can use them
const TEST_SLICES: usize = 8;
const TEST_MAX: u32 = 1000;
#[cfg(test)]
mod test {
use ::{ParVec, TEST_SLICES, TEST_MAX};
#[test]
fn test_unwrap_safely() {
let (vec, slices) = ParVec::new([5u32; TEST_MAX as usize].to_vec(), TEST_SLICES);
drop(slices);
let vec = vec.unwrap();
assert_eq!(&*vec, &[5u32; TEST_MAX as usize][..]);
}
#[test]
fn test_slices() {
let (_, slices) = ParVec::new((1u32 .. TEST_MAX).collect(), TEST_SLICES);
assert_eq!(slices.len(), TEST_SLICES);
}
#[test]
fn test_nonoverlapping_slices() {
fn are_nonoverlapping<T>(left: &[T], right: &[T]) -> bool {
let left_start = left.as_ptr() as usize;
let right_start = right.as_ptr() as usize;
let left_end = left_start + left.len();
let right_end = right_start + right.len();
// `left` starts and ends before `right`
left_end < right_start ||
// `right` ends before `left`
right_end < left_start
}
let data: Vec<u32> = (1 .. TEST_MAX).collect();
let start_ptr = data.as_ptr() as usize;
let (_, slices) = ParVec::new(data, TEST_SLICES);
// This can probably be done in O(n log n) instead of O(n^2).
// Suggestions are welcome.
for (left_idx, left) in slices.iter().enumerate() {
for (_, right) in slices.iter().enumerate()
.filter(|&(right_idx, _)| right_idx != left_idx)
{
let left_start = left.as_ptr() as usize - start_ptr;
let right_start = right.as_ptr() as usize - start_ptr;
assert!(
are_nonoverlapping(left, right),
"Slices overlapped! left: {left:?} right: {right:?}",
left = (left_start, left_start + left.len()),
right = (right_start, right_start + right.len())
)
}
}
}
}
#[cfg(feature = "bench")]
mod bench {
extern crate rand;
extern crate threadpool;
extern crate test;
use ::{ParVec, TEST_SLICES, TEST_MAX};
use self::rand::{thread_rng, Rng};
use self::test::Bencher;
use self::threadpool::ThreadPool;
#[bench]
fn seq_prime_factors_1000(b: &mut Bencher) |
#[bench]
fn par_prime_factors_1000(b: &mut Bencher) {
let mut rng = thread_rng();
let pool = ThreadPool::new(TEST_SLICES);
b.iter(|| {
let mut vec: Vec<(u32, Vec<u32>)> = (1 .. TEST_MAX)
.map(|x| (x, Vec::new())).collect();
// Shuffle so each thread gets an even distribution of work.
// Otherwise, the threads with the lower numbers will quit early.
rng.shuffle(&mut *vec);
let (par_vec, par_slices) = ParVec::new(vec, TEST_SLICES);
for mut slice in par_slices {
pool.execute(move ||
for pair in &mut *slice {
let (x, ref mut x_primes) = *pair;
*x_primes = get_prime_factors(x);
}
);
}
let mut vec = par_vec.unwrap();
// Sort so they're in the same order as sequential.
vec.sort();
});
}
fn get_prime_factors(x: u32) -> Vec<u32> {
(1 .. x).filter(|&y| x % y == 0 && is_prime(y)).collect()
}
fn is_prime(x: u32) -> bool {
// 2 and 3 are prime, but 0 and 1 are not.
(x > 1 && x < 4) ||
// Fast check for even-ness.
x & 1 != 0 &&
// If `x mod i` for every odd number `i < x`, then x is prime.
// Intentionally naive for the sake of the benchmark.
(3 .. x).step_by(2).all(|i| x % i != 0)
}
#[test]
fn test_is_prime() {
// Test a reasonable number of primes to make sure the function actually works
for &i in &[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] {
assert!(is_prime(i));
}
for i in (4..40).step_by(2) {
assert!(!is_prime(i));
}
}
}
| {
let vec: Vec<u32> = (1 .. TEST_MAX).collect();
b.iter(|| {
let _: Vec<(u32, Vec<u32>)> = vec.iter()
.map(|&x| (x, get_prime_factors(x)))
.collect();
});
} | identifier_body |
test-async-wrap-disabled-propagate-parent.js | 'use strict';
require('../common');
const assert = require('assert');
const net = require('net');
const async_wrap = process.binding('async_wrap');
const providers = Object.keys(async_wrap.Providers);
const uidSymbol = Symbol('uid');
let cntr = 0;
let client;
function init(uid, type, parentUid, parentHandle) {
this[uidSymbol] = uid;
if (parentHandle) |
}
function noop() { }
async_wrap.setupHooks({ init });
async_wrap.enable();
const server = net.createServer(function(c) {
client = c;
// Allow init callback to run before closing.
setImmediate(() => {
c.end();
this.close();
});
}).listen(0, function() {
net.connect(this.address().port, noop);
});
async_wrap.disable();
process.on('exit', function() {
// init should have only been called once with a parent.
assert.equal(cntr, 1);
});
| {
cntr++;
// Cannot assert in init callback or will abort.
process.nextTick(() => {
assert.equal(providers[type], 'TCPWRAP');
assert.equal(parentUid, server._handle[uidSymbol],
'server uid doesn\'t match parent uid');
assert.equal(parentHandle, server._handle,
'server handle doesn\'t match parent handle');
assert.equal(this, client._handle, 'client doesn\'t match context');
});
} | conditional_block |
test-async-wrap-disabled-propagate-parent.js | 'use strict';
require('../common');
const assert = require('assert');
const net = require('net');
const async_wrap = process.binding('async_wrap');
const providers = Object.keys(async_wrap.Providers);
const uidSymbol = Symbol('uid');
let cntr = 0;
let client;
function init(uid, type, parentUid, parentHandle) {
this[uidSymbol] = uid;
if (parentHandle) {
cntr++;
// Cannot assert in init callback or will abort.
process.nextTick(() => {
assert.equal(providers[type], 'TCPWRAP');
assert.equal(parentUid, server._handle[uidSymbol],
'server uid doesn\'t match parent uid');
assert.equal(parentHandle, server._handle,
'server handle doesn\'t match parent handle');
assert.equal(this, client._handle, 'client doesn\'t match context');
});
}
}
function noop() { }
async_wrap.setupHooks({ init });
async_wrap.enable();
| const server = net.createServer(function(c) {
client = c;
// Allow init callback to run before closing.
setImmediate(() => {
c.end();
this.close();
});
}).listen(0, function() {
net.connect(this.address().port, noop);
});
async_wrap.disable();
process.on('exit', function() {
// init should have only been called once with a parent.
assert.equal(cntr, 1);
}); | random_line_split | |
test-async-wrap-disabled-propagate-parent.js | 'use strict';
require('../common');
const assert = require('assert');
const net = require('net');
const async_wrap = process.binding('async_wrap');
const providers = Object.keys(async_wrap.Providers);
const uidSymbol = Symbol('uid');
let cntr = 0;
let client;
function init(uid, type, parentUid, parentHandle) |
function noop() { }
async_wrap.setupHooks({ init });
async_wrap.enable();
const server = net.createServer(function(c) {
client = c;
// Allow init callback to run before closing.
setImmediate(() => {
c.end();
this.close();
});
}).listen(0, function() {
net.connect(this.address().port, noop);
});
async_wrap.disable();
process.on('exit', function() {
// init should have only been called once with a parent.
assert.equal(cntr, 1);
});
| {
this[uidSymbol] = uid;
if (parentHandle) {
cntr++;
// Cannot assert in init callback or will abort.
process.nextTick(() => {
assert.equal(providers[type], 'TCPWRAP');
assert.equal(parentUid, server._handle[uidSymbol],
'server uid doesn\'t match parent uid');
assert.equal(parentHandle, server._handle,
'server handle doesn\'t match parent handle');
assert.equal(this, client._handle, 'client doesn\'t match context');
});
}
} | identifier_body |
test-async-wrap-disabled-propagate-parent.js | 'use strict';
require('../common');
const assert = require('assert');
const net = require('net');
const async_wrap = process.binding('async_wrap');
const providers = Object.keys(async_wrap.Providers);
const uidSymbol = Symbol('uid');
let cntr = 0;
let client;
function init(uid, type, parentUid, parentHandle) {
this[uidSymbol] = uid;
if (parentHandle) {
cntr++;
// Cannot assert in init callback or will abort.
process.nextTick(() => {
assert.equal(providers[type], 'TCPWRAP');
assert.equal(parentUid, server._handle[uidSymbol],
'server uid doesn\'t match parent uid');
assert.equal(parentHandle, server._handle,
'server handle doesn\'t match parent handle');
assert.equal(this, client._handle, 'client doesn\'t match context');
});
}
}
function | () { }
async_wrap.setupHooks({ init });
async_wrap.enable();
const server = net.createServer(function(c) {
client = c;
// Allow init callback to run before closing.
setImmediate(() => {
c.end();
this.close();
});
}).listen(0, function() {
net.connect(this.address().port, noop);
});
async_wrap.disable();
process.on('exit', function() {
// init should have only been called once with a parent.
assert.equal(cntr, 1);
});
| noop | identifier_name |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
fn default() -> Self {
UiConfiguration {
enabled: true && cfg!(feature = "ui-enabled"),
port: 8180,
interface: "127.0.0.1".into(),
hosts: Some(vec![]),
}
}
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else | ,
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
    /// Effective listening address, or `None` when the WS server is disabled.
    pub fn address(&self) -> Option<rpc::Host> {
        address(self.enabled, &self.interface, self.port, &self.hosts)
    }
}
/// Resolves the externally visible host of a server.
///
/// Returns `None` when the server is disabled. Otherwise the first entry of
/// a non-empty host whitelist wins; with no whitelist entries the bound
/// `interface:port` pair is used.
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
    if !enabled {
        return None;
    }
    let host = match hosts.as_ref().and_then(|list| list.first()) {
        Some(preferred) => preferred.clone().into(),
        None => format!("{}:{}", bind_iface, bind_port).into(),
    };
    Some(host)
}
/// Shared dependencies handed to every RPC server constructor in this module.
pub struct Dependencies<D: rpc_apis::Dependencies> {
    /// API backends used to populate handlers (see `setup_apis`).
    pub apis: Arc<D>,
    /// Event-loop remote the servers are driven on.
    pub remote: TokioRemote,
    /// RPC statistics collector shared across transports.
    pub stats: Arc<RpcStats>,
    /// Optional thread pool handed to the informant middleware.
    pub pool: Option<CpuPool>,
}
/// Starts the WebSockets JSON-RPC server described by `conf`.
///
/// Returns `Ok(None)` when the server is disabled, `Ok(Some(server))` on
/// success, and a human-readable error string otherwise (with a dedicated
/// hint when the address is already in use).
pub fn new_ws<D: rpc_apis::Dependencies>(
    conf: WsConfiguration,
    deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
    if !conf.enabled {
        return Ok(None);
    }
    let domain = DAPPS_DOMAIN;
    let url = format!("{}:{}", conf.interface, conf.port);
    let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
    // A handler over the full safe-context API set is built first and handed
    // to the WS dispatcher; the outer handler is then extended with only the
    // APIs selected in `conf.apis`.
    let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
    let handler = {
        let mut handler = MetaIoHandler::with_middleware((
            rpc::WsDispatcher::new(full_handler),
            Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
        ));
        let apis = conf.apis.list_apis();
        deps.apis.extend_with_set(&mut handler, &apis);
        handler
    };
    let remote = deps.remote.clone();
    let ui_address = conf.ui_address.clone();
    // UI and dapps addresses are implicitly added to the allowed origins;
    // the server's own URL is always an accepted `Host` value.
    let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
    let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
    // `signer_path` is declared in the outer scope so that the `&Path`
    // borrowed into `path` stays valid for the `start_ws` call below.
    let signer_path;
    let path = match conf.support_token_api && conf.ui_address.is_some() {
        true => {
            signer_path = ::signer::codes_path(&conf.signer_path);
            Some(signer_path.as_path())
        },
        false => None
    };
    let start_result = rpc::start_ws(
        &addr,
        handler,
        remote.clone(),
        allowed_origins,
        allowed_hosts,
        rpc::WsExtractor::new(path.clone()),
        rpc::WsExtractor::new(path.clone()),
        rpc::WsStats::new(deps.stats.clone()),
    );
    match start_result {
        Ok(server) => Ok(Some(server)),
        // Dedicated message for the common "port already taken" failure.
        Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
            format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
        ),
        Err(e) => Err(format!("WebSockets error: {:?}", e)),
    }
}
/// Starts an HTTP JSON-RPC server described by `conf`.
///
/// `id` names the server in error messages and `options` is the CLI flag
/// prefix (`--{options}-port` / `--{options}-interface`) mentioned in the
/// address-in-use hint. Returns `Ok(None)` when the server is disabled.
///
/// NOTE(review): `conf.processing_threads` is not used by this function —
/// confirm whether that is intentional.
pub fn new_http<D: rpc_apis::Dependencies>(
    id: &str,
    options: &str,
    conf: HttpConfiguration,
    deps: &Dependencies<D>,
    middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
    if !conf.enabled {
        return Ok(None);
    }
    let domain = DAPPS_DOMAIN;
    let url = format!("{}:{}", conf.interface, conf.port);
    let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
    let handler = setup_apis(conf.apis, deps);
    let remote = deps.remote.clone();
    let cors_domains = into_domains(conf.cors);
    // The server's own URL is always an accepted `Host` value.
    let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
    let start_result = rpc::start_http(
        &addr,
        cors_domains,
        allowed_hosts,
        handler,
        remote,
        rpc::RpcExtractor,
        middleware,
        conf.server_threads,
    );
    match start_result {
        Ok(server) => Ok(Some(server)),
        // Dedicated message for the common "port already taken" failure.
        Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
            format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
        ),
        Err(e) => Err(format!("{} error: {:?}", id, e)),
    }
}
/// Starts the IPC JSON-RPC endpoint described by `conf`.
///
/// Returns `Ok(None)` when the endpoint is disabled; failures are reported
/// as human-readable strings.
pub fn new_ipc<D: rpc_apis::Dependencies>(
    conf: IpcConfiguration,
    dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
    if !conf.enabled {
        return Ok(None);
    }
    let io_handler = setup_apis(conf.apis, dependencies);
    let event_loop = dependencies.remote.clone();
    let socket_path = PathBuf::from(&conf.socket_addr);
    // On unix-like systems the socket lives on the filesystem, so its parent
    // directory must exist first; Windows pipe paths are not real FS paths.
    if !cfg!(windows) {
        if let Some(dir) = socket_path.parent() {
            ::std::fs::create_dir_all(dir)
                .map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
        }
    }
    match rpc::start_ipc(&conf.socket_addr, io_handler, event_loop, rpc::RpcExtractor) {
        Ok(server) => Ok(Some(server)),
        Err(io_error) => Err(format!("IPC error: {}", io_error)),
    }
}
/// Converts an optional list of domain strings into a `DomainsValidation`
/// policy value, mapping each entry through `T::from`.
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
    let converted: Option<Vec<T>> = items.map(|values| values.into_iter().map(T::from).collect());
    converted.into()
}
/// Extends a host/origin whitelist with the entries implied by the UI and
/// dapps server addresses plus wildcard entries for the dapps `domain`.
///
/// A `None` whitelist is passed through untouched; duplicates are removed
/// by collecting into a set first.
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
    // "host:port" -> port, when present and numeric.
    fn extract_port(s: &str) -> Option<u16> {
        s.split(':').nth(1).and_then(|s| s.parse().ok())
    }
    items.map(move |initial| {
        let mut entries: HashSet<String> = initial.into_iter().collect();
        for source in [ui_address, dapps_address].iter() {
            if let Some(host) = (*source).clone() {
                entries.insert(host.to_string());
                // 127.0.0.1 and "localhost" are used interchangeably.
                entries.insert(host.replace("127.0.0.1", "localhost"));
                entries.insert(format!("http://*.{}", domain)); //proxypac
                if let Some(port) = extract_port(&*host) {
                    entries.insert(format!("http://*.{}:{}", domain, port));
                }
            }
        }
        entries.into_iter().collect()
    })
}
/// Builds a metadata-aware IO handler wrapped in the informant middleware
/// and populated with the method implementations for `apis` from `deps`.
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
    where D: rpc_apis::Dependencies
{
    let middleware = Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone());
    let mut io = MetaIoHandler::with_middleware(middleware);
    deps.apis.extend_with_set(&mut io, &apis.list_apis());
    io
}
#[cfg(test)]
mod tests {
    use super::address;
    #[test]
    fn should_return_proper_address() {
        // A disabled server exposes no address.
        assert_eq!(address(false, "localhost", 8180, &None), None);
        // Without a whitelist the bound interface:port pair is used.
        assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
        // A non-empty whitelist overrides the bound address...
        assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
        // ...even when the entry carries no port.
        assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
    }
}
| {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
} | conditional_block |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
    /// Defaults: loopback interface, port 8180, empty host whitelist.
    /// The server is enabled by default only when the crate is built with
    /// the `ui-enabled` feature.
    fn default() -> Self {
        UiConfiguration {
            // The original `true && cfg!(...)` was redundant; the feature
            // flag alone decides the default.
            enabled: cfg!(feature = "ui-enabled"),
            port: 8180,
            interface: "127.0.0.1".into(),
            hosts: Some(vec![]),
        }
    }
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
},
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
if !enabled {
return None;
}
match *hosts {
Some(ref hosts) if !hosts.is_empty() => Some(hosts[0].clone().into()),
_ => Some(format!("{}:{}", bind_iface, bind_port).into()),
}
}
pub struct | <D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
conf: WsConfiguration,
deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
if !conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
};
let remote = deps.remote.clone();
let ui_address = conf.ui_address.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let signer_path;
let path = match conf.support_token_api && conf.ui_address.is_some() {
true => {
signer_path = ::signer::codes_path(&conf.signer_path);
Some(signer_path.as_path())
},
false => None
};
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
rpc::WsExtractor::new(path.clone()),
rpc::WsExtractor::new(path.clone()),
rpc::WsStats::new(deps.stats.clone()),
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
),
Err(e) => Err(format!("WebSockets error: {:?}", e)),
}
}
pub fn new_http<D: rpc_apis::Dependencies>(
id: &str,
options: &str,
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
if !conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let start_result = rpc::start_http(
&addr,
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
middleware,
conf.server_threads,
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
),
Err(e) => Err(format!("{} error: {:?}", id, e)),
}
}
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if !conf.enabled {
return Ok(None);
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
if !cfg!(windows) {
if let Some(dir) = path.parent() {
::std::fs::create_dir_all(&dir)
.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
}
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
items.map(|vals| vals.into_iter().map(T::from).collect()).into()
}
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
fn extract_port(s: &str) -> Option<u16> {
s.split(':').nth(1).and_then(|s| s.parse().ok())
}
items.map(move |items| {
let mut items = items.into_iter().collect::<HashSet<_>>();
{
let mut add_hosts = |address: &Option<rpc::Host>| {
if let Some(host) = address.clone() {
items.insert(host.to_string());
items.insert(host.replace("127.0.0.1", "localhost"));
items.insert(format!("http://*.{}", domain)); //proxypac
if let Some(port) = extract_port(&*host) {
items.insert(format!("http://*.{}:{}", domain, port));
}
}
};
add_hosts(ui_address);
add_hosts(dapps_address);
}
items.into_iter().collect()
})
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
}
#[cfg(test)]
mod tests {
use super::address;
#[test]
fn should_return_proper_address() {
assert_eq!(address(false, "localhost", 8180, &None), None);
assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
}
}
| Dependencies | identifier_name |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
    /// Defaults: loopback interface, port 8180, empty host whitelist.
    /// The server is enabled by default only when the crate is built with
    /// the `ui-enabled` feature.
    fn default() -> Self {
        UiConfiguration {
            // The original `true && cfg!(...)` was redundant; the feature
            // flag alone decides the default.
            enabled: cfg!(feature = "ui-enabled"),
            port: 8180,
            interface: "127.0.0.1".into(),
            hosts: Some(vec![]),
        }
    }
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
},
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
pub fn address(&self) -> Option<rpc::Host> |
}
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
if !enabled {
return None;
}
match *hosts {
Some(ref hosts) if !hosts.is_empty() => Some(hosts[0].clone().into()),
_ => Some(format!("{}:{}", bind_iface, bind_port).into()),
}
}
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
conf: WsConfiguration,
deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
if !conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
};
let remote = deps.remote.clone();
let ui_address = conf.ui_address.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let signer_path;
let path = match conf.support_token_api && conf.ui_address.is_some() {
true => {
signer_path = ::signer::codes_path(&conf.signer_path);
Some(signer_path.as_path())
},
false => None
};
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
rpc::WsExtractor::new(path.clone()),
rpc::WsExtractor::new(path.clone()),
rpc::WsStats::new(deps.stats.clone()),
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
),
Err(e) => Err(format!("WebSockets error: {:?}", e)),
}
}
pub fn new_http<D: rpc_apis::Dependencies>(
id: &str,
options: &str,
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
if !conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let start_result = rpc::start_http(
&addr,
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
middleware,
conf.server_threads,
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
),
Err(e) => Err(format!("{} error: {:?}", id, e)),
}
}
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if !conf.enabled {
return Ok(None);
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
if !cfg!(windows) {
if let Some(dir) = path.parent() {
::std::fs::create_dir_all(&dir)
.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
}
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
items.map(|vals| vals.into_iter().map(T::from).collect()).into()
}
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
fn extract_port(s: &str) -> Option<u16> {
s.split(':').nth(1).and_then(|s| s.parse().ok())
}
items.map(move |items| {
let mut items = items.into_iter().collect::<HashSet<_>>();
{
let mut add_hosts = |address: &Option<rpc::Host>| {
if let Some(host) = address.clone() {
items.insert(host.to_string());
items.insert(host.replace("127.0.0.1", "localhost"));
items.insert(format!("http://*.{}", domain)); //proxypac
if let Some(port) = extract_port(&*host) {
items.insert(format!("http://*.{}:{}", domain, port));
}
}
};
add_hosts(ui_address);
add_hosts(dapps_address);
}
items.into_iter().collect()
})
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
}
#[cfg(test)]
mod tests {
use super::address;
#[test]
fn should_return_proper_address() {
assert_eq!(address(false, "localhost", 8180, &None), None);
assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
}
}
| {
address(self.enabled, &self.interface, self.port, &self.hosts)
} | identifier_body |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
    /// Defaults: loopback interface, port 8180, empty host whitelist.
    /// The server is enabled by default only when the crate is built with
    /// the `ui-enabled` feature.
    fn default() -> Self {
        UiConfiguration {
            // The original `true && cfg!(...)` was redundant; the feature
            // flag alone decides the default.
            enabled: cfg!(feature = "ui-enabled"),
            port: 8180,
            interface: "127.0.0.1".into(),
            hosts: Some(vec![]),
        }
    }
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
},
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
if !enabled {
return None;
}
match *hosts {
Some(ref hosts) if !hosts.is_empty() => Some(hosts[0].clone().into()),
_ => Some(format!("{}:{}", bind_iface, bind_port).into()),
}
}
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
conf: WsConfiguration,
deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
if !conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
};
let remote = deps.remote.clone();
let ui_address = conf.ui_address.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let signer_path;
let path = match conf.support_token_api && conf.ui_address.is_some() {
true => {
signer_path = ::signer::codes_path(&conf.signer_path);
Some(signer_path.as_path())
},
false => None
};
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
rpc::WsExtractor::new(path.clone()),
rpc::WsExtractor::new(path.clone()),
rpc::WsStats::new(deps.stats.clone()),
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
),
Err(e) => Err(format!("WebSockets error: {:?}", e)),
}
}
pub fn new_http<D: rpc_apis::Dependencies>(
id: &str,
options: &str,
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
if !conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let start_result = rpc::start_http(
&addr,
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
middleware,
conf.server_threads,
);
match start_result {
Ok(server) => Ok(Some(server)), | }
}
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if !conf.enabled {
return Ok(None);
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
if !cfg!(windows) {
if let Some(dir) = path.parent() {
::std::fs::create_dir_all(&dir)
.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
}
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
items.map(|vals| vals.into_iter().map(T::from).collect()).into()
}
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
fn extract_port(s: &str) -> Option<u16> {
s.split(':').nth(1).and_then(|s| s.parse().ok())
}
items.map(move |items| {
let mut items = items.into_iter().collect::<HashSet<_>>();
{
let mut add_hosts = |address: &Option<rpc::Host>| {
if let Some(host) = address.clone() {
items.insert(host.to_string());
items.insert(host.replace("127.0.0.1", "localhost"));
items.insert(format!("http://*.{}", domain)); //proxypac
if let Some(port) = extract_port(&*host) {
items.insert(format!("http://*.{}:{}", domain, port));
}
}
};
add_hosts(ui_address);
add_hosts(dapps_address);
}
items.into_iter().collect()
})
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
}
#[cfg(test)]
mod tests {
use super::address;
#[test]
fn should_return_proper_address() {
assert_eq!(address(false, "localhost", 8180, &None), None);
assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
}
} | Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
),
Err(e) => Err(format!("{} error: {:?}", id, e)), | random_line_split |
main.js | $(function() {
var Y = YUI().use("profiler", function(Y){
var profile = [
{
"namespace": ["sakai", "api", "Security"],
"functions": ["saneHTML"]
},
{
"namespace": ["sakai", "api", "Widgets", "widgetLoader"],
"functions": ["loadWidgets"]
},
{
"namespace": ["$"],
"functions": ["TemplateRenderer", "trim", "curCSS", "parseJSON", "ajax"]
},
{
"namespace": ["sakai", "api", "i18n", "General"],
"functions": ["process", "getValueForKey"]
}
];
var allFns = [];
$("#ifr").attr("src", "/dev/my_sakai.html");
console.log($("#ifr")[0].contentWindow.document);
$("#ifr").bind("load", function() {
console.log("iframe loaded, attaching profilers");
for (var i=0, j=profile.length; i<j; i++) |
setTimeout(function() {
// get the report for all the registered function
var report = Y.Profiler.getFullReport();
console.log(report);
},5000);
});
});
}); | {
var ns = profile[i].namespace;
var fns = profile[i].functions;
// construct the namespace of the function properly
var nsobj = $("#ifr")[0].contentWindow[ns[0]];
for (var x=1, y=ns.length; x<y; x++) {
nsobj = nsobj[ns[x]];
}
// Register each function in this namespace to be profiled
for (var k=0,l=fns.length; k<l; k++) {
Y.Profiler.registerFunction(fns[k], nsobj);
allFns.push(fns[k]);
}
} | conditional_block |
main.js | $(function() {
var Y = YUI().use("profiler", function(Y){
var profile = [
{
"namespace": ["sakai", "api", "Security"],
"functions": ["saneHTML"]
},
{
"namespace": ["sakai", "api", "Widgets", "widgetLoader"],
"functions": ["loadWidgets"]
},
{
"namespace": ["$"],
"functions": ["TemplateRenderer", "trim", "curCSS", "parseJSON", "ajax"]
},
{
"namespace": ["sakai", "api", "i18n", "General"],
"functions": ["process", "getValueForKey"]
}
];
var allFns = [];
$("#ifr").attr("src", "/dev/my_sakai.html");
console.log($("#ifr")[0].contentWindow.document);
$("#ifr").bind("load", function() {
console.log("iframe loaded, attaching profilers");
for (var i=0, j=profile.length; i<j; i++) {
var ns = profile[i].namespace;
var fns = profile[i].functions;
// construct the namespace of the function properly
var nsobj = $("#ifr")[0].contentWindow[ns[0]];
for (var x=1, y=ns.length; x<y; x++) {
nsobj = nsobj[ns[x]];
}
// Register each function in this namespace to be profiled
for (var k=0,l=fns.length; k<l; k++) { | }
setTimeout(function() {
// get the report for all the registered function
var report = Y.Profiler.getFullReport();
console.log(report);
},5000);
});
});
}); | Y.Profiler.registerFunction(fns[k], nsobj);
allFns.push(fns[k]);
} | random_line_split |
assistStorageEntry.js | // Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import $ from 'jquery';
import * as ko from 'knockout';
import apiHelper from 'api/apiHelper';
import huePubSub from 'utils/huePubSub';
import { findBrowserConnector, getRootFilePath } from 'config/hueConfig';
const PAGE_SIZE = 100;
const TYPE_SPECIFICS = {
adls: {
apiHelperFetchFunction: 'fetchAdlsPath',
dblClickPubSubId: 'assist.dblClickAdlsItem'
},
abfs: {
apiHelperFetchFunction: 'fetchAbfsPath',
dblClickPubSubId: 'assist.dblClickAbfsItem'
},
hdfs: {
apiHelperFetchFunction: 'fetchHdfsPath',
dblClickPubSubId: 'assist.dblClickHdfsItem'
},
s3: {
apiHelperFetchFunction: 'fetchS3Path',
dblClickPubSubId: 'assist.dblClickS3Item'
}
};
class AssistStorageEntry {
/**
* @param {object} options
* @param {object} options.definition
* @param {string} options.definition.name
* @param {string} options.definition.type (file, dir)
* @param {string} options.source - The storage source
* @param {string} [options.originalType] - The original storage type ('adl', 's3a')
* @param {AssistStorageEntry} options.parent
* @constructor
*/
constructor(options) {
const self = this;
self.source = options.source;
self.originalType = options.originalType;
self.definition = options.definition;
self.parent = options.parent;
self.rootPath = options.rootPath || '';
self.path = '';
if (self.parent !== null) {
self.path = self.parent.path;
if (self.parent.path !== '/' && !/\/$/.test(self.path)) {
self.path += '/';
}
}
self.path += self.definition.name;
self.abfsPath = (/^\//.test(self.path) ? 'abfs:/' : 'abfs://') + self.path;
self.currentPage = 1;
self.hasMorePages = true;
self.preview = ko.observable();
self.contextPopoverVisible = ko.observable(false);
self.filter = ko.observable('').extend({ rateLimit: 400 });
self.filter.subscribe(() => {
self.currentPage = 1;
self.hasMorePages = true;
self.loadEntries();
});
self.entries = ko.observableArray([]);
self.loaded = false;
self.loading = ko.observable(false);
self.loadingMore = ko.observable(false);
self.errorText = ko.observable();
self.hasErrors = ko.observable(false);
self.open = ko.observable(false);
self.open.subscribe(newValue => {
if (newValue && self.entries().length === 0) {
if (self.definition.type === 'dir') {
self.loadEntries();
} else {
self.loadPreview();
}
}
});
self.hasEntries = ko.pureComputed(() => self.entries().length > 0);
}
dblClick() {
huePubSub.publish(TYPE_SPECIFICS[self.source.type].dblClickPubSubId, this);
}
loadPreview() {
const self = this;
self.loading(true);
apiHelper
.fetchStoragePreview({
path: self.getHierarchy(),
type: self.source.type,
silenceErrors: true
})
.done(data => {
self.preview(data);
})
.fail(errorText => {
self.hasErrors(true);
self.errorText(errorText);
})
.always(() => {
self.loading(false);
});
}
loadEntries(callback) {
const self = this;
if (self.loading()) {
return;
}
self.loading(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
rootPath: self.rootPath,
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
filteredFiles.map(file => {
return new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
});
})
);
self.loaded = true;
self.loading(false);
self.hasErrors(!!data.s3_listing_not_allowed); // Special case where we want errors inline instead of the default popover. We don't want errorCallback handling
self.errorText(data.s3_listing_not_allowed);
if (callback) {
callback();
}
},
errorCallback: errorText => {
self.hasErrors(true);
self.errorText(errorText);
self.loading(false);
if (callback) {
callback();
}
}
});
}
goHome() {
huePubSub.publish('assist.storage.go.home');
}
loadDeep(folders, callback) {
const self = this;
if (folders.length === 0) {
callback(self);
return;
}
if (this.rootPath) {
const relativeFolders = folders.join('/').replace(new RegExp('^' + this.rootPath, ''), '');
folders = relativeFolders.split('/');
}
const nextName = folders.shift();
let loadedPages = 0;
const findNextAndLoadDeep = () => {
const foundEntry = self.entries().filter(entry => entry.definition.name === nextName);
const passedAlphabetically =
self.entries().length > 0 &&
self.entries()[self.entries().length - 1].definition.name.localeCompare(nextName) > 0;
if (foundEntry.length === 1) {
foundEntry[0].loadDeep(folders, callback);
} else if (!passedAlphabetically && self.hasMorePages && loadedPages < 50) {
loadedPages++;
self.fetchMore(findNextAndLoadDeep, () => {
callback(self);
});
} else {
callback(self);
}
};
if (!self.loaded) {
self.loadEntries(findNextAndLoadDeep);
} else {
findNextAndLoadDeep();
}
}
getHierarchy() {
const self = this;
let parts = [];
let entry = self;
while (entry) {
if (!entry.parent && entry.definition.name) {
const rootParts = entry.definition.name.split('/').filter(Boolean);
rootParts.reverse();
parts = parts.concat(rootParts);
} else if (entry.definition.name) {
parts.push(entry.definition.name);
}
entry = entry.parent;
}
parts.reverse();
return parts;
}
toggleOpen(data, event) {
const self = this;
if (self.definition.type === 'file') {
if (event.ctrlKey || event.metaKey || event.which === 2) {
window.open('/hue' + self.definition.url, '_blank');
} else {
huePubSub.publish('open.link', self.definition.url);
}
return;
}
self.open(!self.open());
if (self.definition.name === '..') {
if (self.parent.parent) {
huePubSub.publish('assist.selectStorageEntry', self.parent.parent);
}
} else {
huePubSub.publish('assist.selectStorageEntry', self);
}
}
fetchMore(successCallback, errorCallback) {
const self = this;
if (!self.hasMorePages || self.loadingMore()) {
return;
}
self.currentPage++;
self.loadingMore(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
self.entries().concat(
filteredFiles.map(
file =>
new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
})
)
)
);
self.loadingMore(false);
if (successCallback) {
successCallback();
}
},
errorCallback: () => {
self.hasErrors(true);
if (errorCallback) {
errorCallback();
}
}
});
}
showContextPopover(entry, event, positionAdjustment) {
const $source = $(event.target);
const offset = $source.offset();
entry.contextPopoverVisible(true);
if (positionAdjustment) {
offset.left += positionAdjustment.left;
offset.top += positionAdjustment.top;
}
huePubSub.publish('context.popover.show', {
data: {
type: 'storageEntry',
storageEntry: entry
},
pinEnabled: true,
orientation: 'right',
source: {
element: event.target,
left: offset.left,
top: offset.top - 3,
right: offset.left + $source.width() + 3,
bottom: offset.top + $source.height() - 3
}
});
huePubSub.subscribeOnce('context.popover.hidden', () => {
entry.contextPopoverVisible(false);
});
}
openInImporter() {
huePubSub.publish('open.in.importer', this.definition.path);
}
/**
* Helper function to create an assistStorageEntry. It will load the entries starting from the root up until the
* path or stop when a part is not found.
*
* @param {string} path - The path, can include the type i.e. '/tmp' or 's3:/tmp'.
* @param {string} [type] - Optional type, if not specified here or in the path 'hdfs' will be used.
* @return {JQueryPromise<AssistStorageEntry>}
*/
static getEntry(path, type) |
}
export default AssistStorageEntry;
| {
const deferred = $.Deferred();
const typeMatch = path.match(/^([^:]+):\/(\/.*)\/?/i);
type = typeMatch ? typeMatch[1] : type || 'hdfs';
type = type.replace(/s3.*/i, 's3');
type = type.replace(/adl.*/i, 'adls');
type = type.replace(/abfs.*/i, 'abfs');
// TODO: connector.id for browser connectors
const connector = findBrowserConnector(connector => connector.type === type);
if (connector) {
const rootPath = getRootFilePath(connector);
const rootEntry = new AssistStorageEntry({
source: connector,
rootPath: rootPath,
originalType: typeMatch && typeMatch[1],
definition: {
name: rootPath,
type: 'dir'
},
parent: null,
apiHelper: apiHelper
});
if (type === 'abfs' || type === 'adls') {
// ABFS / ADLS can have domain name in path. To prevent regression with s3 which allow periods in bucket name handle separately.
const azureMatch = path.match(
/^([^:]+):\/(\/((\w+)@)?[\w]+([\-\.]{1}\w+)*\.[\w]*)?(\/.*)?\/?/i
);
path = (azureMatch ? azureMatch[6] || '' : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
if (azureMatch && azureMatch[4]) {
path.unshift(azureMatch[4]);
}
} else {
path = (typeMatch ? typeMatch[2] : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
}
rootEntry.loadDeep(path, deferred.resolve);
} else {
deferred.reject();
}
return deferred.promise();
} | identifier_body |
assistStorageEntry.js | // Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import $ from 'jquery';
import * as ko from 'knockout';
import apiHelper from 'api/apiHelper';
import huePubSub from 'utils/huePubSub';
import { findBrowserConnector, getRootFilePath } from 'config/hueConfig';
const PAGE_SIZE = 100;
const TYPE_SPECIFICS = {
adls: {
apiHelperFetchFunction: 'fetchAdlsPath',
dblClickPubSubId: 'assist.dblClickAdlsItem'
},
abfs: {
apiHelperFetchFunction: 'fetchAbfsPath',
dblClickPubSubId: 'assist.dblClickAbfsItem'
},
hdfs: {
apiHelperFetchFunction: 'fetchHdfsPath',
dblClickPubSubId: 'assist.dblClickHdfsItem'
},
s3: {
apiHelperFetchFunction: 'fetchS3Path',
dblClickPubSubId: 'assist.dblClickS3Item'
}
};
class AssistStorageEntry {
/**
* @param {object} options
* @param {object} options.definition
* @param {string} options.definition.name
* @param {string} options.definition.type (file, dir)
* @param {string} options.source - The storage source
* @param {string} [options.originalType] - The original storage type ('adl', 's3a')
* @param {AssistStorageEntry} options.parent
* @constructor
*/
constructor(options) {
const self = this;
self.source = options.source;
self.originalType = options.originalType;
self.definition = options.definition;
self.parent = options.parent;
self.rootPath = options.rootPath || '';
self.path = '';
if (self.parent !== null) {
self.path = self.parent.path;
if (self.parent.path !== '/' && !/\/$/.test(self.path)) {
self.path += '/';
}
}
self.path += self.definition.name;
self.abfsPath = (/^\//.test(self.path) ? 'abfs:/' : 'abfs://') + self.path;
self.currentPage = 1;
self.hasMorePages = true;
self.preview = ko.observable();
self.contextPopoverVisible = ko.observable(false);
self.filter = ko.observable('').extend({ rateLimit: 400 });
self.filter.subscribe(() => {
self.currentPage = 1;
self.hasMorePages = true;
self.loadEntries();
});
self.entries = ko.observableArray([]);
self.loaded = false;
self.loading = ko.observable(false);
self.loadingMore = ko.observable(false);
self.errorText = ko.observable();
self.hasErrors = ko.observable(false);
self.open = ko.observable(false);
self.open.subscribe(newValue => {
if (newValue && self.entries().length === 0) {
if (self.definition.type === 'dir') {
self.loadEntries();
} else {
self.loadPreview();
}
}
});
self.hasEntries = ko.pureComputed(() => self.entries().length > 0);
}
dblClick() {
huePubSub.publish(TYPE_SPECIFICS[self.source.type].dblClickPubSubId, this);
}
loadPreview() {
const self = this;
self.loading(true);
apiHelper
.fetchStoragePreview({
path: self.getHierarchy(),
type: self.source.type,
silenceErrors: true
})
.done(data => {
self.preview(data);
})
.fail(errorText => {
self.hasErrors(true);
self.errorText(errorText);
})
.always(() => {
self.loading(false);
});
}
loadEntries(callback) {
const self = this;
if (self.loading()) {
return;
}
self.loading(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
rootPath: self.rootPath,
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
filteredFiles.map(file => {
return new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
});
})
);
self.loaded = true;
self.loading(false);
self.hasErrors(!!data.s3_listing_not_allowed); // Special case where we want errors inline instead of the default popover. We don't want errorCallback handling
self.errorText(data.s3_listing_not_allowed);
if (callback) {
callback();
}
},
errorCallback: errorText => {
self.hasErrors(true);
self.errorText(errorText);
self.loading(false);
if (callback) {
callback();
}
}
});
}
goHome() {
huePubSub.publish('assist.storage.go.home');
}
loadDeep(folders, callback) {
const self = this;
if (folders.length === 0) {
callback(self);
return;
}
if (this.rootPath) {
const relativeFolders = folders.join('/').replace(new RegExp('^' + this.rootPath, ''), '');
folders = relativeFolders.split('/');
}
const nextName = folders.shift();
let loadedPages = 0;
const findNextAndLoadDeep = () => {
const foundEntry = self.entries().filter(entry => entry.definition.name === nextName);
const passedAlphabetically =
self.entries().length > 0 &&
self.entries()[self.entries().length - 1].definition.name.localeCompare(nextName) > 0;
if (foundEntry.length === 1) {
foundEntry[0].loadDeep(folders, callback);
} else if (!passedAlphabetically && self.hasMorePages && loadedPages < 50) {
loadedPages++;
self.fetchMore(findNextAndLoadDeep, () => {
callback(self);
});
} else {
callback(self);
}
};
if (!self.loaded) {
self.loadEntries(findNextAndLoadDeep);
} else {
findNextAndLoadDeep();
}
}
getHierarchy() {
const self = this;
let parts = [];
let entry = self;
while (entry) {
if (!entry.parent && entry.definition.name) {
const rootParts = entry.definition.name.split('/').filter(Boolean);
rootParts.reverse();
parts = parts.concat(rootParts);
} else if (entry.definition.name) {
parts.push(entry.definition.name);
}
entry = entry.parent;
}
parts.reverse();
return parts;
}
toggleOpen(data, event) {
const self = this;
if (self.definition.type === 'file') {
if (event.ctrlKey || event.metaKey || event.which === 2) {
window.open('/hue' + self.definition.url, '_blank');
} else {
huePubSub.publish('open.link', self.definition.url);
}
return;
}
self.open(!self.open());
if (self.definition.name === '..') {
if (self.parent.parent) {
huePubSub.publish('assist.selectStorageEntry', self.parent.parent);
}
} else {
huePubSub.publish('assist.selectStorageEntry', self);
}
}
fetchMore(successCallback, errorCallback) {
const self = this;
if (!self.hasMorePages || self.loadingMore()) {
return;
}
self.currentPage++;
self.loadingMore(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
self.entries().concat(
filteredFiles.map(
file =>
new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
})
)
)
);
self.loadingMore(false);
if (successCallback) {
successCallback();
}
},
errorCallback: () => {
self.hasErrors(true);
if (errorCallback) {
errorCallback();
}
}
});
}
| (entry, event, positionAdjustment) {
const $source = $(event.target);
const offset = $source.offset();
entry.contextPopoverVisible(true);
if (positionAdjustment) {
offset.left += positionAdjustment.left;
offset.top += positionAdjustment.top;
}
huePubSub.publish('context.popover.show', {
data: {
type: 'storageEntry',
storageEntry: entry
},
pinEnabled: true,
orientation: 'right',
source: {
element: event.target,
left: offset.left,
top: offset.top - 3,
right: offset.left + $source.width() + 3,
bottom: offset.top + $source.height() - 3
}
});
huePubSub.subscribeOnce('context.popover.hidden', () => {
entry.contextPopoverVisible(false);
});
}
openInImporter() {
huePubSub.publish('open.in.importer', this.definition.path);
}
/**
* Helper function to create an assistStorageEntry. It will load the entries starting from the root up until the
* path or stop when a part is not found.
*
* @param {string} path - The path, can include the type i.e. '/tmp' or 's3:/tmp'.
* @param {string} [type] - Optional type, if not specified here or in the path 'hdfs' will be used.
* @return {JQueryPromise<AssistStorageEntry>}
*/
static getEntry(path, type) {
const deferred = $.Deferred();
const typeMatch = path.match(/^([^:]+):\/(\/.*)\/?/i);
type = typeMatch ? typeMatch[1] : type || 'hdfs';
type = type.replace(/s3.*/i, 's3');
type = type.replace(/adl.*/i, 'adls');
type = type.replace(/abfs.*/i, 'abfs');
// TODO: connector.id for browser connectors
const connector = findBrowserConnector(connector => connector.type === type);
if (connector) {
const rootPath = getRootFilePath(connector);
const rootEntry = new AssistStorageEntry({
source: connector,
rootPath: rootPath,
originalType: typeMatch && typeMatch[1],
definition: {
name: rootPath,
type: 'dir'
},
parent: null,
apiHelper: apiHelper
});
if (type === 'abfs' || type === 'adls') {
// ABFS / ADLS can have domain name in path. To prevent regression with s3 which allow periods in bucket name handle separately.
const azureMatch = path.match(
/^([^:]+):\/(\/((\w+)@)?[\w]+([\-\.]{1}\w+)*\.[\w]*)?(\/.*)?\/?/i
);
path = (azureMatch ? azureMatch[6] || '' : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
if (azureMatch && azureMatch[4]) {
path.unshift(azureMatch[4]);
}
} else {
path = (typeMatch ? typeMatch[2] : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
}
rootEntry.loadDeep(path, deferred.resolve);
} else {
deferred.reject();
}
return deferred.promise();
}
}
export default AssistStorageEntry;
| showContextPopover | identifier_name |
assistStorageEntry.js | // Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import $ from 'jquery';
import * as ko from 'knockout';
import apiHelper from 'api/apiHelper';
import huePubSub from 'utils/huePubSub';
import { findBrowserConnector, getRootFilePath } from 'config/hueConfig';
const PAGE_SIZE = 100;
const TYPE_SPECIFICS = {
adls: {
apiHelperFetchFunction: 'fetchAdlsPath',
dblClickPubSubId: 'assist.dblClickAdlsItem'
},
abfs: {
apiHelperFetchFunction: 'fetchAbfsPath',
dblClickPubSubId: 'assist.dblClickAbfsItem'
},
hdfs: {
apiHelperFetchFunction: 'fetchHdfsPath',
dblClickPubSubId: 'assist.dblClickHdfsItem'
},
s3: {
apiHelperFetchFunction: 'fetchS3Path',
dblClickPubSubId: 'assist.dblClickS3Item'
}
};
class AssistStorageEntry {
/**
* @param {object} options
* @param {object} options.definition
* @param {string} options.definition.name
* @param {string} options.definition.type (file, dir)
* @param {string} options.source - The storage source
* @param {string} [options.originalType] - The original storage type ('adl', 's3a')
* @param {AssistStorageEntry} options.parent
* @constructor
*/
constructor(options) {
const self = this;
self.source = options.source;
self.originalType = options.originalType;
self.definition = options.definition;
self.parent = options.parent;
self.rootPath = options.rootPath || '';
self.path = '';
if (self.parent !== null) {
self.path = self.parent.path;
if (self.parent.path !== '/' && !/\/$/.test(self.path)) {
self.path += '/';
}
}
self.path += self.definition.name;
self.abfsPath = (/^\//.test(self.path) ? 'abfs:/' : 'abfs://') + self.path;
self.currentPage = 1;
self.hasMorePages = true;
self.preview = ko.observable();
self.contextPopoverVisible = ko.observable(false);
self.filter = ko.observable('').extend({ rateLimit: 400 });
self.filter.subscribe(() => {
self.currentPage = 1;
self.hasMorePages = true;
self.loadEntries();
});
self.entries = ko.observableArray([]);
self.loaded = false;
self.loading = ko.observable(false);
self.loadingMore = ko.observable(false);
self.errorText = ko.observable();
self.hasErrors = ko.observable(false);
self.open = ko.observable(false);
self.open.subscribe(newValue => {
if (newValue && self.entries().length === 0) {
if (self.definition.type === 'dir') {
self.loadEntries();
} else {
self.loadPreview();
}
}
});
self.hasEntries = ko.pureComputed(() => self.entries().length > 0);
}
dblClick() {
huePubSub.publish(TYPE_SPECIFICS[self.source.type].dblClickPubSubId, this);
}
loadPreview() {
const self = this;
self.loading(true);
apiHelper
.fetchStoragePreview({
path: self.getHierarchy(),
type: self.source.type,
silenceErrors: true
})
.done(data => {
self.preview(data);
})
.fail(errorText => {
self.hasErrors(true);
self.errorText(errorText);
})
.always(() => {
self.loading(false);
});
}
loadEntries(callback) {
const self = this;
if (self.loading()) {
return;
}
self.loading(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
rootPath: self.rootPath,
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
filteredFiles.map(file => {
return new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
});
})
);
self.loaded = true;
self.loading(false);
self.hasErrors(!!data.s3_listing_not_allowed); // Special case where we want errors inline instead of the default popover. We don't want errorCallback handling
self.errorText(data.s3_listing_not_allowed);
if (callback) {
callback();
}
},
errorCallback: errorText => {
self.hasErrors(true);
self.errorText(errorText);
self.loading(false);
if (callback) {
callback();
}
}
});
}
goHome() {
huePubSub.publish('assist.storage.go.home');
}
loadDeep(folders, callback) {
const self = this;
if (folders.length === 0) {
callback(self);
return;
}
if (this.rootPath) {
const relativeFolders = folders.join('/').replace(new RegExp('^' + this.rootPath, ''), '');
folders = relativeFolders.split('/');
}
const nextName = folders.shift();
let loadedPages = 0;
const findNextAndLoadDeep = () => {
const foundEntry = self.entries().filter(entry => entry.definition.name === nextName);
const passedAlphabetically =
self.entries().length > 0 &&
self.entries()[self.entries().length - 1].definition.name.localeCompare(nextName) > 0;
if (foundEntry.length === 1) {
foundEntry[0].loadDeep(folders, callback);
} else if (!passedAlphabetically && self.hasMorePages && loadedPages < 50) {
loadedPages++;
self.fetchMore(findNextAndLoadDeep, () => {
callback(self);
});
} else {
callback(self);
}
};
if (!self.loaded) {
self.loadEntries(findNextAndLoadDeep);
} else {
findNextAndLoadDeep();
}
} | let entry = self;
while (entry) {
if (!entry.parent && entry.definition.name) {
const rootParts = entry.definition.name.split('/').filter(Boolean);
rootParts.reverse();
parts = parts.concat(rootParts);
} else if (entry.definition.name) {
parts.push(entry.definition.name);
}
entry = entry.parent;
}
parts.reverse();
return parts;
}
toggleOpen(data, event) {
const self = this;
if (self.definition.type === 'file') {
if (event.ctrlKey || event.metaKey || event.which === 2) {
window.open('/hue' + self.definition.url, '_blank');
} else {
huePubSub.publish('open.link', self.definition.url);
}
return;
}
self.open(!self.open());
if (self.definition.name === '..') {
if (self.parent.parent) {
huePubSub.publish('assist.selectStorageEntry', self.parent.parent);
}
} else {
huePubSub.publish('assist.selectStorageEntry', self);
}
}
fetchMore(successCallback, errorCallback) {
const self = this;
if (!self.hasMorePages || self.loadingMore()) {
return;
}
self.currentPage++;
self.loadingMore(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
self.entries().concat(
filteredFiles.map(
file =>
new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
})
)
)
);
self.loadingMore(false);
if (successCallback) {
successCallback();
}
},
errorCallback: () => {
self.hasErrors(true);
if (errorCallback) {
errorCallback();
}
}
});
}
showContextPopover(entry, event, positionAdjustment) {
const $source = $(event.target);
const offset = $source.offset();
entry.contextPopoverVisible(true);
if (positionAdjustment) {
offset.left += positionAdjustment.left;
offset.top += positionAdjustment.top;
}
huePubSub.publish('context.popover.show', {
data: {
type: 'storageEntry',
storageEntry: entry
},
pinEnabled: true,
orientation: 'right',
source: {
element: event.target,
left: offset.left,
top: offset.top - 3,
right: offset.left + $source.width() + 3,
bottom: offset.top + $source.height() - 3
}
});
huePubSub.subscribeOnce('context.popover.hidden', () => {
entry.contextPopoverVisible(false);
});
}
openInImporter() {
huePubSub.publish('open.in.importer', this.definition.path);
}
/**
* Helper function to create an assistStorageEntry. It will load the entries starting from the root up until the
* path or stop when a part is not found.
*
* @param {string} path - The path, can include the type i.e. '/tmp' or 's3:/tmp'.
* @param {string} [type] - Optional type, if not specified here or in the path 'hdfs' will be used.
* @return {JQueryPromise<AssistStorageEntry>}
*/
static getEntry(path, type) {
const deferred = $.Deferred();
const typeMatch = path.match(/^([^:]+):\/(\/.*)\/?/i);
type = typeMatch ? typeMatch[1] : type || 'hdfs';
type = type.replace(/s3.*/i, 's3');
type = type.replace(/adl.*/i, 'adls');
type = type.replace(/abfs.*/i, 'abfs');
// TODO: connector.id for browser connectors
const connector = findBrowserConnector(connector => connector.type === type);
if (connector) {
const rootPath = getRootFilePath(connector);
const rootEntry = new AssistStorageEntry({
source: connector,
rootPath: rootPath,
originalType: typeMatch && typeMatch[1],
definition: {
name: rootPath,
type: 'dir'
},
parent: null,
apiHelper: apiHelper
});
if (type === 'abfs' || type === 'adls') {
// ABFS / ADLS can have domain name in path. To prevent regression with s3 which allow periods in bucket name handle separately.
const azureMatch = path.match(
/^([^:]+):\/(\/((\w+)@)?[\w]+([\-\.]{1}\w+)*\.[\w]*)?(\/.*)?\/?/i
);
path = (azureMatch ? azureMatch[6] || '' : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
if (azureMatch && azureMatch[4]) {
path.unshift(azureMatch[4]);
}
} else {
path = (typeMatch ? typeMatch[2] : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
}
rootEntry.loadDeep(path, deferred.resolve);
} else {
deferred.reject();
}
return deferred.promise();
}
}
export default AssistStorageEntry; |
getHierarchy() {
const self = this;
let parts = []; | random_line_split |
assistStorageEntry.js | // Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import $ from 'jquery';
import * as ko from 'knockout';
import apiHelper from 'api/apiHelper';
import huePubSub from 'utils/huePubSub';
import { findBrowserConnector, getRootFilePath } from 'config/hueConfig';
const PAGE_SIZE = 100;
const TYPE_SPECIFICS = {
adls: {
apiHelperFetchFunction: 'fetchAdlsPath',
dblClickPubSubId: 'assist.dblClickAdlsItem'
},
abfs: {
apiHelperFetchFunction: 'fetchAbfsPath',
dblClickPubSubId: 'assist.dblClickAbfsItem'
},
hdfs: {
apiHelperFetchFunction: 'fetchHdfsPath',
dblClickPubSubId: 'assist.dblClickHdfsItem'
},
s3: {
apiHelperFetchFunction: 'fetchS3Path',
dblClickPubSubId: 'assist.dblClickS3Item'
}
};
class AssistStorageEntry {
/**
* @param {object} options
* @param {object} options.definition
* @param {string} options.definition.name
* @param {string} options.definition.type (file, dir)
* @param {string} options.source - The storage source
* @param {string} [options.originalType] - The original storage type ('adl', 's3a')
* @param {AssistStorageEntry} options.parent
* @constructor
*/
constructor(options) {
const self = this;
self.source = options.source;
self.originalType = options.originalType;
self.definition = options.definition;
self.parent = options.parent;
self.rootPath = options.rootPath || '';
self.path = '';
if (self.parent !== null) {
self.path = self.parent.path;
if (self.parent.path !== '/' && !/\/$/.test(self.path)) {
self.path += '/';
}
}
self.path += self.definition.name;
self.abfsPath = (/^\//.test(self.path) ? 'abfs:/' : 'abfs://') + self.path;
self.currentPage = 1;
self.hasMorePages = true;
self.preview = ko.observable();
self.contextPopoverVisible = ko.observable(false);
self.filter = ko.observable('').extend({ rateLimit: 400 });
self.filter.subscribe(() => {
self.currentPage = 1;
self.hasMorePages = true;
self.loadEntries();
});
self.entries = ko.observableArray([]);
self.loaded = false;
self.loading = ko.observable(false);
self.loadingMore = ko.observable(false);
self.errorText = ko.observable();
self.hasErrors = ko.observable(false);
self.open = ko.observable(false);
self.open.subscribe(newValue => {
if (newValue && self.entries().length === 0) {
if (self.definition.type === 'dir') {
self.loadEntries();
} else {
self.loadPreview();
}
}
});
self.hasEntries = ko.pureComputed(() => self.entries().length > 0);
}
dblClick() {
huePubSub.publish(TYPE_SPECIFICS[self.source.type].dblClickPubSubId, this);
}
loadPreview() {
const self = this;
self.loading(true);
apiHelper
.fetchStoragePreview({
path: self.getHierarchy(),
type: self.source.type,
silenceErrors: true
})
.done(data => {
self.preview(data);
})
.fail(errorText => {
self.hasErrors(true);
self.errorText(errorText);
})
.always(() => {
self.loading(false);
});
}
loadEntries(callback) {
const self = this;
if (self.loading()) {
return;
}
self.loading(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
rootPath: self.rootPath,
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
filteredFiles.map(file => {
return new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
});
})
);
self.loaded = true;
self.loading(false);
self.hasErrors(!!data.s3_listing_not_allowed); // Special case where we want errors inline instead of the default popover. We don't want errorCallback handling
self.errorText(data.s3_listing_not_allowed);
if (callback) {
callback();
}
},
errorCallback: errorText => {
self.hasErrors(true);
self.errorText(errorText);
self.loading(false);
if (callback) {
callback();
}
}
});
}
goHome() {
huePubSub.publish('assist.storage.go.home');
}
loadDeep(folders, callback) {
const self = this;
if (folders.length === 0) {
callback(self);
return;
}
if (this.rootPath) {
const relativeFolders = folders.join('/').replace(new RegExp('^' + this.rootPath, ''), '');
folders = relativeFolders.split('/');
}
const nextName = folders.shift();
let loadedPages = 0;
const findNextAndLoadDeep = () => {
const foundEntry = self.entries().filter(entry => entry.definition.name === nextName);
const passedAlphabetically =
self.entries().length > 0 &&
self.entries()[self.entries().length - 1].definition.name.localeCompare(nextName) > 0;
if (foundEntry.length === 1) {
foundEntry[0].loadDeep(folders, callback);
} else if (!passedAlphabetically && self.hasMorePages && loadedPages < 50) | else {
callback(self);
}
};
if (!self.loaded) {
self.loadEntries(findNextAndLoadDeep);
} else {
findNextAndLoadDeep();
}
}
getHierarchy() {
const self = this;
let parts = [];
let entry = self;
while (entry) {
if (!entry.parent && entry.definition.name) {
const rootParts = entry.definition.name.split('/').filter(Boolean);
rootParts.reverse();
parts = parts.concat(rootParts);
} else if (entry.definition.name) {
parts.push(entry.definition.name);
}
entry = entry.parent;
}
parts.reverse();
return parts;
}
toggleOpen(data, event) {
const self = this;
if (self.definition.type === 'file') {
if (event.ctrlKey || event.metaKey || event.which === 2) {
window.open('/hue' + self.definition.url, '_blank');
} else {
huePubSub.publish('open.link', self.definition.url);
}
return;
}
self.open(!self.open());
if (self.definition.name === '..') {
if (self.parent.parent) {
huePubSub.publish('assist.selectStorageEntry', self.parent.parent);
}
} else {
huePubSub.publish('assist.selectStorageEntry', self);
}
}
fetchMore(successCallback, errorCallback) {
const self = this;
if (!self.hasMorePages || self.loadingMore()) {
return;
}
self.currentPage++;
self.loadingMore(true);
self.hasErrors(false);
apiHelper[TYPE_SPECIFICS[self.source.type].apiHelperFetchFunction]({
pageSize: PAGE_SIZE,
page: self.currentPage,
filter: self.filter().trim() ? self.filter() : undefined,
pathParts: self.getHierarchy(),
successCallback: data => {
self.hasMorePages = data.page.next_page_number > self.currentPage;
const filteredFiles = data.files.filter(file => file.name !== '.' && file.name !== '..');
self.entries(
self.entries().concat(
filteredFiles.map(
file =>
new AssistStorageEntry({
originalType: self.originalType,
rootPath: self.rootPath,
source: self.source,
definition: file,
parent: self
})
)
)
);
self.loadingMore(false);
if (successCallback) {
successCallback();
}
},
errorCallback: () => {
self.hasErrors(true);
if (errorCallback) {
errorCallback();
}
}
});
}
showContextPopover(entry, event, positionAdjustment) {
const $source = $(event.target);
const offset = $source.offset();
entry.contextPopoverVisible(true);
if (positionAdjustment) {
offset.left += positionAdjustment.left;
offset.top += positionAdjustment.top;
}
huePubSub.publish('context.popover.show', {
data: {
type: 'storageEntry',
storageEntry: entry
},
pinEnabled: true,
orientation: 'right',
source: {
element: event.target,
left: offset.left,
top: offset.top - 3,
right: offset.left + $source.width() + 3,
bottom: offset.top + $source.height() - 3
}
});
huePubSub.subscribeOnce('context.popover.hidden', () => {
entry.contextPopoverVisible(false);
});
}
openInImporter() {
huePubSub.publish('open.in.importer', this.definition.path);
}
/**
* Helper function to create an assistStorageEntry. It will load the entries starting from the root up until the
* path or stop when a part is not found.
*
* @param {string} path - The path, can include the type i.e. '/tmp' or 's3:/tmp'.
* @param {string} [type] - Optional type, if not specified here or in the path 'hdfs' will be used.
* @return {JQueryPromise<AssistStorageEntry>}
*/
static getEntry(path, type) {
const deferred = $.Deferred();
const typeMatch = path.match(/^([^:]+):\/(\/.*)\/?/i);
type = typeMatch ? typeMatch[1] : type || 'hdfs';
type = type.replace(/s3.*/i, 's3');
type = type.replace(/adl.*/i, 'adls');
type = type.replace(/abfs.*/i, 'abfs');
// TODO: connector.id for browser connectors
const connector = findBrowserConnector(connector => connector.type === type);
if (connector) {
const rootPath = getRootFilePath(connector);
const rootEntry = new AssistStorageEntry({
source: connector,
rootPath: rootPath,
originalType: typeMatch && typeMatch[1],
definition: {
name: rootPath,
type: 'dir'
},
parent: null,
apiHelper: apiHelper
});
if (type === 'abfs' || type === 'adls') {
// ABFS / ADLS can have domain name in path. To prevent regression with s3 which allow periods in bucket name handle separately.
const azureMatch = path.match(
/^([^:]+):\/(\/((\w+)@)?[\w]+([\-\.]{1}\w+)*\.[\w]*)?(\/.*)?\/?/i
);
path = (azureMatch ? azureMatch[6] || '' : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
if (azureMatch && azureMatch[4]) {
path.unshift(azureMatch[4]);
}
} else {
path = (typeMatch ? typeMatch[2] : path).replace(/(?:^\/)|(?:\/$)/g, '').split('/');
}
rootEntry.loadDeep(path, deferred.resolve);
} else {
deferred.reject();
}
return deferred.promise();
}
}
export default AssistStorageEntry;
| {
loadedPages++;
self.fetchMore(findNextAndLoadDeep, () => {
callback(self);
});
} | conditional_block |
id.js | $.extend(window.lang_id, {
"Title": "Apakah Anda Melainkan ..",
"installApp": "Gunakan Aplikasi untuk membagikan jajak pendapat ini!",
"installAppComments": "Download gratis.",
"haveApp": "Aku sudah App!",
"SecondPageButton": "Bermain",
"Welcome": "Halo selamat datang!",
"rules": "1. Klik link aplikasi untuk memuat votasi <br/> 2. Pilih suara anda <br/> 3. Bagikan gambar yang diperbarui kepada siapa saja!",
"MakeNew": "Poll baru",
"Create": "Membuat",
"VotedPolls": "Voting polling:",
"swipeToRemove": "Gesek untuk menghapus",
"showYourPolls": "Tunjukkan jajak pendapatmu",
"Return": "Kembali",
"Share+": "Bagikan +",
"Share": "Bagikan",
"Save": "Menyimpan",
"Voters": "Pemilih",
"Cancel": "Membatalkan",
"needsPhone": "Untuk mendapatkan suara di poling umum, kami mencegah agar bots mengirim SMS ke nomor telepon Anda",
"needsPhoneComment": "Identifikasi ini disediakan oleh rangkaian pengembangan twitter auth dan aplikasi ini akan mengenkripsi nomor telepon Anda.",
"multipleChoice": "Biarkan memilih beberapa pilihan.",
"onlyDevice": "Pilih hanya suara dari App.",
"openVotes": "Membuka suara",
"firstOk": "Baik",
"Activate": "Mengaktifkan",
"downloadAppStore": "Pasang aplikasi dan bagikan!",
"createPoll": "Bagikan polis ke manapun Anda inginkan:",
"QuestionPlaceholder": "Tuliskan disini pertanyaannya, sertakan info tambahan di jalur lain.",
"OptionsPlaceholder": "Masukkan setiap pilihan pada baris yang berbeda.",
"UsernamePlaceholder": "Nama anda (opsional)",
"PublicOnlyFromApp": "Untuk memilih di poling umum, Anda perlu menggunakan <u> APP </ u>",
"PollOnlyAvailableIn": "Jajak pendapat ini hanya tersedia di",
"AllowAppServices": "Tolong, <b> perbaiki </ b> pengalaman pengguna Anda sehingga memungkinkan penggunaan akses <b> '</ b>.",
"whyNeedServicesPermission": "Mengapa perlu ini? <br/> Sejak versi 5.0 Lollipop 5.0, diperlukan izin khusus untuk membaca penggunaan aplikasi. Kami membutuhkan ini untuk meningkatkan penerapan dengan aplikasi sosial lainnya.",
"votationNotExists": "Pemungutan suara tidak ada",
"votationNotSaved": "Tidak dapat terhubung ke server: voting tidak disimpan",
"waitingKey": "Membuat demokrasi ..",
"waitingKeyExpired": "Kesalahan saat menyimpan jajak pendapat Coba lagi..",
"onlyMostVotedShows": "Secara default hanya 4 pilihan yang paling terpilih yang ditampilkan di gambar bersama.", | "notPublicUsers": "Tidak ada nama pemilih",
"missingAjaxKey": "Batas waktu meminta kunci polling, silakan cek koneksi internet anda.",
"more": "lebih",
"min1Option": "Letakkan setidaknya 1 pilihan",
"min2Options": "Pilihan tidak boleh kosong",
"duplicatedOptions": "Pilihan duplikat!",
"myName": "Masukkan nama panggilan",
"requiredName": "Masukkan nama anda (wajib)",
"privateNeedUserName": "Tambahkan nama Anda diperlukan (hanya untuk polling pribadi)",
"unknown": "Tidak diketahui",
"browser": "Browser",
"notInApp": "Gunakan App untuk meneruskan polling.",
"pollWillVisible": "Terima kasih. Jajak pendapat akan terlihat dalam beberapa jam ke depan!",
"error": "kesalahan",
"errorAjaxResponse": "Kesalahan saat mengambil respon ajax",
"votationNotFound": "Pemungutan suara tidak ditemukan",
"WrongCountry": "Anda tidak berada di tempat",
"notValidUserId": "Id pengguna tidak valid",
"notLoadingPolls": "Kesalahan pemuatan jajak pendapat",
"warnNotPublic": "Peringatkan: jajak pendapat ini bersifat pribadi dan tidak memvalidasi pemilih, jadi tidak berlaku untuk pemungutan suara publik",
"e_noDataReceived": "Tidak ada data yang diambil",
"e_phoneValidationNotWork": "Masalah untuk melakukan verifikasi SMS? Tolong kalau begitu hubungi kami.",
"warnMaximumOptions": "Verifikasi gambar tidak dipotong di lingkungan bersama",
"e_retrievingData": "Polling tidak ditemukan, periksa koneksi internet anda:",
"loadingPublicKey": "Memvalidasi nomor telepon, tunggu sebentar dan coba lagi ..",
"PopularPolls": "Jajak pendapat populer:",
"total": "total",
"New": "Baru",
"search": "pencarian",
"showAll": "Tunjukkan semua negara",
"showMine": "Default",
"duplicatedKey": "Digandakan kunci",
"noPollsFound": "Bukan jajak pendapat yang ditemukan",
"forgetPoll": "lupakan ini",
"UNDO": "MEMBUKA",
"Votes": "Suara",
"ClickTheLink": "Link untuk memilih!",
"CustomStyles": "Gaya default:",
"defaults": "Default",
"Symbol": "Simbol:",
"ChooseFile": "Upload gambar",
"QuestionColor": "Warna pertanyaan:",
"OptionTextColor": "Warna teks opsi:",
"BackgroundColor": "Warna latar belakang:",
"GradientBackground": "Latar belakang gradasi:",
"GradientColor1": "Gradien warna 1:",
"GradientColor2": "Warna gradien 2:",
"LimitByCountry": "Batas menurut wilayah:",
"WillShowPublic": "- Jajak pendapat publik akan ditampilkan di halaman 'jajak pendapat publik'.",
"ChooseCountry": "Memilih",
"ExampleQuestion": "Ini adalah contoh pertanyaan <br> Dan ini adalah beberapa teks tambahan",
"OptionExample1": "Contoh pilihan A",
"OptionExample2": "Contoh pilihan B",
"ownerNameExample": "nama pemilik",
"AddPollLink": "Tambahkan LINK web tambahan untuk polling ini",
"Public": "Publik",
"StyleNamePlaceholder": "Nama gaya",
"SaveStyle": "Menyimpan",
"Delete": "Menghapus",
"allCountries": "Negara manapun ..",
"Yes": "iya nih",
"Not": "Tidak",
"cantChangePublic": "Jajak pendapat ini sudah disimpan. Untuk mengubah nilai Publik, batalkan ini dan buatlah yang baru.",
"noUserCountryFound": "Kesalahan: tidak dapat mengambil negara telepon Anda, hubungi kami",
"p_notEditable": "Tidak tersedia pilihan disini.",
"e_votationRemoved": "Maaf, pemungutan suara ini telah dihapus",
"e_connectionLost": "Koneksi terputus",
"e_votationWithErrors": "Maaf, pemungutan suara memiliki kesalahan. Silahkan hubungi kami.",
"e_flagNotFound": "Tidak bisa memuat bendera",
"e_parsingIndexFile": "Kesalahan parsing file _index.txt",
"e_connection": "Silahkan cek koneksi internet anda dan coba lagi",
"e_publicPolls": "Tidak ditemukan jajak pendapat umum",
"e_imageNotFound": "Kesalahan: gambar tidak ditemukan",
}); | "hidePolls": "Menyembunyikan jajak pendapat", | random_line_split |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll},
};
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint {
Checkpoint {
end: self.end,
backwards: true,
}
}
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 | else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
}
| {
Poll::Ready(Ok(0))
} | conditional_block |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll},
};
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint |
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 {
Poll::Ready(Ok(0))
} else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
}
| {
Checkpoint {
end: self.end,
backwards: true,
}
} | identifier_body |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll},
};
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint {
Checkpoint {
end: self.end,
backwards: true,
}
}
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 {
Poll::Ready(Ok(0))
} else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn | (&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
}
| flush | identifier_name |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll}, | pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint {
Checkpoint {
end: self.end,
backwards: true,
}
}
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 {
Poll::Ready(Ok(0))
} else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
} | };
#[derive(Debug, PartialEq, Clone)] | random_line_split |
RPCServer.py | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <joseph@artefactual.com>
import archivematicaMCP
import sys
from linkTaskManagerChoice import choicesAvailableForUnits
import logging
import lxml.etree as etree
import gearman
import cPickle
import time
import traceback
from socket import gethostname
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from custom_handlers import GroupWriteRotatingFileHandler
import databaseInterface
def rpcError(code="", details=""):
ret = etree.Element("Error")
etree.SubElement(ret, "code").text = code.__str__()
etree.SubElement(ret, "details").text = details.__str__()
return ret
def verifyDatabaseIsNotLocked():
timeBeforeReturningErrorLockedDB = 4
timeToSleep = 0.1
numberOfRuns = 0 #count of number of runs in loop
while not databaseInterface.sqlLock.acquire(False):
time.sleep(timeToSleep)
numberOfRuns += 1
if numberOfRuns * timeToSleep > timeBeforeReturningErrorLockedDB:
return rpcError(code="DatabaseLock", details="Couldn't acquire database lock")
databaseInterface.sqlLock.release()
return None
def getJobsAwaitingApproval():
ret = etree.Element("choicesAvailableForUnits")
dbStatus = verifyDatabaseIsNotLocked()
if dbStatus:
#print etree.tostring(dbStatus)
return etree.tostring(dbStatus)
for UUID, choice in choicesAvailableForUnits.items():
|
return etree.tostring(ret, pretty_print=True)
def approveJob(jobUUID, chain, agent):
print "approving: ", jobUUID, chain, agent
if jobUUID in choicesAvailableForUnits:
choicesAvailableForUnits[jobUUID].proceedWithChoice(chain, agent)
return "approving: ", jobUUID, chain
def gearmanApproveJob(gearman_worker, gearman_job):
try:
#execute = gearman_job.task
data = cPickle.loads(gearman_job.data)
jobUUID = data["jobUUID"]
chain = data["chain"]
agent = str(data["uid"])
ret = cPickle.dumps(approveJob(jobUUID, chain, agent))
if not ret:
ret = ""
return ""
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanApproveJob"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def gearmanGetJobsAwaitingApproval(gearman_worker, gearman_job):
try:
#print "DEBUG - getting list of jobs"
#execute = gearman_job.task
ret = cPickle.dumps(getJobsAwaitingApproval())
#print ret
if not ret:
ret = ""
return ret
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanGetJobsAwaitingApproval"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def startRPCServer():
logger = logging.getLogger("archivematica")
logger.addHandler(GroupWriteRotatingFileHandler("/var/log/archivematica/MCPServer/MCPServer.log", maxBytes=4194304))
gm_worker = gearman.GearmanWorker([archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
hostID = gethostname() + "_MCPServer"
gm_worker.set_client_id(hostID)
gm_worker.register_task("approveJob", gearmanApproveJob)
gm_worker.register_task("getJobsAwaitingApproval", gearmanGetJobsAwaitingApproval)
failMaxSleep = 30
failSleep = 1
failSleepIncrementor = 2
while True:
try:
gm_worker.work()
except gearman.errors.ServerUnavailable as inst:
#print >>sys.stderr, inst.args
#print >>sys.stderr, "Retrying in %d seconds." % (failSleep)
time.sleep(failSleep)
if failSleep < failMaxSleep:
failSleep += failSleepIncrementor
| ret.append(choice.xmlify()) | conditional_block |
RPCServer.py | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <joseph@artefactual.com>
import archivematicaMCP
import sys
from linkTaskManagerChoice import choicesAvailableForUnits
import logging
import lxml.etree as etree
import gearman
import cPickle
import time
import traceback
from socket import gethostname
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from custom_handlers import GroupWriteRotatingFileHandler
import databaseInterface
def rpcError(code="", details=""):
ret = etree.Element("Error")
etree.SubElement(ret, "code").text = code.__str__()
etree.SubElement(ret, "details").text = details.__str__()
return ret
def verifyDatabaseIsNotLocked():
timeBeforeReturningErrorLockedDB = 4
timeToSleep = 0.1
numberOfRuns = 0 #count of number of runs in loop
while not databaseInterface.sqlLock.acquire(False):
time.sleep(timeToSleep)
numberOfRuns += 1
if numberOfRuns * timeToSleep > timeBeforeReturningErrorLockedDB:
return rpcError(code="DatabaseLock", details="Couldn't acquire database lock")
databaseInterface.sqlLock.release()
return None
def getJobsAwaitingApproval():
ret = etree.Element("choicesAvailableForUnits")
dbStatus = verifyDatabaseIsNotLocked()
if dbStatus:
#print etree.tostring(dbStatus)
return etree.tostring(dbStatus)
for UUID, choice in choicesAvailableForUnits.items():
ret.append(choice.xmlify())
return etree.tostring(ret, pretty_print=True)
def approveJob(jobUUID, chain, agent):
print "approving: ", jobUUID, chain, agent
if jobUUID in choicesAvailableForUnits:
choicesAvailableForUnits[jobUUID].proceedWithChoice(chain, agent)
return "approving: ", jobUUID, chain
def gearmanApproveJob(gearman_worker, gearman_job):
try:
#execute = gearman_job.task
data = cPickle.loads(gearman_job.data)
jobUUID = data["jobUUID"]
chain = data["chain"]
agent = str(data["uid"])
ret = cPickle.dumps(approveJob(jobUUID, chain, agent))
if not ret:
ret = ""
return ""
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanApproveJob"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def | (gearman_worker, gearman_job):
try:
#print "DEBUG - getting list of jobs"
#execute = gearman_job.task
ret = cPickle.dumps(getJobsAwaitingApproval())
#print ret
if not ret:
ret = ""
return ret
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanGetJobsAwaitingApproval"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def startRPCServer():
logger = logging.getLogger("archivematica")
logger.addHandler(GroupWriteRotatingFileHandler("/var/log/archivematica/MCPServer/MCPServer.log", maxBytes=4194304))
gm_worker = gearman.GearmanWorker([archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
hostID = gethostname() + "_MCPServer"
gm_worker.set_client_id(hostID)
gm_worker.register_task("approveJob", gearmanApproveJob)
gm_worker.register_task("getJobsAwaitingApproval", gearmanGetJobsAwaitingApproval)
failMaxSleep = 30
failSleep = 1
failSleepIncrementor = 2
while True:
try:
gm_worker.work()
except gearman.errors.ServerUnavailable as inst:
#print >>sys.stderr, inst.args
#print >>sys.stderr, "Retrying in %d seconds." % (failSleep)
time.sleep(failSleep)
if failSleep < failMaxSleep:
failSleep += failSleepIncrementor
| gearmanGetJobsAwaitingApproval | identifier_name |
RPCServer.py | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <joseph@artefactual.com>
import archivematicaMCP
import sys
from linkTaskManagerChoice import choicesAvailableForUnits
import logging
import lxml.etree as etree
import gearman
import cPickle
import time
import traceback
from socket import gethostname
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from custom_handlers import GroupWriteRotatingFileHandler
import databaseInterface
def rpcError(code="", details=""):
ret = etree.Element("Error")
etree.SubElement(ret, "code").text = code.__str__()
etree.SubElement(ret, "details").text = details.__str__()
return ret
def verifyDatabaseIsNotLocked():
|
def getJobsAwaitingApproval():
ret = etree.Element("choicesAvailableForUnits")
dbStatus = verifyDatabaseIsNotLocked()
if dbStatus:
#print etree.tostring(dbStatus)
return etree.tostring(dbStatus)
for UUID, choice in choicesAvailableForUnits.items():
ret.append(choice.xmlify())
return etree.tostring(ret, pretty_print=True)
def approveJob(jobUUID, chain, agent):
print "approving: ", jobUUID, chain, agent
if jobUUID in choicesAvailableForUnits:
choicesAvailableForUnits[jobUUID].proceedWithChoice(chain, agent)
return "approving: ", jobUUID, chain
def gearmanApproveJob(gearman_worker, gearman_job):
try:
#execute = gearman_job.task
data = cPickle.loads(gearman_job.data)
jobUUID = data["jobUUID"]
chain = data["chain"]
agent = str(data["uid"])
ret = cPickle.dumps(approveJob(jobUUID, chain, agent))
if not ret:
ret = ""
return ""
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanApproveJob"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def gearmanGetJobsAwaitingApproval(gearman_worker, gearman_job):
try:
#print "DEBUG - getting list of jobs"
#execute = gearman_job.task
ret = cPickle.dumps(getJobsAwaitingApproval())
#print ret
if not ret:
ret = ""
return ret
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanGetJobsAwaitingApproval"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def startRPCServer():
logger = logging.getLogger("archivematica")
logger.addHandler(GroupWriteRotatingFileHandler("/var/log/archivematica/MCPServer/MCPServer.log", maxBytes=4194304))
gm_worker = gearman.GearmanWorker([archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
hostID = gethostname() + "_MCPServer"
gm_worker.set_client_id(hostID)
gm_worker.register_task("approveJob", gearmanApproveJob)
gm_worker.register_task("getJobsAwaitingApproval", gearmanGetJobsAwaitingApproval)
failMaxSleep = 30
failSleep = 1
failSleepIncrementor = 2
while True:
try:
gm_worker.work()
except gearman.errors.ServerUnavailable as inst:
#print >>sys.stderr, inst.args
#print >>sys.stderr, "Retrying in %d seconds." % (failSleep)
time.sleep(failSleep)
if failSleep < failMaxSleep:
failSleep += failSleepIncrementor
| timeBeforeReturningErrorLockedDB = 4
timeToSleep = 0.1
numberOfRuns = 0 #count of number of runs in loop
while not databaseInterface.sqlLock.acquire(False):
time.sleep(timeToSleep)
numberOfRuns += 1
if numberOfRuns * timeToSleep > timeBeforeReturningErrorLockedDB:
return rpcError(code="DatabaseLock", details="Couldn't acquire database lock")
databaseInterface.sqlLock.release()
return None | identifier_body |
RPCServer.py | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <joseph@artefactual.com>
import archivematicaMCP
import sys | import gearman
import cPickle
import time
import traceback
from socket import gethostname
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from custom_handlers import GroupWriteRotatingFileHandler
import databaseInterface
def rpcError(code="", details=""):
ret = etree.Element("Error")
etree.SubElement(ret, "code").text = code.__str__()
etree.SubElement(ret, "details").text = details.__str__()
return ret
def verifyDatabaseIsNotLocked():
timeBeforeReturningErrorLockedDB = 4
timeToSleep = 0.1
numberOfRuns = 0 #count of number of runs in loop
while not databaseInterface.sqlLock.acquire(False):
time.sleep(timeToSleep)
numberOfRuns += 1
if numberOfRuns * timeToSleep > timeBeforeReturningErrorLockedDB:
return rpcError(code="DatabaseLock", details="Couldn't acquire database lock")
databaseInterface.sqlLock.release()
return None
def getJobsAwaitingApproval():
ret = etree.Element("choicesAvailableForUnits")
dbStatus = verifyDatabaseIsNotLocked()
if dbStatus:
#print etree.tostring(dbStatus)
return etree.tostring(dbStatus)
for UUID, choice in choicesAvailableForUnits.items():
ret.append(choice.xmlify())
return etree.tostring(ret, pretty_print=True)
def approveJob(jobUUID, chain, agent):
print "approving: ", jobUUID, chain, agent
if jobUUID in choicesAvailableForUnits:
choicesAvailableForUnits[jobUUID].proceedWithChoice(chain, agent)
return "approving: ", jobUUID, chain
def gearmanApproveJob(gearman_worker, gearman_job):
try:
#execute = gearman_job.task
data = cPickle.loads(gearman_job.data)
jobUUID = data["jobUUID"]
chain = data["chain"]
agent = str(data["uid"])
ret = cPickle.dumps(approveJob(jobUUID, chain, agent))
if not ret:
ret = ""
return ""
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanApproveJob"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def gearmanGetJobsAwaitingApproval(gearman_worker, gearman_job):
try:
#print "DEBUG - getting list of jobs"
#execute = gearman_job.task
ret = cPickle.dumps(getJobsAwaitingApproval())
#print ret
if not ret:
ret = ""
return ret
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanGetJobsAwaitingApproval"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def startRPCServer():
logger = logging.getLogger("archivematica")
logger.addHandler(GroupWriteRotatingFileHandler("/var/log/archivematica/MCPServer/MCPServer.log", maxBytes=4194304))
gm_worker = gearman.GearmanWorker([archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
hostID = gethostname() + "_MCPServer"
gm_worker.set_client_id(hostID)
gm_worker.register_task("approveJob", gearmanApproveJob)
gm_worker.register_task("getJobsAwaitingApproval", gearmanGetJobsAwaitingApproval)
failMaxSleep = 30
failSleep = 1
failSleepIncrementor = 2
while True:
try:
gm_worker.work()
except gearman.errors.ServerUnavailable as inst:
#print >>sys.stderr, inst.args
#print >>sys.stderr, "Retrying in %d seconds." % (failSleep)
time.sleep(failSleep)
if failSleep < failMaxSleep:
failSleep += failSleepIncrementor | from linkTaskManagerChoice import choicesAvailableForUnits
import logging
import lxml.etree as etree | random_line_split |
feed.js | "use strict";
Object.defineProperty(exports, "__esModule", { | value: true
});
var feed = exports.feed = { "viewBox": "0 0 16 16", "children": [{ "name": "path", "attribs": { "fill": "#000000", "d": "M6 8c0-1.105 0.895-2 2-2s2 0.895 2 2c0 1.105-0.895 2-2 2s-2-0.895-2-2zM10.38 3.602c1.56 0.846 2.62 2.498 2.62 4.398s-1.059 3.552-2.62 4.398c0.689-1.096 1.12-2.66 1.12-4.398s-0.431-3.302-1.12-4.398zM4.5 8c0 1.738 0.431 3.302 1.12 4.398-1.56-0.846-2.62-2.498-2.62-4.398s1.059-3.552 2.62-4.398c-0.689 1.096-1.12 2.66-1.12 4.398zM1.5 8c0 2.686 0.85 5.097 2.198 6.746-2.223-1.421-3.698-3.911-3.698-6.746s1.474-5.325 3.698-6.746c-1.348 1.649-2.198 4.060-2.198 6.746zM12.302 1.254c2.223 1.421 3.698 3.911 3.698 6.746s-1.474 5.325-3.698 6.746c1.348-1.649 2.198-4.060 2.198-6.746s-0.85-5.097-2.198-6.746z" } }] }; | random_line_split | |
elfinder.ar.js | /**
* Arabic translation (Syrian Localization, it may differ if you aren't from Syria or any Country in Middle East)
* @author Tawfek Daghistani <tawfekov@gmail.com>
* @version 2011-07-09
*/
if (elFinder && elFinder.prototype && typeof(elFinder.prototype.i18) == 'object') {
elFinder.prototype.i18.ar = {
translator : 'Tawfek Daghistani <tawfekov@gmail.com>',
language : 'العربية',
direction : 'rtl',
messages : {
/********************************** errors **********************************/
'error' : 'خطأ',
'errUnknown' : 'خطأ غير معروف .',
'errUnknownCmd' : 'أمر غير معروف .',
'errJqui' : 'إعدادات jQuery UI غير كاملة الرجاء التأكد من وجود كل من selectable, draggable and droppable',
'errNode' : '. موجود DOM إلى عنصر elFinder تحتاج ',
'errURL' : 'إعدادات خاطئة , عليك وضع الرابط ضمن الإعدادات',
'errAccess' : 'وصول مرفوض .',
'errConnect' : 'غير قادر على الاتصال بالخادم الخلفي (backend)',
'errAbort' : 'تم فصل الإتصال',
'errTimeout' : 'مهلة الإتصال قد إنتهت .',
'errNotFound' : 'الخادم الخلفي غير موجود .',
'errResponse' : 'رد غير مقبول من الخادم الخلفي',
'errConf' : 'خطأ في الإعدادات الخاصة بالخادم الخلفي ',
'errJSON' : 'الميزة PHP JSON module غير موجودة ',
'errNoVolumes' : 'لا يمكن القراءة من أي من الوسائط الموجودة ',
'errCmdParams' : 'البيانات المرسلة للأمر غير مقبولة "$1".',
'errDataNotJSON' : 'المعلومات المرسلة ليست من نوع JSON ',
'errDataEmpty' : 'لا يوجد معلومات مرسلة',
'errCmdReq' : 'الخادم الخلفي يطلب وجود اسم الأمر ',
'errOpen' : 'غير قادر على فتح "$1".',
'errNotFolder' : 'العنصر المختار ليس مجلد',
'errNotFile' : 'العنصر المختار ليس ملف',
'errRead' : 'غير قادر على القراءة "$1".',
'errWrite' : 'غير قادر على الكتابة "$1".',
'errPerm' : 'وصول مرفوض ',
'errLocked' : ' محمي و لا يمكن التعديل أو النقل أو إعادة التسمية"$1"',
'errExists' : ' موجود مسبقاً "$1"',
'errInvName' : 'الاسم مرفوض',
'errFolderNotFound' : 'المجلد غير موجود',
'errFileNotFound' : 'الملف غير موجود',
'errTrgFolderNotFound' : 'الملف الهدف "$1" غير موجود ',
'errPopup' : 'يمنعني المتصفح من إنشاء نافذة منبثقة , الرجاء تعديل الخيارات الخاصة من إعدادات المتصفح ',
'errMkdir' : ' غير قادر على إنشاء مجلد جديد "$1".',
'errMkfile' : ' غير قادر على إنشاء ملف جديد"$1".',
'errRename' : 'غير قادر على إعادة تسمية ال "$1".',
'errCopyFrom' : 'نسخ الملفات من الوسط المحدد "$1"غير مسموح.',
'errCopyTo' : 'نسخ الملفات إلى الوسط المحدد "$1" غير مسموح .',
'errUploadCommon' : 'خطأ أثناء عملية الرفع',
'errUpload' : 'غير قادر على رفع "$1".',
'errUploadNoFiles' : 'لم يتم رفع أي ملف ',
'errMaxSize' : 'حجم البيانات أكبر من الحجم المسموح به ',
'errFileMaxSize' : 'حجم الملف أكبر من الحجم المسموح به',
'errUploadMime' : 'نوع ملف غير مسموح ',
'errUploadTransfer' : '"$1" خطأ أثناء عملية النقل',
'errSave' : 'غير قادر على الحفظ في "$1".',
'errCopy' : 'غير قادر على النسخ إلى"$1".',
'errMove' : 'غير قادر على القص إلى "$1".',
'errCopyInItself' : 'غير قادر على نسخ الملف "$1" ضمن الملف نفسه.',
'errRm' : 'غير قادر على الحذف "$1".',
'errExtract' : 'غير قادر على استخراج الملفات من "$1".',
'errArchive' : 'غير قادر على إنشاء ملف مضغوط',
'errArcType' : 'نوع الملف المضغوط غير مدعومة',
'errNoArchive' : 'هذا الملف ليس ملف مضغوط أو ذو صسغة غير مدعومة ',
'errCmdNoSupport' : 'الخادم الخلفي لا يدعم هذا الأمر ',
'errReplByChild' : 'The folder “$1” can’t be replaced by an item it contains.',
'errArcSymlinks' : 'For security reason denied to unpack archives contains symlinks.', | /******************************* commands names ********************************/
'cmdarchive' : 'أنشئ مجلد مضغوط',
'cmdback' : 'الخلف',
'cmdcopy' : 'نسخ',
'cmdcut' : 'قص',
'cmddownload' : 'تحميل',
'cmdduplicate' : 'تكرار',
'cmdedit' : 'تعديل الملف',
'cmdextract' : 'استخراج الملفات',
'cmdforward' : 'الأمام',
'cmdgetfile' : 'أختيار الملفات',
'cmdhelp' : 'عن هذا المشروع',
'cmdhome' : 'المجلد الرئيسي',
'cmdinfo' : 'معلومات ',
'cmdmkdir' : 'مجلد جديد',
'cmdmkfile' : 'ملف نصي جديد',
'cmdopen' : 'فتح',
'cmdpaste' : 'لصق',
'cmdquicklook' : 'معاينة',
'cmdreload' : 'إعادة تحميل',
'cmdrename' : 'إعادة تسمية',
'cmdrm' : 'حذف',
'cmdsearch' : 'بحث عن ملفات',
'cmdup' : 'تغيير المسار إلى مستوى أعلى',
'cmdupload' : 'رفع ملفات',
'cmdview' : 'عرض',
/*********************************** buttons ***********************************/
'btnClose' : 'إغلاق',
'btnSave' : 'حفظ',
'btnRm' : 'إزالة',
'btnCancel' : 'إلغاء',
'btnNo' : 'لا',
'btnYes' : 'نعم',
/******************************** notifications ********************************/
'ntfopen' : 'فتح مجلد',
'ntffile' : 'فتح ملف',
'ntfreload' : 'إعادة عرض محتويات المجلد ',
'ntfmkdir' : 'ينشئ المجلدات',
'ntfmkfile' : 'ينشئ الملفات',
'ntfrm' : 'حذف الملفات',
'ntfcopy' : 'نسخ الملفات',
'ntfmove' : 'نقل الملفات',
'ntfprepare' : 'تحضير لنسخ الملفات',
'ntfrename' : 'إعادة تسمية الملفات',
'ntfupload' : 'رفع الملفات',
'ntfdownload' : 'تحميل الملفات',
'ntfsave' : 'حفظ الملفات',
'ntfarchive' : 'ينشئ ملف مضغوط',
'ntfextract' : 'استخراج ملفات من الملف المضغوط ',
'ntfsearch' : 'يبحث عن ملفات',
'ntfsmth' : 'يحضر لشيء ما >_<',
/************************************ dates **********************************/
'dateUnknown' : 'غير معلوم',
'Today' : 'اليوم',
'Yesterday' : 'البارحة',
'Jan' : 'كانون الثاني',
'Feb' : 'شباط',
'Mar' : 'آذار',
'Apr' : 'نيسان',
'May' : 'أيار',
'Jun' : 'حزيران',
'Jul' : 'تموز',
'Aug' : 'آب',
'Sep' : 'أيلول',
'Oct' : 'تشرين الأول',
'Nov' : 'تشرين الثاني',
'Dec' : 'كانون الأول ',
/********************************** messages **********************************/
'confirmReq' : 'يرجى التأكيد',
'confirmRm' : 'هل انت متأكد من انك تريد الحذف<br/>لا يمكن التراجع عن هذه العملية ',
'confirmRepl' : 'استبدال الملف القديم بملف جديد ؟',
'apllyAll' : 'تطبيق على الكل',
'name' : 'الأسم',
'size' : 'الحجم',
'perms' : 'الصلاحيات',
'modify' : 'أخر تعديل',
'kind' : 'نوع الملف',
'read' : 'قراءة',
'write' : 'كتابة',
'noaccess' : 'وصول ممنوع',
'and' : 'و',
'unknown' : 'غير معروف',
'selectall' : 'تحديد كل الملفات',
'selectfiles' : 'تحديد ملفات',
'selectffile' : 'تحديد الملف الاول',
'selectlfile' : 'تحديد الملف الأخير',
'viewlist' : 'اعرض ك قائمة',
'viewicons' : 'اعرض ك ايقونات',
'places' : 'المواقع',
'calc' : 'حساب',
'path' : 'مسار',
'aliasfor' : 'Alias for',
'locked' : 'مقفول',
'dim' : 'الابعاد',
'files' : 'ملفات',
'folders' : 'مجلدات',
'items' : 'عناصر',
'yes' : 'نعم',
'no' : 'لا',
'link' : 'اربتاط',
'searcresult' : 'نتائج البحث',
'selected' : 'العناصر المحددة',
'about' : 'عن البرنامج',
'shortcuts' : 'الاختصارات',
'help' : 'مساعدة',
'webfm' : 'مدير ملفات الويب',
'ver' : 'رقم الإصدار',
'protocol' : 'اصدار البرتوكول',
'homepage' : 'الصفحة الرئيسية',
'docs' : 'التعليمات',
'github' : 'شاركنا بتطوير المشروع على Github',
'twitter' : 'تابعنا على تويتر',
'facebook' : 'انضم إلينا على الفيس بوك',
'team' : 'الفريق',
'chiefdev' : 'رئيس المبرمجين',
'developer' : 'مبرمح',
'contributor' : 'مبرمح',
'maintainer' : 'مشارك',
'translator' : 'مترجم',
'icons' : 'أيقونات',
'dontforget' : 'and don\'t forget to take your towel',
'shortcutsof' : 'الاختصارات غير مفعلة',
'dropFiles' : 'لصق الملفات هنا',
'or' : 'أو',
'selectForUpload' : 'اختر الملفات التي تريد رفعها',
'moveFiles' : 'قص الملفات',
'copyFiles' : 'نسخ الملفات',
'rmFromPlaces' : 'Remove from places',
'untitled folder' : 'untitled folder',
'untitled file.txt' : 'untitled file.txt',
/********************************** mimetypes **********************************/
'kindUnknown' : 'غير معروف',
'kindFolder' : 'مجلد',
'kindAlias' : 'اختصار',
'kindAliasBroken' : 'اختصار غير صالح',
// applications
'kindApp' : 'برنامج',
'kindPostscript' : 'Postscript ملف',
'kindMsOffice' : 'Microsoft Office ملف',
'kindMsWord' : 'Microsoft Word ملف',
'kindMsExcel' : 'Microsoft Excel ملف',
'kindMsPP' : 'Microsoft Powerpoint عرض تقديمي ',
'kindOO' : 'Open Office ملف',
'kindAppFlash' : 'تطبيق فلاش',
'kindPDF' : 'ملف (PDF)',
'kindTorrent' : 'Bittorrent ملف',
'kind7z' : '7z ملف',
'kindTAR' : 'TAR ملف',
'kindGZIP' : 'GZIP ملف',
'kindBZIP' : 'BZIP ملف',
'kindZIP' : 'ZIP ملف',
'kindRAR' : 'RAR ملف',
'kindJAR' : 'Java JAR ملف',
'kindTTF' : 'True Type خط ',
'kindOTF' : 'Open Type خط ',
'kindRPM' : 'RPM ملف تنصيب',
// texts
'kindText' : 'Text ملف',
'kindTextPlain' : 'مستند نصي',
'kindPHP' : 'PHP ملف نصي برمجي لـ',
'kindCSS' : 'Cascading style sheet',
'kindHTML' : 'HTML ملف',
'kindJS' : 'Javascript ملف نصي برمجي لـ',
'kindRTF' : 'Rich Text Format',
'kindC' : 'C ملف نصي برمجي لـ',
'kindCHeader' : 'C header ملف نصي برمجي لـ',
'kindCPP' : 'C++ ملف نصي برمجي لـ',
'kindCPPHeader' : 'C++ header ملف نصي برمجي لـ',
'kindShell' : 'Unix shell script',
'kindPython' : 'Python ملف نصي برمجي لـ',
'kindJava' : 'Java ملف نصي برمجي لـ',
'kindRuby' : 'Ruby ملف نصي برمجي لـ',
'kindPerl' : 'Perl script',
'kindSQL' : 'SQL ملف نصي برمجي لـ',
'kindXML' : 'XML ملف',
'kindAWK' : 'AWK ملف نصي برمجي لـ',
'kindCSV' : 'ملف CSV',
'kindDOCBOOK' : 'Docbook XML ملف',
// images
'kindصورة' : 'صورة',
'kindBMP' : 'BMP صورة',
'kindJPEG' : 'JPEG صورة',
'kindGIF' : 'GIF صورة',
'kindPNG' : 'PNG صورة',
'kindTIFF' : 'TIFF صورة',
'kindTGA' : 'TGA صورة',
'kindPSD' : 'Adobe Photoshop صورة',
'kindXBITMAP' : 'X bitmap صورة',
'kindPXM' : 'Pixelmator صورة',
// media
'kindAudio' : 'ملف صوتي',
'kindAudioMPEG' : 'MPEG ملف صوتي',
'kindAudioMPEG4' : 'MPEG-4 ملف صوتي',
'kindAudioMIDI' : 'MIDI ملف صوتي',
'kindAudioOGG' : 'Ogg Vorbis ملف صوتي',
'kindAudioWAV' : 'WAV ملف صوتي',
'AudioPlaylist' : 'MP3 قائمة تشغيل',
'kindVideo' : 'ملف فيديو',
'kindVideoDV' : 'DV ملف فيديو',
'kindVideoMPEG' : 'MPEG ملف فيديو',
'kindVideoMPEG4' : 'MPEG-4 ملف فيديو',
'kindVideoAVI' : 'AVI ملف فيديو',
'kindVideoMOV' : 'Quick Time ملف فيديو',
'kindVideoWM' : 'Windows Media ملف فيديو',
'kindVideoFlash' : 'Flash ملف فيديو',
'kindVideoMKV' : 'Matroska ملف فيديو',
'kindVideoOGG' : 'Ogg ملف فيديو'
}
}
} | 'errArcMaxSize' : 'Archive files exceeds maximum allowed size.',
| random_line_split |
elfinder.ar.js | /**
* Arabic translation (Syrian Localization, it may differ if you aren't from Syria or any Country in Middle East)
* @author Tawfek Daghistani <tawfekov@gmail.com>
* @version 2011-07-09
*/
if (elFinder && elFinder.prototype && typeof(elFinder.prototype.i18) == 'object') | {
elFinder.prototype.i18.ar = {
translator : 'Tawfek Daghistani <tawfekov@gmail.com>',
language : 'العربية',
direction : 'rtl',
messages : {
/********************************** errors **********************************/
'error' : 'خطأ',
'errUnknown' : 'خطأ غير معروف .',
'errUnknownCmd' : 'أمر غير معروف .',
'errJqui' : 'إعدادات jQuery UI غير كاملة الرجاء التأكد من وجود كل من selectable, draggable and droppable',
'errNode' : '. موجود DOM إلى عنصر elFinder تحتاج ',
'errURL' : 'إعدادات خاطئة , عليك وضع الرابط ضمن الإعدادات',
'errAccess' : 'وصول مرفوض .',
'errConnect' : 'غير قادر على الاتصال بالخادم الخلفي (backend)',
'errAbort' : 'تم فصل الإتصال',
'errTimeout' : 'مهلة الإتصال قد إنتهت .',
'errNotFound' : 'الخادم الخلفي غير موجود .',
'errResponse' : 'رد غير مقبول من الخادم الخلفي',
'errConf' : 'خطأ في الإعدادات الخاصة بالخادم الخلفي ',
'errJSON' : 'الميزة PHP JSON module غير موجودة ',
'errNoVolumes' : 'لا يمكن القراءة من أي من الوسائط الموجودة ',
'errCmdParams' : 'البيانات المرسلة للأمر غير مقبولة "$1".',
'errDataNotJSON' : 'المعلومات المرسلة ليست من نوع JSON ',
'errDataEmpty' : 'لا يوجد معلومات مرسلة',
'errCmdReq' : 'الخادم الخلفي يطلب وجود اسم الأمر ',
'errOpen' : 'غير قادر على فتح "$1".',
'errNotFolder' : 'العنصر المختار ليس مجلد',
'errNotFile' : 'العنصر المختار ليس ملف',
'errRead' : 'غير قادر على القراءة "$1".',
'errWrite' : 'غير قادر على الكتابة "$1".',
'errPerm' : 'وصول مرفوض ',
'errLocked' : ' محمي و لا يمكن التعديل أو النقل أو إعادة التسمية"$1"',
'errExists' : ' موجود مسبقاً "$1"',
'errInvName' : 'الاسم مرفوض',
'errFolderNotFound' : 'المجلد غير موجود',
'errFileNotFound' : 'الملف غير موجود',
'errTrgFolderNotFound' : 'الملف الهدف "$1" غير موجود ',
'errPopup' : 'يمنعني المتصفح من إنشاء نافذة منبثقة , الرجاء تعديل الخيارات الخاصة من إعدادات المتصفح ',
'errMkdir' : ' غير قادر على إنشاء مجلد جديد "$1".',
'errMkfile' : ' غير قادر على إنشاء ملف جديد"$1".',
'errRename' : 'غير قادر على إعادة تسمية ال "$1".',
'errCopyFrom' : 'نسخ الملفات من الوسط المحدد "$1"غير مسموح.',
'errCopyTo' : 'نسخ الملفات إلى الوسط المحدد "$1" غير مسموح .',
'errUploadCommon' : 'خطأ أثناء عملية الرفع',
'errUpload' : 'غير قادر على رفع "$1".',
'errUploadNoFiles' : 'لم يتم رفع أي ملف ',
'errMaxSize' : 'حجم البيانات أكبر من الحجم المسموح به ',
'errFileMaxSize' : 'حجم الملف أكبر من الحجم المسموح به',
'errUploadMime' : 'نوع ملف غير مسموح ',
'errUploadTransfer' : '"$1" خطأ أثناء عملية النقل',
'errSave' : 'غير قادر على الحفظ في "$1".',
'errCopy' : 'غير قادر على النسخ إلى"$1".',
'errMove' : 'غير قادر على القص إلى "$1".',
'errCopyInItself' : 'غير قادر على نسخ الملف "$1" ضمن الملف نفسه.',
'errRm' : 'غير قادر على الحذف "$1".',
'errExtract' : 'غير قادر على استخراج الملفات من "$1".',
'errArchive' : 'غير قادر على إنشاء ملف مضغوط',
'errArcType' : 'نوع الملف المضغوط غير مدعومة',
'errNoArchive' : 'هذا الملف ليس ملف مضغوط أو ذو صسغة غير مدعومة ',
'errCmdNoSupport' : 'الخادم الخلفي لا يدعم هذا الأمر ',
'errReplByChild' : 'The folder “$1” can’t be replaced by an item it contains.',
'errArcSymlinks' : 'For security reason denied to unpack archives contains symlinks.',
'errArcMaxSize' : 'Archive files exceeds maximum allowed size.',
/******************************* commands names ********************************/
'cmdarchive' : 'أنشئ مجلد مضغوط',
'cmdback' : 'الخلف',
'cmdcopy' : 'نسخ',
'cmdcut' : 'قص',
'cmddownload' : 'تحميل',
'cmdduplicate' : 'تكرار',
'cmdedit' : 'تعديل الملف',
'cmdextract' : 'استخراج الملفات',
'cmdforward' : 'الأمام',
'cmdgetfile' : 'أختيار الملفات',
'cmdhelp' : 'عن هذا المشروع',
'cmdhome' : 'المجلد الرئيسي',
'cmdinfo' : 'معلومات ',
'cmdmkdir' : 'مجلد جديد',
'cmdmkfile' : 'ملف نصي جديد',
'cmdopen' : 'فتح',
'cmdpaste' : 'لصق',
'cmdquicklook' : 'معاينة',
'cmdreload' : 'إعادة تحميل',
'cmdrename' : 'إعادة تسمية',
'cmdrm' : 'حذف',
'cmdsearch' : 'بحث عن ملفات',
'cmdup' : 'تغيير المسار إلى مستوى أعلى',
'cmdupload' : 'رفع ملفات',
'cmdview' : 'عرض',
/*********************************** buttons ***********************************/
'btnClose' : 'إغلاق',
'btnSave' : 'حفظ',
'btnRm' : 'إزالة',
'btnCancel' : 'إلغاء',
'btnNo' : 'لا',
'btnYes' : 'نعم',
/******************************** notifications ********************************/
'ntfopen' : 'فتح مجلد',
'ntffile' : 'فتح ملف',
'ntfreload' : 'إعادة عرض محتويات المجلد ',
'ntfmkdir' : 'ينشئ المجلدات',
'ntfmkfile' : 'ينشئ الملفات',
'ntfrm' : 'حذف الملفات',
'ntfcopy' : 'نسخ الملفات',
'ntfmove' : 'نقل الملفات',
'ntfprepare' : 'تحضير لنسخ الملفات',
'ntfrename' : 'إعادة تسمية الملفات',
'ntfupload' : 'رفع الملفات',
'ntfdownload' : 'تحميل الملفات',
'ntfsave' : 'حفظ الملفات',
'ntfarchive' : 'ينشئ ملف مضغوط',
'ntfextract' : 'استخراج ملفات من الملف المضغوط ',
'ntfsearch' : 'يبحث عن ملفات',
'ntfsmth' : 'يحضر لشيء ما >_<',
/************************************ dates **********************************/
'dateUnknown' : 'غير معلوم',
'Today' : 'اليوم',
'Yesterday' : 'البارحة',
'Jan' : 'كانون الثاني',
'Feb' : 'شباط',
'Mar' : 'آذار',
'Apr' : 'نيسان',
'May' : 'أيار',
'Jun' : 'حزيران',
'Jul' : 'تموز',
'Aug' : 'آب',
'Sep' : 'أيلول',
'Oct' : 'تشرين الأول',
'Nov' : 'تشرين الثاني',
'Dec' : 'كانون الأول ',
/********************************** messages **********************************/
'confirmReq' : 'يرجى التأكيد',
'confirmRm' : 'هل انت متأكد من انك تريد الحذف<br/>لا يمكن التراجع عن هذه العملية ',
'confirmRepl' : 'استبدال الملف القديم بملف جديد ؟',
'apllyAll' : 'تطبيق على الكل',
'name' : 'الأسم',
'size' : 'الحجم',
'perms' : 'الصلاحيات',
'modify' : 'أخر تعديل',
'kind' : 'نوع الملف',
'read' : 'قراءة',
'write' : 'كتابة',
'noaccess' : 'وصول ممنوع',
'and' : 'و',
'unknown' : 'غير معروف',
'selectall' : 'تحديد كل الملفات',
'selectfiles' : 'تحديد ملفات',
'selectffile' : 'تحديد الملف الاول',
'selectlfile' : 'تحديد الملف الأخير',
'viewlist' : 'اعرض ك قائمة',
'viewicons' : 'اعرض ك ايقونات',
'places' : 'المواقع',
'calc' : 'حساب',
'path' : 'مسار',
'aliasfor' : 'Alias for',
'locked' : 'مقفول',
'dim' : 'الابعاد',
'files' : 'ملفات',
'folders' : 'مجلدات',
'items' : 'عناصر',
'yes' : 'نعم',
'no' : 'لا',
'link' : 'اربتاط',
'searcresult' : 'نتائج البحث',
'selected' : 'العناصر المحددة',
'about' : 'عن البرنامج',
'shortcuts' : 'الاختصارات',
'help' : 'مساعدة',
'webfm' : 'مدير ملفات الويب',
'ver' : 'رقم الإصدار',
'protocol' : 'اصدار البرتوكول',
'homepage' : 'الصفحة الرئيسية',
'docs' : 'التعليمات',
'github' : 'شاركنا بتطوير المشروع على Github',
'twitter' : 'تابعنا على تويتر',
'facebook' : 'انضم إلينا على الفيس بوك',
'team' : 'الفريق',
'chiefdev' : 'رئيس المبرمجين',
'developer' : 'مبرمح',
'contributor' : 'مبرمح',
'maintainer' : 'مشارك',
'translator' : 'مترجم',
'icons' : 'أيقونات',
'dontforget' : 'and don\'t forget to take your towel',
'shortcutsof' : 'الاختصارات غير مفعلة',
'dropFiles' : 'لصق الملفات هنا',
'or' : 'أو',
'selectForUpload' : 'اختر الملفات التي تريد رفعها',
'moveFiles' : 'قص الملفات',
'copyFiles' : 'نسخ الملفات',
'rmFromPlaces' : 'Remove from places',
'untitled folder' : 'untitled folder',
'untitled file.txt' : 'untitled file.txt',
/********************************** mimetypes **********************************/
'kindUnknown' : 'غير معروف',
'kindFolder' : 'مجلد',
'kindAlias' : 'اختصار',
'kindAliasBroken' : 'اختصار غير صالح',
// applications
'kindApp' : 'برنامج',
'kindPostscript' : 'Postscript ملف',
'kindMsOffice' : 'Microsoft Office ملف',
'kindMsWord' : 'Microsoft Word ملف',
'kindMsExcel' : 'Microsoft Excel ملف',
'kindMsPP' : 'Microsoft Powerpoint عرض تقديمي ',
'kindOO' : 'Open Office ملف',
'kindAppFlash' : 'تطبيق فلاش',
'kindPDF' : 'ملف (PDF)',
'kindTorrent' : 'Bittorrent ملف',
'kind7z' : '7z ملف',
'kindTAR' : 'TAR ملف',
'kindGZIP' : 'GZIP ملف',
'kindBZIP' : 'BZIP ملف',
'kindZIP' : 'ZIP ملف',
'kindRAR' : 'RAR ملف',
'kindJAR' : 'Java JAR ملف',
'kindTTF' : 'True Type خط ',
'kindOTF' : 'Open Type خط ',
'kindRPM' : 'RPM ملف تنصيب',
// texts
'kindText' : 'Text ملف',
'kindTextPlain' : 'مستند نصي',
'kindPHP' : 'PHP ملف نصي برمجي لـ',
'kindCSS' : 'Cascading style sheet',
'kindHTML' : 'HTML ملف',
'kindJS' : 'Javascript ملف نصي برمجي لـ',
'kindRTF' : 'Rich Text Format',
'kindC' : 'C ملف نصي برمجي لـ',
'kindCHeader' : 'C header ملف نصي برمجي لـ',
'kindCPP' : 'C++ ملف نصي برمجي لـ',
'kindCPPHeader' : 'C++ header ملف نصي برمجي لـ',
'kindShell' : 'Unix shell script',
'kindPython' : 'Python ملف نصي برمجي لـ',
'kindJava' : 'Java ملف نصي برمجي لـ',
'kindRuby' : 'Ruby ملف نصي برمجي لـ',
'kindPerl' : 'Perl script',
'kindSQL' : 'SQL ملف نصي برمجي لـ',
'kindXML' : 'XML ملف',
'kindAWK' : 'AWK ملف نصي برمجي لـ',
'kindCSV' : 'ملف CSV',
'kindDOCBOOK' : 'Docbook XML ملف',
// images
'kindصورة' : 'صورة',
'kindBMP' : 'BMP صورة',
'kindJPEG' : 'JPEG صورة',
'kindGIF' : 'GIF صورة',
'kindPNG' : 'PNG صورة',
'kindTIFF' : 'TIFF صورة',
'kindTGA' : 'TGA صورة',
'kindPSD' : 'Adobe Photoshop صورة',
'kindXBITMAP' : 'X bitmap صورة',
'kindPXM' : 'Pixelmator صورة',
// media
'kindAudio' : 'ملف صوتي',
'kindAudioMPEG' : 'MPEG ملف صوتي',
'kindAudioMPEG4' : 'MPEG-4 ملف صوتي',
'kindAudioMIDI' : 'MIDI ملف صوتي',
'kindAudioOGG' : 'Ogg Vorbis ملف صوتي',
'kindAudioWAV' : 'WAV ملف صوتي',
'AudioPlaylist' : 'MP3 قائمة تشغيل',
'kindVideo' : 'ملف فيديو',
'kindVideoDV' : 'DV ملف فيديو',
'kindVideoMPEG' : 'MPEG ملف فيديو',
'kindVideoMPEG4' : 'MPEG-4 ملف فيديو',
'kindVideoAVI' : 'AVI ملف فيديو',
'kindVideoMOV' : 'Quick Time ملف فيديو',
'kindVideoWM' : 'Windows Media ملف فيديو',
'kindVideoFlash' : 'Flash ملف فيديو',
'kindVideoMKV' : 'Matroska ملف فيديو',
'kindVideoOGG' : 'Ogg ملف فيديو'
}
}
}
| conditional_block | |
_project_cfg_importer.py | """
Project Configuration Importer
Handles the importing the project configuration from a separate location
and validates the version against the specified expected version.
NOTE: If you update this file or any others in scripts and require a
NEW variable in project_cfg, then you need to UPDATE THE EXPECTED_CFG_VERSION
That way, if someone tries to use the new scripts with an old cfg, they'll
get a warning.
"""
import importlib
import os
import sys
PROJECT_CFG_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../cfg/")
PROJECT_CFG_NAME = "project_cfg"
EXPECTED_CFG_VERSION = 1.1
def get_project_cfg():
"""
Returns the project configuration module
"""
sys.path.append(PROJECT_CFG_DIR)
try:
project_cfg_module = importlib.import_module(PROJECT_CFG_NAME)
except:
raise FileNotFoundError("\n\n================================= ERROR ========================================"
"\nUnable to import project configuration: " + PROJECT_CFG_DIR + "/" + PROJECT_CFG_NAME + ".py"
"\n================================================================================\n")
_verify_correct_version(project_cfg_module)
return project_cfg_module
def | (project_cfg_module):
is_correct_version = False
if project_cfg_module.__CFG_VERSION__ == EXPECTED_CFG_VERSION:
is_correct_version = True
else:
raise Exception("\n\n================================= ERROR ========================================"
"\nIncorrect project configuration version: " + str(project_cfg_module.__CFG_VERSION__) +
"\n Development environment expected: " + str(EXPECTED_CFG_VERSION) +
"\n================================================================================\n")
return is_correct_version
| _verify_correct_version | identifier_name |
_project_cfg_importer.py | """
Project Configuration Importer
Handles the importing the project configuration from a separate location
and validates the version against the specified expected version.
NOTE: If you update this file or any others in scripts and require a
NEW variable in project_cfg, then you need to UPDATE THE EXPECTED_CFG_VERSION
That way, if someone tries to use the new scripts with an old cfg, they'll
get a warning.
"""
import importlib
import os
import sys
PROJECT_CFG_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../cfg/")
PROJECT_CFG_NAME = "project_cfg"
EXPECTED_CFG_VERSION = 1.1
def get_project_cfg():
"""
Returns the project configuration module
"""
sys.path.append(PROJECT_CFG_DIR)
try:
project_cfg_module = importlib.import_module(PROJECT_CFG_NAME)
except:
raise FileNotFoundError("\n\n================================= ERROR ========================================"
"\nUnable to import project configuration: " + PROJECT_CFG_DIR + "/" + PROJECT_CFG_NAME + ".py"
"\n================================================================================\n")
_verify_correct_version(project_cfg_module)
return project_cfg_module
def _verify_correct_version(project_cfg_module):
| is_correct_version = False
if project_cfg_module.__CFG_VERSION__ == EXPECTED_CFG_VERSION:
is_correct_version = True
else:
raise Exception("\n\n================================= ERROR ========================================"
"\nIncorrect project configuration version: " + str(project_cfg_module.__CFG_VERSION__) +
"\n Development environment expected: " + str(EXPECTED_CFG_VERSION) +
"\n================================================================================\n")
return is_correct_version | identifier_body | |
_project_cfg_importer.py | """
Project Configuration Importer
Handles the importing the project configuration from a separate location
and validates the version against the specified expected version.
NOTE: If you update this file or any others in scripts and require a
NEW variable in project_cfg, then you need to UPDATE THE EXPECTED_CFG_VERSION
That way, if someone tries to use the new scripts with an old cfg, they'll
get a warning.
"""
import importlib
import os
import sys
PROJECT_CFG_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../cfg/")
PROJECT_CFG_NAME = "project_cfg"
EXPECTED_CFG_VERSION = 1.1
| """
sys.path.append(PROJECT_CFG_DIR)
try:
project_cfg_module = importlib.import_module(PROJECT_CFG_NAME)
except:
raise FileNotFoundError("\n\n================================= ERROR ========================================"
"\nUnable to import project configuration: " + PROJECT_CFG_DIR + "/" + PROJECT_CFG_NAME + ".py"
"\n================================================================================\n")
_verify_correct_version(project_cfg_module)
return project_cfg_module
def _verify_correct_version(project_cfg_module):
is_correct_version = False
if project_cfg_module.__CFG_VERSION__ == EXPECTED_CFG_VERSION:
is_correct_version = True
else:
raise Exception("\n\n================================= ERROR ========================================"
"\nIncorrect project configuration version: " + str(project_cfg_module.__CFG_VERSION__) +
"\n Development environment expected: " + str(EXPECTED_CFG_VERSION) +
"\n================================================================================\n")
return is_correct_version | def get_project_cfg():
"""
Returns the project configuration module | random_line_split |
_project_cfg_importer.py | """
Project Configuration Importer
Handles the importing the project configuration from a separate location
and validates the version against the specified expected version.
NOTE: If you update this file or any others in scripts and require a
NEW variable in project_cfg, then you need to UPDATE THE EXPECTED_CFG_VERSION
That way, if someone tries to use the new scripts with an old cfg, they'll
get a warning.
"""
import importlib
import os
import sys
PROJECT_CFG_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../cfg/")
PROJECT_CFG_NAME = "project_cfg"
EXPECTED_CFG_VERSION = 1.1
def get_project_cfg():
"""
Returns the project configuration module
"""
sys.path.append(PROJECT_CFG_DIR)
try:
project_cfg_module = importlib.import_module(PROJECT_CFG_NAME)
except:
raise FileNotFoundError("\n\n================================= ERROR ========================================"
"\nUnable to import project configuration: " + PROJECT_CFG_DIR + "/" + PROJECT_CFG_NAME + ".py"
"\n================================================================================\n")
_verify_correct_version(project_cfg_module)
return project_cfg_module
def _verify_correct_version(project_cfg_module):
is_correct_version = False
if project_cfg_module.__CFG_VERSION__ == EXPECTED_CFG_VERSION:
is_correct_version = True
else:
|
return is_correct_version
| raise Exception("\n\n================================= ERROR ========================================"
"\nIncorrect project configuration version: " + str(project_cfg_module.__CFG_VERSION__) +
"\n Development environment expected: " + str(EXPECTED_CFG_VERSION) +
"\n================================================================================\n") | conditional_block |
ogbg_molpcba.py | # coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the ogbg_molpcba graph dataset from TFDS.
See https://www.tensorflow.org/datasets/catalog/ogbg_molpcba and
https://ogb.stanford.edu/docs/graphprop/ for more details.
"""
import itertools
from init2winit.dataset_lib import data_utils
from init2winit.dataset_lib.data_utils import Dataset
import jax
import jraph
from ml_collections.config_dict import config_dict
import numpy as np
import tensorflow_datasets as tfds
AVG_NODES_PER_GRAPH = 26
AVG_EDGES_PER_GRAPH = 56
DEFAULT_HPARAMS = config_dict.ConfigDict(
dict(
output_shape=(128,),
input_edge_shape=(3,),
input_node_shape=(9,),
train_size=350343,
valid_size=43793,
test_size=43793,
# Max edges/nodes per batch will batch_size times the multiplier.
# We set them to the average size of the graph in the dataset,
# so that each batch contains batch_size graphs on average.
max_edges_multiplier=AVG_EDGES_PER_GRAPH,
max_nodes_multiplier=AVG_NODES_PER_GRAPH,
))
METADATA = {
'apply_one_hot_in_loss': False,
}
def _load_dataset(split,
should_shuffle=False,
shuffle_seed=None,
shuffle_buffer_size=None):
"""Loads a dataset split from TFDS."""
if should_shuffle:
assert shuffle_seed is not None and shuffle_buffer_size is not None
file_shuffle_seed, dataset_shuffle_seed = jax.random.split(shuffle_seed)
file_shuffle_seed = file_shuffle_seed[0]
dataset_shuffle_seed = dataset_shuffle_seed[0]
else:
file_shuffle_seed = None
dataset_shuffle_seed = None
read_config = tfds.ReadConfig(
add_tfds_id=True, shuffle_seed=file_shuffle_seed)
dataset = tfds.load(
'ogbg_molpcba',
split=split,
shuffle_files=should_shuffle,
read_config=read_config)
if should_shuffle:
dataset = dataset.shuffle(
seed=dataset_shuffle_seed, buffer_size=shuffle_buffer_size)
dataset = dataset.repeat()
return dataset
def _to_jraph(example):
"""Converts an example graph to jraph.GraphsTuple."""
example = data_utils.tf_to_numpy(example)
edge_feat = example['edge_feat']
node_feat = example['node_feat']
edge_index = example['edge_index']
labels = example['labels']
num_nodes = example['num_nodes']
senders = edge_index[:, 0]
receivers = edge_index[:, 1]
return jraph.GraphsTuple(
n_node=num_nodes,
n_edge=np.array([len(edge_index) * 2]),
nodes=node_feat,
edges=np.concatenate([edge_feat, edge_feat]),
# Make the edges bidirectional
senders=np.concatenate([senders, receivers]),
receivers=np.concatenate([receivers, senders]),
# Keep the labels with the graph for batching. They will be removed
# in the processed batch.
globals=np.expand_dims(labels, axis=0))
def _get_weights_by_nan_and_padding(labels, padding_mask):
"""Handles NaNs and padding in labels.
Sets all the weights from examples coming from padding to 0. Changes all NaNs
in labels to 0s and sets the corresponding per-label weight to 0.
Args:
labels: Labels including labels from padded examples
padding_mask: Binary array of which examples are padding
Returns:
tuple of (processed labels, corresponding weights)
"""
nan_mask = np.isnan(labels)
replaced_labels = np.copy(labels)
np.place(replaced_labels, nan_mask, 0)
weights = 1.0 - nan_mask
# Weights for all labels of a padded element will be 0
weights = weights * padding_mask[:, None]
return replaced_labels, weights
def _get_batch_iterator(dataset_iter,
batch_size,
nodes_per_graph,
edges_per_graph,
num_shards=None):
"""Turns a TFDS per-example iterator into a batched iterator in the init2winit format.
Constructs the batch from num_shards smaller batches, so that we can easily
shard the batch to multiple devices during training. We use
dynamic batching, so we specify some max number of graphs/nodes/edges, add
as many graphs as we can, and then pad to the max values.
Args:
dataset_iter: The TFDS dataset iterator.
batch_size: How many average-sized graphs go into the batch.
nodes_per_graph: How many nodes per graph there are on average. Max number
of nodes in the batch will be nodes_per_graph * batch_size.
edges_per_graph: How many edges per graph there are on average. Max number
of edges in the batch will be edges_per_graph * batch_size.
num_shards: How many devices we should be able to shard the batch into.
Yields:
Batch in the init2winit format. Each field is a list of num_shards separate
smaller batches.
"""
if not num_shards:
num_shards = jax.device_count()
# We will construct num_shards smaller batches and then put them together.
batch_size /= num_shards
max_n_nodes = nodes_per_graph * batch_size
max_n_edges = edges_per_graph * batch_size
max_n_graphs = batch_size
jraph_iter = map(_to_jraph, dataset_iter)
batched_iter = jraph.dynamically_batch(jraph_iter, max_n_nodes + 1,
max_n_edges, max_n_graphs + 1)
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
for batched_graph in batched_iter:
|
def get_ogbg_molpcba(shuffle_rng, batch_size, eval_batch_size, hps=None):
"""Data generators for ogbg-molpcba."""
shuffle_buffer_size = 2**15
shuffle_rng_train, shuffle_rng_eval_train = jax.random.split(shuffle_rng)
train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_train,
shuffle_buffer_size=shuffle_buffer_size)
eval_train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_eval_train,
shuffle_buffer_size=shuffle_buffer_size)
valid_ds = _load_dataset('validation')
test_ds = _load_dataset('test')
def train_iterator_fn():
return _get_batch_iterator(
iter(train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier)
def eval_train_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(eval_train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def valid_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(valid_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def test_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(test_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
return Dataset(train_iterator_fn, eval_train_epoch, valid_epoch, test_epoch)
| count += 1
# Separate the labels from the graph
labels = batched_graph.globals
graph = batched_graph._replace(globals={})
replaced_labels, weights = _get_weights_by_nan_and_padding(
labels, jraph.get_graph_padding_mask(graph))
graphs_shards.append(graph)
labels_shards.append(replaced_labels)
weights_shards.append(weights)
if count == num_shards:
yield {
'inputs': graphs_shards,
'targets': labels_shards,
'weights': weights_shards
}
count = 0
graphs_shards = []
labels_shards = []
weights_shards = [] | conditional_block |
ogbg_molpcba.py | # coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the ogbg_molpcba graph dataset from TFDS.
See https://www.tensorflow.org/datasets/catalog/ogbg_molpcba and
https://ogb.stanford.edu/docs/graphprop/ for more details.
"""
import itertools
from init2winit.dataset_lib import data_utils
from init2winit.dataset_lib.data_utils import Dataset
import jax
import jraph
from ml_collections.config_dict import config_dict
import numpy as np
import tensorflow_datasets as tfds
AVG_NODES_PER_GRAPH = 26
AVG_EDGES_PER_GRAPH = 56
DEFAULT_HPARAMS = config_dict.ConfigDict(
dict(
output_shape=(128,),
input_edge_shape=(3,),
input_node_shape=(9,),
train_size=350343,
valid_size=43793,
test_size=43793,
# Max edges/nodes per batch will batch_size times the multiplier.
# We set them to the average size of the graph in the dataset,
# so that each batch contains batch_size graphs on average.
max_edges_multiplier=AVG_EDGES_PER_GRAPH,
max_nodes_multiplier=AVG_NODES_PER_GRAPH,
))
METADATA = {
'apply_one_hot_in_loss': False,
}
def _load_dataset(split,
should_shuffle=False,
shuffle_seed=None,
shuffle_buffer_size=None):
"""Loads a dataset split from TFDS."""
if should_shuffle:
assert shuffle_seed is not None and shuffle_buffer_size is not None
file_shuffle_seed, dataset_shuffle_seed = jax.random.split(shuffle_seed)
file_shuffle_seed = file_shuffle_seed[0]
dataset_shuffle_seed = dataset_shuffle_seed[0]
else:
file_shuffle_seed = None
dataset_shuffle_seed = None
read_config = tfds.ReadConfig(
add_tfds_id=True, shuffle_seed=file_shuffle_seed)
dataset = tfds.load(
'ogbg_molpcba',
split=split,
shuffle_files=should_shuffle,
read_config=read_config)
if should_shuffle:
dataset = dataset.shuffle(
seed=dataset_shuffle_seed, buffer_size=shuffle_buffer_size)
dataset = dataset.repeat()
return dataset
def _to_jraph(example):
"""Converts an example graph to jraph.GraphsTuple."""
example = data_utils.tf_to_numpy(example)
edge_feat = example['edge_feat']
node_feat = example['node_feat']
edge_index = example['edge_index']
labels = example['labels']
num_nodes = example['num_nodes']
senders = edge_index[:, 0]
receivers = edge_index[:, 1]
return jraph.GraphsTuple(
n_node=num_nodes,
n_edge=np.array([len(edge_index) * 2]),
nodes=node_feat,
edges=np.concatenate([edge_feat, edge_feat]),
# Make the edges bidirectional
senders=np.concatenate([senders, receivers]),
receivers=np.concatenate([receivers, senders]),
# Keep the labels with the graph for batching. They will be removed
# in the processed batch.
globals=np.expand_dims(labels, axis=0))
def _get_weights_by_nan_and_padding(labels, padding_mask):
"""Handles NaNs and padding in labels.
Sets all the weights from examples coming from padding to 0. Changes all NaNs
in labels to 0s and sets the corresponding per-label weight to 0.
Args:
labels: Labels including labels from padded examples
padding_mask: Binary array of which examples are padding
Returns:
tuple of (processed labels, corresponding weights)
"""
nan_mask = np.isnan(labels)
replaced_labels = np.copy(labels)
np.place(replaced_labels, nan_mask, 0)
weights = 1.0 - nan_mask
# Weights for all labels of a padded element will be 0
weights = weights * padding_mask[:, None]
return replaced_labels, weights
def _get_batch_iterator(dataset_iter,
batch_size,
nodes_per_graph,
edges_per_graph,
num_shards=None):
"""Turns a TFDS per-example iterator into a batched iterator in the init2winit format.
Constructs the batch from num_shards smaller batches, so that we can easily
shard the batch to multiple devices during training. We use
dynamic batching, so we specify some max number of graphs/nodes/edges, add
as many graphs as we can, and then pad to the max values.
Args:
dataset_iter: The TFDS dataset iterator.
batch_size: How many average-sized graphs go into the batch.
nodes_per_graph: How many nodes per graph there are on average. Max number
of nodes in the batch will be nodes_per_graph * batch_size.
edges_per_graph: How many edges per graph there are on average. Max number
of edges in the batch will be edges_per_graph * batch_size.
num_shards: How many devices we should be able to shard the batch into.
Yields:
Batch in the init2winit format. Each field is a list of num_shards separate
smaller batches.
"""
if not num_shards:
num_shards = jax.device_count()
# We will construct num_shards smaller batches and then put them together.
batch_size /= num_shards
max_n_nodes = nodes_per_graph * batch_size
max_n_edges = edges_per_graph * batch_size
max_n_graphs = batch_size
jraph_iter = map(_to_jraph, dataset_iter)
batched_iter = jraph.dynamically_batch(jraph_iter, max_n_nodes + 1,
max_n_edges, max_n_graphs + 1)
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
for batched_graph in batched_iter:
count += 1
# Separate the labels from the graph
labels = batched_graph.globals
graph = batched_graph._replace(globals={})
replaced_labels, weights = _get_weights_by_nan_and_padding(
labels, jraph.get_graph_padding_mask(graph))
graphs_shards.append(graph)
labels_shards.append(replaced_labels)
weights_shards.append(weights)
if count == num_shards:
yield {
'inputs': graphs_shards,
'targets': labels_shards,
'weights': weights_shards
}
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
def get_ogbg_molpcba(shuffle_rng, batch_size, eval_batch_size, hps=None):
"""Data generators for ogbg-molpcba."""
shuffle_buffer_size = 2**15
shuffle_rng_train, shuffle_rng_eval_train = jax.random.split(shuffle_rng)
train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_train,
shuffle_buffer_size=shuffle_buffer_size)
eval_train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_eval_train,
shuffle_buffer_size=shuffle_buffer_size)
valid_ds = _load_dataset('validation')
test_ds = _load_dataset('test')
def train_iterator_fn():
return _get_batch_iterator(
iter(train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier)
def eval_train_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(eval_train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def valid_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(valid_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def test_epoch(num_batches=None):
|
return Dataset(train_iterator_fn, eval_train_epoch, valid_epoch, test_epoch)
| return itertools.islice(
_get_batch_iterator(
iter(test_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches) | identifier_body |
ogbg_molpcba.py | # coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the ogbg_molpcba graph dataset from TFDS.
See https://www.tensorflow.org/datasets/catalog/ogbg_molpcba and
https://ogb.stanford.edu/docs/graphprop/ for more details.
"""
import itertools
from init2winit.dataset_lib import data_utils
from init2winit.dataset_lib.data_utils import Dataset
import jax
import jraph
from ml_collections.config_dict import config_dict
import numpy as np
import tensorflow_datasets as tfds
AVG_NODES_PER_GRAPH = 26
AVG_EDGES_PER_GRAPH = 56
DEFAULT_HPARAMS = config_dict.ConfigDict(
dict(
output_shape=(128,),
input_edge_shape=(3,),
input_node_shape=(9,),
train_size=350343,
valid_size=43793,
test_size=43793,
# Max edges/nodes per batch will batch_size times the multiplier.
# We set them to the average size of the graph in the dataset,
# so that each batch contains batch_size graphs on average.
max_edges_multiplier=AVG_EDGES_PER_GRAPH,
max_nodes_multiplier=AVG_NODES_PER_GRAPH,
))
METADATA = {
'apply_one_hot_in_loss': False,
}
def _load_dataset(split,
should_shuffle=False,
shuffle_seed=None,
shuffle_buffer_size=None):
"""Loads a dataset split from TFDS."""
if should_shuffle:
assert shuffle_seed is not None and shuffle_buffer_size is not None
file_shuffle_seed, dataset_shuffle_seed = jax.random.split(shuffle_seed)
file_shuffle_seed = file_shuffle_seed[0]
dataset_shuffle_seed = dataset_shuffle_seed[0]
else:
file_shuffle_seed = None
dataset_shuffle_seed = None
read_config = tfds.ReadConfig(
add_tfds_id=True, shuffle_seed=file_shuffle_seed)
dataset = tfds.load(
'ogbg_molpcba',
split=split,
shuffle_files=should_shuffle,
read_config=read_config)
if should_shuffle:
dataset = dataset.shuffle(
seed=dataset_shuffle_seed, buffer_size=shuffle_buffer_size)
dataset = dataset.repeat()
return dataset
def _to_jraph(example):
"""Converts an example graph to jraph.GraphsTuple."""
example = data_utils.tf_to_numpy(example)
edge_feat = example['edge_feat']
node_feat = example['node_feat']
edge_index = example['edge_index']
labels = example['labels']
num_nodes = example['num_nodes']
senders = edge_index[:, 0]
receivers = edge_index[:, 1]
return jraph.GraphsTuple(
n_node=num_nodes,
n_edge=np.array([len(edge_index) * 2]),
nodes=node_feat,
edges=np.concatenate([edge_feat, edge_feat]),
# Make the edges bidirectional
senders=np.concatenate([senders, receivers]),
receivers=np.concatenate([receivers, senders]),
# Keep the labels with the graph for batching. They will be removed
# in the processed batch.
globals=np.expand_dims(labels, axis=0))
def _get_weights_by_nan_and_padding(labels, padding_mask):
"""Handles NaNs and padding in labels.
Sets all the weights from examples coming from padding to 0. Changes all NaNs
in labels to 0s and sets the corresponding per-label weight to 0.
Args:
labels: Labels including labels from padded examples
padding_mask: Binary array of which examples are padding
Returns:
tuple of (processed labels, corresponding weights)
"""
nan_mask = np.isnan(labels)
replaced_labels = np.copy(labels)
np.place(replaced_labels, nan_mask, 0)
weights = 1.0 - nan_mask
# Weights for all labels of a padded element will be 0
weights = weights * padding_mask[:, None]
return replaced_labels, weights
def _get_batch_iterator(dataset_iter,
batch_size,
nodes_per_graph,
edges_per_graph,
num_shards=None):
"""Turns a TFDS per-example iterator into a batched iterator in the init2winit format.
Constructs the batch from num_shards smaller batches, so that we can easily
shard the batch to multiple devices during training. We use
dynamic batching, so we specify some max number of graphs/nodes/edges, add
as many graphs as we can, and then pad to the max values.
Args:
dataset_iter: The TFDS dataset iterator.
batch_size: How many average-sized graphs go into the batch.
nodes_per_graph: How many nodes per graph there are on average. Max number
of nodes in the batch will be nodes_per_graph * batch_size.
edges_per_graph: How many edges per graph there are on average. Max number
of edges in the batch will be edges_per_graph * batch_size.
num_shards: How many devices we should be able to shard the batch into.
Yields:
Batch in the init2winit format. Each field is a list of num_shards separate
smaller batches.
"""
if not num_shards:
num_shards = jax.device_count()
# We will construct num_shards smaller batches and then put them together.
batch_size /= num_shards
max_n_nodes = nodes_per_graph * batch_size
max_n_edges = edges_per_graph * batch_size
max_n_graphs = batch_size
jraph_iter = map(_to_jraph, dataset_iter)
batched_iter = jraph.dynamically_batch(jraph_iter, max_n_nodes + 1,
max_n_edges, max_n_graphs + 1)
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
for batched_graph in batched_iter:
count += 1
# Separate the labels from the graph
labels = batched_graph.globals
graph = batched_graph._replace(globals={})
replaced_labels, weights = _get_weights_by_nan_and_padding(
labels, jraph.get_graph_padding_mask(graph))
graphs_shards.append(graph)
labels_shards.append(replaced_labels)
weights_shards.append(weights)
if count == num_shards:
yield {
'inputs': graphs_shards,
'targets': labels_shards,
'weights': weights_shards
}
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
def | (shuffle_rng, batch_size, eval_batch_size, hps=None):
"""Data generators for ogbg-molpcba."""
shuffle_buffer_size = 2**15
shuffle_rng_train, shuffle_rng_eval_train = jax.random.split(shuffle_rng)
train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_train,
shuffle_buffer_size=shuffle_buffer_size)
eval_train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_eval_train,
shuffle_buffer_size=shuffle_buffer_size)
valid_ds = _load_dataset('validation')
test_ds = _load_dataset('test')
def train_iterator_fn():
return _get_batch_iterator(
iter(train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier)
def eval_train_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(eval_train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def valid_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(valid_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def test_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(test_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
return Dataset(train_iterator_fn, eval_train_epoch, valid_epoch, test_epoch)
| get_ogbg_molpcba | identifier_name |
ogbg_molpcba.py | # coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the ogbg_molpcba graph dataset from TFDS.
See https://www.tensorflow.org/datasets/catalog/ogbg_molpcba and
https://ogb.stanford.edu/docs/graphprop/ for more details.
"""
import itertools
from init2winit.dataset_lib import data_utils
from init2winit.dataset_lib.data_utils import Dataset
import jax
import jraph
from ml_collections.config_dict import config_dict
import numpy as np
import tensorflow_datasets as tfds
AVG_NODES_PER_GRAPH = 26
AVG_EDGES_PER_GRAPH = 56
DEFAULT_HPARAMS = config_dict.ConfigDict(
dict(
output_shape=(128,),
input_edge_shape=(3,),
input_node_shape=(9,),
train_size=350343,
valid_size=43793,
test_size=43793,
# Max edges/nodes per batch will batch_size times the multiplier.
# We set them to the average size of the graph in the dataset,
# so that each batch contains batch_size graphs on average.
max_edges_multiplier=AVG_EDGES_PER_GRAPH,
max_nodes_multiplier=AVG_NODES_PER_GRAPH,
))
METADATA = {
'apply_one_hot_in_loss': False,
}
def _load_dataset(split,
should_shuffle=False,
shuffle_seed=None,
shuffle_buffer_size=None):
"""Loads a dataset split from TFDS."""
if should_shuffle:
assert shuffle_seed is not None and shuffle_buffer_size is not None
file_shuffle_seed, dataset_shuffle_seed = jax.random.split(shuffle_seed)
file_shuffle_seed = file_shuffle_seed[0]
dataset_shuffle_seed = dataset_shuffle_seed[0]
else:
file_shuffle_seed = None
dataset_shuffle_seed = None
read_config = tfds.ReadConfig(
add_tfds_id=True, shuffle_seed=file_shuffle_seed)
dataset = tfds.load(
'ogbg_molpcba',
split=split,
shuffle_files=should_shuffle,
read_config=read_config)
if should_shuffle:
dataset = dataset.shuffle(
seed=dataset_shuffle_seed, buffer_size=shuffle_buffer_size)
dataset = dataset.repeat()
return dataset
def _to_jraph(example):
"""Converts an example graph to jraph.GraphsTuple."""
example = data_utils.tf_to_numpy(example)
edge_feat = example['edge_feat']
node_feat = example['node_feat']
edge_index = example['edge_index']
labels = example['labels']
num_nodes = example['num_nodes'] | n_node=num_nodes,
n_edge=np.array([len(edge_index) * 2]),
nodes=node_feat,
edges=np.concatenate([edge_feat, edge_feat]),
# Make the edges bidirectional
senders=np.concatenate([senders, receivers]),
receivers=np.concatenate([receivers, senders]),
# Keep the labels with the graph for batching. They will be removed
# in the processed batch.
globals=np.expand_dims(labels, axis=0))
def _get_weights_by_nan_and_padding(labels, padding_mask):
"""Handles NaNs and padding in labels.
Sets all the weights from examples coming from padding to 0. Changes all NaNs
in labels to 0s and sets the corresponding per-label weight to 0.
Args:
labels: Labels including labels from padded examples
padding_mask: Binary array of which examples are padding
Returns:
tuple of (processed labels, corresponding weights)
"""
nan_mask = np.isnan(labels)
replaced_labels = np.copy(labels)
np.place(replaced_labels, nan_mask, 0)
weights = 1.0 - nan_mask
# Weights for all labels of a padded element will be 0
weights = weights * padding_mask[:, None]
return replaced_labels, weights
def _get_batch_iterator(dataset_iter,
batch_size,
nodes_per_graph,
edges_per_graph,
num_shards=None):
"""Turns a TFDS per-example iterator into a batched iterator in the init2winit format.
Constructs the batch from num_shards smaller batches, so that we can easily
shard the batch to multiple devices during training. We use
dynamic batching, so we specify some max number of graphs/nodes/edges, add
as many graphs as we can, and then pad to the max values.
Args:
dataset_iter: The TFDS dataset iterator.
batch_size: How many average-sized graphs go into the batch.
nodes_per_graph: How many nodes per graph there are on average. Max number
of nodes in the batch will be nodes_per_graph * batch_size.
edges_per_graph: How many edges per graph there are on average. Max number
of edges in the batch will be edges_per_graph * batch_size.
num_shards: How many devices we should be able to shard the batch into.
Yields:
Batch in the init2winit format. Each field is a list of num_shards separate
smaller batches.
"""
if not num_shards:
num_shards = jax.device_count()
# We will construct num_shards smaller batches and then put them together.
batch_size /= num_shards
max_n_nodes = nodes_per_graph * batch_size
max_n_edges = edges_per_graph * batch_size
max_n_graphs = batch_size
jraph_iter = map(_to_jraph, dataset_iter)
batched_iter = jraph.dynamically_batch(jraph_iter, max_n_nodes + 1,
max_n_edges, max_n_graphs + 1)
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
for batched_graph in batched_iter:
count += 1
# Separate the labels from the graph
labels = batched_graph.globals
graph = batched_graph._replace(globals={})
replaced_labels, weights = _get_weights_by_nan_and_padding(
labels, jraph.get_graph_padding_mask(graph))
graphs_shards.append(graph)
labels_shards.append(replaced_labels)
weights_shards.append(weights)
if count == num_shards:
yield {
'inputs': graphs_shards,
'targets': labels_shards,
'weights': weights_shards
}
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
def get_ogbg_molpcba(shuffle_rng, batch_size, eval_batch_size, hps=None):
"""Data generators for ogbg-molpcba."""
shuffle_buffer_size = 2**15
shuffle_rng_train, shuffle_rng_eval_train = jax.random.split(shuffle_rng)
train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_train,
shuffle_buffer_size=shuffle_buffer_size)
eval_train_ds = _load_dataset(
'train',
should_shuffle=True,
shuffle_seed=shuffle_rng_eval_train,
shuffle_buffer_size=shuffle_buffer_size)
valid_ds = _load_dataset('validation')
test_ds = _load_dataset('test')
def train_iterator_fn():
return _get_batch_iterator(
iter(train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier)
def eval_train_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(eval_train_ds), batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def valid_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(valid_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
def test_epoch(num_batches=None):
return itertools.islice(
_get_batch_iterator(
iter(test_ds), eval_batch_size, hps.max_nodes_multiplier,
hps.max_edges_multiplier), num_batches)
return Dataset(train_iterator_fn, eval_train_epoch, valid_epoch, test_epoch) |
senders = edge_index[:, 0]
receivers = edge_index[:, 1]
return jraph.GraphsTuple( | random_line_split |
talents.ts | import 'rxjs/add/operator/map';
import { Injectable } from '@angular/core';
import { Http, Headers, URLSearchParams } from '@angular/http';
import { Observable } from 'rxjs/Observable';
import Constants from '../constants';
import { ITalent } from '../models';
@Injectable()
export class TalentsService {
private API_PATH: string = `${Constants.API_URL}/talents`;
constructor(private http: Http) { }
public getTalents(payload, token): Observable<ITalent[]> {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
const params: URLSearchParams = new URLSearchParams();
if (payload.pagination) {
params.set('per', payload.pagination.per);
params.set('page', payload.pagination.page);
}
if (payload.filter) {
Object.keys(payload.filter).forEach((name) => {
if (name !== 'talents_ids') {
params.set(`filter[${name}]`, payload.filter[name]);
} else if (payload.filter['talents_ids'] && payload.filter['talents_ids'].length) |
});
}
if (payload.order) {
Object.keys(payload.order).map((name) => {
params.set(`order[${name}]`, payload.order[name]);
});
}
if (!params.get('order[name]')) {
params.set('order[name]', 'asc');
}
return this.http.get(this.API_PATH, { search: params, headers })
.map((res) => res.json());
}
public postTalents(payload, token): Observable<ITalent> {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
return this.http.post(this.API_PATH, payload, { headers })
.map((res) => res.json());
}
}
| {
payload.filter['talents_ids'].forEach((item) => {
params.append('filter[talents_ids][]', item);
});
} | conditional_block |
talents.ts | import 'rxjs/add/operator/map';
import { Injectable } from '@angular/core';
import { Http, Headers, URLSearchParams } from '@angular/http';
import { Observable } from 'rxjs/Observable';
import Constants from '../constants';
import { ITalent } from '../models';
@Injectable()
export class TalentsService {
private API_PATH: string = `${Constants.API_URL}/talents`;
constructor(private http: Http) { }
public getTalents(payload, token): Observable<ITalent[]> |
public postTalents(payload, token): Observable<ITalent> {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
return this.http.post(this.API_PATH, payload, { headers })
.map((res) => res.json());
}
}
| {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
const params: URLSearchParams = new URLSearchParams();
if (payload.pagination) {
params.set('per', payload.pagination.per);
params.set('page', payload.pagination.page);
}
if (payload.filter) {
Object.keys(payload.filter).forEach((name) => {
if (name !== 'talents_ids') {
params.set(`filter[${name}]`, payload.filter[name]);
} else if (payload.filter['talents_ids'] && payload.filter['talents_ids'].length) {
payload.filter['talents_ids'].forEach((item) => {
params.append('filter[talents_ids][]', item);
});
}
});
}
if (payload.order) {
Object.keys(payload.order).map((name) => {
params.set(`order[${name}]`, payload.order[name]);
});
}
if (!params.get('order[name]')) {
params.set('order[name]', 'asc');
}
return this.http.get(this.API_PATH, { search: params, headers })
.map((res) => res.json());
} | identifier_body |
talents.ts | import 'rxjs/add/operator/map';
import { Injectable } from '@angular/core';
import { Http, Headers, URLSearchParams } from '@angular/http';
import { Observable } from 'rxjs/Observable';
import Constants from '../constants';
import { ITalent } from '../models';
@Injectable()
export class | {
private API_PATH: string = `${Constants.API_URL}/talents`;
constructor(private http: Http) { }
public getTalents(payload, token): Observable<ITalent[]> {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
const params: URLSearchParams = new URLSearchParams();
if (payload.pagination) {
params.set('per', payload.pagination.per);
params.set('page', payload.pagination.page);
}
if (payload.filter) {
Object.keys(payload.filter).forEach((name) => {
if (name !== 'talents_ids') {
params.set(`filter[${name}]`, payload.filter[name]);
} else if (payload.filter['talents_ids'] && payload.filter['talents_ids'].length) {
payload.filter['talents_ids'].forEach((item) => {
params.append('filter[talents_ids][]', item);
});
}
});
}
if (payload.order) {
Object.keys(payload.order).map((name) => {
params.set(`order[${name}]`, payload.order[name]);
});
}
if (!params.get('order[name]')) {
params.set('order[name]', 'asc');
}
return this.http.get(this.API_PATH, { search: params, headers })
.map((res) => res.json());
}
public postTalents(payload, token): Observable<ITalent> {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
return this.http.post(this.API_PATH, payload, { headers })
.map((res) => res.json());
}
}
| TalentsService | identifier_name |
talents.ts | import 'rxjs/add/operator/map';
import { Injectable } from '@angular/core';
import { Http, Headers, URLSearchParams } from '@angular/http';
import { Observable } from 'rxjs/Observable';
import Constants from '../constants';
import { ITalent } from '../models';
@Injectable()
export class TalentsService {
private API_PATH: string = `${Constants.API_URL}/talents`;
constructor(private http: Http) { }
public getTalents(payload, token): Observable<ITalent[]> {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
const params: URLSearchParams = new URLSearchParams();
if (payload.pagination) {
params.set('per', payload.pagination.per);
params.set('page', payload.pagination.page);
}
if (payload.filter) {
Object.keys(payload.filter).forEach((name) => {
if (name !== 'talents_ids') {
params.set(`filter[${name}]`, payload.filter[name]);
} else if (payload.filter['talents_ids'] && payload.filter['talents_ids'].length) {
payload.filter['talents_ids'].forEach((item) => {
params.append('filter[talents_ids][]', item);
});
}
});
} | Object.keys(payload.order).map((name) => {
params.set(`order[${name}]`, payload.order[name]);
});
}
if (!params.get('order[name]')) {
params.set('order[name]', 'asc');
}
return this.http.get(this.API_PATH, { search: params, headers })
.map((res) => res.json());
}
public postTalents(payload, token): Observable<ITalent> {
const headers = new Headers({ 'Content-Type': 'application/json', 'X-Auth-Secret': token });
return this.http.post(this.API_PATH, payload, { headers })
.map((res) => res.json());
}
} |
if (payload.order) { | random_line_split |
eggie.py | #!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def | ():
app = QtGui.QApplication(sys.argv)
window=PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| main | identifier_name |
eggie.py | #!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
app = QtGui.QApplication(sys.argv)
window=PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
| main() | conditional_block | |
eggie.py | #!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this | # this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
app = QtGui.QApplication(sys.argv)
window=PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | # list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice, | random_line_split |
eggie.py | #!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
|
if __name__ == '__main__':
main()
| app = QtGui.QApplication(sys.argv)
window=PreprocessDialog()
window.show()
sys.exit(app.exec_()) | identifier_body |
js_Fc4I144XPrPKyUpaWv36lNESuazCkfla6EpZyDPBOQk.js |
(function ($) {
/**
* Attach the child dialog behavior to new content.
*/
Drupal.behaviors.overlayChild = {
attach: function (context, settings) {
// Make sure this behavior is not processed more than once.
if (this.processed) {
return;
}
this.processed = true;
// If we cannot reach the parent window, break out of the overlay.
if (!parent.Drupal || !parent.Drupal.overlay) {
window.location = window.location.href.replace(/([?&]?)render=overlay&?/g, '$1').replace(/\?$/, '');
}
var settings = settings.overlayChild || {};
// If the entire parent window should be refreshed when the overlay is
// closed, pass that information to the parent window.
if (settings.refreshPage) {
parent.Drupal.overlay.refreshPage = true;
}
// If a form has been submitted successfully, then the server side script
// may have decided to tell the parent window to close the popup dialog.
if (settings.closeOverlay) {
parent.Drupal.overlay.bindChild(window, true);
// Use setTimeout to close the child window from a separate thread,
// because the current one is busy processing Drupal behaviors.
setTimeout(function () {
if (typeof settings.redirect == 'string') {
parent.Drupal.overlay.redirect(settings.redirect);
}
else {
parent.Drupal.overlay.close();
}
}, 1);
return;
}
// If one of the regions displaying outside the overlay needs to be
// reloaded immediately, let the parent window know.
if (settings.refreshRegions) {
parent.Drupal.overlay.refreshRegions(settings.refreshRegions);
}
// Ok, now we can tell the parent window we're ready.
parent.Drupal.overlay.bindChild(window);
// IE8 crashes on certain pages if this isn't called; reason unknown.
window.scrollTo(window.scrollX, window.scrollY);
// Attach child related behaviors to the iframe document.
Drupal.overlayChild.attachBehaviors(context, settings);
// There are two links within the message that informs people about the
// overlay and how to disable it. Make sure both links are visible when
// either one has focus and add a class to the wrapper for styling purposes.
$('#overlay-disable-message', context)
.focusin(function () {
$(this).addClass('overlay-disable-message-focused');
$('a.element-focusable', this).removeClass('element-invisible');
})
.focusout(function () {
$(this).removeClass('overlay-disable-message-focused');
$('a.element-focusable', this).addClass('element-invisible');
});
}
};
/**
* Overlay object for child windows.
*/
Drupal.overlayChild = Drupal.overlayChild || {
behaviors: {}
};
Drupal.overlayChild.prototype = {};
/**
* Attach child related behaviors to the iframe document.
*/
Drupal.overlayChild.attachBehaviors = function (context, settings) {
$.each(this.behaviors, function () {
this(context, settings);
});
};
/**
* Capture and handle clicks.
*
* Instead of binding a click event handler to every link we bind one to the
* document and handle events that bubble up. This also allows other scripts
* to bind their own handlers to links and also to prevent overlay's handling.
*/
Drupal.overlayChild.behaviors.addClickHandler = function (context, settings) {
$(document).bind('click.drupal-overlay mouseup.drupal-overlay', $.proxy(parent.Drupal.overlay, 'eventhandlerOverrideLink'));
};
/**
* Modify forms depending on their relation to the overlay.
*
* By default, forms are assumed to keep the flow in the overlay. Thus their
* action attribute get a ?render=overlay suffix.
*/
Drupal.overlayChild.behaviors.parseForms = function (context, settings) {
$('form', context).once('overlay', function () {
// Obtain the action attribute of the form.
var action = $(this).attr('action');
// Keep internal forms in the overlay.
if (action == undefined || (action.indexOf('http') != 0 && action.indexOf('https') != 0)) {
action += (action.indexOf('?') > -1 ? '&' : '?') + 'render=overlay';
$(this).attr('action', action);
}
// Submit external forms into a new window.
else {
$(this).attr('target', '_new');
}
});
};
/**
* Replace the overlay title with a message while loading another page.
*/
Drupal.overlayChild.behaviors.loading = function (context, settings) {
var $title;
var text = Drupal.t('Loading');
var dots = '';
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$title = $('#overlay-title').text(text);
var id = setInterval(function () {
dots = (dots.length > 10) ? '' : dots + '.';
$title.text(text + dots);
}, 500);
});
};
/**
* Switch active tab immediately.
*/
Drupal.overlayChild.behaviors.tabs = function (context, settings) {
var $tabsLinks = $('#overlay-tabs > li > a');
$('#overlay-tabs > li > a').bind('click.drupal-overlay', function () {
var active_tab = Drupal.t('(active tab)');
$tabsLinks.parent().siblings().removeClass('active').find('element-invisible:contains(' + active_tab + ')').appendTo(this);
$(this).parent().addClass('active');
});
};
/**
* If the shortcut add/delete button exists, move it to the overlay titlebar.
*/
Drupal.overlayChild.behaviors.shortcutAddLink = function (context, settings) {
// Remove any existing shortcut button markup from the titlebar.
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
// If the shortcut add/delete button exists, move it to the titlebar.
var $addToShortcuts = $('.add-or-remove-shortcuts');
if ($addToShortcuts.length) {
$addToShortcuts.insertAfter('#overlay-title');
}
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
});
};
/**
* Use displacement from parent window.
*/
Drupal.overlayChild.behaviors.alterTableHeaderOffset = function (context, settings) {
if (Drupal.settings.tableHeaderOffset) {
Drupal.overlayChild.prevTableHeaderOffset = Drupal.settings.tableHeaderOffset;
}
Drupal.settings.tableHeaderOffset = 'Drupal.overlayChild.tableHeaderOffset';
};
/**
* Callback for Drupal.settings.tableHeaderOffset.
*/
Drupal.overlayChild.tableHeaderOffset = function () {
var topOffset = Drupal.overlayChild.prevTableHeaderOffset ? eval(Drupal.overlayChild.prevTableHeaderOffset + '()') : 0;
return topOffset + parseInt($(document.body).css('marginTop'));
};
})(jQuery);
;
(function ($) {
/**
* Retrieves the summary for the first element.
*/
$.fn.drupalGetSummary = function () {
var callback = this.data('summaryCallback');
return (this[0] && callback) ? $.trim(callback(this[0])) : '';
};
/**
* Sets the summary for all matched elements.
*
* @param callback
* Either a function that will be called each time the summary is
* retrieved or a string (which is returned each time).
*/
$.fn.drupalSetSummary = function (callback) {
var self = this;
// To facilitate things, the callback should always be a function. If it's
// not, we wrap it into an anonymous function which just returns the value.
if (typeof callback != 'function') {
var val = callback;
callback = function () { return val; };
}
return this
.data('summaryCallback', callback)
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind('formUpdated.summary')
.bind('formUpdated.summary', function () {
self.trigger('summaryUpdated');
})
// The actual summaryUpdated handler doesn't fire when the callback is
// changed, so we have to do this manually.
.trigger('summaryUpdated');
};
/**
* Sends a 'formUpdated' event each time a form element is modified.
*/
Drupal.behaviors.formUpdated = {
attach: function (context) {
// These events are namespaced so that we can remove them later.
var events = 'change.formUpdated click.formUpdated blur.formUpdated keyup.formUpdated';
$(context)
// Since context could be an input element itself, it's added back to
// the jQuery object and filtered again.
.find(':input').andSelf().filter(':input')
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind(events).bind(events, function () {
$(this).trigger('formUpdated');
});
}
};
/**
* Prepopulate form fields with information from the visitor cookie.
*/
Drupal.behaviors.fillUserInfoFromCookie = {
attach: function (context, settings) {
$('form.user-info-from-cookie').once('user-info-from-cookie', function () {
var formContext = this;
$.each(['name', 'mail', 'homepage'], function () {
var $element = $('[name=' + this + ']', formContext);
var cookie = $.cookie('Drupal.visitor.' + this);
if ($element.length && cookie) {
$element.val(cookie);
}
});
});
}
};
})(jQuery);
;
(function ($) {
/**
* The base States namespace.
*
* Having the local states variable allows us to use the States namespace
* without having to always declare "Drupal.states".
*/
var states = Drupal.states = {
// An array of functions that should be postponed.
postponed: []
};
/**
* Attaches the states.
*/
Drupal.behaviors.states = {
attach: function (context, settings) {
for (var selector in settings.states) {
for (var state in settings.states[selector]) {
new states.Dependent({
element: $(selector),
state: states.State.sanitize(state),
dependees: settings.states[selector][state]
});
}
}
// Execute all postponed functions now.
while (states.postponed.length) {
(states.postponed.shift())();
}
}
};
/**
* Object representing an element that depends on other elements.
*
* @param args
* Object with the following keys (all of which are required):
* - element: A jQuery object of the dependent element
* - state: A State object describing the state that is dependent
* - dependees: An object with dependency specifications. Lists all elements
* that this element depends on.
*/
states.Dependent = function (args) {
$.extend(this, { values: {}, oldValue: undefined }, args);
for (var selector in this.dependees) {
this.initializeDependee(selector, this.dependees[selector]);
}
};
/**
* Comparison functions for comparing the value of an element with the
* specification from the dependency settings. If the object type can't be
* found in this list, the === operator is used by default.
*/
states.Dependent.comparisons = {
'RegExp': function (reference, value) {
return reference.test(value);
},
'Function': function (reference, value) {
// The "reference" variable is a comparison function.
return reference(value);
},
'Number': function (reference, value) {
// If "reference" is a number and "value" is a string, then cast reference
// as a string before applying the strict comparison in compare(). Otherwise
// numeric keys in the form's #states array fail to match string values
// returned from jQuery's val().
return (value.constructor.name === 'String') ? compare(String(reference), value) : compare(reference, value);
}
};
states.Dependent.prototype = {
/**
* Initializes one of the elements this dependent depends on.
*
* @param selector
* The CSS selector describing the dependee.
* @param dependeeStates
* The list of states that have to be monitored for tracking the
* dependee's compliance status.
*/
initializeDependee: function (selector, dependeeStates) {
var self = this;
// Cache for the states of this dependee.
self.values[selector] = {};
$.each(dependeeStates, function (state, value) {
state = states.State.sanitize(state);
// Initialize the value of this state.
self.values[selector][state.pristine] = undefined;
// Monitor state changes of the specified state for this dependee.
$(selector).bind('state:' + state, function (e) {
var complies = self.compare(value, e.value);
self.update(selector, state, complies);
});
// Make sure the event we just bound ourselves to is actually fired.
new states.Trigger({ selector: selector, state: state });
});
},
/**
* Compares a value with a reference value.
*
* @param reference
* The value used for reference.
* @param value
* The value to compare with the reference value.
* @return
* true, undefined or false.
*/
compare: function (reference, value) {
if (reference.constructor.name in states.Dependent.comparisons) {
// Use a custom compare function for certain reference value types.
return states.Dependent.comparisons[reference.constructor.name](reference, value);
}
else {
// Do a plain comparison otherwise.
return compare(reference, value);
}
},
/**
* Update the value of a dependee's state.
*
* @param selector
* CSS selector describing the dependee.
* @param state
* A State object describing the dependee's updated state.
* @param value
* The new value for the dependee's updated state.
*/
update: function (selector, state, value) {
// Only act when the 'new' value is actually new.
if (value !== this.values[selector][state.pristine]) {
this.values[selector][state.pristine] = value;
this.reevaluate();
}
},
/**
* Triggers change events in case a state changed.
*/
reevaluate: function () {
var value = undefined;
// Merge all individual values to find out whether this dependee complies.
for (var selector in this.values) {
for (var state in this.values[selector]) {
state = states.State.sanitize(state);
var complies = this.values[selector][state.pristine];
value = ternary(value, invert(complies, state.invert));
}
}
// Only invoke a state change event when the value actually changed.
if (value !== this.oldValue) {
// Store the new value so that we can compare later whether the value
// actually changed.
this.oldValue = value;
// Normalize the value to match the normalized state name.
value = invert(value, this.state.invert);
// By adding "trigger: true", we ensure that state changes don't go into
// infinite loops.
this.element.trigger({ type: 'state:' + this.state, value: value, trigger: true });
}
}
};
states.Trigger = function (args) {
$.extend(this, args);
if (this.state in states.Trigger.states) {
this.element = $(this.selector);
// Only call the trigger initializer when it wasn't yet attached to this
// element. Otherwise we'd end up with duplicate events.
if (!this.element.data('trigger:' + this.state)) {
this.initialize();
}
}
};
states.Trigger.prototype = {
initialize: function () {
var self = this;
var trigger = states.Trigger.states[this.state];
if (typeof trigger == 'function') {
// We have a custom trigger initialization function.
trigger.call(window, this.element);
}
else {
$.each(trigger, function (event, valueFn) {
self.defaultTrigger(event, valueFn);
});
}
// Mark this trigger as initialized for this element.
this.element.data('trigger:' + this.state, true);
},
defaultTrigger: function (event, valueFn) {
var self = this;
var oldValue = valueFn.call(this.element);
// Attach the event callback.
this.element.bind(event, function (e) {
var value = valueFn.call(self.element, e);
// Only trigger the event if the value has actually changed.
if (oldValue !== value) {
self.element.trigger({ type: 'state:' + self.state, value: value, oldValue: oldValue });
oldValue = value;
}
});
states.postponed.push(function () {
// Trigger the event once for initialization purposes.
self.element.trigger({ type: 'state:' + self.state, value: oldValue, oldValue: undefined });
});
}
};
/**
* This list of states contains functions that are used to monitor the state
* of an element. Whenever an element depends on the state of another element,
* one of these trigger functions is added to the dependee so that the
* dependent element can be updated.
*/
states.Trigger.states = {
// 'empty' describes the state to be monitored
empty: {
// 'keyup' is the (native DOM) event that we watch for.
'keyup': function () {
// The function associated to that trigger returns the new value for the
// state.
return this.val() == '';
}
},
checked: {
'change': function () {
return this.attr('checked');
}
},
// For radio buttons, only return the value if the radio button is selected.
value: {
'keyup': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
},
'change': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
}
},
collapsed: {
'collapsed': function(e) {
return (e !== undefined && 'value' in e) ? e.value : this.is('.collapsed');
}
}
};
/**
* A state object is used for describing the state and performing aliasing.
*/
states.State = function(state) {
// We may need the original unresolved name later.
this.pristine = this.name = state;
// Normalize the state name.
while (true) {
// Iteratively remove exclamation marks and invert the value.
while (this.name.charAt(0) == '!') {
this.name = this.name.substring(1);
this.invert = !this.invert;
}
// Replace the state with its normalized name.
if (this.name in states.State.aliases) {
this.name = states.State.aliases[this.name];
}
else {
break;
}
}
};
/**
* Create a new State object by sanitizing the passed value.
*/
states.State.sanitize = function (state) {
if (state instanceof states.State) {
return state;
}
else {
return new states.State(state);
}
};
/**
* This list of aliases is used to normalize states and associates negated names
* with their respective inverse state.
*/
states.State.aliases = {
'enabled': '!disabled',
'invisible': '!visible',
'invalid': '!valid',
'untouched': '!touched',
'optional': '!required',
'filled': '!empty',
'unchecked': '!checked',
'irrelevant': '!relevant',
'expanded': '!collapsed',
'readwrite': '!readonly'
};
states.State.prototype = {
invert: false,
/**
* Ensures that just using the state object returns the name.
*/
toString: function() {
return this.name;
}
};
/**
* Global state change handlers. These are bound to "document" to cover all
* elements whose state changes. Events sent to elements within the page
* bubble up to these handlers. We use this system so that themes and modules
* can override these state change handlers for particular parts of a page.
*/
{
$(document).bind('state:disabled', function(e) {
// Only act when this change was triggered by a dependency and not by the
// element monitoring itself.
if (e.trigger) {
$(e.target)
.attr('disabled', e.value)
.filter('.form-element')
.closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'addClass' : 'removeClass']('form-disabled');
// Note: WebKit nightlies don't reflect that change correctly.
// See https://bugs.webkit.org/show_bug.cgi?id=23789
}
});
$(document).bind('state:required', function(e) {
if (e.trigger) {
if (e.value) {
$(e.target).closest('.form-item, .form-wrapper').find('label').append('<span class="form-required">*</span>');
}
else {
$(e.target).closest('.form-item, .form-wrapper').find('label .form-required').remove();
}
}
});
$(document).bind('state:visible', function(e) {
if (e.trigger) {
$(e.target).closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'show' : 'hide']();
}
});
$(document).bind('state:checked', function(e) {
if (e.trigger) {
$(e.target).attr('checked', e.value);
}
});
$(document).bind('state:collapsed', function(e) {
if (e.trigger) {
if ($(e.target).is('.collapsed') !== e.value) {
$('> legend a', e.target).click();
}
}
});
}
/**
* These are helper functions implementing addition "operators" and don't
* implement any logic that is particular to states.
*/
{
// Bitwise AND with a third undefined state.
function ternary (a, b) {
return a === undefined ? b : (b === undefined ? a : a && b);
};
// Inverts a (if it's not undefined) when invert is true.
function | (a, invert) {
return (invert && a !== undefined) ? !a : a;
};
// Compares two values while ignoring undefined values.
function compare (a, b) {
return (a === b) ? (a === undefined ? a : true) : (a === undefined || b === undefined);
}
}
})(jQuery);
;
| invert | identifier_name |
js_Fc4I144XPrPKyUpaWv36lNESuazCkfla6EpZyDPBOQk.js |
(function ($) {
/**
* Attach the child dialog behavior to new content.
*/
Drupal.behaviors.overlayChild = {
attach: function (context, settings) {
// Make sure this behavior is not processed more than once.
if (this.processed) {
return;
}
this.processed = true;
// If we cannot reach the parent window, break out of the overlay.
if (!parent.Drupal || !parent.Drupal.overlay) {
window.location = window.location.href.replace(/([?&]?)render=overlay&?/g, '$1').replace(/\?$/, '');
}
var settings = settings.overlayChild || {};
// If the entire parent window should be refreshed when the overlay is
// closed, pass that information to the parent window.
if (settings.refreshPage) {
parent.Drupal.overlay.refreshPage = true;
}
// If a form has been submitted successfully, then the server side script
// may have decided to tell the parent window to close the popup dialog.
if (settings.closeOverlay) {
parent.Drupal.overlay.bindChild(window, true);
// Use setTimeout to close the child window from a separate thread,
// because the current one is busy processing Drupal behaviors.
setTimeout(function () {
if (typeof settings.redirect == 'string') {
parent.Drupal.overlay.redirect(settings.redirect);
}
else {
parent.Drupal.overlay.close();
}
}, 1);
return;
}
// If one of the regions displaying outside the overlay needs to be
// reloaded immediately, let the parent window know.
if (settings.refreshRegions) {
parent.Drupal.overlay.refreshRegions(settings.refreshRegions);
}
// Ok, now we can tell the parent window we're ready.
parent.Drupal.overlay.bindChild(window);
// IE8 crashes on certain pages if this isn't called; reason unknown.
window.scrollTo(window.scrollX, window.scrollY);
// Attach child related behaviors to the iframe document.
Drupal.overlayChild.attachBehaviors(context, settings);
// There are two links within the message that informs people about the
// overlay and how to disable it. Make sure both links are visible when
// either one has focus and add a class to the wrapper for styling purposes.
$('#overlay-disable-message', context)
.focusin(function () {
$(this).addClass('overlay-disable-message-focused');
$('a.element-focusable', this).removeClass('element-invisible');
})
.focusout(function () {
$(this).removeClass('overlay-disable-message-focused');
$('a.element-focusable', this).addClass('element-invisible');
});
}
};
/**
* Overlay object for child windows.
*/
Drupal.overlayChild = Drupal.overlayChild || {
behaviors: {}
};
Drupal.overlayChild.prototype = {};
/**
* Attach child related behaviors to the iframe document.
*/
Drupal.overlayChild.attachBehaviors = function (context, settings) {
$.each(this.behaviors, function () {
this(context, settings);
});
};
/**
* Capture and handle clicks.
*
* Instead of binding a click event handler to every link we bind one to the
* document and handle events that bubble up. This also allows other scripts
* to bind their own handlers to links and also to prevent overlay's handling.
*/
Drupal.overlayChild.behaviors.addClickHandler = function (context, settings) {
$(document).bind('click.drupal-overlay mouseup.drupal-overlay', $.proxy(parent.Drupal.overlay, 'eventhandlerOverrideLink'));
};
/**
* Modify forms depending on their relation to the overlay.
*
* By default, forms are assumed to keep the flow in the overlay. Thus their
* action attribute get a ?render=overlay suffix.
*/
Drupal.overlayChild.behaviors.parseForms = function (context, settings) {
$('form', context).once('overlay', function () {
// Obtain the action attribute of the form.
var action = $(this).attr('action');
// Keep internal forms in the overlay.
if (action == undefined || (action.indexOf('http') != 0 && action.indexOf('https') != 0)) {
action += (action.indexOf('?') > -1 ? '&' : '?') + 'render=overlay';
$(this).attr('action', action);
}
// Submit external forms into a new window.
else {
$(this).attr('target', '_new');
}
});
};
/**
* Replace the overlay title with a message while loading another page.
*/
Drupal.overlayChild.behaviors.loading = function (context, settings) {
var $title;
var text = Drupal.t('Loading');
var dots = '';
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$title = $('#overlay-title').text(text);
var id = setInterval(function () {
dots = (dots.length > 10) ? '' : dots + '.';
$title.text(text + dots);
}, 500);
});
};
/**
* Switch active tab immediately.
*/
Drupal.overlayChild.behaviors.tabs = function (context, settings) {
var $tabsLinks = $('#overlay-tabs > li > a');
$('#overlay-tabs > li > a').bind('click.drupal-overlay', function () {
var active_tab = Drupal.t('(active tab)');
$tabsLinks.parent().siblings().removeClass('active').find('element-invisible:contains(' + active_tab + ')').appendTo(this);
$(this).parent().addClass('active');
});
};
/**
* If the shortcut add/delete button exists, move it to the overlay titlebar.
*/
Drupal.overlayChild.behaviors.shortcutAddLink = function (context, settings) {
// Remove any existing shortcut button markup from the titlebar.
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
// If the shortcut add/delete button exists, move it to the titlebar.
var $addToShortcuts = $('.add-or-remove-shortcuts');
if ($addToShortcuts.length) {
$addToShortcuts.insertAfter('#overlay-title');
}
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
});
};
/**
* Use displacement from parent window.
*/
Drupal.overlayChild.behaviors.alterTableHeaderOffset = function (context, settings) {
if (Drupal.settings.tableHeaderOffset) {
Drupal.overlayChild.prevTableHeaderOffset = Drupal.settings.tableHeaderOffset;
}
Drupal.settings.tableHeaderOffset = 'Drupal.overlayChild.tableHeaderOffset';
};
/**
* Callback for Drupal.settings.tableHeaderOffset.
*/
Drupal.overlayChild.tableHeaderOffset = function () {
var topOffset = Drupal.overlayChild.prevTableHeaderOffset ? eval(Drupal.overlayChild.prevTableHeaderOffset + '()') : 0;
return topOffset + parseInt($(document.body).css('marginTop'));
};
})(jQuery);
;
(function ($) {
/**
* Retrieves the summary for the first element.
*/
$.fn.drupalGetSummary = function () {
var callback = this.data('summaryCallback');
return (this[0] && callback) ? $.trim(callback(this[0])) : '';
};
/**
* Sets the summary for all matched elements.
*
* @param callback
* Either a function that will be called each time the summary is
* retrieved or a string (which is returned each time).
*/
$.fn.drupalSetSummary = function (callback) {
var self = this;
// To facilitate things, the callback should always be a function. If it's
// not, we wrap it into an anonymous function which just returns the value.
if (typeof callback != 'function') {
var val = callback;
callback = function () { return val; };
}
return this
.data('summaryCallback', callback)
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind('formUpdated.summary')
.bind('formUpdated.summary', function () {
self.trigger('summaryUpdated');
})
// The actual summaryUpdated handler doesn't fire when the callback is
// changed, so we have to do this manually.
.trigger('summaryUpdated');
};
/**
* Sends a 'formUpdated' event each time a form element is modified.
*/
Drupal.behaviors.formUpdated = {
attach: function (context) {
// These events are namespaced so that we can remove them later.
var events = 'change.formUpdated click.formUpdated blur.formUpdated keyup.formUpdated';
$(context)
// Since context could be an input element itself, it's added back to
// the jQuery object and filtered again.
.find(':input').andSelf().filter(':input')
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind(events).bind(events, function () {
$(this).trigger('formUpdated');
});
}
};
/**
* Prepopulate form fields with information from the visitor cookie.
*/
Drupal.behaviors.fillUserInfoFromCookie = {
attach: function (context, settings) {
$('form.user-info-from-cookie').once('user-info-from-cookie', function () {
var formContext = this;
$.each(['name', 'mail', 'homepage'], function () {
var $element = $('[name=' + this + ']', formContext);
var cookie = $.cookie('Drupal.visitor.' + this);
if ($element.length && cookie) {
$element.val(cookie);
}
});
});
}
};
})(jQuery);
;
(function ($) {
/**
* The base States namespace.
*
* Having the local states variable allows us to use the States namespace
* without having to always declare "Drupal.states".
*/
var states = Drupal.states = {
// An array of functions that should be postponed.
postponed: []
};
/**
* Attaches the states.
*/
Drupal.behaviors.states = {
attach: function (context, settings) {
for (var selector in settings.states) {
for (var state in settings.states[selector]) {
new states.Dependent({
element: $(selector),
state: states.State.sanitize(state),
dependees: settings.states[selector][state]
});
}
}
// Execute all postponed functions now.
while (states.postponed.length) {
(states.postponed.shift())();
}
}
};
/**
* Object representing an element that depends on other elements.
*
* @param args
* Object with the following keys (all of which are required):
* - element: A jQuery object of the dependent element
* - state: A State object describing the state that is dependent
* - dependees: An object with dependency specifications. Lists all elements
* that this element depends on.
*/
states.Dependent = function (args) {
$.extend(this, { values: {}, oldValue: undefined }, args);
for (var selector in this.dependees) {
this.initializeDependee(selector, this.dependees[selector]);
}
};
/**
* Comparison functions for comparing the value of an element with the
* specification from the dependency settings. If the object type can't be
* found in this list, the === operator is used by default.
*/
states.Dependent.comparisons = {
'RegExp': function (reference, value) {
return reference.test(value);
},
'Function': function (reference, value) {
// The "reference" variable is a comparison function.
return reference(value);
},
'Number': function (reference, value) {
// If "reference" is a number and "value" is a string, then cast reference
// as a string before applying the strict comparison in compare(). Otherwise
// numeric keys in the form's #states array fail to match string values
// returned from jQuery's val().
return (value.constructor.name === 'String') ? compare(String(reference), value) : compare(reference, value);
}
};
states.Dependent.prototype = {
/**
* Initializes one of the elements this dependent depends on.
*
* @param selector
* The CSS selector describing the dependee.
* @param dependeeStates
* The list of states that have to be monitored for tracking the
* dependee's compliance status.
*/
initializeDependee: function (selector, dependeeStates) {
var self = this;
// Cache for the states of this dependee.
self.values[selector] = {};
$.each(dependeeStates, function (state, value) {
state = states.State.sanitize(state);
// Initialize the value of this state.
self.values[selector][state.pristine] = undefined;
// Monitor state changes of the specified state for this dependee.
$(selector).bind('state:' + state, function (e) {
var complies = self.compare(value, e.value);
self.update(selector, state, complies);
});
// Make sure the event we just bound ourselves to is actually fired.
new states.Trigger({ selector: selector, state: state });
});
},
/**
* Compares a value with a reference value.
*
* @param reference
* The value used for reference.
* @param value
* The value to compare with the reference value.
* @return
* true, undefined or false.
*/
compare: function (reference, value) {
if (reference.constructor.name in states.Dependent.comparisons) {
// Use a custom compare function for certain reference value types.
return states.Dependent.comparisons[reference.constructor.name](reference, value);
}
else {
// Do a plain comparison otherwise.
return compare(reference, value);
}
},
/**
* Update the value of a dependee's state.
*
* @param selector
* CSS selector describing the dependee.
* @param state
* A State object describing the dependee's updated state.
* @param value
* The new value for the dependee's updated state.
*/
update: function (selector, state, value) {
// Only act when the 'new' value is actually new.
if (value !== this.values[selector][state.pristine]) {
this.values[selector][state.pristine] = value;
this.reevaluate();
}
},
/**
* Triggers change events in case a state changed.
*/
reevaluate: function () {
var value = undefined;
// Merge all individual values to find out whether this dependee complies.
for (var selector in this.values) {
for (var state in this.values[selector]) {
state = states.State.sanitize(state);
var complies = this.values[selector][state.pristine];
value = ternary(value, invert(complies, state.invert));
}
}
// Only invoke a state change event when the value actually changed.
if (value !== this.oldValue) {
// Store the new value so that we can compare later whether the value
// actually changed.
this.oldValue = value;
// Normalize the value to match the normalized state name.
value = invert(value, this.state.invert);
// By adding "trigger: true", we ensure that state changes don't go into
// infinite loops.
this.element.trigger({ type: 'state:' + this.state, value: value, trigger: true });
}
}
};
states.Trigger = function (args) {
$.extend(this, args);
if (this.state in states.Trigger.states) {
this.element = $(this.selector);
// Only call the trigger initializer when it wasn't yet attached to this
// element. Otherwise we'd end up with duplicate events.
if (!this.element.data('trigger:' + this.state)) {
this.initialize();
}
}
};
states.Trigger.prototype = {
initialize: function () {
var self = this;
var trigger = states.Trigger.states[this.state];
if (typeof trigger == 'function') {
// We have a custom trigger initialization function.
trigger.call(window, this.element);
}
else {
$.each(trigger, function (event, valueFn) {
self.defaultTrigger(event, valueFn);
});
}
// Mark this trigger as initialized for this element.
this.element.data('trigger:' + this.state, true);
},
defaultTrigger: function (event, valueFn) {
var self = this;
var oldValue = valueFn.call(this.element);
// Attach the event callback.
this.element.bind(event, function (e) {
var value = valueFn.call(self.element, e);
// Only trigger the event if the value has actually changed.
if (oldValue !== value) {
self.element.trigger({ type: 'state:' + self.state, value: value, oldValue: oldValue });
oldValue = value;
}
});
states.postponed.push(function () {
// Trigger the event once for initialization purposes.
self.element.trigger({ type: 'state:' + self.state, value: oldValue, oldValue: undefined });
});
}
};
/**
* This list of states contains functions that are used to monitor the state
* of an element. Whenever an element depends on the state of another element,
* one of these trigger functions is added to the dependee so that the
* dependent element can be updated.
*/
states.Trigger.states = {
// 'empty' describes the state to be monitored
empty: {
// 'keyup' is the (native DOM) event that we watch for.
'keyup': function () {
// The function associated to that trigger returns the new value for the
// state.
return this.val() == '';
}
},
checked: {
'change': function () {
return this.attr('checked');
}
},
// For radio buttons, only return the value if the radio button is selected.
value: {
'keyup': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
},
'change': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
}
},
collapsed: {
'collapsed': function(e) {
return (e !== undefined && 'value' in e) ? e.value : this.is('.collapsed');
}
}
};
/**
* A state object is used for describing the state and performing aliasing.
*/
states.State = function(state) {
// We may need the original unresolved name later.
this.pristine = this.name = state;
// Normalize the state name.
while (true) {
// Iteratively remove exclamation marks and invert the value.
while (this.name.charAt(0) == '!') {
this.name = this.name.substring(1);
this.invert = !this.invert;
}
// Replace the state with its normalized name.
if (this.name in states.State.aliases) {
this.name = states.State.aliases[this.name];
}
else {
break;
}
}
};
/**
* Create a new State object by sanitizing the passed value.
*/
states.State.sanitize = function (state) {
if (state instanceof states.State) {
return state;
}
else {
return new states.State(state);
}
};
/**
* This list of aliases is used to normalize states and associates negated names
* with their respective inverse state.
*/
states.State.aliases = {
'enabled': '!disabled',
'invisible': '!visible',
'invalid': '!valid',
'untouched': '!touched',
'optional': '!required',
'filled': '!empty',
'unchecked': '!checked',
'irrelevant': '!relevant',
'expanded': '!collapsed',
'readwrite': '!readonly'
};
states.State.prototype = {
invert: false,
/**
* Ensures that just using the state object returns the name.
*/
toString: function() {
return this.name;
}
};
/**
* Global state change handlers. These are bound to "document" to cover all
* elements whose state changes. Events sent to elements within the page
* bubble up to these handlers. We use this system so that themes and modules
* can override these state change handlers for particular parts of a page.
*/
{
$(document).bind('state:disabled', function(e) {
// Only act when this change was triggered by a dependency and not by the
// element monitoring itself.
if (e.trigger) {
$(e.target)
.attr('disabled', e.value)
.filter('.form-element')
.closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'addClass' : 'removeClass']('form-disabled');
// Note: WebKit nightlies don't reflect that change correctly.
// See https://bugs.webkit.org/show_bug.cgi?id=23789
}
});
$(document).bind('state:required', function(e) {
if (e.trigger) {
if (e.value) {
$(e.target).closest('.form-item, .form-wrapper').find('label').append('<span class="form-required">*</span>');
}
else {
$(e.target).closest('.form-item, .form-wrapper').find('label .form-required').remove();
}
}
});
$(document).bind('state:visible', function(e) {
if (e.trigger) {
$(e.target).closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'show' : 'hide']();
}
});
$(document).bind('state:checked', function(e) {
if (e.trigger) {
$(e.target).attr('checked', e.value);
}
});
$(document).bind('state:collapsed', function(e) {
if (e.trigger) {
if ($(e.target).is('.collapsed') !== e.value) {
$('> legend a', e.target).click();
}
}
});
}
/**
* These are helper functions implementing addition "operators" and don't
* implement any logic that is particular to states.
*/
{
// Bitwise AND with a third undefined state.
function ternary (a, b) {
return a === undefined ? b : (b === undefined ? a : a && b);
};
// Inverts a (if it's not undefined) when invert is true.
function invert (a, invert) {
return (invert && a !== undefined) ? !a : a;
};
// Compares two values while ignoring undefined values.
function compare (a, b) |
}
})(jQuery);
;
| {
return (a === b) ? (a === undefined ? a : true) : (a === undefined || b === undefined);
} | identifier_body |
js_Fc4I144XPrPKyUpaWv36lNESuazCkfla6EpZyDPBOQk.js |
(function ($) {
/**
* Attach the child dialog behavior to new content.
*/
Drupal.behaviors.overlayChild = {
attach: function (context, settings) {
// Make sure this behavior is not processed more than once.
if (this.processed) {
return;
}
this.processed = true;
// If we cannot reach the parent window, break out of the overlay.
if (!parent.Drupal || !parent.Drupal.overlay) {
window.location = window.location.href.replace(/([?&]?)render=overlay&?/g, '$1').replace(/\?$/, '');
}
var settings = settings.overlayChild || {};
// If the entire parent window should be refreshed when the overlay is
// closed, pass that information to the parent window.
if (settings.refreshPage) {
parent.Drupal.overlay.refreshPage = true;
}
// If a form has been submitted successfully, then the server side script
// may have decided to tell the parent window to close the popup dialog.
if (settings.closeOverlay) {
parent.Drupal.overlay.bindChild(window, true);
// Use setTimeout to close the child window from a separate thread,
// because the current one is busy processing Drupal behaviors.
setTimeout(function () {
if (typeof settings.redirect == 'string') {
parent.Drupal.overlay.redirect(settings.redirect);
}
else {
parent.Drupal.overlay.close();
}
}, 1);
return;
}
// If one of the regions displaying outside the overlay needs to be
// reloaded immediately, let the parent window know.
if (settings.refreshRegions) {
parent.Drupal.overlay.refreshRegions(settings.refreshRegions);
}
// Ok, now we can tell the parent window we're ready.
parent.Drupal.overlay.bindChild(window);
// IE8 crashes on certain pages if this isn't called; reason unknown.
window.scrollTo(window.scrollX, window.scrollY);
// Attach child related behaviors to the iframe document.
Drupal.overlayChild.attachBehaviors(context, settings);
// There are two links within the message that informs people about the
// overlay and how to disable it. Make sure both links are visible when
// either one has focus and add a class to the wrapper for styling purposes.
$('#overlay-disable-message', context)
.focusin(function () {
$(this).addClass('overlay-disable-message-focused');
$('a.element-focusable', this).removeClass('element-invisible');
})
.focusout(function () {
$(this).removeClass('overlay-disable-message-focused');
$('a.element-focusable', this).addClass('element-invisible');
});
}
};
/**
* Overlay object for child windows.
*/
Drupal.overlayChild = Drupal.overlayChild || {
behaviors: {}
};
Drupal.overlayChild.prototype = {};
/**
* Attach child related behaviors to the iframe document.
*/
Drupal.overlayChild.attachBehaviors = function (context, settings) {
$.each(this.behaviors, function () {
this(context, settings);
});
};
/**
* Capture and handle clicks.
*
* Instead of binding a click event handler to every link we bind one to the
* document and handle events that bubble up. This also allows other scripts
* to bind their own handlers to links and also to prevent overlay's handling.
*/
Drupal.overlayChild.behaviors.addClickHandler = function (context, settings) {
$(document).bind('click.drupal-overlay mouseup.drupal-overlay', $.proxy(parent.Drupal.overlay, 'eventhandlerOverrideLink'));
};
/**
* Modify forms depending on their relation to the overlay.
*
* By default, forms are assumed to keep the flow in the overlay. Thus their
* action attribute get a ?render=overlay suffix.
*/
Drupal.overlayChild.behaviors.parseForms = function (context, settings) {
$('form', context).once('overlay', function () {
// Obtain the action attribute of the form.
var action = $(this).attr('action');
// Keep internal forms in the overlay.
if (action == undefined || (action.indexOf('http') != 0 && action.indexOf('https') != 0)) {
action += (action.indexOf('?') > -1 ? '&' : '?') + 'render=overlay';
$(this).attr('action', action);
}
// Submit external forms into a new window.
else {
$(this).attr('target', '_new');
}
});
};
/**
* Replace the overlay title with a message while loading another page.
*/
Drupal.overlayChild.behaviors.loading = function (context, settings) {
var $title;
var text = Drupal.t('Loading');
var dots = '';
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$title = $('#overlay-title').text(text);
var id = setInterval(function () {
dots = (dots.length > 10) ? '' : dots + '.';
$title.text(text + dots);
}, 500);
});
};
/**
* Switch active tab immediately.
*/
Drupal.overlayChild.behaviors.tabs = function (context, settings) {
var $tabsLinks = $('#overlay-tabs > li > a');
$('#overlay-tabs > li > a').bind('click.drupal-overlay', function () {
var active_tab = Drupal.t('(active tab)');
$tabsLinks.parent().siblings().removeClass('active').find('element-invisible:contains(' + active_tab + ')').appendTo(this);
$(this).parent().addClass('active');
});
};
/**
* If the shortcut add/delete button exists, move it to the overlay titlebar.
*/
Drupal.overlayChild.behaviors.shortcutAddLink = function (context, settings) {
// Remove any existing shortcut button markup from the titlebar.
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
// If the shortcut add/delete button exists, move it to the titlebar.
var $addToShortcuts = $('.add-or-remove-shortcuts');
if ($addToShortcuts.length) {
$addToShortcuts.insertAfter('#overlay-title');
}
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
});
};
/**
* Use displacement from parent window.
*/
Drupal.overlayChild.behaviors.alterTableHeaderOffset = function (context, settings) {
if (Drupal.settings.tableHeaderOffset) {
Drupal.overlayChild.prevTableHeaderOffset = Drupal.settings.tableHeaderOffset;
}
Drupal.settings.tableHeaderOffset = 'Drupal.overlayChild.tableHeaderOffset';
};
/**
* Callback for Drupal.settings.tableHeaderOffset.
*/
Drupal.overlayChild.tableHeaderOffset = function () {
var topOffset = Drupal.overlayChild.prevTableHeaderOffset ? eval(Drupal.overlayChild.prevTableHeaderOffset + '()') : 0;
return topOffset + parseInt($(document.body).css('marginTop'));
};
})(jQuery);
;
(function ($) {
/**
* Retrieves the summary for the first element.
*/
$.fn.drupalGetSummary = function () {
var callback = this.data('summaryCallback');
return (this[0] && callback) ? $.trim(callback(this[0])) : '';
};
/**
* Sets the summary for all matched elements.
*
* @param callback
* Either a function that will be called each time the summary is
* retrieved or a string (which is returned each time).
*/
$.fn.drupalSetSummary = function (callback) {
var self = this;
// To facilitate things, the callback should always be a function. If it's
// not, we wrap it into an anonymous function which just returns the value.
if (typeof callback != 'function') {
var val = callback;
callback = function () { return val; };
}
return this
.data('summaryCallback', callback)
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind('formUpdated.summary')
.bind('formUpdated.summary', function () {
self.trigger('summaryUpdated');
})
// The actual summaryUpdated handler doesn't fire when the callback is
// changed, so we have to do this manually.
.trigger('summaryUpdated');
};
/**
* Sends a 'formUpdated' event each time a form element is modified.
*/
Drupal.behaviors.formUpdated = {
attach: function (context) {
// These events are namespaced so that we can remove them later.
var events = 'change.formUpdated click.formUpdated blur.formUpdated keyup.formUpdated';
$(context)
// Since context could be an input element itself, it's added back to
// the jQuery object and filtered again.
.find(':input').andSelf().filter(':input')
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind(events).bind(events, function () {
$(this).trigger('formUpdated');
});
}
};
/**
* Prepopulate form fields with information from the visitor cookie.
*/
Drupal.behaviors.fillUserInfoFromCookie = {
attach: function (context, settings) {
$('form.user-info-from-cookie').once('user-info-from-cookie', function () {
var formContext = this;
$.each(['name', 'mail', 'homepage'], function () {
var $element = $('[name=' + this + ']', formContext);
var cookie = $.cookie('Drupal.visitor.' + this);
if ($element.length && cookie) {
$element.val(cookie);
}
});
});
}
};
})(jQuery);
;
(function ($) {
/**
* The base States namespace.
*
* Having the local states variable allows us to use the States namespace
* without having to always declare "Drupal.states".
*/
var states = Drupal.states = {
// An array of functions that should be postponed.
postponed: []
};
/**
* Attaches the states.
*/
Drupal.behaviors.states = {
attach: function (context, settings) {
for (var selector in settings.states) {
for (var state in settings.states[selector]) {
new states.Dependent({
element: $(selector),
state: states.State.sanitize(state),
dependees: settings.states[selector][state]
});
}
}
// Execute all postponed functions now.
while (states.postponed.length) {
(states.postponed.shift())();
}
}
};
/**
* Object representing an element that depends on other elements.
*
* @param args
* Object with the following keys (all of which are required):
* - element: A jQuery object of the dependent element
* - state: A State object describing the state that is dependent
* - dependees: An object with dependency specifications. Lists all elements
* that this element depends on.
*/
states.Dependent = function (args) {
$.extend(this, { values: {}, oldValue: undefined }, args);
for (var selector in this.dependees) {
this.initializeDependee(selector, this.dependees[selector]);
}
};
/**
* Comparison functions for comparing the value of an element with the
* specification from the dependency settings. If the object type can't be
* found in this list, the === operator is used by default.
*/
states.Dependent.comparisons = {
'RegExp': function (reference, value) {
return reference.test(value);
},
'Function': function (reference, value) {
// The "reference" variable is a comparison function.
return reference(value);
},
'Number': function (reference, value) {
// If "reference" is a number and "value" is a string, then cast reference
// as a string before applying the strict comparison in compare(). Otherwise
// numeric keys in the form's #states array fail to match string values
// returned from jQuery's val().
return (value.constructor.name === 'String') ? compare(String(reference), value) : compare(reference, value);
}
};
states.Dependent.prototype = {
/**
* Initializes one of the elements this dependent depends on.
*
* @param selector
* The CSS selector describing the dependee.
* @param dependeeStates
* The list of states that have to be monitored for tracking the
* dependee's compliance status.
*/
initializeDependee: function (selector, dependeeStates) {
var self = this;
// Cache for the states of this dependee.
self.values[selector] = {};
$.each(dependeeStates, function (state, value) {
state = states.State.sanitize(state);
// Initialize the value of this state.
self.values[selector][state.pristine] = undefined;
// Monitor state changes of the specified state for this dependee.
$(selector).bind('state:' + state, function (e) {
var complies = self.compare(value, e.value);
self.update(selector, state, complies);
});
// Make sure the event we just bound ourselves to is actually fired.
new states.Trigger({ selector: selector, state: state });
});
},
/**
* Compares a value with a reference value.
*
* @param reference
* The value used for reference.
* @param value
* The value to compare with the reference value.
* @return
* true, undefined or false.
*/
compare: function (reference, value) {
if (reference.constructor.name in states.Dependent.comparisons) {
// Use a custom compare function for certain reference value types.
return states.Dependent.comparisons[reference.constructor.name](reference, value);
}
else {
// Do a plain comparison otherwise.
return compare(reference, value);
}
},
/**
* Update the value of a dependee's state.
*
* @param selector
* CSS selector describing the dependee.
* @param state
* A State object describing the dependee's updated state.
* @param value
* The new value for the dependee's updated state.
*/
update: function (selector, state, value) {
// Only act when the 'new' value is actually new.
if (value !== this.values[selector][state.pristine]) |
},
/**
* Triggers change events in case a state changed.
*/
reevaluate: function () {
var value = undefined;
// Merge all individual values to find out whether this dependee complies.
for (var selector in this.values) {
for (var state in this.values[selector]) {
state = states.State.sanitize(state);
var complies = this.values[selector][state.pristine];
value = ternary(value, invert(complies, state.invert));
}
}
// Only invoke a state change event when the value actually changed.
if (value !== this.oldValue) {
// Store the new value so that we can compare later whether the value
// actually changed.
this.oldValue = value;
// Normalize the value to match the normalized state name.
value = invert(value, this.state.invert);
// By adding "trigger: true", we ensure that state changes don't go into
// infinite loops.
this.element.trigger({ type: 'state:' + this.state, value: value, trigger: true });
}
}
};
states.Trigger = function (args) {
$.extend(this, args);
if (this.state in states.Trigger.states) {
this.element = $(this.selector);
// Only call the trigger initializer when it wasn't yet attached to this
// element. Otherwise we'd end up with duplicate events.
if (!this.element.data('trigger:' + this.state)) {
this.initialize();
}
}
};
states.Trigger.prototype = {
initialize: function () {
var self = this;
var trigger = states.Trigger.states[this.state];
if (typeof trigger == 'function') {
// We have a custom trigger initialization function.
trigger.call(window, this.element);
}
else {
$.each(trigger, function (event, valueFn) {
self.defaultTrigger(event, valueFn);
});
}
// Mark this trigger as initialized for this element.
this.element.data('trigger:' + this.state, true);
},
defaultTrigger: function (event, valueFn) {
var self = this;
var oldValue = valueFn.call(this.element);
// Attach the event callback.
this.element.bind(event, function (e) {
var value = valueFn.call(self.element, e);
// Only trigger the event if the value has actually changed.
if (oldValue !== value) {
self.element.trigger({ type: 'state:' + self.state, value: value, oldValue: oldValue });
oldValue = value;
}
});
states.postponed.push(function () {
// Trigger the event once for initialization purposes.
self.element.trigger({ type: 'state:' + self.state, value: oldValue, oldValue: undefined });
});
}
};
/**
* This list of states contains functions that are used to monitor the state
* of an element. Whenever an element depends on the state of another element,
* one of these trigger functions is added to the dependee so that the
* dependent element can be updated.
*/
states.Trigger.states = {
// 'empty' describes the state to be monitored
empty: {
// 'keyup' is the (native DOM) event that we watch for.
'keyup': function () {
// The function associated to that trigger returns the new value for the
// state.
return this.val() == '';
}
},
checked: {
'change': function () {
return this.attr('checked');
}
},
// For radio buttons, only return the value if the radio button is selected.
value: {
'keyup': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
},
'change': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
}
},
collapsed: {
'collapsed': function(e) {
return (e !== undefined && 'value' in e) ? e.value : this.is('.collapsed');
}
}
};
/**
* A state object is used for describing the state and performing aliasing.
*/
states.State = function(state) {
// We may need the original unresolved name later.
this.pristine = this.name = state;
// Normalize the state name.
while (true) {
// Iteratively remove exclamation marks and invert the value.
while (this.name.charAt(0) == '!') {
this.name = this.name.substring(1);
this.invert = !this.invert;
}
// Replace the state with its normalized name.
if (this.name in states.State.aliases) {
this.name = states.State.aliases[this.name];
}
else {
break;
}
}
};
/**
* Create a new State object by sanitizing the passed value.
*/
states.State.sanitize = function (state) {
if (state instanceof states.State) {
return state;
}
else {
return new states.State(state);
}
};
/**
* This list of aliases is used to normalize states and associates negated names
* with their respective inverse state.
*/
states.State.aliases = {
'enabled': '!disabled',
'invisible': '!visible',
'invalid': '!valid',
'untouched': '!touched',
'optional': '!required',
'filled': '!empty',
'unchecked': '!checked',
'irrelevant': '!relevant',
'expanded': '!collapsed',
'readwrite': '!readonly'
};
states.State.prototype = {
invert: false,
/**
* Ensures that just using the state object returns the name.
*/
toString: function() {
return this.name;
}
};
/**
* Global state change handlers. These are bound to "document" to cover all
* elements whose state changes. Events sent to elements within the page
* bubble up to these handlers. We use this system so that themes and modules
* can override these state change handlers for particular parts of a page.
*/
{
$(document).bind('state:disabled', function(e) {
// Only act when this change was triggered by a dependency and not by the
// element monitoring itself.
if (e.trigger) {
$(e.target)
.attr('disabled', e.value)
.filter('.form-element')
.closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'addClass' : 'removeClass']('form-disabled');
// Note: WebKit nightlies don't reflect that change correctly.
// See https://bugs.webkit.org/show_bug.cgi?id=23789
}
});
$(document).bind('state:required', function(e) {
if (e.trigger) {
if (e.value) {
$(e.target).closest('.form-item, .form-wrapper').find('label').append('<span class="form-required">*</span>');
}
else {
$(e.target).closest('.form-item, .form-wrapper').find('label .form-required').remove();
}
}
});
$(document).bind('state:visible', function(e) {
if (e.trigger) {
$(e.target).closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'show' : 'hide']();
}
});
$(document).bind('state:checked', function(e) {
if (e.trigger) {
$(e.target).attr('checked', e.value);
}
});
$(document).bind('state:collapsed', function(e) {
if (e.trigger) {
if ($(e.target).is('.collapsed') !== e.value) {
$('> legend a', e.target).click();
}
}
});
}
/**
* These are helper functions implementing addition "operators" and don't
* implement any logic that is particular to states.
*/
{
// Bitwise AND with a third undefined state.
function ternary (a, b) {
return a === undefined ? b : (b === undefined ? a : a && b);
};
// Inverts a (if it's not undefined) when invert is true.
function invert (a, invert) {
return (invert && a !== undefined) ? !a : a;
};
// Compares two values while ignoring undefined values.
function compare (a, b) {
return (a === b) ? (a === undefined ? a : true) : (a === undefined || b === undefined);
}
}
})(jQuery);
;
| {
this.values[selector][state.pristine] = value;
this.reevaluate();
} | conditional_block |
js_Fc4I144XPrPKyUpaWv36lNESuazCkfla6EpZyDPBOQk.js | (function ($) {
/**
* Attach the child dialog behavior to new content.
*/
Drupal.behaviors.overlayChild = {
attach: function (context, settings) {
// Make sure this behavior is not processed more than once.
if (this.processed) {
return;
}
this.processed = true;
// If we cannot reach the parent window, break out of the overlay.
if (!parent.Drupal || !parent.Drupal.overlay) {
window.location = window.location.href.replace(/([?&]?)render=overlay&?/g, '$1').replace(/\?$/, '');
}
var settings = settings.overlayChild || {};
// If the entire parent window should be refreshed when the overlay is
// closed, pass that information to the parent window.
if (settings.refreshPage) {
parent.Drupal.overlay.refreshPage = true;
}
// If a form has been submitted successfully, then the server side script
// may have decided to tell the parent window to close the popup dialog.
if (settings.closeOverlay) {
parent.Drupal.overlay.bindChild(window, true);
// Use setTimeout to close the child window from a separate thread,
// because the current one is busy processing Drupal behaviors.
setTimeout(function () {
if (typeof settings.redirect == 'string') {
parent.Drupal.overlay.redirect(settings.redirect);
}
else {
parent.Drupal.overlay.close();
}
}, 1);
return;
}
// If one of the regions displaying outside the overlay needs to be
// reloaded immediately, let the parent window know.
if (settings.refreshRegions) {
parent.Drupal.overlay.refreshRegions(settings.refreshRegions);
}
// Ok, now we can tell the parent window we're ready.
parent.Drupal.overlay.bindChild(window);
// IE8 crashes on certain pages if this isn't called; reason unknown.
window.scrollTo(window.scrollX, window.scrollY);
// Attach child related behaviors to the iframe document.
Drupal.overlayChild.attachBehaviors(context, settings);
// There are two links within the message that informs people about the
// overlay and how to disable it. Make sure both links are visible when
// either one has focus and add a class to the wrapper for styling purposes.
$('#overlay-disable-message', context)
.focusin(function () {
$(this).addClass('overlay-disable-message-focused');
$('a.element-focusable', this).removeClass('element-invisible');
})
.focusout(function () {
$(this).removeClass('overlay-disable-message-focused');
$('a.element-focusable', this).addClass('element-invisible');
});
}
};
/**
* Overlay object for child windows.
*/
Drupal.overlayChild = Drupal.overlayChild || {
behaviors: {}
};
Drupal.overlayChild.prototype = {};
/**
* Attach child related behaviors to the iframe document.
*/
Drupal.overlayChild.attachBehaviors = function (context, settings) {
$.each(this.behaviors, function () {
this(context, settings);
});
};
/**
* Capture and handle clicks.
*
* Instead of binding a click event handler to every link we bind one to the
* document and handle events that bubble up. This also allows other scripts
* to bind their own handlers to links and also to prevent overlay's handling.
*/
Drupal.overlayChild.behaviors.addClickHandler = function (context, settings) {
$(document).bind('click.drupal-overlay mouseup.drupal-overlay', $.proxy(parent.Drupal.overlay, 'eventhandlerOverrideLink'));
};
/**
* Modify forms depending on their relation to the overlay.
*
* By default, forms are assumed to keep the flow in the overlay. Thus their
* action attribute get a ?render=overlay suffix.
*/
Drupal.overlayChild.behaviors.parseForms = function (context, settings) {
$('form', context).once('overlay', function () {
// Obtain the action attribute of the form.
var action = $(this).attr('action');
// Keep internal forms in the overlay.
if (action == undefined || (action.indexOf('http') != 0 && action.indexOf('https') != 0)) {
action += (action.indexOf('?') > -1 ? '&' : '?') + 'render=overlay';
$(this).attr('action', action);
}
// Submit external forms into a new window.
else {
$(this).attr('target', '_new');
}
});
};
/**
* Replace the overlay title with a message while loading another page.
*/
Drupal.overlayChild.behaviors.loading = function (context, settings) {
var $title;
var text = Drupal.t('Loading');
var dots = '';
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$title = $('#overlay-title').text(text);
var id = setInterval(function () {
dots = (dots.length > 10) ? '' : dots + '.';
$title.text(text + dots);
}, 500);
});
};
/**
* Switch active tab immediately.
*/
Drupal.overlayChild.behaviors.tabs = function (context, settings) {
var $tabsLinks = $('#overlay-tabs > li > a');
$('#overlay-tabs > li > a').bind('click.drupal-overlay', function () {
var active_tab = Drupal.t('(active tab)');
$tabsLinks.parent().siblings().removeClass('active').find('element-invisible:contains(' + active_tab + ')').appendTo(this);
$(this).parent().addClass('active');
});
};
/**
* If the shortcut add/delete button exists, move it to the overlay titlebar.
*/
Drupal.overlayChild.behaviors.shortcutAddLink = function (context, settings) {
// Remove any existing shortcut button markup from the titlebar.
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
// If the shortcut add/delete button exists, move it to the titlebar.
var $addToShortcuts = $('.add-or-remove-shortcuts');
if ($addToShortcuts.length) {
$addToShortcuts.insertAfter('#overlay-title');
}
$(document).bind('drupalOverlayBeforeLoad.drupal-overlay.drupal-overlay-child-loading', function () {
$('#overlay-titlebar').find('.add-or-remove-shortcuts').remove();
});
};
/**
* Use displacement from parent window.
*/
Drupal.overlayChild.behaviors.alterTableHeaderOffset = function (context, settings) {
if (Drupal.settings.tableHeaderOffset) {
Drupal.overlayChild.prevTableHeaderOffset = Drupal.settings.tableHeaderOffset;
}
Drupal.settings.tableHeaderOffset = 'Drupal.overlayChild.tableHeaderOffset';
};
/**
* Callback for Drupal.settings.tableHeaderOffset.
*/
Drupal.overlayChild.tableHeaderOffset = function () {
var topOffset = Drupal.overlayChild.prevTableHeaderOffset ? eval(Drupal.overlayChild.prevTableHeaderOffset + '()') : 0;
return topOffset + parseInt($(document.body).css('marginTop'));
};
})(jQuery);
;
(function ($) {
/**
* Retrieves the summary for the first element.
*/
$.fn.drupalGetSummary = function () {
var callback = this.data('summaryCallback');
return (this[0] && callback) ? $.trim(callback(this[0])) : '';
};
/**
* Sets the summary for all matched elements.
*
* @param callback
* Either a function that will be called each time the summary is
* retrieved or a string (which is returned each time).
*/
$.fn.drupalSetSummary = function (callback) {
var self = this;
// To facilitate things, the callback should always be a function. If it's
// not, we wrap it into an anonymous function which just returns the value.
if (typeof callback != 'function') {
var val = callback;
callback = function () { return val; };
}
return this
.data('summaryCallback', callback)
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind('formUpdated.summary')
.bind('formUpdated.summary', function () {
self.trigger('summaryUpdated');
})
// The actual summaryUpdated handler doesn't fire when the callback is
// changed, so we have to do this manually.
.trigger('summaryUpdated');
};
/**
* Sends a 'formUpdated' event each time a form element is modified.
*/
Drupal.behaviors.formUpdated = {
attach: function (context) {
// These events are namespaced so that we can remove them later.
var events = 'change.formUpdated click.formUpdated blur.formUpdated keyup.formUpdated';
$(context)
// Since context could be an input element itself, it's added back to
// the jQuery object and filtered again.
.find(':input').andSelf().filter(':input')
// To prevent duplicate events, the handlers are first removed and then
// (re-)added.
.unbind(events).bind(events, function () {
$(this).trigger('formUpdated');
});
}
};
/**
* Prepopulate form fields with information from the visitor cookie.
*/
Drupal.behaviors.fillUserInfoFromCookie = {
attach: function (context, settings) {
$('form.user-info-from-cookie').once('user-info-from-cookie', function () {
var formContext = this;
$.each(['name', 'mail', 'homepage'], function () {
var $element = $('[name=' + this + ']', formContext);
var cookie = $.cookie('Drupal.visitor.' + this);
if ($element.length && cookie) {
$element.val(cookie);
}
});
});
}
};
})(jQuery);
;
(function ($) {
/**
* The base States namespace.
*
* Having the local states variable allows us to use the States namespace | var states = Drupal.states = {
// An array of functions that should be postponed.
postponed: []
};
/**
* Attaches the states.
*/
Drupal.behaviors.states = {
attach: function (context, settings) {
for (var selector in settings.states) {
for (var state in settings.states[selector]) {
new states.Dependent({
element: $(selector),
state: states.State.sanitize(state),
dependees: settings.states[selector][state]
});
}
}
// Execute all postponed functions now.
while (states.postponed.length) {
(states.postponed.shift())();
}
}
};
/**
* Object representing an element that depends on other elements.
*
* @param args
* Object with the following keys (all of which are required):
* - element: A jQuery object of the dependent element
* - state: A State object describing the state that is dependent
* - dependees: An object with dependency specifications. Lists all elements
* that this element depends on.
*/
states.Dependent = function (args) {
$.extend(this, { values: {}, oldValue: undefined }, args);
for (var selector in this.dependees) {
this.initializeDependee(selector, this.dependees[selector]);
}
};
/**
* Comparison functions for comparing the value of an element with the
* specification from the dependency settings. If the object type can't be
* found in this list, the === operator is used by default.
*/
states.Dependent.comparisons = {
'RegExp': function (reference, value) {
return reference.test(value);
},
'Function': function (reference, value) {
// The "reference" variable is a comparison function.
return reference(value);
},
'Number': function (reference, value) {
// If "reference" is a number and "value" is a string, then cast reference
// as a string before applying the strict comparison in compare(). Otherwise
// numeric keys in the form's #states array fail to match string values
// returned from jQuery's val().
return (value.constructor.name === 'String') ? compare(String(reference), value) : compare(reference, value);
}
};
states.Dependent.prototype = {
/**
* Initializes one of the elements this dependent depends on.
*
* @param selector
* The CSS selector describing the dependee.
* @param dependeeStates
* The list of states that have to be monitored for tracking the
* dependee's compliance status.
*/
initializeDependee: function (selector, dependeeStates) {
var self = this;
// Cache for the states of this dependee.
self.values[selector] = {};
$.each(dependeeStates, function (state, value) {
state = states.State.sanitize(state);
// Initialize the value of this state.
self.values[selector][state.pristine] = undefined;
// Monitor state changes of the specified state for this dependee.
$(selector).bind('state:' + state, function (e) {
var complies = self.compare(value, e.value);
self.update(selector, state, complies);
});
// Make sure the event we just bound ourselves to is actually fired.
new states.Trigger({ selector: selector, state: state });
});
},
/**
* Compares a value with a reference value.
*
* @param reference
* The value used for reference.
* @param value
* The value to compare with the reference value.
* @return
* true, undefined or false.
*/
compare: function (reference, value) {
if (reference.constructor.name in states.Dependent.comparisons) {
// Use a custom compare function for certain reference value types.
return states.Dependent.comparisons[reference.constructor.name](reference, value);
}
else {
// Do a plain comparison otherwise.
return compare(reference, value);
}
},
/**
* Update the value of a dependee's state.
*
* @param selector
* CSS selector describing the dependee.
* @param state
* A State object describing the dependee's updated state.
* @param value
* The new value for the dependee's updated state.
*/
update: function (selector, state, value) {
// Only act when the 'new' value is actually new.
if (value !== this.values[selector][state.pristine]) {
this.values[selector][state.pristine] = value;
this.reevaluate();
}
},
/**
* Triggers change events in case a state changed.
*/
reevaluate: function () {
var value = undefined;
// Merge all individual values to find out whether this dependee complies.
for (var selector in this.values) {
for (var state in this.values[selector]) {
state = states.State.sanitize(state);
var complies = this.values[selector][state.pristine];
value = ternary(value, invert(complies, state.invert));
}
}
// Only invoke a state change event when the value actually changed.
if (value !== this.oldValue) {
// Store the new value so that we can compare later whether the value
// actually changed.
this.oldValue = value;
// Normalize the value to match the normalized state name.
value = invert(value, this.state.invert);
// By adding "trigger: true", we ensure that state changes don't go into
// infinite loops.
this.element.trigger({ type: 'state:' + this.state, value: value, trigger: true });
}
}
};
states.Trigger = function (args) {
$.extend(this, args);
if (this.state in states.Trigger.states) {
this.element = $(this.selector);
// Only call the trigger initializer when it wasn't yet attached to this
// element. Otherwise we'd end up with duplicate events.
if (!this.element.data('trigger:' + this.state)) {
this.initialize();
}
}
};
states.Trigger.prototype = {
initialize: function () {
var self = this;
var trigger = states.Trigger.states[this.state];
if (typeof trigger == 'function') {
// We have a custom trigger initialization function.
trigger.call(window, this.element);
}
else {
$.each(trigger, function (event, valueFn) {
self.defaultTrigger(event, valueFn);
});
}
// Mark this trigger as initialized for this element.
this.element.data('trigger:' + this.state, true);
},
defaultTrigger: function (event, valueFn) {
var self = this;
var oldValue = valueFn.call(this.element);
// Attach the event callback.
this.element.bind(event, function (e) {
var value = valueFn.call(self.element, e);
// Only trigger the event if the value has actually changed.
if (oldValue !== value) {
self.element.trigger({ type: 'state:' + self.state, value: value, oldValue: oldValue });
oldValue = value;
}
});
states.postponed.push(function () {
// Trigger the event once for initialization purposes.
self.element.trigger({ type: 'state:' + self.state, value: oldValue, oldValue: undefined });
});
}
};
/**
* This list of states contains functions that are used to monitor the state
* of an element. Whenever an element depends on the state of another element,
* one of these trigger functions is added to the dependee so that the
* dependent element can be updated.
*/
states.Trigger.states = {
// 'empty' describes the state to be monitored
empty: {
// 'keyup' is the (native DOM) event that we watch for.
'keyup': function () {
// The function associated to that trigger returns the new value for the
// state.
return this.val() == '';
}
},
checked: {
'change': function () {
return this.attr('checked');
}
},
// For radio buttons, only return the value if the radio button is selected.
value: {
'keyup': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
},
'change': function () {
// Radio buttons share the same :input[name="key"] selector.
if (this.length > 1) {
// Initial checked value of radios is undefined, so we return false.
return this.filter(':checked').val() || false;
}
return this.val();
}
},
collapsed: {
'collapsed': function(e) {
return (e !== undefined && 'value' in e) ? e.value : this.is('.collapsed');
}
}
};
/**
* A state object is used for describing the state and performing aliasing.
*/
states.State = function(state) {
// We may need the original unresolved name later.
this.pristine = this.name = state;
// Normalize the state name.
while (true) {
// Iteratively remove exclamation marks and invert the value.
while (this.name.charAt(0) == '!') {
this.name = this.name.substring(1);
this.invert = !this.invert;
}
// Replace the state with its normalized name.
if (this.name in states.State.aliases) {
this.name = states.State.aliases[this.name];
}
else {
break;
}
}
};
/**
* Create a new State object by sanitizing the passed value.
*/
states.State.sanitize = function (state) {
if (state instanceof states.State) {
return state;
}
else {
return new states.State(state);
}
};
/**
* This list of aliases is used to normalize states and associates negated names
* with their respective inverse state.
*/
states.State.aliases = {
'enabled': '!disabled',
'invisible': '!visible',
'invalid': '!valid',
'untouched': '!touched',
'optional': '!required',
'filled': '!empty',
'unchecked': '!checked',
'irrelevant': '!relevant',
'expanded': '!collapsed',
'readwrite': '!readonly'
};
states.State.prototype = {
invert: false,
/**
* Ensures that just using the state object returns the name.
*/
toString: function() {
return this.name;
}
};
/**
* Global state change handlers. These are bound to "document" to cover all
* elements whose state changes. Events sent to elements within the page
* bubble up to these handlers. We use this system so that themes and modules
* can override these state change handlers for particular parts of a page.
*/
{
$(document).bind('state:disabled', function(e) {
// Only act when this change was triggered by a dependency and not by the
// element monitoring itself.
if (e.trigger) {
$(e.target)
.attr('disabled', e.value)
.filter('.form-element')
.closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'addClass' : 'removeClass']('form-disabled');
// Note: WebKit nightlies don't reflect that change correctly.
// See https://bugs.webkit.org/show_bug.cgi?id=23789
}
});
$(document).bind('state:required', function(e) {
if (e.trigger) {
if (e.value) {
$(e.target).closest('.form-item, .form-wrapper').find('label').append('<span class="form-required">*</span>');
}
else {
$(e.target).closest('.form-item, .form-wrapper').find('label .form-required').remove();
}
}
});
$(document).bind('state:visible', function(e) {
if (e.trigger) {
$(e.target).closest('.form-item, .form-submit, .form-wrapper')[e.value ? 'show' : 'hide']();
}
});
$(document).bind('state:checked', function(e) {
if (e.trigger) {
$(e.target).attr('checked', e.value);
}
});
$(document).bind('state:collapsed', function(e) {
if (e.trigger) {
if ($(e.target).is('.collapsed') !== e.value) {
$('> legend a', e.target).click();
}
}
});
}
/**
* These are helper functions implementing addition "operators" and don't
* implement any logic that is particular to states.
*/
{
// Bitwise AND with a third undefined state.
function ternary (a, b) {
return a === undefined ? b : (b === undefined ? a : a && b);
};
// Inverts a (if it's not undefined) when invert is true.
function invert (a, invert) {
return (invert && a !== undefined) ? !a : a;
};
// Compares two values while ignoring undefined values.
function compare (a, b) {
return (a === b) ? (a === undefined ? a : true) : (a === undefined || b === undefined);
}
}
})(jQuery);
; | * without having to always declare "Drupal.states".
*/ | random_line_split |
AIReviewStream.tsx | /*
* Copyright (C) 2012-2022 Online-Go.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import * as React from "react";
import * as data from "data";
import { ai_socket } from "sockets";
import { MoveTree } from "goban";
import { IdType } from "src/lib/types";
const analysis_requests_made: { [id: string]: boolean } = {};
interface AIReviewStreamProperties {
uuid?: string;
game_id?: IdType;
ai_review_id?: IdType;
callback: (data: any) => any;
}
export function AIReviewStream(props: AIReviewStreamProperties): JSX.Element {
const uuid = props.uuid;
const game_id = props.game_id;
const ai_review_id = props.ai_review_id;
React.useEffect(() => {
if (!props.uuid) {
console.log("No UUID for review stream");
return;
} else {
ai_socket.on("connect", onConnect);
ai_socket.on(uuid, onMessage);
if (ai_socket.connected) {
onConnect();
}
}
function onJwtChange() {
const user = data.get("config.user");
const user_jwt = data.get("config.user_jwt");
if (!user.anonymous && user_jwt) {
ai_socket.send("authenticate", { jwt: user_jwt });
}
}
function watch_jwt() {
data.watch("config.user_jwt", onJwtChange);
}
function unwatch_jwt() |
function onConnect() {
ai_socket.send("ai-review-connect", { uuid, game_id, ai_review_id });
watch_jwt();
}
function onMessage(data: any) {
props.callback(data);
}
return () => {
if (ai_socket.connected) {
ai_socket.send("ai-review-disconnect", { uuid });
}
ai_socket.off("connect", onConnect);
ai_socket.off(uuid, onMessage);
unwatch_jwt();
};
}, [uuid]);
return null;
}
export function ai_request_variation_analysis(
uuid,
game_id,
ai_review_id,
cur_move: MoveTree,
trunk_move: MoveTree,
): void {
if (!ai_socket.connected) {
console.warn(
"Not sending request for variation analysis since we wern't connected to the AI server",
);
return;
}
const trunk_move_string = trunk_move.getMoveStringToThisPoint();
const cur_move_string = cur_move.getMoveStringToThisPoint();
const variation = cur_move_string.slice(trunk_move_string.length);
const key = `${uuid}-${game_id}-${ai_review_id}-${trunk_move.move_number}-${variation}`;
if (key in analysis_requests_made) {
return;
}
analysis_requests_made[key] = true;
const req = {
uuid: uuid,
game_id: game_id,
ai_review_id: ai_review_id,
from: trunk_move.move_number,
variation: variation,
};
ai_socket.send("ai-analyze-variation", req);
}
| {
data.unwatch("config.user_jwt", onJwtChange);
} | identifier_body |
AIReviewStream.tsx | /*
* Copyright (C) 2012-2022 Online-Go.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import * as React from "react";
import * as data from "data";
import { ai_socket } from "sockets";
import { MoveTree } from "goban";
import { IdType } from "src/lib/types";
const analysis_requests_made: { [id: string]: boolean } = {};
interface AIReviewStreamProperties {
uuid?: string;
game_id?: IdType;
ai_review_id?: IdType;
callback: (data: any) => any;
}
export function AIReviewStream(props: AIReviewStreamProperties): JSX.Element {
const uuid = props.uuid;
const game_id = props.game_id;
const ai_review_id = props.ai_review_id;
React.useEffect(() => {
if (!props.uuid) {
console.log("No UUID for review stream");
return;
} else {
ai_socket.on("connect", onConnect);
ai_socket.on(uuid, onMessage);
if (ai_socket.connected) {
onConnect();
}
}
function | () {
const user = data.get("config.user");
const user_jwt = data.get("config.user_jwt");
if (!user.anonymous && user_jwt) {
ai_socket.send("authenticate", { jwt: user_jwt });
}
}
function watch_jwt() {
data.watch("config.user_jwt", onJwtChange);
}
function unwatch_jwt() {
data.unwatch("config.user_jwt", onJwtChange);
}
function onConnect() {
ai_socket.send("ai-review-connect", { uuid, game_id, ai_review_id });
watch_jwt();
}
function onMessage(data: any) {
props.callback(data);
}
return () => {
if (ai_socket.connected) {
ai_socket.send("ai-review-disconnect", { uuid });
}
ai_socket.off("connect", onConnect);
ai_socket.off(uuid, onMessage);
unwatch_jwt();
};
}, [uuid]);
return null;
}
export function ai_request_variation_analysis(
uuid,
game_id,
ai_review_id,
cur_move: MoveTree,
trunk_move: MoveTree,
): void {
if (!ai_socket.connected) {
console.warn(
"Not sending request for variation analysis since we wern't connected to the AI server",
);
return;
}
const trunk_move_string = trunk_move.getMoveStringToThisPoint();
const cur_move_string = cur_move.getMoveStringToThisPoint();
const variation = cur_move_string.slice(trunk_move_string.length);
const key = `${uuid}-${game_id}-${ai_review_id}-${trunk_move.move_number}-${variation}`;
if (key in analysis_requests_made) {
return;
}
analysis_requests_made[key] = true;
const req = {
uuid: uuid,
game_id: game_id,
ai_review_id: ai_review_id,
from: trunk_move.move_number,
variation: variation,
};
ai_socket.send("ai-analyze-variation", req);
}
| onJwtChange | identifier_name |
AIReviewStream.tsx | /*
* Copyright (C) 2012-2022 Online-Go.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import * as React from "react";
import * as data from "data";
import { ai_socket } from "sockets";
import { MoveTree } from "goban";
import { IdType } from "src/lib/types";
const analysis_requests_made: { [id: string]: boolean } = {};
interface AIReviewStreamProperties {
uuid?: string;
game_id?: IdType;
ai_review_id?: IdType;
callback: (data: any) => any;
}
export function AIReviewStream(props: AIReviewStreamProperties): JSX.Element {
const uuid = props.uuid;
const game_id = props.game_id;
const ai_review_id = props.ai_review_id;
React.useEffect(() => {
if (!props.uuid) {
console.log("No UUID for review stream");
return;
} else {
ai_socket.on("connect", onConnect);
ai_socket.on(uuid, onMessage);
if (ai_socket.connected) {
onConnect();
}
}
function onJwtChange() {
const user = data.get("config.user");
const user_jwt = data.get("config.user_jwt");
if (!user.anonymous && user_jwt) {
ai_socket.send("authenticate", { jwt: user_jwt });
}
}
function watch_jwt() {
data.watch("config.user_jwt", onJwtChange);
}
function unwatch_jwt() {
data.unwatch("config.user_jwt", onJwtChange);
}
function onConnect() {
ai_socket.send("ai-review-connect", { uuid, game_id, ai_review_id });
watch_jwt();
}
function onMessage(data: any) {
props.callback(data);
}
return () => {
if (ai_socket.connected) {
ai_socket.send("ai-review-disconnect", { uuid });
}
ai_socket.off("connect", onConnect);
ai_socket.off(uuid, onMessage);
unwatch_jwt();
};
}, [uuid]);
return null;
}
export function ai_request_variation_analysis(
uuid,
game_id,
ai_review_id,
cur_move: MoveTree,
trunk_move: MoveTree,
): void {
if (!ai_socket.connected) {
console.warn(
"Not sending request for variation analysis since we wern't connected to the AI server", | const cur_move_string = cur_move.getMoveStringToThisPoint();
const variation = cur_move_string.slice(trunk_move_string.length);
const key = `${uuid}-${game_id}-${ai_review_id}-${trunk_move.move_number}-${variation}`;
if (key in analysis_requests_made) {
return;
}
analysis_requests_made[key] = true;
const req = {
uuid: uuid,
game_id: game_id,
ai_review_id: ai_review_id,
from: trunk_move.move_number,
variation: variation,
};
ai_socket.send("ai-analyze-variation", req);
} | );
return;
}
const trunk_move_string = trunk_move.getMoveStringToThisPoint(); | random_line_split |
AIReviewStream.tsx | /*
* Copyright (C) 2012-2022 Online-Go.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import * as React from "react";
import * as data from "data";
import { ai_socket } from "sockets";
import { MoveTree } from "goban";
import { IdType } from "src/lib/types";
const analysis_requests_made: { [id: string]: boolean } = {};
interface AIReviewStreamProperties {
uuid?: string;
game_id?: IdType;
ai_review_id?: IdType;
callback: (data: any) => any;
}
export function AIReviewStream(props: AIReviewStreamProperties): JSX.Element {
const uuid = props.uuid;
const game_id = props.game_id;
const ai_review_id = props.ai_review_id;
React.useEffect(() => {
if (!props.uuid) {
console.log("No UUID for review stream");
return;
} else |
function onJwtChange() {
const user = data.get("config.user");
const user_jwt = data.get("config.user_jwt");
if (!user.anonymous && user_jwt) {
ai_socket.send("authenticate", { jwt: user_jwt });
}
}
function watch_jwt() {
data.watch("config.user_jwt", onJwtChange);
}
function unwatch_jwt() {
data.unwatch("config.user_jwt", onJwtChange);
}
function onConnect() {
ai_socket.send("ai-review-connect", { uuid, game_id, ai_review_id });
watch_jwt();
}
function onMessage(data: any) {
props.callback(data);
}
return () => {
if (ai_socket.connected) {
ai_socket.send("ai-review-disconnect", { uuid });
}
ai_socket.off("connect", onConnect);
ai_socket.off(uuid, onMessage);
unwatch_jwt();
};
}, [uuid]);
return null;
}
export function ai_request_variation_analysis(
uuid,
game_id,
ai_review_id,
cur_move: MoveTree,
trunk_move: MoveTree,
): void {
if (!ai_socket.connected) {
console.warn(
"Not sending request for variation analysis since we wern't connected to the AI server",
);
return;
}
const trunk_move_string = trunk_move.getMoveStringToThisPoint();
const cur_move_string = cur_move.getMoveStringToThisPoint();
const variation = cur_move_string.slice(trunk_move_string.length);
const key = `${uuid}-${game_id}-${ai_review_id}-${trunk_move.move_number}-${variation}`;
if (key in analysis_requests_made) {
return;
}
analysis_requests_made[key] = true;
const req = {
uuid: uuid,
game_id: game_id,
ai_review_id: ai_review_id,
from: trunk_move.move_number,
variation: variation,
};
ai_socket.send("ai-analyze-variation", req);
}
| {
ai_socket.on("connect", onConnect);
ai_socket.on(uuid, onMessage);
if (ai_socket.connected) {
onConnect();
}
} | conditional_block |
test_backend.py | import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
if not os.path.exists('dcos-release.config.yaml'):
pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml")
return release.load_config('dcos-release.config.yaml')
@pytest.fixture(scope='module')
def config_testing(config):
if 'testing' not in config:
pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
return config['testing']
@pytest.fixture(scope='module')
def config_aws(config_testing):
if 'aws' not in config_testing:
pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
return config_testing['aws']
def test_password_hash():
"""Tests that the password hashing method creates de-cryptable hash
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
"""Test that --set-superuser-hash works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Setting in a non-empty config.yaml which has no password set
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visable in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.9-dev',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
# Initialize with defautls
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
|
def test_do_aws_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""
def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
session = gen.build_deploy.aws.get_test_session(config_aws)
s3 = session.resource('s3')
bucket = str(uuid.uuid4())
s3_bucket = s3.Bucket(bucket)
s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})
try:
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
create_config(config_str, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
finally:
objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
s3_bucket.delete_objects(Delete={'Objects': objects})
s3_bucket.delete()
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True) | random_line_split | |
test_backend.py | import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
if not os.path.exists('dcos-release.config.yaml'):
pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml")
return release.load_config('dcos-release.config.yaml')
@pytest.fixture(scope='module')
def config_testing(config):
if 'testing' not in config:
pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
return config['testing']
@pytest.fixture(scope='module')
def config_aws(config_testing):
if 'aws' not in config_testing:
pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
return config_testing['aws']
def test_password_hash():
"""Tests that the password hashing method creates de-cryptable hash
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
"""Test that --set-superuser-hash works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Setting in a non-empty config.yaml which has no password set
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visable in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.9-dev',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages
def | (tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
# Initialize with defautls
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""
def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
session = gen.build_deploy.aws.get_test_session(config_aws)
s3 = session.resource('s3')
bucket = str(uuid.uuid4())
s3_bucket = s3.Bucket(bucket)
s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})
try:
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
create_config(config_str, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
finally:
objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
s3_bucket.delete_objects(Delete={'Objects': objects})
s3_bucket.delete()
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
| test_do_validate_config | identifier_name |
test_backend.py | import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
if not os.path.exists('dcos-release.config.yaml'):
pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml")
return release.load_config('dcos-release.config.yaml')
@pytest.fixture(scope='module')
def config_testing(config):
if 'testing' not in config:
pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
return config['testing']
@pytest.fixture(scope='module')
def config_aws(config_testing):
if 'aws' not in config_testing:
pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
return config_testing['aws']
def test_password_hash():
"""Tests that the password hashing method creates de-cryptable hash
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
"""Test that --set-superuser-hash works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Setting in a non-empty config.yaml which has no password set
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visable in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.9-dev',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
|
def test_do_validate_config(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
# Initialize with defautls
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""
def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
session = gen.build_deploy.aws.get_test_session(config_aws)
s3 = session.resource('s3')
bucket = str(uuid.uuid4())
s3_bucket = s3.Bucket(bucket)
s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})
try:
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
create_config(config_str, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
finally:
objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
s3_bucket.delete_objects(Delete={'Objects': objects})
s3_bucket.delete()
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
| workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages | identifier_body |
test_backend.py | import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
if not os.path.exists('dcos-release.config.yaml'):
|
return release.load_config('dcos-release.config.yaml')
@pytest.fixture(scope='module')
def config_testing(config):
if 'testing' not in config:
pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
return config['testing']
@pytest.fixture(scope='module')
def config_aws(config_testing):
if 'aws' not in config_testing:
pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
return config_testing['aws']
def test_password_hash():
"""Tests that the password hashing method creates de-cryptable hash
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
"""Test that --set-superuser-hash works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Setting in a non-empty config.yaml which has no password set
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visable in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.9-dev',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
# Initialize with defautls
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""
def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
session = gen.build_deploy.aws.get_test_session(config_aws)
s3 = session.resource('s3')
bucket = str(uuid.uuid4())
s3_bucket = s3.Bucket(bucket)
s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})
try:
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
create_config(config_str, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
finally:
objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
s3_bucket.delete_objects(Delete={'Objects': objects})
s3_bucket.delete()
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
| pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml") | conditional_block |
ImageEnhance.py | #
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.sgi.com/grafica/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFilter, ImageStat
class _Enhance:
def enhance(self, factor):
"""
Returns an enhanced image.
:param factor: A floating point value controlling the enhancement.
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
"""
return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
"""Adjust image color balance.
This class can be used to adjust the colour balance of an image, in
a manner similar to the controls on a colour TV set. An enhancement
factor of 0.0 gives a black and white image. A factor of 1.0 gives
the original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.convert("L").convert(image.mode)
class Contrast(_Enhance):
"""Adjust image contrast.
This class can be used to control the contrast of an image, similar
to the contrast control on a TV set. An enhancement factor of 0.0
gives a solid grey image. A factor of 1.0 gives the original image.
"""
def __init__(self, image):
self.image = image
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
class Brightness(_Enhance):
"""Adjust image brightness.
This class can be used to control the brighntess of an image. An
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = Image.new(image.mode, image.size, 0)
class | (_Enhance):
"""Adjust image sharpness.
This class can be used to adjust the sharpness of an image. An
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
original image, and a factor of 2.0 gives a sharpened image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.filter(ImageFilter.SMOOTH)
| Sharpness | identifier_name |
ImageEnhance.py | #
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.sgi.com/grafica/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFilter, ImageStat
class _Enhance:
def enhance(self, factor):
"""
Returns an enhanced image.
:param factor: A floating point value controlling the enhancement.
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
"""
return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
"""Adjust image color balance.
This class can be used to adjust the colour balance of an image, in
a manner similar to the controls on a colour TV set. An enhancement
factor of 0.0 gives a black and white image. A factor of 1.0 gives
the original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.convert("L").convert(image.mode)
class Contrast(_Enhance):
"""Adjust image contrast.
This class can be used to control the contrast of an image, similar
to the contrast control on a TV set. An enhancement factor of 0.0
gives a solid grey image. A factor of 1.0 gives the original image.
"""
def __init__(self, image):
self.image = image
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
class Brightness(_Enhance):
"""Adjust image brightness.
This class can be used to control the brighntess of an image. An
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = Image.new(image.mode, image.size, 0)
class Sharpness(_Enhance):
"""Adjust image sharpness.
This class can be used to adjust the sharpness of an image. An
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
original image, and a factor of 2.0 gives a sharpened image.
"""
def __init__(self, image):
| self.image = image
self.degenerate = image.filter(ImageFilter.SMOOTH) | identifier_body | |
ImageEnhance.py | #
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.sgi.com/grafica/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFilter, ImageStat
class _Enhance:
def enhance(self, factor):
"""
Returns an enhanced image.
:param factor: A floating point value controlling the enhancement.
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
"""
return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
"""Adjust image color balance.
This class can be used to adjust the colour balance of an image, in
a manner similar to the controls on a colour TV set. An enhancement
factor of 0.0 gives a black and white image. A factor of 1.0 gives
the original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.convert("L").convert(image.mode)
class Contrast(_Enhance):
"""Adjust image contrast.
This class can be used to control the contrast of an image, similar
to the contrast control on a TV set. An enhancement factor of 0.0
gives a solid grey image. A factor of 1.0 gives the original image.
"""
def __init__(self, image):
self.image = image
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
|
class Brightness(_Enhance):
"""Adjust image brightness.
This class can be used to control the brighntess of an image. An
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = Image.new(image.mode, image.size, 0)
class Sharpness(_Enhance):
"""Adjust image sharpness.
This class can be used to adjust the sharpness of an image. An
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
original image, and a factor of 2.0 gives a sharpened image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.filter(ImageFilter.SMOOTH) | random_line_split | |
rbawz2.py | #!/usr/bin/env python
#
# Computation of the rate-distortion function for source coding with side
# information at the decoder using the Blahut-Arimoto algorithm.
#
# Formulation similar to R.E. Blahut "Computation of Channel Capacity and
# Rate-Distortion Functions," IEEE Transactions on Information Theory, 18,
# no. 4, 1972.
#
# Author: Christophe Ramananjaona
# (c) 2005, Department of Electrical and Computer Engineering, Duke University.
# (c) 2017, Isloux, for the Python version.
from numpy import shape,sum,zeros,ones,arange,log,exp,array,longdouble,finfo
from sys import float_info
from os.path import isfile
from sys import argv
#from code_generator0 import code_generator
from code_generator import code_generator
def distortion_measure(n):
# Hamming distance
D=ones((n,n),dtype='longdouble')
for i in range(n):
D[i][i]=0.0
return(D)
def blahut_arimoto(q):
nx,ny=shape(q)
qx=[]
for i in range(nx):
qx.append(longdouble(sum(q[i,:])))
qy=[]
for j in range(ny):
qy.append(longdouble(sum(q[:,j])))
nz=nx
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The array t contains all the possible codes that map Y into Z
nt=nx+1
t,nt=code_generator(nz,ny)
# If nx!=nz there is a problem
D=distortion_measure(max(nx,ny))
npoints=100
ds=arange(-10.0,0.0,0.1)
c=zeros((nx,nt),dtype='longdouble')
vtx=zeros((nt,nx),dtype='longdouble')
sexp=zeros(nt,dtype='longdouble')
#epsilon=finfo(longdouble(1.0)).eps
epsilon=1.0e-7
for s in range(npoints):
qty=ones((nt,ny),dtype='longdouble')
qty=qty/nt/ny
# Initialise stop test
stop=longdouble(1.0e5)
n=0
while stop>epsilon:
n=n+1
for i in range(nx):
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssum=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssum+=q[i][j]*qxid*log(qty[k][j])
else:
ssum+=qy[j]*log(qty[k][j])
c[i][k]=exp(ssum)
for i in range(nx):
sexp=zeros(nt,dtype='longdouble')
sd=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
for j in range(ny):
if qx[i]!=0.0:
sexp[k]+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
sexp[k]+=qy[j]*D[i,t[2][k,j]]
sd+=c[i][k]*exp(ds[s]*sexp[k])
sd=longdouble(1.0)/sd
for k in range(nt):
vtx[k][i]=c[i][k]*exp(ds[s]*sexp[k])*sd
qtym=qty
qty=zeros((nt,ny),dtype='longdouble')
stop=longdouble(0.0)
for j in range(ny):
qyjd=longdouble(1.0)/qy[j]
for k in range(nt):
for i in range(nx):
qty[k][j]+=q[i][j]*qyjd*vtx[k][i]
stop+=qy[j]*qty[k][j]*log(qty[k][j]/qtym[k][j])
ssum=longdouble(0.0)
dv=longdouble(0.0)
for i in range(nx):
ssum2=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssexp=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssexp+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
ssexp+=qy[j]*D[i,t[2][k,j]]
dv+=q[i][j]*vtx[k][i]*D[i,t[2][k,j]]
ssum2+=c[i][k]*exp(ds[s]*ssexp)
ssum+=qx[i]*log(ssum2)
R=ds[s]*dv-ssum
print dv,R,n
def readinputfile(inputfile):
a=[ line.split() for line in file(inputfile) ]
nx=len(a) # Number of lines
ny=len(a[0]) # Number of columns
q=zeros((nx,ny),dtype='longdouble')
for i in range(nx):
for j in range(ny):
q[i][j]=a[i][j]
return(q)
def main(inputfile="q.txt"):
|
if __name__=="__main__":
if len(argv)>1:
main(argv[1])
else:
main()
| if isfile(inputfile):
q=readinputfile(inputfile)
else:
nx=2
ny=2
q=array([[0.3,0.2],[0.24,0.26]],dtype='longdouble')
blahut_arimoto(q) | identifier_body |
rbawz2.py | #!/usr/bin/env python
#
# Computation of the rate-distortion function for source coding with side
# information at the decoder using the Blahut-Arimoto algorithm.
#
# Formulation similar to R.E. Blahut "Computation of Channel Capacity and
# Rate-Distortion Functions," IEEE Transactions on Information Theory, 18,
# no. 4, 1972.
#
# Author: Christophe Ramananjaona
# (c) 2005, Department of Electrical and Computer Engineering, Duke University.
# (c) 2017, Isloux, for the Python version.
from numpy import shape,sum,zeros,ones,arange,log,exp,array,longdouble,finfo
from sys import float_info
from os.path import isfile
from sys import argv
#from code_generator0 import code_generator
from code_generator import code_generator
def distortion_measure(n):
# Hamming distance
D=ones((n,n),dtype='longdouble')
for i in range(n):
D[i][i]=0.0
return(D)
def blahut_arimoto(q):
nx,ny=shape(q)
qx=[]
for i in range(nx):
qx.append(longdouble(sum(q[i,:])))
qy=[]
for j in range(ny):
qy.append(longdouble(sum(q[:,j])))
nz=nx
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The array t contains all the possible codes that map Y into Z
nt=nx+1
t,nt=code_generator(nz,ny)
# If nx!=nz there is a problem
D=distortion_measure(max(nx,ny))
npoints=100
ds=arange(-10.0,0.0,0.1)
c=zeros((nx,nt),dtype='longdouble')
vtx=zeros((nt,nx),dtype='longdouble')
sexp=zeros(nt,dtype='longdouble')
#epsilon=finfo(longdouble(1.0)).eps
epsilon=1.0e-7
for s in range(npoints):
qty=ones((nt,ny),dtype='longdouble')
qty=qty/nt/ny
# Initialise stop test
stop=longdouble(1.0e5)
n=0
while stop>epsilon:
n=n+1
for i in range(nx):
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssum=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssum+=q[i][j]*qxid*log(qty[k][j])
else:
ssum+=qy[j]*log(qty[k][j])
c[i][k]=exp(ssum)
for i in range(nx):
sexp=zeros(nt,dtype='longdouble')
sd=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
for j in range(ny):
if qx[i]!=0.0:
sexp[k]+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
sexp[k]+=qy[j]*D[i,t[2][k,j]]
sd+=c[i][k]*exp(ds[s]*sexp[k])
sd=longdouble(1.0)/sd
for k in range(nt):
vtx[k][i]=c[i][k]*exp(ds[s]*sexp[k])*sd
qtym=qty
qty=zeros((nt,ny),dtype='longdouble')
stop=longdouble(0.0)
for j in range(ny):
qyjd=longdouble(1.0)/qy[j]
for k in range(nt):
for i in range(nx):
qty[k][j]+=q[i][j]*qyjd*vtx[k][i]
stop+=qy[j]*qty[k][j]*log(qty[k][j]/qtym[k][j])
ssum=longdouble(0.0)
dv=longdouble(0.0)
for i in range(nx):
ssum2=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssexp=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssexp+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
ssexp+=qy[j]*D[i,t[2][k,j]]
dv+=q[i][j]*vtx[k][i]*D[i,t[2][k,j]]
ssum2+=c[i][k]*exp(ds[s]*ssexp)
ssum+=qx[i]*log(ssum2)
R=ds[s]*dv-ssum
print dv,R,n
def | (inputfile):
a=[ line.split() for line in file(inputfile) ]
nx=len(a) # Number of lines
ny=len(a[0]) # Number of columns
q=zeros((nx,ny),dtype='longdouble')
for i in range(nx):
for j in range(ny):
q[i][j]=a[i][j]
return(q)
def main(inputfile="q.txt"):
if isfile(inputfile):
q=readinputfile(inputfile)
else:
nx=2
ny=2
q=array([[0.3,0.2],[0.24,0.26]],dtype='longdouble')
blahut_arimoto(q)
if __name__=="__main__":
if len(argv)>1:
main(argv[1])
else:
main()
| readinputfile | identifier_name |
rbawz2.py | #!/usr/bin/env python
#
# Computation of the rate-distortion function for source coding with side
# information at the decoder using the Blahut-Arimoto algorithm.
#
# Formulation similar to R.E. Blahut "Computation of Channel Capacity and
# Rate-Distortion Functions," IEEE Transactions on Information Theory, 18,
# no. 4, 1972.
#
# Author: Christophe Ramananjaona
# (c) 2005, Department of Electrical and Computer Engineering, Duke University.
# (c) 2017, Isloux, for the Python version.
from numpy import shape,sum,zeros,ones,arange,log,exp,array,longdouble,finfo
from sys import float_info
from os.path import isfile
from sys import argv
#from code_generator0 import code_generator
from code_generator import code_generator
def distortion_measure(n):
# Hamming distance
D=ones((n,n),dtype='longdouble')
for i in range(n):
D[i][i]=0.0
return(D)
| def blahut_arimoto(q):
nx,ny=shape(q)
qx=[]
for i in range(nx):
qx.append(longdouble(sum(q[i,:])))
qy=[]
for j in range(ny):
qy.append(longdouble(sum(q[:,j])))
nz=nx
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The array t contains all the possible codes that map Y into Z
nt=nx+1
t,nt=code_generator(nz,ny)
# If nx!=nz there is a problem
D=distortion_measure(max(nx,ny))
npoints=100
ds=arange(-10.0,0.0,0.1)
c=zeros((nx,nt),dtype='longdouble')
vtx=zeros((nt,nx),dtype='longdouble')
sexp=zeros(nt,dtype='longdouble')
#epsilon=finfo(longdouble(1.0)).eps
epsilon=1.0e-7
for s in range(npoints):
qty=ones((nt,ny),dtype='longdouble')
qty=qty/nt/ny
# Initialise stop test
stop=longdouble(1.0e5)
n=0
while stop>epsilon:
n=n+1
for i in range(nx):
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssum=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssum+=q[i][j]*qxid*log(qty[k][j])
else:
ssum+=qy[j]*log(qty[k][j])
c[i][k]=exp(ssum)
for i in range(nx):
sexp=zeros(nt,dtype='longdouble')
sd=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
for j in range(ny):
if qx[i]!=0.0:
sexp[k]+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
sexp[k]+=qy[j]*D[i,t[2][k,j]]
sd+=c[i][k]*exp(ds[s]*sexp[k])
sd=longdouble(1.0)/sd
for k in range(nt):
vtx[k][i]=c[i][k]*exp(ds[s]*sexp[k])*sd
qtym=qty
qty=zeros((nt,ny),dtype='longdouble')
stop=longdouble(0.0)
for j in range(ny):
qyjd=longdouble(1.0)/qy[j]
for k in range(nt):
for i in range(nx):
qty[k][j]+=q[i][j]*qyjd*vtx[k][i]
stop+=qy[j]*qty[k][j]*log(qty[k][j]/qtym[k][j])
ssum=longdouble(0.0)
dv=longdouble(0.0)
for i in range(nx):
ssum2=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssexp=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssexp+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
ssexp+=qy[j]*D[i,t[2][k,j]]
dv+=q[i][j]*vtx[k][i]*D[i,t[2][k,j]]
ssum2+=c[i][k]*exp(ds[s]*ssexp)
ssum+=qx[i]*log(ssum2)
R=ds[s]*dv-ssum
print dv,R,n
def readinputfile(inputfile):
a=[ line.split() for line in file(inputfile) ]
nx=len(a) # Number of lines
ny=len(a[0]) # Number of columns
q=zeros((nx,ny),dtype='longdouble')
for i in range(nx):
for j in range(ny):
q[i][j]=a[i][j]
return(q)
def main(inputfile="q.txt"):
if isfile(inputfile):
q=readinputfile(inputfile)
else:
nx=2
ny=2
q=array([[0.3,0.2],[0.24,0.26]],dtype='longdouble')
blahut_arimoto(q)
if __name__=="__main__":
if len(argv)>1:
main(argv[1])
else:
main() | random_line_split | |
rbawz2.py | #!/usr/bin/env python
#
# Computation of the rate-distortion function for source coding with side
# information at the decoder using the Blahut-Arimoto algorithm.
#
# Formulation similar to R.E. Blahut "Computation of Channel Capacity and
# Rate-Distortion Functions," IEEE Transactions on Information Theory, 18,
# no. 4, 1972.
#
# Author: Christophe Ramananjaona
# (c) 2005, Department of Electrical and Computer Engineering, Duke University.
# (c) 2017, Isloux, for the Python version.
from numpy import shape,sum,zeros,ones,arange,log,exp,array,longdouble,finfo
from sys import float_info
from os.path import isfile
from sys import argv
#from code_generator0 import code_generator
from code_generator import code_generator
def distortion_measure(n):
# Hamming distance
D=ones((n,n),dtype='longdouble')
for i in range(n):
D[i][i]=0.0
return(D)
def blahut_arimoto(q):
nx,ny=shape(q)
qx=[]
for i in range(nx):
qx.append(longdouble(sum(q[i,:])))
qy=[]
for j in range(ny):
qy.append(longdouble(sum(q[:,j])))
nz=nx
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The array t contains all the possible codes that map Y into Z
nt=nx+1
t,nt=code_generator(nz,ny)
# If nx!=nz there is a problem
D=distortion_measure(max(nx,ny))
npoints=100
ds=arange(-10.0,0.0,0.1)
c=zeros((nx,nt),dtype='longdouble')
vtx=zeros((nt,nx),dtype='longdouble')
sexp=zeros(nt,dtype='longdouble')
#epsilon=finfo(longdouble(1.0)).eps
epsilon=1.0e-7
for s in range(npoints):
qty=ones((nt,ny),dtype='longdouble')
qty=qty/nt/ny
# Initialise stop test
stop=longdouble(1.0e5)
n=0
while stop>epsilon:
n=n+1
for i in range(nx):
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssum=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssum+=q[i][j]*qxid*log(qty[k][j])
else:
ssum+=qy[j]*log(qty[k][j])
c[i][k]=exp(ssum)
for i in range(nx):
sexp=zeros(nt,dtype='longdouble')
sd=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
for j in range(ny):
if qx[i]!=0.0:
sexp[k]+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
sexp[k]+=qy[j]*D[i,t[2][k,j]]
sd+=c[i][k]*exp(ds[s]*sexp[k])
sd=longdouble(1.0)/sd
for k in range(nt):
vtx[k][i]=c[i][k]*exp(ds[s]*sexp[k])*sd
qtym=qty
qty=zeros((nt,ny),dtype='longdouble')
stop=longdouble(0.0)
for j in range(ny):
qyjd=longdouble(1.0)/qy[j]
for k in range(nt):
for i in range(nx):
qty[k][j]+=q[i][j]*qyjd*vtx[k][i]
stop+=qy[j]*qty[k][j]*log(qty[k][j]/qtym[k][j])
ssum=longdouble(0.0)
dv=longdouble(0.0)
for i in range(nx):
ssum2=longdouble(0.0)
if qx[i]!=0.0:
qxid=longdouble(1.0)/qx[i]
for k in range(nt):
ssexp=longdouble(0.0)
for j in range(ny):
if qx[i]!=0.0:
ssexp+=q[i][j]*qxid*D[i,t[2][k,j]]
else:
ssexp+=qy[j]*D[i,t[2][k,j]]
dv+=q[i][j]*vtx[k][i]*D[i,t[2][k,j]]
ssum2+=c[i][k]*exp(ds[s]*ssexp)
ssum+=qx[i]*log(ssum2)
R=ds[s]*dv-ssum
print dv,R,n
def readinputfile(inputfile):
a=[ line.split() for line in file(inputfile) ]
nx=len(a) # Number of lines
ny=len(a[0]) # Number of columns
q=zeros((nx,ny),dtype='longdouble')
for i in range(nx):
for j in range(ny):
q[i][j]=a[i][j]
return(q)
def main(inputfile="q.txt"):
if isfile(inputfile):
q=readinputfile(inputfile)
else:
nx=2
ny=2
q=array([[0.3,0.2],[0.24,0.26]],dtype='longdouble')
blahut_arimoto(q)
if __name__=="__main__":
| if len(argv)>1:
main(argv[1])
else:
main() | conditional_block | |
URITemplate.js | /*!
* URI.js - Mutating URLs
* URI Template Support - http://tools.ietf.org/html/rfc6570
*
* Version: 1.16.0
*
* Author: Rodney Rehm
* Web: http://medialize.github.io/URI.js/
*
* Licensed under
* MIT License http://www.opensource.org/licenses/mit-license
* GPL v3 http://opensource.org/licenses/GPL-3.0
*
*/
(function (root, factory) {
'use strict';
// https://github.com/umdjs/umd/blob/master/returnExports.js
if (typeof exports === 'object') {
// Node
module.exports = factory(require('./URI'));
} else if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define(['./URI'], factory);
} else {
// Browser globals (root is window)
root.URITemplate = factory(root.URI, root);
}
}(this, function (URI, root) {
'use strict';
// FIXME: v2.0.0 renamce non-camelCase properties to uppercase
/*jshint camelcase: false */
// save current URITemplate variable, if any
var _URITemplate = root && root.URITemplate;
var hasOwn = Object.prototype.hasOwnProperty;
function URITemplate(expression) {
// serve from cache where possible
if (URITemplate._cache[expression]) {
return URITemplate._cache[expression];
}
// Allow instantiation without the 'new' keyword
if (!(this instanceof URITemplate)) {
return new URITemplate(expression);
}
this.expression = expression;
URITemplate._cache[expression] = this;
return this;
}
function Data(data) {
this.data = data;
this.cache = {};
}
var p = URITemplate.prototype;
// list of operators and their defined options
var operators = {
// Simple string expansion
'' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Reserved character strings
'+' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Fragment identifiers prefixed by '#'
'#' : {
prefix: '#',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Name labels or extensions prefixed by '.'
'.' : {
prefix: '.',
separator: '.',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path segments prefixed by '/'
'/' : {
prefix: '/',
separator: '/',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path parameter name or name=value pairs prefixed by ';'
';' : {
prefix: ';',
separator: ';',
named: true,
empty_name_separator: false,
encode : 'encode'
},
// Query component beginning with '?' and consisting
// of name=value pairs separated by '&'; an
'?' : {
prefix: '?',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
},
// Continuation of query-style &name=value pairs
// within a literal query component.
'&' : {
prefix: '&',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
}
// The operator characters equals ("="), comma (","), exclamation ("!"),
// at sign ("@"), and pipe ("|") are reserved for future extensions.
};
// storage for already parsed templates
URITemplate._cache = {};
// pattern to identify expressions [operator, variable-list] in template
URITemplate.EXPRESSION_PATTERN = /\{([^a-zA-Z0-9%_]?)([^\}]+)(\}|$)/g;
// pattern to identify variables [name, explode, maxlength] in variable-list
URITemplate.VARIABLE_PATTERN = /^([^*:]+)((\*)|:(\d+))?$/;
// pattern to verify variable name integrity
URITemplate.VARIABLE_NAME_PATTERN = /[^a-zA-Z0-9%_]/;
// expand parsed expression (expression, not template!)
URITemplate.expand = function(expression, data) {
// container for defined options for the given operator
var options = operators[expression.operator];
// expansion type (include keys or not)
var type = options.named ? 'Named' : 'Unnamed';
// list of variables within the expression
var variables = expression.variables;
// result buffer for evaluating the expression
var buffer = [];
var d, variable, i;
for (i = 0; (variable = variables[i]); i++) {
// fetch simplified data source
d = data.get(variable.name);
if (!d.val.length) {
if (d.type) {
// empty variables (empty string)
// still lead to a separator being appended!
buffer.push('');
}
// no data, no action
continue;
}
// expand the given variable
buffer.push(URITemplate['expand' + type](
d,
options,
variable.explode,
variable.explode && options.separator || ',',
variable.maxlength,
variable.name
));
}
if (buffer.length) {
return options.prefix + buffer.join(options.separator);
} else {
// prefix is not prepended for empty expressions
return '';
}
};
// expand a named variable
URITemplate.expandNamed = function(d, options, explode, separator, length, name) {
// variable result buffer
var result = '';
// peformance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
// key for named expansion
var _name = d.type === 2 ? '': URI[encode](name);
var _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
if (d.type === 2) {
// apply maxlength to keys of objects as well
_name = URI[encode](d.val[i][0].substring(0, length));
}
} else if (_encode) {
// encode value
_value = URI[encode](d.val[i][1]);
if (d.type === 2) {
// encode name and cache encoded value
_name = URI[encode](d.val[i][0]);
d[encode].push([_name, _value]);
} else {
// cache encoded value
d[encode].push([undefined, _value]);
}
} else {
// values are already encoded and can be pulled from cache
_value = d[encode][i][1];
if (d.type === 2) {
_name = d[encode][i][0];
}
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (!explode) {
if (!i) |
if (d.type === 2) {
// without explode-modifier, keys of objects are returned comma-separated
result += _name + ',';
}
result += _value;
} else {
// only add the = if it is either default (?&) or there actually is a value (;)
result += _name + (empty_name_separator || _value ? '=' : '') + _value;
}
}
return result;
};
// expand an unnamed variable
URITemplate.expandUnnamed = function(d, options, explode, separator, length) {
// variable result buffer
var result = '';
// performance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
var _name, _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
} else if (_encode) {
// encode and cache value
_value = URI[encode](d.val[i][1]);
d[encode].push([
d.type === 2 ? URI[encode](d.val[i][0]) : undefined,
_value
]);
} else {
// value already encoded, pull from cache
_value = d[encode][i][1];
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (d.type === 2) {
if (length) {
// maxlength also applies to keys of objects
_name = URI[encode](d.val[i][0].substring(0, length));
} else {
// at this point the name must already be encoded
_name = d[encode][i][0];
}
result += _name;
if (explode) {
// explode-modifier separates name and value by "="
result += (empty_name_separator || _value ? '=' : '');
} else {
// no explode-modifier separates name and value by ","
result += ',';
}
}
result += _value;
}
return result;
};
URITemplate.noConflict = function() {
if (root.URITemplate === URITemplate) {
root.URITemplate = _URITemplate;
}
return URITemplate;
};
// expand template through given data map
p.expand = function(data) {
var result = '';
if (!this.parts || !this.parts.length) {
// lazilyy parse the template
this.parse();
}
if (!(data instanceof Data)) {
// make given data available through the
// optimized data handling thingie
data = new Data(data);
}
for (var i = 0, l = this.parts.length; i < l; i++) {
/*jshint laxbreak: true */
result += typeof this.parts[i] === 'string'
// literal string
? this.parts[i]
// expression
: URITemplate.expand(this.parts[i], data);
/*jshint laxbreak: false */
}
return result;
};
// parse template into action tokens
p.parse = function() {
// performance crap
var expression = this.expression;
var ePattern = URITemplate.EXPRESSION_PATTERN;
var vPattern = URITemplate.VARIABLE_PATTERN;
var nPattern = URITemplate.VARIABLE_NAME_PATTERN;
// token result buffer
var parts = [];
// position within source template
var pos = 0;
var variables, eMatch, vMatch;
// RegExp is shared accross all templates,
// which requires a manual reset
ePattern.lastIndex = 0;
// I don't like while(foo = bar()) loops,
// to make things simpler I go while(true) and break when required
while (true) {
eMatch = ePattern.exec(expression);
if (eMatch === null) {
// push trailing literal
parts.push(expression.substring(pos));
break;
} else {
// push leading literal
parts.push(expression.substring(pos, eMatch.index));
pos = eMatch.index + eMatch[0].length;
}
if (!operators[eMatch[1]]) {
throw new Error('Unknown Operator "' + eMatch[1] + '" in "' + eMatch[0] + '"');
} else if (!eMatch[3]) {
throw new Error('Unclosed Expression "' + eMatch[0] + '"');
}
// parse variable-list
variables = eMatch[2].split(',');
for (var i = 0, l = variables.length; i < l; i++) {
vMatch = variables[i].match(vPattern);
if (vMatch === null) {
throw new Error('Invalid Variable "' + variables[i] + '" in "' + eMatch[0] + '"');
} else if (vMatch[1].match(nPattern)) {
throw new Error('Invalid Variable Name "' + vMatch[1] + '" in "' + eMatch[0] + '"');
}
variables[i] = {
name: vMatch[1],
explode: !!vMatch[3],
maxlength: vMatch[4] && parseInt(vMatch[4], 10)
};
}
if (!variables.length) {
throw new Error('Expression Missing Variable(s) "' + eMatch[0] + '"');
}
parts.push({
expression: eMatch[0],
operator: eMatch[1],
variables: variables
});
}
if (!parts.length) {
// template doesn't contain any expressions
// so it is a simple literal string
// this probably should fire a warning or something?
parts.push(expression);
}
this.parts = parts;
return this;
};
// simplify data structures
Data.prototype.get = function(key) {
// performance crap
var data = this.data;
// cache for processed data-point
var d = {
// type of data 0: undefined/null, 1: string, 2: object, 3: array
type: 0,
// original values (except undefined/null)
val: [],
// cache for encoded values (only for non-maxlength expansion)
encode: [],
encodeReserved: []
};
var i, l, value;
if (this.cache[key] !== undefined) {
// we've already processed this key
return this.cache[key];
}
this.cache[key] = d;
if (String(Object.prototype.toString.call(data)) === '[object Function]') {
// data itself is a callback (global callback)
value = data(key);
} else if (String(Object.prototype.toString.call(data[key])) === '[object Function]') {
// data is a map of callbacks (local callback)
value = data[key](key);
} else {
// data is a map of data
value = data[key];
}
// generalize input into [ [name1, value1], [name2, value2], … ]
// so expansion has to deal with a single data structure only
if (value === undefined || value === null) {
// undefined and null values are to be ignored completely
return d;
} else if (String(Object.prototype.toString.call(value)) === '[object Array]') {
for (i = 0, l = value.length; i < l; i++) {
if (value[i] !== undefined && value[i] !== null) {
// arrays don't have names
d.val.push([undefined, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty arrays as arrays
d.type = 3; // array
}
} else if (String(Object.prototype.toString.call(value)) === '[object Object]') {
for (i in value) {
if (hasOwn.call(value, i) && value[i] !== undefined && value[i] !== null) {
// objects have keys, remember them for named expansion
d.val.push([i, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty objects as objects
d.type = 2; // object
}
} else {
d.type = 1; // primitive string (could've been string, number, boolean and objects with a toString())
// arrays don't have names
d.val.push([undefined, String(value)]);
}
return d;
};
// hook into URI for fluid access
URI.expand = function(expression, data) {
var template = new URITemplate(expression);
var expansion = template.expand(data);
return new URI(expansion);
};
return URITemplate;
}));
| {
// first element, so prepend variable name
result += URI[encode](name) + (empty_name_separator || _value ? '=' : '');
} | conditional_block |
URITemplate.js | /*!
* URI.js - Mutating URLs
* URI Template Support - http://tools.ietf.org/html/rfc6570
*
* Version: 1.16.0
*
* Author: Rodney Rehm
* Web: http://medialize.github.io/URI.js/
*
* Licensed under
* MIT License http://www.opensource.org/licenses/mit-license
* GPL v3 http://opensource.org/licenses/GPL-3.0
*
*/
(function (root, factory) {
'use strict';
// https://github.com/umdjs/umd/blob/master/returnExports.js
if (typeof exports === 'object') {
// Node
module.exports = factory(require('./URI'));
} else if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define(['./URI'], factory);
} else {
// Browser globals (root is window)
root.URITemplate = factory(root.URI, root);
}
}(this, function (URI, root) {
'use strict';
// FIXME: v2.0.0 renamce non-camelCase properties to uppercase
/*jshint camelcase: false */
// save current URITemplate variable, if any
var _URITemplate = root && root.URITemplate;
var hasOwn = Object.prototype.hasOwnProperty;
function URITemplate(expression) {
// serve from cache where possible
if (URITemplate._cache[expression]) {
return URITemplate._cache[expression];
}
// Allow instantiation without the 'new' keyword
if (!(this instanceof URITemplate)) {
return new URITemplate(expression);
}
this.expression = expression;
URITemplate._cache[expression] = this;
return this;
}
function Data(data) {
this.data = data;
this.cache = {};
}
var p = URITemplate.prototype;
// list of operators and their defined options
var operators = {
// Simple string expansion
'' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Reserved character strings
'+' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Fragment identifiers prefixed by '#'
'#' : {
prefix: '#',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Name labels or extensions prefixed by '.'
'.' : {
prefix: '.',
separator: '.',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path segments prefixed by '/'
'/' : {
prefix: '/',
separator: '/',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path parameter name or name=value pairs prefixed by ';'
';' : {
prefix: ';',
separator: ';',
named: true,
empty_name_separator: false,
encode : 'encode'
},
// Query component beginning with '?' and consisting
// of name=value pairs separated by '&'; an
'?' : {
prefix: '?',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
},
// Continuation of query-style &name=value pairs
// within a literal query component.
'&' : {
prefix: '&',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
}
// The operator characters equals ("="), comma (","), exclamation ("!"),
// at sign ("@"), and pipe ("|") are reserved for future extensions.
};
// storage for already parsed templates
URITemplate._cache = {};
// pattern to identify expressions [operator, variable-list] in template
URITemplate.EXPRESSION_PATTERN = /\{([^a-zA-Z0-9%_]?)([^\}]+)(\}|$)/g;
// pattern to identify variables [name, explode, maxlength] in variable-list
URITemplate.VARIABLE_PATTERN = /^([^*:]+)((\*)|:(\d+))?$/;
// pattern to verify variable name integrity
URITemplate.VARIABLE_NAME_PATTERN = /[^a-zA-Z0-9%_]/;
// expand parsed expression (expression, not template!)
URITemplate.expand = function(expression, data) {
// container for defined options for the given operator
var options = operators[expression.operator];
// expansion type (include keys or not)
var type = options.named ? 'Named' : 'Unnamed';
// list of variables within the expression
var variables = expression.variables;
// result buffer for evaluating the expression
var buffer = [];
var d, variable, i;
for (i = 0; (variable = variables[i]); i++) {
// fetch simplified data source
d = data.get(variable.name);
if (!d.val.length) {
if (d.type) {
// empty variables (empty string)
// still lead to a separator being appended!
buffer.push('');
}
// no data, no action
continue;
}
// expand the given variable
buffer.push(URITemplate['expand' + type](
d,
options,
variable.explode,
variable.explode && options.separator || ',',
variable.maxlength,
variable.name
));
}
if (buffer.length) {
return options.prefix + buffer.join(options.separator);
} else {
// prefix is not prepended for empty expressions
return '';
}
};
// expand a named variable
URITemplate.expandNamed = function(d, options, explode, separator, length, name) {
// variable result buffer
var result = '';
// peformance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
// key for named expansion
var _name = d.type === 2 ? '': URI[encode](name);
var _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
if (d.type === 2) {
// apply maxlength to keys of objects as well
_name = URI[encode](d.val[i][0].substring(0, length));
}
} else if (_encode) {
// encode value
_value = URI[encode](d.val[i][1]);
if (d.type === 2) {
// encode name and cache encoded value
_name = URI[encode](d.val[i][0]);
d[encode].push([_name, _value]);
} else {
// cache encoded value
d[encode].push([undefined, _value]);
}
} else {
// values are already encoded and can be pulled from cache
_value = d[encode][i][1];
if (d.type === 2) {
_name = d[encode][i][0];
}
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (!explode) {
if (!i) {
// first element, so prepend variable name
result += URI[encode](name) + (empty_name_separator || _value ? '=' : '');
}
if (d.type === 2) {
// without explode-modifier, keys of objects are returned comma-separated
result += _name + ','; | // only add the = if it is either default (?&) or there actually is a value (;)
result += _name + (empty_name_separator || _value ? '=' : '') + _value;
}
}
return result;
};
// expand an unnamed variable
URITemplate.expandUnnamed = function(d, options, explode, separator, length) {
// variable result buffer
var result = '';
// performance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
var _name, _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
} else if (_encode) {
// encode and cache value
_value = URI[encode](d.val[i][1]);
d[encode].push([
d.type === 2 ? URI[encode](d.val[i][0]) : undefined,
_value
]);
} else {
// value already encoded, pull from cache
_value = d[encode][i][1];
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (d.type === 2) {
if (length) {
// maxlength also applies to keys of objects
_name = URI[encode](d.val[i][0].substring(0, length));
} else {
// at this point the name must already be encoded
_name = d[encode][i][0];
}
result += _name;
if (explode) {
// explode-modifier separates name and value by "="
result += (empty_name_separator || _value ? '=' : '');
} else {
// no explode-modifier separates name and value by ","
result += ',';
}
}
result += _value;
}
return result;
};
URITemplate.noConflict = function() {
if (root.URITemplate === URITemplate) {
root.URITemplate = _URITemplate;
}
return URITemplate;
};
// expand template through given data map
p.expand = function(data) {
var result = '';
if (!this.parts || !this.parts.length) {
// lazilyy parse the template
this.parse();
}
if (!(data instanceof Data)) {
// make given data available through the
// optimized data handling thingie
data = new Data(data);
}
for (var i = 0, l = this.parts.length; i < l; i++) {
/*jshint laxbreak: true */
result += typeof this.parts[i] === 'string'
// literal string
? this.parts[i]
// expression
: URITemplate.expand(this.parts[i], data);
/*jshint laxbreak: false */
}
return result;
};
// parse template into action tokens
p.parse = function() {
// performance crap
var expression = this.expression;
var ePattern = URITemplate.EXPRESSION_PATTERN;
var vPattern = URITemplate.VARIABLE_PATTERN;
var nPattern = URITemplate.VARIABLE_NAME_PATTERN;
// token result buffer
var parts = [];
// position within source template
var pos = 0;
var variables, eMatch, vMatch;
// RegExp is shared accross all templates,
// which requires a manual reset
ePattern.lastIndex = 0;
// I don't like while(foo = bar()) loops,
// to make things simpler I go while(true) and break when required
while (true) {
eMatch = ePattern.exec(expression);
if (eMatch === null) {
// push trailing literal
parts.push(expression.substring(pos));
break;
} else {
// push leading literal
parts.push(expression.substring(pos, eMatch.index));
pos = eMatch.index + eMatch[0].length;
}
if (!operators[eMatch[1]]) {
throw new Error('Unknown Operator "' + eMatch[1] + '" in "' + eMatch[0] + '"');
} else if (!eMatch[3]) {
throw new Error('Unclosed Expression "' + eMatch[0] + '"');
}
// parse variable-list
variables = eMatch[2].split(',');
for (var i = 0, l = variables.length; i < l; i++) {
vMatch = variables[i].match(vPattern);
if (vMatch === null) {
throw new Error('Invalid Variable "' + variables[i] + '" in "' + eMatch[0] + '"');
} else if (vMatch[1].match(nPattern)) {
throw new Error('Invalid Variable Name "' + vMatch[1] + '" in "' + eMatch[0] + '"');
}
variables[i] = {
name: vMatch[1],
explode: !!vMatch[3],
maxlength: vMatch[4] && parseInt(vMatch[4], 10)
};
}
if (!variables.length) {
throw new Error('Expression Missing Variable(s) "' + eMatch[0] + '"');
}
parts.push({
expression: eMatch[0],
operator: eMatch[1],
variables: variables
});
}
if (!parts.length) {
// template doesn't contain any expressions
// so it is a simple literal string
// this probably should fire a warning or something?
parts.push(expression);
}
this.parts = parts;
return this;
};
// simplify data structures
Data.prototype.get = function(key) {
// performance crap
var data = this.data;
// cache for processed data-point
var d = {
// type of data 0: undefined/null, 1: string, 2: object, 3: array
type: 0,
// original values (except undefined/null)
val: [],
// cache for encoded values (only for non-maxlength expansion)
encode: [],
encodeReserved: []
};
var i, l, value;
if (this.cache[key] !== undefined) {
// we've already processed this key
return this.cache[key];
}
this.cache[key] = d;
if (String(Object.prototype.toString.call(data)) === '[object Function]') {
// data itself is a callback (global callback)
value = data(key);
} else if (String(Object.prototype.toString.call(data[key])) === '[object Function]') {
// data is a map of callbacks (local callback)
value = data[key](key);
} else {
// data is a map of data
value = data[key];
}
// generalize input into [ [name1, value1], [name2, value2], … ]
// so expansion has to deal with a single data structure only
if (value === undefined || value === null) {
// undefined and null values are to be ignored completely
return d;
} else if (String(Object.prototype.toString.call(value)) === '[object Array]') {
for (i = 0, l = value.length; i < l; i++) {
if (value[i] !== undefined && value[i] !== null) {
// arrays don't have names
d.val.push([undefined, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty arrays as arrays
d.type = 3; // array
}
} else if (String(Object.prototype.toString.call(value)) === '[object Object]') {
for (i in value) {
if (hasOwn.call(value, i) && value[i] !== undefined && value[i] !== null) {
// objects have keys, remember them for named expansion
d.val.push([i, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty objects as objects
d.type = 2; // object
}
} else {
d.type = 1; // primitive string (could've been string, number, boolean and objects with a toString())
// arrays don't have names
d.val.push([undefined, String(value)]);
}
return d;
};
// hook into URI for fluid access
URI.expand = function(expression, data) {
var template = new URITemplate(expression);
var expansion = template.expand(data);
return new URI(expansion);
};
return URITemplate;
})); | }
result += _value;
} else { | random_line_split |
URITemplate.js | /*!
* URI.js - Mutating URLs
* URI Template Support - http://tools.ietf.org/html/rfc6570
*
* Version: 1.16.0
*
* Author: Rodney Rehm
* Web: http://medialize.github.io/URI.js/
*
* Licensed under
* MIT License http://www.opensource.org/licenses/mit-license
* GPL v3 http://opensource.org/licenses/GPL-3.0
*
*/
(function (root, factory) {
'use strict';
// https://github.com/umdjs/umd/blob/master/returnExports.js
if (typeof exports === 'object') {
// Node
module.exports = factory(require('./URI'));
} else if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define(['./URI'], factory);
} else {
// Browser globals (root is window)
root.URITemplate = factory(root.URI, root);
}
}(this, function (URI, root) {
'use strict';
// FIXME: v2.0.0 renamce non-camelCase properties to uppercase
/*jshint camelcase: false */
// save current URITemplate variable, if any
var _URITemplate = root && root.URITemplate;
var hasOwn = Object.prototype.hasOwnProperty;
function | (expression) {
// serve from cache where possible
if (URITemplate._cache[expression]) {
return URITemplate._cache[expression];
}
// Allow instantiation without the 'new' keyword
if (!(this instanceof URITemplate)) {
return new URITemplate(expression);
}
this.expression = expression;
URITemplate._cache[expression] = this;
return this;
}
function Data(data) {
this.data = data;
this.cache = {};
}
var p = URITemplate.prototype;
// list of operators and their defined options
var operators = {
// Simple string expansion
'' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Reserved character strings
'+' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Fragment identifiers prefixed by '#'
'#' : {
prefix: '#',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Name labels or extensions prefixed by '.'
'.' : {
prefix: '.',
separator: '.',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path segments prefixed by '/'
'/' : {
prefix: '/',
separator: '/',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path parameter name or name=value pairs prefixed by ';'
';' : {
prefix: ';',
separator: ';',
named: true,
empty_name_separator: false,
encode : 'encode'
},
// Query component beginning with '?' and consisting
// of name=value pairs separated by '&'; an
'?' : {
prefix: '?',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
},
// Continuation of query-style &name=value pairs
// within a literal query component.
'&' : {
prefix: '&',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
}
// The operator characters equals ("="), comma (","), exclamation ("!"),
// at sign ("@"), and pipe ("|") are reserved for future extensions.
};
// storage for already parsed templates
URITemplate._cache = {};
// pattern to identify expressions [operator, variable-list] in template
URITemplate.EXPRESSION_PATTERN = /\{([^a-zA-Z0-9%_]?)([^\}]+)(\}|$)/g;
// pattern to identify variables [name, explode, maxlength] in variable-list
URITemplate.VARIABLE_PATTERN = /^([^*:]+)((\*)|:(\d+))?$/;
// pattern to verify variable name integrity
URITemplate.VARIABLE_NAME_PATTERN = /[^a-zA-Z0-9%_]/;
// expand parsed expression (expression, not template!)
URITemplate.expand = function(expression, data) {
// container for defined options for the given operator
var options = operators[expression.operator];
// expansion type (include keys or not)
var type = options.named ? 'Named' : 'Unnamed';
// list of variables within the expression
var variables = expression.variables;
// result buffer for evaluating the expression
var buffer = [];
var d, variable, i;
for (i = 0; (variable = variables[i]); i++) {
// fetch simplified data source
d = data.get(variable.name);
if (!d.val.length) {
if (d.type) {
// empty variables (empty string)
// still lead to a separator being appended!
buffer.push('');
}
// no data, no action
continue;
}
// expand the given variable
buffer.push(URITemplate['expand' + type](
d,
options,
variable.explode,
variable.explode && options.separator || ',',
variable.maxlength,
variable.name
));
}
if (buffer.length) {
return options.prefix + buffer.join(options.separator);
} else {
// prefix is not prepended for empty expressions
return '';
}
};
// expand a named variable
URITemplate.expandNamed = function(d, options, explode, separator, length, name) {
// variable result buffer
var result = '';
// peformance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
// key for named expansion
var _name = d.type === 2 ? '': URI[encode](name);
var _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
if (d.type === 2) {
// apply maxlength to keys of objects as well
_name = URI[encode](d.val[i][0].substring(0, length));
}
} else if (_encode) {
// encode value
_value = URI[encode](d.val[i][1]);
if (d.type === 2) {
// encode name and cache encoded value
_name = URI[encode](d.val[i][0]);
d[encode].push([_name, _value]);
} else {
// cache encoded value
d[encode].push([undefined, _value]);
}
} else {
// values are already encoded and can be pulled from cache
_value = d[encode][i][1];
if (d.type === 2) {
_name = d[encode][i][0];
}
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (!explode) {
if (!i) {
// first element, so prepend variable name
result += URI[encode](name) + (empty_name_separator || _value ? '=' : '');
}
if (d.type === 2) {
// without explode-modifier, keys of objects are returned comma-separated
result += _name + ',';
}
result += _value;
} else {
// only add the = if it is either default (?&) or there actually is a value (;)
result += _name + (empty_name_separator || _value ? '=' : '') + _value;
}
}
return result;
};
// expand an unnamed variable
URITemplate.expandUnnamed = function(d, options, explode, separator, length) {
// variable result buffer
var result = '';
// performance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
var _name, _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
} else if (_encode) {
// encode and cache value
_value = URI[encode](d.val[i][1]);
d[encode].push([
d.type === 2 ? URI[encode](d.val[i][0]) : undefined,
_value
]);
} else {
// value already encoded, pull from cache
_value = d[encode][i][1];
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (d.type === 2) {
if (length) {
// maxlength also applies to keys of objects
_name = URI[encode](d.val[i][0].substring(0, length));
} else {
// at this point the name must already be encoded
_name = d[encode][i][0];
}
result += _name;
if (explode) {
// explode-modifier separates name and value by "="
result += (empty_name_separator || _value ? '=' : '');
} else {
// no explode-modifier separates name and value by ","
result += ',';
}
}
result += _value;
}
return result;
};
URITemplate.noConflict = function() {
if (root.URITemplate === URITemplate) {
root.URITemplate = _URITemplate;
}
return URITemplate;
};
// expand template through given data map
p.expand = function(data) {
var result = '';
if (!this.parts || !this.parts.length) {
// lazilyy parse the template
this.parse();
}
if (!(data instanceof Data)) {
// make given data available through the
// optimized data handling thingie
data = new Data(data);
}
for (var i = 0, l = this.parts.length; i < l; i++) {
/*jshint laxbreak: true */
result += typeof this.parts[i] === 'string'
// literal string
? this.parts[i]
// expression
: URITemplate.expand(this.parts[i], data);
/*jshint laxbreak: false */
}
return result;
};
// parse template into action tokens
p.parse = function() {
// performance crap
var expression = this.expression;
var ePattern = URITemplate.EXPRESSION_PATTERN;
var vPattern = URITemplate.VARIABLE_PATTERN;
var nPattern = URITemplate.VARIABLE_NAME_PATTERN;
// token result buffer
var parts = [];
// position within source template
var pos = 0;
var variables, eMatch, vMatch;
// RegExp is shared accross all templates,
// which requires a manual reset
ePattern.lastIndex = 0;
// I don't like while(foo = bar()) loops,
// to make things simpler I go while(true) and break when required
while (true) {
eMatch = ePattern.exec(expression);
if (eMatch === null) {
// push trailing literal
parts.push(expression.substring(pos));
break;
} else {
// push leading literal
parts.push(expression.substring(pos, eMatch.index));
pos = eMatch.index + eMatch[0].length;
}
if (!operators[eMatch[1]]) {
throw new Error('Unknown Operator "' + eMatch[1] + '" in "' + eMatch[0] + '"');
} else if (!eMatch[3]) {
throw new Error('Unclosed Expression "' + eMatch[0] + '"');
}
// parse variable-list
variables = eMatch[2].split(',');
for (var i = 0, l = variables.length; i < l; i++) {
vMatch = variables[i].match(vPattern);
if (vMatch === null) {
throw new Error('Invalid Variable "' + variables[i] + '" in "' + eMatch[0] + '"');
} else if (vMatch[1].match(nPattern)) {
throw new Error('Invalid Variable Name "' + vMatch[1] + '" in "' + eMatch[0] + '"');
}
variables[i] = {
name: vMatch[1],
explode: !!vMatch[3],
maxlength: vMatch[4] && parseInt(vMatch[4], 10)
};
}
if (!variables.length) {
throw new Error('Expression Missing Variable(s) "' + eMatch[0] + '"');
}
parts.push({
expression: eMatch[0],
operator: eMatch[1],
variables: variables
});
}
if (!parts.length) {
// template doesn't contain any expressions
// so it is a simple literal string
// this probably should fire a warning or something?
parts.push(expression);
}
this.parts = parts;
return this;
};
// simplify data structures
Data.prototype.get = function(key) {
// performance crap
var data = this.data;
// cache for processed data-point
var d = {
// type of data 0: undefined/null, 1: string, 2: object, 3: array
type: 0,
// original values (except undefined/null)
val: [],
// cache for encoded values (only for non-maxlength expansion)
encode: [],
encodeReserved: []
};
var i, l, value;
if (this.cache[key] !== undefined) {
// we've already processed this key
return this.cache[key];
}
this.cache[key] = d;
if (String(Object.prototype.toString.call(data)) === '[object Function]') {
// data itself is a callback (global callback)
value = data(key);
} else if (String(Object.prototype.toString.call(data[key])) === '[object Function]') {
// data is a map of callbacks (local callback)
value = data[key](key);
} else {
// data is a map of data
value = data[key];
}
// generalize input into [ [name1, value1], [name2, value2], … ]
// so expansion has to deal with a single data structure only
if (value === undefined || value === null) {
// undefined and null values are to be ignored completely
return d;
} else if (String(Object.prototype.toString.call(value)) === '[object Array]') {
for (i = 0, l = value.length; i < l; i++) {
if (value[i] !== undefined && value[i] !== null) {
// arrays don't have names
d.val.push([undefined, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty arrays as arrays
d.type = 3; // array
}
} else if (String(Object.prototype.toString.call(value)) === '[object Object]') {
for (i in value) {
if (hasOwn.call(value, i) && value[i] !== undefined && value[i] !== null) {
// objects have keys, remember them for named expansion
d.val.push([i, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty objects as objects
d.type = 2; // object
}
} else {
d.type = 1; // primitive string (could've been string, number, boolean and objects with a toString())
// arrays don't have names
d.val.push([undefined, String(value)]);
}
return d;
};
// hook into URI for fluid access
URI.expand = function(expression, data) {
var template = new URITemplate(expression);
var expansion = template.expand(data);
return new URI(expansion);
};
return URITemplate;
}));
| URITemplate | identifier_name |
URITemplate.js | /*!
* URI.js - Mutating URLs
* URI Template Support - http://tools.ietf.org/html/rfc6570
*
* Version: 1.16.0
*
* Author: Rodney Rehm
* Web: http://medialize.github.io/URI.js/
*
* Licensed under
* MIT License http://www.opensource.org/licenses/mit-license
* GPL v3 http://opensource.org/licenses/GPL-3.0
*
*/
(function (root, factory) {
'use strict';
// https://github.com/umdjs/umd/blob/master/returnExports.js
if (typeof exports === 'object') {
// Node
module.exports = factory(require('./URI'));
} else if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define(['./URI'], factory);
} else {
// Browser globals (root is window)
root.URITemplate = factory(root.URI, root);
}
}(this, function (URI, root) {
'use strict';
// FIXME: v2.0.0 renamce non-camelCase properties to uppercase
/*jshint camelcase: false */
// save current URITemplate variable, if any
var _URITemplate = root && root.URITemplate;
var hasOwn = Object.prototype.hasOwnProperty;
function URITemplate(expression) {
// serve from cache where possible
if (URITemplate._cache[expression]) {
return URITemplate._cache[expression];
}
// Allow instantiation without the 'new' keyword
if (!(this instanceof URITemplate)) {
return new URITemplate(expression);
}
this.expression = expression;
URITemplate._cache[expression] = this;
return this;
}
function Data(data) |
var p = URITemplate.prototype;
// list of operators and their defined options
var operators = {
// Simple string expansion
'' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Reserved character strings
'+' : {
prefix: '',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Fragment identifiers prefixed by '#'
'#' : {
prefix: '#',
separator: ',',
named: false,
empty_name_separator: false,
encode : 'encodeReserved'
},
// Name labels or extensions prefixed by '.'
'.' : {
prefix: '.',
separator: '.',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path segments prefixed by '/'
'/' : {
prefix: '/',
separator: '/',
named: false,
empty_name_separator: false,
encode : 'encode'
},
// Path parameter name or name=value pairs prefixed by ';'
';' : {
prefix: ';',
separator: ';',
named: true,
empty_name_separator: false,
encode : 'encode'
},
// Query component beginning with '?' and consisting
// of name=value pairs separated by '&'; an
'?' : {
prefix: '?',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
},
// Continuation of query-style &name=value pairs
// within a literal query component.
'&' : {
prefix: '&',
separator: '&',
named: true,
empty_name_separator: true,
encode : 'encode'
}
// The operator characters equals ("="), comma (","), exclamation ("!"),
// at sign ("@"), and pipe ("|") are reserved for future extensions.
};
// storage for already parsed templates
URITemplate._cache = {};
// pattern to identify expressions [operator, variable-list] in template
URITemplate.EXPRESSION_PATTERN = /\{([^a-zA-Z0-9%_]?)([^\}]+)(\}|$)/g;
// pattern to identify variables [name, explode, maxlength] in variable-list
URITemplate.VARIABLE_PATTERN = /^([^*:]+)((\*)|:(\d+))?$/;
// pattern to verify variable name integrity
URITemplate.VARIABLE_NAME_PATTERN = /[^a-zA-Z0-9%_]/;
// expand parsed expression (expression, not template!)
URITemplate.expand = function(expression, data) {
// container for defined options for the given operator
var options = operators[expression.operator];
// expansion type (include keys or not)
var type = options.named ? 'Named' : 'Unnamed';
// list of variables within the expression
var variables = expression.variables;
// result buffer for evaluating the expression
var buffer = [];
var d, variable, i;
for (i = 0; (variable = variables[i]); i++) {
// fetch simplified data source
d = data.get(variable.name);
if (!d.val.length) {
if (d.type) {
// empty variables (empty string)
// still lead to a separator being appended!
buffer.push('');
}
// no data, no action
continue;
}
// expand the given variable
buffer.push(URITemplate['expand' + type](
d,
options,
variable.explode,
variable.explode && options.separator || ',',
variable.maxlength,
variable.name
));
}
if (buffer.length) {
return options.prefix + buffer.join(options.separator);
} else {
// prefix is not prepended for empty expressions
return '';
}
};
// expand a named variable
URITemplate.expandNamed = function(d, options, explode, separator, length, name) {
// variable result buffer
var result = '';
// peformance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
// key for named expansion
var _name = d.type === 2 ? '': URI[encode](name);
var _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
if (d.type === 2) {
// apply maxlength to keys of objects as well
_name = URI[encode](d.val[i][0].substring(0, length));
}
} else if (_encode) {
// encode value
_value = URI[encode](d.val[i][1]);
if (d.type === 2) {
// encode name and cache encoded value
_name = URI[encode](d.val[i][0]);
d[encode].push([_name, _value]);
} else {
// cache encoded value
d[encode].push([undefined, _value]);
}
} else {
// values are already encoded and can be pulled from cache
_value = d[encode][i][1];
if (d.type === 2) {
_name = d[encode][i][0];
}
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (!explode) {
if (!i) {
// first element, so prepend variable name
result += URI[encode](name) + (empty_name_separator || _value ? '=' : '');
}
if (d.type === 2) {
// without explode-modifier, keys of objects are returned comma-separated
result += _name + ',';
}
result += _value;
} else {
// only add the = if it is either default (?&) or there actually is a value (;)
result += _name + (empty_name_separator || _value ? '=' : '') + _value;
}
}
return result;
};
// expand an unnamed variable
URITemplate.expandUnnamed = function(d, options, explode, separator, length) {
// variable result buffer
var result = '';
// performance crap
var encode = options.encode;
var empty_name_separator = options.empty_name_separator;
// flag noting if values are already encoded
var _encode = !d[encode].length;
var _name, _value, i, l;
// for each found value
for (i = 0, l = d.val.length; i < l; i++) {
if (length) {
// maxlength must be determined before encoding can happen
_value = URI[encode](d.val[i][1].substring(0, length));
} else if (_encode) {
// encode and cache value
_value = URI[encode](d.val[i][1]);
d[encode].push([
d.type === 2 ? URI[encode](d.val[i][0]) : undefined,
_value
]);
} else {
// value already encoded, pull from cache
_value = d[encode][i][1];
}
if (result) {
// unless we're the first value, prepend the separator
result += separator;
}
if (d.type === 2) {
if (length) {
// maxlength also applies to keys of objects
_name = URI[encode](d.val[i][0].substring(0, length));
} else {
// at this point the name must already be encoded
_name = d[encode][i][0];
}
result += _name;
if (explode) {
// explode-modifier separates name and value by "="
result += (empty_name_separator || _value ? '=' : '');
} else {
// no explode-modifier separates name and value by ","
result += ',';
}
}
result += _value;
}
return result;
};
URITemplate.noConflict = function() {
if (root.URITemplate === URITemplate) {
root.URITemplate = _URITemplate;
}
return URITemplate;
};
// expand template through given data map
p.expand = function(data) {
var result = '';
if (!this.parts || !this.parts.length) {
// lazilyy parse the template
this.parse();
}
if (!(data instanceof Data)) {
// make given data available through the
// optimized data handling thingie
data = new Data(data);
}
for (var i = 0, l = this.parts.length; i < l; i++) {
/*jshint laxbreak: true */
result += typeof this.parts[i] === 'string'
// literal string
? this.parts[i]
// expression
: URITemplate.expand(this.parts[i], data);
/*jshint laxbreak: false */
}
return result;
};
// parse template into action tokens
p.parse = function() {
// performance crap
var expression = this.expression;
var ePattern = URITemplate.EXPRESSION_PATTERN;
var vPattern = URITemplate.VARIABLE_PATTERN;
var nPattern = URITemplate.VARIABLE_NAME_PATTERN;
// token result buffer
var parts = [];
// position within source template
var pos = 0;
var variables, eMatch, vMatch;
// RegExp is shared accross all templates,
// which requires a manual reset
ePattern.lastIndex = 0;
// I don't like while(foo = bar()) loops,
// to make things simpler I go while(true) and break when required
while (true) {
eMatch = ePattern.exec(expression);
if (eMatch === null) {
// push trailing literal
parts.push(expression.substring(pos));
break;
} else {
// push leading literal
parts.push(expression.substring(pos, eMatch.index));
pos = eMatch.index + eMatch[0].length;
}
if (!operators[eMatch[1]]) {
throw new Error('Unknown Operator "' + eMatch[1] + '" in "' + eMatch[0] + '"');
} else if (!eMatch[3]) {
throw new Error('Unclosed Expression "' + eMatch[0] + '"');
}
// parse variable-list
variables = eMatch[2].split(',');
for (var i = 0, l = variables.length; i < l; i++) {
vMatch = variables[i].match(vPattern);
if (vMatch === null) {
throw new Error('Invalid Variable "' + variables[i] + '" in "' + eMatch[0] + '"');
} else if (vMatch[1].match(nPattern)) {
throw new Error('Invalid Variable Name "' + vMatch[1] + '" in "' + eMatch[0] + '"');
}
variables[i] = {
name: vMatch[1],
explode: !!vMatch[3],
maxlength: vMatch[4] && parseInt(vMatch[4], 10)
};
}
if (!variables.length) {
throw new Error('Expression Missing Variable(s) "' + eMatch[0] + '"');
}
parts.push({
expression: eMatch[0],
operator: eMatch[1],
variables: variables
});
}
if (!parts.length) {
// template doesn't contain any expressions
// so it is a simple literal string
// this probably should fire a warning or something?
parts.push(expression);
}
this.parts = parts;
return this;
};
// simplify data structures
Data.prototype.get = function(key) {
// performance crap
var data = this.data;
// cache for processed data-point
var d = {
// type of data 0: undefined/null, 1: string, 2: object, 3: array
type: 0,
// original values (except undefined/null)
val: [],
// cache for encoded values (only for non-maxlength expansion)
encode: [],
encodeReserved: []
};
var i, l, value;
if (this.cache[key] !== undefined) {
// we've already processed this key
return this.cache[key];
}
this.cache[key] = d;
if (String(Object.prototype.toString.call(data)) === '[object Function]') {
// data itself is a callback (global callback)
value = data(key);
} else if (String(Object.prototype.toString.call(data[key])) === '[object Function]') {
// data is a map of callbacks (local callback)
value = data[key](key);
} else {
// data is a map of data
value = data[key];
}
// generalize input into [ [name1, value1], [name2, value2], … ]
// so expansion has to deal with a single data structure only
if (value === undefined || value === null) {
// undefined and null values are to be ignored completely
return d;
} else if (String(Object.prototype.toString.call(value)) === '[object Array]') {
for (i = 0, l = value.length; i < l; i++) {
if (value[i] !== undefined && value[i] !== null) {
// arrays don't have names
d.val.push([undefined, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty arrays as arrays
d.type = 3; // array
}
} else if (String(Object.prototype.toString.call(value)) === '[object Object]') {
for (i in value) {
if (hasOwn.call(value, i) && value[i] !== undefined && value[i] !== null) {
// objects have keys, remember them for named expansion
d.val.push([i, String(value[i])]);
}
}
if (d.val.length) {
// only treat non-empty objects as objects
d.type = 2; // object
}
} else {
d.type = 1; // primitive string (could've been string, number, boolean and objects with a toString())
// arrays don't have names
d.val.push([undefined, String(value)]);
}
return d;
};
// hook into URI for fluid access
URI.expand = function(expression, data) {
var template = new URITemplate(expression);
var expansion = template.expand(data);
return new URI(expansion);
};
return URITemplate;
}));
| {
this.data = data;
this.cache = {};
} | identifier_body |
canvas_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_paint_task::{FillOrStrokeStyle, LineCapStyle, LineJoinStyle, CompositionOrBlending};
use geom::matrix2d::Matrix2D;
use geom::point::Point2D;
use geom::rect::Rect;
use geom::size::Size2D;
use std::sync::mpsc::{Sender};
#[derive(Clone)]
pub enum | {
Canvas2d(Canvas2dMsg),
Common(CanvasCommonMsg),
WebGL(CanvasWebGLMsg),
}
#[derive(Clone)]
pub enum Canvas2dMsg {
Arc(Point2D<f32>, f32, f32, f32, bool),
ArcTo(Point2D<f32>, Point2D<f32>, f32),
DrawImage(Vec<u8>, Size2D<f64>, Rect<f64>, Rect<f64>, bool),
DrawImageSelf(Size2D<f64>, Rect<f64>, Rect<f64>, bool),
BeginPath,
BezierCurveTo(Point2D<f32>, Point2D<f32>, Point2D<f32>),
ClearRect(Rect<f32>),
Clip,
ClosePath,
Fill,
FillRect(Rect<f32>),
GetImageData(Rect<f64>, Size2D<f64>, Sender<Vec<u8>>),
LineTo(Point2D<f32>),
MoveTo(Point2D<f32>),
PutImageData(Vec<u8>, Rect<f64>, Option<Rect<f64>>),
QuadraticCurveTo(Point2D<f32>, Point2D<f32>),
Rect(Rect<f32>),
RestoreContext,
SaveContext,
StrokeRect(Rect<f32>),
Stroke,
SetFillStyle(FillOrStrokeStyle),
SetStrokeStyle(FillOrStrokeStyle),
SetLineWidth(f32),
SetLineCap(LineCapStyle),
SetLineJoin(LineJoinStyle),
SetMiterLimit(f32),
SetGlobalAlpha(f32),
SetGlobalComposition(CompositionOrBlending),
SetTransform(Matrix2D<f32>),
}
#[derive(Clone)]
pub enum CanvasWebGLMsg {
AttachShader(u32, u32),
BindBuffer(u32, u32),
BufferData(u32, Vec<f32>, u32),
Clear(u32),
ClearColor(f32, f32, f32, f32),
CompileShader(u32),
CreateBuffer(Sender<u32>),
CreateProgram(Sender<u32>),
CreateShader(u32, Sender<u32>),
DrawArrays(u32, i32, i32),
EnableVertexAttribArray(u32),
GetAttribLocation(u32, String, Sender<i32>),
GetShaderInfoLog(u32, Sender<String>),
GetShaderParameter(u32, u32, Sender<i32>),
GetUniformLocation(u32, String, Sender<u32>),
LinkProgram(u32),
ShaderSource(u32, Vec<String>),
Uniform4fv(u32, Vec<f32>),
UseProgram(u32),
VertexAttribPointer2f(u32, i32, bool, i32, i64),
Viewport(i32, i32, i32, i32),
}
#[derive(Clone)]
pub enum CanvasCommonMsg {
Close,
Recreate(Size2D<i32>),
SendPixelContents(Sender<Vec<u8>>),
}
| CanvasMsg | identifier_name |
canvas_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_paint_task::{FillOrStrokeStyle, LineCapStyle, LineJoinStyle, CompositionOrBlending};
use geom::matrix2d::Matrix2D;
use geom::point::Point2D;
use geom::rect::Rect;
use geom::size::Size2D;
use std::sync::mpsc::{Sender};
#[derive(Clone)]
pub enum CanvasMsg {
Canvas2d(Canvas2dMsg),
Common(CanvasCommonMsg),
WebGL(CanvasWebGLMsg),
}
#[derive(Clone)]
pub enum Canvas2dMsg {
Arc(Point2D<f32>, f32, f32, f32, bool),
ArcTo(Point2D<f32>, Point2D<f32>, f32),
DrawImage(Vec<u8>, Size2D<f64>, Rect<f64>, Rect<f64>, bool),
DrawImageSelf(Size2D<f64>, Rect<f64>, Rect<f64>, bool),
BeginPath,
BezierCurveTo(Point2D<f32>, Point2D<f32>, Point2D<f32>),
ClearRect(Rect<f32>), | LineTo(Point2D<f32>),
MoveTo(Point2D<f32>),
PutImageData(Vec<u8>, Rect<f64>, Option<Rect<f64>>),
QuadraticCurveTo(Point2D<f32>, Point2D<f32>),
Rect(Rect<f32>),
RestoreContext,
SaveContext,
StrokeRect(Rect<f32>),
Stroke,
SetFillStyle(FillOrStrokeStyle),
SetStrokeStyle(FillOrStrokeStyle),
SetLineWidth(f32),
SetLineCap(LineCapStyle),
SetLineJoin(LineJoinStyle),
SetMiterLimit(f32),
SetGlobalAlpha(f32),
SetGlobalComposition(CompositionOrBlending),
SetTransform(Matrix2D<f32>),
}
#[derive(Clone)]
pub enum CanvasWebGLMsg {
AttachShader(u32, u32),
BindBuffer(u32, u32),
BufferData(u32, Vec<f32>, u32),
Clear(u32),
ClearColor(f32, f32, f32, f32),
CompileShader(u32),
CreateBuffer(Sender<u32>),
CreateProgram(Sender<u32>),
CreateShader(u32, Sender<u32>),
DrawArrays(u32, i32, i32),
EnableVertexAttribArray(u32),
GetAttribLocation(u32, String, Sender<i32>),
GetShaderInfoLog(u32, Sender<String>),
GetShaderParameter(u32, u32, Sender<i32>),
GetUniformLocation(u32, String, Sender<u32>),
LinkProgram(u32),
ShaderSource(u32, Vec<String>),
Uniform4fv(u32, Vec<f32>),
UseProgram(u32),
VertexAttribPointer2f(u32, i32, bool, i32, i64),
Viewport(i32, i32, i32, i32),
}
#[derive(Clone)]
pub enum CanvasCommonMsg {
Close,
Recreate(Size2D<i32>),
SendPixelContents(Sender<Vec<u8>>),
} | Clip,
ClosePath,
Fill,
FillRect(Rect<f32>),
GetImageData(Rect<f64>, Size2D<f64>, Sender<Vec<u8>>), | random_line_split |
ShareSharp.js | import React from 'react'; | export default createSvgIcon(
<path d="M18 16.08c-.76 0-1.44.3-1.96.77L8.91 12.7c.05-.23.09-.46.09-.7s-.04-.47-.09-.7l7.05-4.11c.54.5 1.25.81 2.04.81 1.66 0 3-1.34 3-3s-1.34-3-3-3-3 1.34-3 3c0 .24.04.47.09.7L8.04 9.81C7.5 9.31 6.79 9 6 9c-1.66 0-3 1.34-3 3s1.34 3 3 3c.79 0 1.5-.31 2.04-.81l7.12 4.16c-.05.21-.08.43-.08.65 0 1.61 1.31 2.92 2.92 2.92s2.92-1.31 2.92-2.92-1.31-2.92-2.92-2.92z" />
, 'ShareSharp'); | import createSvgIcon from './utils/createSvgIcon';
| random_line_split |
runner.rs | // Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use quiche::h3::NameValue;
use ring::rand::*;
use crate::Http3TestError;
pub fn run(
test: &mut crate::Http3Test, peer_addr: std::net::SocketAddr,
verify_peer: bool, idle_timeout: u64, max_data: u64, early_data: bool,
session_file: Option<String>,
) -> Result<(), Http3TestError> {
const MAX_DATAGRAM_SIZE: usize = 1350;
let mut buf = [0; 65535];
let mut out = [0; MAX_DATAGRAM_SIZE];
let max_stream_data = max_data;
let version = if let Some(v) = std::env::var_os("QUIC_VERSION") {
match v.to_str() {
Some("current") => quiche::PROTOCOL_VERSION,
Some(v) => u32::from_str_radix(v, 16).unwrap(),
_ => 0xbaba_baba,
}
} else {
0xbaba_baba
};
let mut reqs_count = 0;
let mut reqs_complete = 0;
// Setup the event loop.
let poll = mio::Poll::new().unwrap();
let mut events = mio::Events::with_capacity(1024);
info!("connecting to {:}", peer_addr);
// Bind to INADDR_ANY or IN6ADDR_ANY depending on the IP family of the
// server address. This is needed on macOS and BSD variants that don't
// support binding to IN6ADDR_ANY for both v4 and v6.
let bind_addr = match peer_addr {
std::net::SocketAddr::V4(_) => "0.0.0.0:0",
std::net::SocketAddr::V6(_) => "[::]:0",
};
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
&socket,
mio::Token(0),
mio::Ready::readable(),
mio::PollOpt::edge(),
)
.unwrap();
// Create the configuration for the QUIC connection.
let mut config = quiche::Config::new(version).unwrap();
config.verify_peer(verify_peer);
config
.set_application_protos(quiche::h3::APPLICATION_PROTOCOL)
.unwrap();
config.set_max_idle_timeout(idle_timeout);
config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(max_data);
config.set_initial_max_stream_data_bidi_local(max_stream_data);
config.set_initial_max_stream_data_bidi_remote(max_stream_data);
config.set_initial_max_stream_data_uni(max_stream_data);
config.set_initial_max_streams_bidi(100);
config.set_initial_max_streams_uni(100);
config.set_disable_active_migration(true);
if early_data {
config.enable_early_data();
debug!("early data enabled");
}
let mut http3_conn = None;
if std::env::var_os("SSLKEYLOGFILE").is_some() {
config.log_keys();
}
// Generate a random source connection ID for the connection.
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
let scid = quiche::ConnectionId::from_ref(&scid);
// Create a QUIC connection and initiate handshake.
let url = &test.endpoint();
let mut conn =
quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
if let Some(session_file) = &session_file {
if let Ok(session) = std::fs::read(session_file) {
conn.set_session(&session).ok();
}
}
let (write, send_info) = conn.send(&mut out).expect("initial send failed");
while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
}
return Err(Http3TestError::Other(format!("send() failed: {:?}", e)));
}
debug!("written {}", write);
let req_start = std::time::Instant::now();
loop {
if !conn.is_in_early_data() || http3_conn.is_some() {
poll.poll(&mut events, conn.timeout()).unwrap();
}
// Read incoming UDP packets from the socket and feed them to quiche,
// until there are no more packets to read.
'read: loop {
// If the event loop reported no events, it means that the timeout
// has expired, so handle it without attempting to read packets. We
// will then proceed with the send loop.
if events.is_empty() {
debug!("timed out");
conn.on_timeout();
break 'read;
}
let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
// There are no more UDP packets to read, so end the read
// loop.
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("recv() would block");
break 'read;
}
return Err(Http3TestError::Other(format!(
"recv() failed: {:?}",
e
)));
},
};
debug!("got {} bytes", len);
let recv_info = quiche::RecvInfo { from };
// Process potentially coalesced packets.
let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done reading");
break;
},
Err(e) => {
error!("recv failed: {:?}", e);
break 'read;
},
};
debug!("processed {} bytes", read);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if !conn.is_established() {
error!("connection timed out after {:?}", req_start.elapsed(),);
return Err(Http3TestError::HandshakeFail);
}
if reqs_complete != reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
// Create a new HTTP/3 connection and end an HTTP request as soon as
// the QUIC connection is established.
if (conn.is_established() || conn.is_in_early_data()) &&
http3_conn.is_none()
{
let h3_config = quiche::h3::Config::new().unwrap();
let mut h3_conn =
quiche::h3::Connection::with_transport(&mut conn, &h3_config)
.unwrap();
reqs_count = test.requests_count();
match test.send_requests(&mut conn, &mut h3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending: {:?}",
e
)));
},
};
http3_conn = Some(h3_conn);
}
if let Some(http3_conn) = &mut http3_conn {
// Process HTTP/3 events.
loop {
match http3_conn.poll(&mut conn) {
Ok((stream_id, quiche::h3::Event::Headers { list, .. })) => {
info!(
"got response headers {:?} on stream id {}",
hdrs_to_strings(&list),
stream_id
);
test.add_response_headers(stream_id, &list);
},
Ok((stream_id, quiche::h3::Event::Data)) => {
if let Ok(read) =
http3_conn.recv_body(&mut conn, stream_id, &mut buf)
{
info!(
"got {} bytes of response data on stream {}",
read, stream_id
);
test.add_response_body(stream_id, &buf, read);
}
},
Ok((_stream_id, quiche::h3::Event::Finished)) => {
reqs_complete += 1;
info!(
"{}/{} responses received",
reqs_complete, reqs_count
);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
match test.send_requests(&mut conn, http3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending request: {:?}",
e
)));
},
}
},
Ok((stream_id, quiche::h3::Event::Reset(e))) => {
reqs_complete += 1;
info!("request was reset by peer with {}", e);
test.set_reset_stream_error(stream_id, e);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
},
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((_goaway_id, quiche::h3::Event::GoAway)) => (),
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("HTTP/3 processing failed: {:?}", e);
break; | // Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done writing");
break;
},
Err(e) => {
error!("send failed: {:?}", e);
conn.close(false, 0x1, b"fail").ok();
break;
},
};
if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
}
return Err(Http3TestError::Other(format!(
"send() failed: {:?}",
e
)));
}
debug!("written {}", write);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if reqs_complete != reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
}
Ok(())
}
fn hdrs_to_strings(hdrs: &[quiche::h3::Header]) -> Vec<(String, String)> {
hdrs.iter()
.map(|h| {
(
String::from_utf8(h.name().into()).unwrap(),
String::from_utf8(h.value().into()).unwrap(),
)
})
.collect()
} | },
}
}
}
| random_line_split |
runner.rs | // Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use quiche::h3::NameValue;
use ring::rand::*;
use crate::Http3TestError;
pub fn run(
test: &mut crate::Http3Test, peer_addr: std::net::SocketAddr,
verify_peer: bool, idle_timeout: u64, max_data: u64, early_data: bool,
session_file: Option<String>,
) -> Result<(), Http3TestError> {
const MAX_DATAGRAM_SIZE: usize = 1350;
let mut buf = [0; 65535];
let mut out = [0; MAX_DATAGRAM_SIZE];
let max_stream_data = max_data;
let version = if let Some(v) = std::env::var_os("QUIC_VERSION") {
match v.to_str() {
Some("current") => quiche::PROTOCOL_VERSION,
Some(v) => u32::from_str_radix(v, 16).unwrap(),
_ => 0xbaba_baba,
}
} else {
0xbaba_baba
};
let mut reqs_count = 0;
let mut reqs_complete = 0;
// Setup the event loop.
let poll = mio::Poll::new().unwrap();
let mut events = mio::Events::with_capacity(1024);
info!("connecting to {:}", peer_addr);
// Bind to INADDR_ANY or IN6ADDR_ANY depending on the IP family of the
// server address. This is needed on macOS and BSD variants that don't
// support binding to IN6ADDR_ANY for both v4 and v6.
let bind_addr = match peer_addr {
std::net::SocketAddr::V4(_) => "0.0.0.0:0",
std::net::SocketAddr::V6(_) => "[::]:0",
};
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
&socket,
mio::Token(0),
mio::Ready::readable(),
mio::PollOpt::edge(),
)
.unwrap();
// Create the configuration for the QUIC connection.
let mut config = quiche::Config::new(version).unwrap();
config.verify_peer(verify_peer);
config
.set_application_protos(quiche::h3::APPLICATION_PROTOCOL)
.unwrap();
config.set_max_idle_timeout(idle_timeout);
config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(max_data);
config.set_initial_max_stream_data_bidi_local(max_stream_data);
config.set_initial_max_stream_data_bidi_remote(max_stream_data);
config.set_initial_max_stream_data_uni(max_stream_data);
config.set_initial_max_streams_bidi(100);
config.set_initial_max_streams_uni(100);
config.set_disable_active_migration(true);
if early_data {
config.enable_early_data();
debug!("early data enabled");
}
let mut http3_conn = None;
if std::env::var_os("SSLKEYLOGFILE").is_some() {
config.log_keys();
}
// Generate a random source connection ID for the connection.
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
let scid = quiche::ConnectionId::from_ref(&scid);
// Create a QUIC connection and initiate handshake.
let url = &test.endpoint();
let mut conn =
quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
if let Some(session_file) = &session_file {
if let Ok(session) = std::fs::read(session_file) {
conn.set_session(&session).ok();
}
}
let (write, send_info) = conn.send(&mut out).expect("initial send failed");
while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
}
return Err(Http3TestError::Other(format!("send() failed: {:?}", e)));
}
debug!("written {}", write);
let req_start = std::time::Instant::now();
loop {
if !conn.is_in_early_data() || http3_conn.is_some() {
poll.poll(&mut events, conn.timeout()).unwrap();
}
// Read incoming UDP packets from the socket and feed them to quiche,
// until there are no more packets to read.
'read: loop {
// If the event loop reported no events, it means that the timeout
// has expired, so handle it without attempting to read packets. We
// will then proceed with the send loop.
if events.is_empty() {
debug!("timed out");
conn.on_timeout();
break 'read;
}
let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
// There are no more UDP packets to read, so end the read
// loop.
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("recv() would block");
break 'read;
}
return Err(Http3TestError::Other(format!(
"recv() failed: {:?}",
e
)));
},
};
debug!("got {} bytes", len);
let recv_info = quiche::RecvInfo { from };
// Process potentially coalesced packets.
let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done reading");
break;
},
Err(e) => {
error!("recv failed: {:?}", e);
break 'read;
},
};
debug!("processed {} bytes", read);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if !conn.is_established() {
error!("connection timed out after {:?}", req_start.elapsed(),);
return Err(Http3TestError::HandshakeFail);
}
if reqs_complete != reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
// Create a new HTTP/3 connection and end an HTTP request as soon as
// the QUIC connection is established.
if (conn.is_established() || conn.is_in_early_data()) &&
http3_conn.is_none()
{
let h3_config = quiche::h3::Config::new().unwrap();
let mut h3_conn =
quiche::h3::Connection::with_transport(&mut conn, &h3_config)
.unwrap();
reqs_count = test.requests_count();
match test.send_requests(&mut conn, &mut h3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending: {:?}",
e
)));
},
};
http3_conn = Some(h3_conn);
}
if let Some(http3_conn) = &mut http3_conn {
// Process HTTP/3 events.
loop {
match http3_conn.poll(&mut conn) {
Ok((stream_id, quiche::h3::Event::Headers { list, .. })) => {
info!(
"got response headers {:?} on stream id {}",
hdrs_to_strings(&list),
stream_id
);
test.add_response_headers(stream_id, &list);
},
Ok((stream_id, quiche::h3::Event::Data)) => {
if let Ok(read) =
http3_conn.recv_body(&mut conn, stream_id, &mut buf)
{
info!(
"got {} bytes of response data on stream {}",
read, stream_id
);
test.add_response_body(stream_id, &buf, read);
}
},
Ok((_stream_id, quiche::h3::Event::Finished)) => {
reqs_complete += 1;
info!(
"{}/{} responses received",
reqs_complete, reqs_count
);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
match test.send_requests(&mut conn, http3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending request: {:?}",
e
)));
},
}
},
Ok((stream_id, quiche::h3::Event::Reset(e))) => {
reqs_complete += 1;
info!("request was reset by peer with {}", e);
test.set_reset_stream_error(stream_id, e);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
},
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((_goaway_id, quiche::h3::Event::GoAway)) => (),
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("HTTP/3 processing failed: {:?}", e);
break;
},
}
}
}
// Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done writing");
break;
},
Err(e) => {
error!("send failed: {:?}", e);
conn.close(false, 0x1, b"fail").ok();
break;
},
};
if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
}
return Err(Http3TestError::Other(format!(
"send() failed: {:?}",
e
)));
}
debug!("written {}", write);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if reqs_complete != reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
}
Ok(())
}
fn hdrs_to_strings(hdrs: &[quiche::h3::Header]) -> Vec<(String, String)> | {
hdrs.iter()
.map(|h| {
(
String::from_utf8(h.name().into()).unwrap(),
String::from_utf8(h.value().into()).unwrap(),
)
})
.collect()
} | identifier_body | |
runner.rs | // Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use quiche::h3::NameValue;
use ring::rand::*;
use crate::Http3TestError;
pub fn | (
test: &mut crate::Http3Test, peer_addr: std::net::SocketAddr,
verify_peer: bool, idle_timeout: u64, max_data: u64, early_data: bool,
session_file: Option<String>,
) -> Result<(), Http3TestError> {
const MAX_DATAGRAM_SIZE: usize = 1350;
let mut buf = [0; 65535];
let mut out = [0; MAX_DATAGRAM_SIZE];
let max_stream_data = max_data;
let version = if let Some(v) = std::env::var_os("QUIC_VERSION") {
match v.to_str() {
Some("current") => quiche::PROTOCOL_VERSION,
Some(v) => u32::from_str_radix(v, 16).unwrap(),
_ => 0xbaba_baba,
}
} else {
0xbaba_baba
};
let mut reqs_count = 0;
let mut reqs_complete = 0;
// Setup the event loop.
let poll = mio::Poll::new().unwrap();
let mut events = mio::Events::with_capacity(1024);
info!("connecting to {:}", peer_addr);
// Bind to INADDR_ANY or IN6ADDR_ANY depending on the IP family of the
// server address. This is needed on macOS and BSD variants that don't
// support binding to IN6ADDR_ANY for both v4 and v6.
let bind_addr = match peer_addr {
std::net::SocketAddr::V4(_) => "0.0.0.0:0",
std::net::SocketAddr::V6(_) => "[::]:0",
};
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
&socket,
mio::Token(0),
mio::Ready::readable(),
mio::PollOpt::edge(),
)
.unwrap();
// Create the configuration for the QUIC connection.
let mut config = quiche::Config::new(version).unwrap();
config.verify_peer(verify_peer);
config
.set_application_protos(quiche::h3::APPLICATION_PROTOCOL)
.unwrap();
config.set_max_idle_timeout(idle_timeout);
config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(max_data);
config.set_initial_max_stream_data_bidi_local(max_stream_data);
config.set_initial_max_stream_data_bidi_remote(max_stream_data);
config.set_initial_max_stream_data_uni(max_stream_data);
config.set_initial_max_streams_bidi(100);
config.set_initial_max_streams_uni(100);
config.set_disable_active_migration(true);
if early_data {
config.enable_early_data();
debug!("early data enabled");
}
let mut http3_conn = None;
if std::env::var_os("SSLKEYLOGFILE").is_some() {
config.log_keys();
}
// Generate a random source connection ID for the connection.
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
let scid = quiche::ConnectionId::from_ref(&scid);
// Create a QUIC connection and initiate handshake.
let url = &test.endpoint();
let mut conn =
quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
if let Some(session_file) = &session_file {
if let Ok(session) = std::fs::read(session_file) {
conn.set_session(&session).ok();
}
}
let (write, send_info) = conn.send(&mut out).expect("initial send failed");
while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
}
return Err(Http3TestError::Other(format!("send() failed: {:?}", e)));
}
debug!("written {}", write);
let req_start = std::time::Instant::now();
loop {
if !conn.is_in_early_data() || http3_conn.is_some() {
poll.poll(&mut events, conn.timeout()).unwrap();
}
// Read incoming UDP packets from the socket and feed them to quiche,
// until there are no more packets to read.
'read: loop {
// If the event loop reported no events, it means that the timeout
// has expired, so handle it without attempting to read packets. We
// will then proceed with the send loop.
if events.is_empty() {
debug!("timed out");
conn.on_timeout();
break 'read;
}
let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
// There are no more UDP packets to read, so end the read
// loop.
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("recv() would block");
break 'read;
}
return Err(Http3TestError::Other(format!(
"recv() failed: {:?}",
e
)));
},
};
debug!("got {} bytes", len);
let recv_info = quiche::RecvInfo { from };
// Process potentially coalesced packets.
let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done reading");
break;
},
Err(e) => {
error!("recv failed: {:?}", e);
break 'read;
},
};
debug!("processed {} bytes", read);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if !conn.is_established() {
error!("connection timed out after {:?}", req_start.elapsed(),);
return Err(Http3TestError::HandshakeFail);
}
if reqs_complete != reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
// Create a new HTTP/3 connection and end an HTTP request as soon as
// the QUIC connection is established.
if (conn.is_established() || conn.is_in_early_data()) &&
http3_conn.is_none()
{
let h3_config = quiche::h3::Config::new().unwrap();
let mut h3_conn =
quiche::h3::Connection::with_transport(&mut conn, &h3_config)
.unwrap();
reqs_count = test.requests_count();
match test.send_requests(&mut conn, &mut h3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending: {:?}",
e
)));
},
};
http3_conn = Some(h3_conn);
}
if let Some(http3_conn) = &mut http3_conn {
// Process HTTP/3 events.
loop {
match http3_conn.poll(&mut conn) {
Ok((stream_id, quiche::h3::Event::Headers { list, .. })) => {
info!(
"got response headers {:?} on stream id {}",
hdrs_to_strings(&list),
stream_id
);
test.add_response_headers(stream_id, &list);
},
Ok((stream_id, quiche::h3::Event::Data)) => {
if let Ok(read) =
http3_conn.recv_body(&mut conn, stream_id, &mut buf)
{
info!(
"got {} bytes of response data on stream {}",
read, stream_id
);
test.add_response_body(stream_id, &buf, read);
}
},
Ok((_stream_id, quiche::h3::Event::Finished)) => {
reqs_complete += 1;
info!(
"{}/{} responses received",
reqs_complete, reqs_count
);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
match test.send_requests(&mut conn, http3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending request: {:?}",
e
)));
},
}
},
Ok((stream_id, quiche::h3::Event::Reset(e))) => {
reqs_complete += 1;
info!("request was reset by peer with {}", e);
test.set_reset_stream_error(stream_id, e);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
},
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((_goaway_id, quiche::h3::Event::GoAway)) => (),
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("HTTP/3 processing failed: {:?}", e);
break;
},
}
}
}
// Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done writing");
break;
},
Err(e) => {
error!("send failed: {:?}", e);
conn.close(false, 0x1, b"fail").ok();
break;
},
};
if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
}
return Err(Http3TestError::Other(format!(
"send() failed: {:?}",
e
)));
}
debug!("written {}", write);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if reqs_complete != reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
}
Ok(())
}
fn hdrs_to_strings(hdrs: &[quiche::h3::Header]) -> Vec<(String, String)> {
hdrs.iter()
.map(|h| {
(
String::from_utf8(h.name().into()).unwrap(),
String::from_utf8(h.value().into()).unwrap(),
)
})
.collect()
}
| run | identifier_name |
unet.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# knet.py
# create a network of khashmir nodes
# usage: knet.py <num_nodes> <start_port> <ip_address>
from utkhashmir import UTKhashmir
from BitTorrent.RawServer_magic import RawServer
from BitTorrent.defaultargs import common_options, rare_options
from random import randrange
from threading import Event
import sys, os
from krpc import KRPC
KRPC.noisy = 1
class Network:
def __init__(self, size=0, startport=5555, localip='127.0.0.1'):
self.num = size
self.startport = startport
self.localip = localip
def _done(self, val):
self.done = 1
def simpleSetUp(self):
#self.kfiles()
d = dict([(x[0],x[1]) for x in common_options + rare_options])
self.r = RawServer(Event(), d)
self.l = []
for i in range(self.num):
self.l.append(UTKhashmir('', self.startport + i, 'kh%s.db' % (self.startport + i), self.r))
for i in self.l:
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
self.r.listen_once(1)
self.r.listen_once(1)
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
|
def tearDown(self):
for i in self.l:
i.rawserver.stop_listening_udp(i.socket)
i.socket.close()
#self.kfiles()
def kfiles(self):
for i in range(self.startport, self.startport+self.num):
try:
os.unlink('kh%s.db' % i)
except:
pass
self.r.listen_once(1)
if __name__ == "__main__":
n = Network(int(sys.argv[1]), int(sys.argv[2]))
n.simpleSetUp()
print ">>> network ready"
try:
n.r.listen_forever()
finally:
n.tearDown()
| self.r.listen_once(1) | conditional_block |
unet.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# knet.py
# create a network of khashmir nodes
# usage: knet.py <num_nodes> <start_port> <ip_address>
from utkhashmir import UTKhashmir
from BitTorrent.RawServer_magic import RawServer
from BitTorrent.defaultargs import common_options, rare_options | import sys, os
from krpc import KRPC
KRPC.noisy = 1
class Network:
def __init__(self, size=0, startport=5555, localip='127.0.0.1'):
self.num = size
self.startport = startport
self.localip = localip
def _done(self, val):
self.done = 1
def simpleSetUp(self):
#self.kfiles()
d = dict([(x[0],x[1]) for x in common_options + rare_options])
self.r = RawServer(Event(), d)
self.l = []
for i in range(self.num):
self.l.append(UTKhashmir('', self.startport + i, 'kh%s.db' % (self.startport + i), self.r))
for i in self.l:
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
self.r.listen_once(1)
self.r.listen_once(1)
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
def tearDown(self):
for i in self.l:
i.rawserver.stop_listening_udp(i.socket)
i.socket.close()
#self.kfiles()
def kfiles(self):
for i in range(self.startport, self.startport+self.num):
try:
os.unlink('kh%s.db' % i)
except:
pass
self.r.listen_once(1)
if __name__ == "__main__":
n = Network(int(sys.argv[1]), int(sys.argv[2]))
n.simpleSetUp()
print ">>> network ready"
try:
n.r.listen_forever()
finally:
n.tearDown() | from random import randrange
from threading import Event | random_line_split |
unet.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# knet.py
# create a network of khashmir nodes
# usage: knet.py <num_nodes> <start_port> <ip_address>
from utkhashmir import UTKhashmir
from BitTorrent.RawServer_magic import RawServer
from BitTorrent.defaultargs import common_options, rare_options
from random import randrange
from threading import Event
import sys, os
from krpc import KRPC
KRPC.noisy = 1
class | :
def __init__(self, size=0, startport=5555, localip='127.0.0.1'):
self.num = size
self.startport = startport
self.localip = localip
def _done(self, val):
self.done = 1
def simpleSetUp(self):
#self.kfiles()
d = dict([(x[0],x[1]) for x in common_options + rare_options])
self.r = RawServer(Event(), d)
self.l = []
for i in range(self.num):
self.l.append(UTKhashmir('', self.startport + i, 'kh%s.db' % (self.startport + i), self.r))
for i in self.l:
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
self.r.listen_once(1)
self.r.listen_once(1)
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
def tearDown(self):
for i in self.l:
i.rawserver.stop_listening_udp(i.socket)
i.socket.close()
#self.kfiles()
def kfiles(self):
for i in range(self.startport, self.startport+self.num):
try:
os.unlink('kh%s.db' % i)
except:
pass
self.r.listen_once(1)
if __name__ == "__main__":
n = Network(int(sys.argv[1]), int(sys.argv[2]))
n.simpleSetUp()
print ">>> network ready"
try:
n.r.listen_forever()
finally:
n.tearDown()
| Network | identifier_name |
unet.py | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# knet.py
# create a network of khashmir nodes
# usage: knet.py <num_nodes> <start_port> <ip_address>
from utkhashmir import UTKhashmir
from BitTorrent.RawServer_magic import RawServer
from BitTorrent.defaultargs import common_options, rare_options
from random import randrange
from threading import Event
import sys, os
from krpc import KRPC
KRPC.noisy = 1
class Network:
def __init__(self, size=0, startport=5555, localip='127.0.0.1'):
self.num = size
self.startport = startport
self.localip = localip
def _done(self, val):
|
def simpleSetUp(self):
#self.kfiles()
d = dict([(x[0],x[1]) for x in common_options + rare_options])
self.r = RawServer(Event(), d)
self.l = []
for i in range(self.num):
self.l.append(UTKhashmir('', self.startport + i, 'kh%s.db' % (self.startport + i), self.r))
for i in self.l:
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
self.r.listen_once(1)
self.r.listen_once(1)
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
def tearDown(self):
for i in self.l:
i.rawserver.stop_listening_udp(i.socket)
i.socket.close()
#self.kfiles()
def kfiles(self):
for i in range(self.startport, self.startport+self.num):
try:
os.unlink('kh%s.db' % i)
except:
pass
self.r.listen_once(1)
if __name__ == "__main__":
n = Network(int(sys.argv[1]), int(sys.argv[2]))
n.simpleSetUp()
print ">>> network ready"
try:
n.r.listen_forever()
finally:
n.tearDown()
| self.done = 1 | identifier_body |
patchDiff.js | import * as R from 'ramda';
import { getPatchPath } from 'xod-project';
import { isAmong } from 'xod-func-tools';
import { def } from './types';
import { CHANGE_TYPES } from './constants'; |
const createPatchChange = def(
'createPatchChange :: AnyChangeType -> Patch -> AnyPatchChange',
(changeType, patch) =>
R.compose(
R.when(
() =>
changeType === CHANGE_TYPES.ADDED ||
changeType === CHANGE_TYPES.MODIFIED,
R.assoc('data', patch)
),
R.applySpec({
path: getPatchPath,
changeType: R.always(changeType),
})
)(patch)
);
export const calculateAdded = def(
'calculateAdded :: [Patch] -> [Patch] -> [AddedPatchChange]',
R.compose(
R.map(createPatchChange(CHANGE_TYPES.ADDED)),
R.flip(R.differenceWith(isEqualPatchPaths))
)
);
export const calculateModified = def(
'calculateModified :: [Patch] -> [Patch] -> [ModifiedPatchChange]',
(before, after) => {
const beforeIds = R.map(getPatchPath, before);
return R.compose(
R.map(createPatchChange(CHANGE_TYPES.MODIFIED)),
R.difference(R.__, before),
R.filter(R.compose(isAmong(beforeIds), getPatchPath))
)(after);
}
);
export const calculateDeleted = def(
'calculateDeleted :: [Patch] -> [Patch] -> [DeletedPatchChange]',
R.compose(
R.map(createPatchChange(CHANGE_TYPES.DELETED)),
R.differenceWith(isEqualPatchPaths)
)
);
export const calculateDiff = def(
'calculateDiff :: [Patch] -> [Patch] -> [AnyPatchChange]',
R.converge(R.unapply(R.unnest), [
calculateAdded,
calculateModified,
calculateDeleted,
])
); |
const isEqualPatchPaths = def(
'isEqualPatchPaths :: Patch -> Patch -> Boolean',
R.useWith(R.equals, [getPatchPath, getPatchPath])
); | random_line_split |
require.js | // -- kriskowal Kris Kowal Copyright (C) 2009-2010 MIT License
(function (require, exports) {
/**
* @module
*/
/*whatsupdoc*/
var Q = require("q");
var has = Object.prototype.hasOwnProperty;
var update = function (_object, object) {
for (var key in object) {
if (has.call(object, key)) {
_object[key] = object[key];
}
}
};
var copy = function (object) {
var _object = {};
update(_object, object);
return _object;
}
var enquote = typeof JSON !== "undefined" && JSON.stringify || function (text) {
return text;
};
/**
* Creates a `require` function, and arranges for modules
* to be executed and their exports memoized, in a lexical
* scope that includes:
*
* * `require(id)` with support for identifiers relative to
* the calling module.
* * `require.loader` for direct access to the module
* loader, which can be used in nested requirers.
* * `require.force(id)`
* * `require.once(id, scope)` to execute but not memoize
* a module, with an optional object that owns additional
* free variables to inject into the module's lexical
* scope.
* * `module`
* * `id`
* * `path`
* * `exports`
*
* @param {{loader, modules, debug}} options
* @constructor
* @returns {require(id)}
*/
exports.Require = function (options) {
options = options || {};
var loader = options.loader;
var factories = options.factories || {};
var modules = options.modules || {};
var apis = options.exports || {};
var supportDefine = options.supportDefine;
var sharedScope = options.scope || {};
for (var id in apis)
if (has.call(apis, id))
modules[id] = {"exports": apis[id]};
var load = function (id) {
if (!factories[id]) {
if (!loader) {
return Q.reject("require: Can't load " + enquote(id));
} else {
factories[id] = loader.load(id);
}
}
return factories[id];
};
var require = function (id, baseId, options) {
var module, factory, exports, completed, require;
options = options || {};
id = resolve(id, baseId);
if (has.call(modules, id)) {
module = modules[id];
} else if (has.call(factories, id)) {
factory = factories[id];
module = Module(id, factory.path);
modules[id] = module;
exports = modules[id].exports;
require = Require(id);
scope = {};
update(scope, sharedScope);
update(scope, options.scope || {});
update(scope, {
"require": require,
"exports": exports,
"module": module
});
if (supportDefine)
scope.define = Define(require, exports, module);
try {
var returned = factory(scope);
completed = true;
} finally {
if (!completed) {
delete modules[id];
}
}
if (typeof returned !== "undefined") {
module.exports = returned;
}
} else {
throw new Error("require: Can't load " + enquote(id));
}
return module.exports;
};
// curries require for a module, so its baseId can be assumed
var Require = function (baseId) {
var _require = function (id) { return require(id, baseId); };
_require.async = function (id) { return require.async(id, baseId) };
_require.loader = loader;
_require.main = modules[options.main];
return _require;
};
var Define = function (require, exports, module) {
return function () {
var callback = arguments[arguments.length - 1];
var returned;
if (typeof callback === "function") {
returned = callback(require, exports, module);
} else {
returned = callback;
}
if (typeof returned !== "undefined")
module.exports = returned;
return returned;
};
};
// creates a module object
var Module = function (baseId, path) {
var module = {};
module.exports = {};
module.id = baseId;
module.path = path;
return module;
};
// asynchronously adds module factories to a factory list
var advanceFactories = function (id, factories) {
return Q.when(load(id), function (factory) {
return (factory.requirements || []).reduce(function (factories, requirement) {
requirement = resolve(requirement, id);
return Q.when(factories, function (factories) {
if (has.call(modules, requirement) || has.call(factories, requirement))
return factories;
return advanceFactories(requirement, factories);
});
}, factories);
});
};
require.reload = function (id) {
return Q.when(advanceFactories(id, {}), function (factories) {
return exports.Require({
"loader": loader,
"factories": factories
});
});
};
require.ensure = function (ids, callback) {
var _modules = copy(modules);
var _factories = ids.reduce(function (factories, id) {
return Q.when(factories, function (factories) {
return advanceFactories(id, factories);
});
}, copy(factories));
return Q.when(_factories, function (factories) {
callback(exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
}));
}, function (reason) {
throw new Error(reason.message || reason);
});
};
require.async = function (id, baseId) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
});
return _require(id, baseId);
});
};
require.exec = function (id, scope) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules,
"main": id,
"scope": sharedScope, | "supportDefine": supportDefine
});
return _require(id, undefined, {
"scope": scope
});
});
};
require.loader = loader;
return require;
};
exports.resolve = resolve;
function resolve(id, baseId) {
id = String(id);
var ids = id.split("/");
// assert ids.length >= 1 since "".split("") == [""]
var first = ids[0];
if (first === ".." || first === ".") {
var baseIds = baseId.split("/");
baseIds.pop();
ids.unshift.apply(ids, baseIds);
}
var parts = [];
while (ids.length) {
var part = ids.shift();
if (part === ".") {
} else if (part === "..") {
parts.pop();
} else {
parts.push(part);
}
}
return parts.join("/");
}
}).apply({},
typeof exports !== "undefined" ? [
require,
exports
] : [
(function (global) {
return function (id) {
return global["/" + id];
}
})(this),
this["/require"] = {}
]
); | random_line_split | |
require.js | // -- kriskowal Kris Kowal Copyright (C) 2009-2010 MIT License
(function (require, exports) {
/**
* @module
*/
/*whatsupdoc*/
var Q = require("q");
var has = Object.prototype.hasOwnProperty;
var update = function (_object, object) {
for (var key in object) {
if (has.call(object, key)) {
_object[key] = object[key];
}
}
};
var copy = function (object) {
var _object = {};
update(_object, object);
return _object;
}
var enquote = typeof JSON !== "undefined" && JSON.stringify || function (text) {
return text;
};
/**
* Creates a `require` function, and arranges for modules
* to be executed and their exports memoized, in a lexical
* scope that includes:
*
* * `require(id)` with support for identifiers relative to
* the calling module.
* * `require.loader` for direct access to the module
* loader, which can be used in nested requirers.
* * `require.force(id)`
* * `require.once(id, scope)` to execute but not memoize
* a module, with an optional object that owns additional
* free variables to inject into the module's lexical
* scope.
* * `module`
* * `id`
* * `path`
* * `exports`
*
* @param {{loader, modules, debug}} options
* @constructor
* @returns {require(id)}
*/
exports.Require = function (options) {
options = options || {};
var loader = options.loader;
var factories = options.factories || {};
var modules = options.modules || {};
var apis = options.exports || {};
var supportDefine = options.supportDefine;
var sharedScope = options.scope || {};
for (var id in apis)
if (has.call(apis, id))
modules[id] = {"exports": apis[id]};
var load = function (id) {
if (!factories[id]) {
if (!loader) {
return Q.reject("require: Can't load " + enquote(id));
} else {
factories[id] = loader.load(id);
}
}
return factories[id];
};
var require = function (id, baseId, options) {
var module, factory, exports, completed, require;
options = options || {};
id = resolve(id, baseId);
if (has.call(modules, id)) {
module = modules[id];
} else if (has.call(factories, id)) {
factory = factories[id];
module = Module(id, factory.path);
modules[id] = module;
exports = modules[id].exports;
require = Require(id);
scope = {};
update(scope, sharedScope);
update(scope, options.scope || {});
update(scope, {
"require": require,
"exports": exports,
"module": module
});
if (supportDefine)
scope.define = Define(require, exports, module);
try {
var returned = factory(scope);
completed = true;
} finally {
if (!completed) {
delete modules[id];
}
}
if (typeof returned !== "undefined") {
module.exports = returned;
}
} else {
throw new Error("require: Can't load " + enquote(id));
}
return module.exports;
};
// curries require for a module, so its baseId can be assumed
var Require = function (baseId) {
var _require = function (id) { return require(id, baseId); };
_require.async = function (id) { return require.async(id, baseId) };
_require.loader = loader;
_require.main = modules[options.main];
return _require;
};
var Define = function (require, exports, module) {
return function () {
var callback = arguments[arguments.length - 1];
var returned;
if (typeof callback === "function") {
returned = callback(require, exports, module);
} else {
returned = callback;
}
if (typeof returned !== "undefined")
module.exports = returned;
return returned;
};
};
// creates a module object
var Module = function (baseId, path) {
var module = {};
module.exports = {};
module.id = baseId;
module.path = path;
return module;
};
// asynchronously adds module factories to a factory list
var advanceFactories = function (id, factories) {
return Q.when(load(id), function (factory) {
return (factory.requirements || []).reduce(function (factories, requirement) {
requirement = resolve(requirement, id);
return Q.when(factories, function (factories) {
if (has.call(modules, requirement) || has.call(factories, requirement))
return factories;
return advanceFactories(requirement, factories);
});
}, factories);
});
};
require.reload = function (id) {
return Q.when(advanceFactories(id, {}), function (factories) {
return exports.Require({
"loader": loader,
"factories": factories
});
});
};
require.ensure = function (ids, callback) {
var _modules = copy(modules);
var _factories = ids.reduce(function (factories, id) {
return Q.when(factories, function (factories) {
return advanceFactories(id, factories);
});
}, copy(factories));
return Q.when(_factories, function (factories) {
callback(exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
}));
}, function (reason) {
throw new Error(reason.message || reason);
});
};
require.async = function (id, baseId) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
});
return _require(id, baseId);
});
};
require.exec = function (id, scope) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules,
"main": id,
"scope": sharedScope,
"supportDefine": supportDefine
});
return _require(id, undefined, {
"scope": scope
});
});
};
require.loader = loader;
return require;
};
exports.resolve = resolve;
function | (id, baseId) {
id = String(id);
var ids = id.split("/");
// assert ids.length >= 1 since "".split("") == [""]
var first = ids[0];
if (first === ".." || first === ".") {
var baseIds = baseId.split("/");
baseIds.pop();
ids.unshift.apply(ids, baseIds);
}
var parts = [];
while (ids.length) {
var part = ids.shift();
if (part === ".") {
} else if (part === "..") {
parts.pop();
} else {
parts.push(part);
}
}
return parts.join("/");
}
}).apply({},
typeof exports !== "undefined" ? [
require,
exports
] : [
(function (global) {
return function (id) {
return global["/" + id];
}
})(this),
this["/require"] = {}
]
);
| resolve | identifier_name |
require.js | // -- kriskowal Kris Kowal Copyright (C) 2009-2010 MIT License
(function (require, exports) {
/**
* @module
*/
/*whatsupdoc*/
var Q = require("q");
var has = Object.prototype.hasOwnProperty;
var update = function (_object, object) {
for (var key in object) {
if (has.call(object, key)) {
_object[key] = object[key];
}
}
};
var copy = function (object) {
var _object = {};
update(_object, object);
return _object;
}
var enquote = typeof JSON !== "undefined" && JSON.stringify || function (text) {
return text;
};
/**
* Creates a `require` function, and arranges for modules
* to be executed and their exports memoized, in a lexical
* scope that includes:
*
* * `require(id)` with support for identifiers relative to
* the calling module.
* * `require.loader` for direct access to the module
* loader, which can be used in nested requirers.
* * `require.force(id)`
* * `require.once(id, scope)` to execute but not memoize
* a module, with an optional object that owns additional
* free variables to inject into the module's lexical
* scope.
* * `module`
* * `id`
* * `path`
* * `exports`
*
* @param {{loader, modules, debug}} options
* @constructor
* @returns {require(id)}
*/
exports.Require = function (options) {
options = options || {};
var loader = options.loader;
var factories = options.factories || {};
var modules = options.modules || {};
var apis = options.exports || {};
var supportDefine = options.supportDefine;
var sharedScope = options.scope || {};
for (var id in apis)
if (has.call(apis, id))
modules[id] = {"exports": apis[id]};
var load = function (id) {
if (!factories[id]) {
if (!loader) {
return Q.reject("require: Can't load " + enquote(id));
} else {
factories[id] = loader.load(id);
}
}
return factories[id];
};
var require = function (id, baseId, options) {
var module, factory, exports, completed, require;
options = options || {};
id = resolve(id, baseId);
if (has.call(modules, id)) {
module = modules[id];
} else if (has.call(factories, id)) {
factory = factories[id];
module = Module(id, factory.path);
modules[id] = module;
exports = modules[id].exports;
require = Require(id);
scope = {};
update(scope, sharedScope);
update(scope, options.scope || {});
update(scope, {
"require": require,
"exports": exports,
"module": module
});
if (supportDefine)
scope.define = Define(require, exports, module);
try {
var returned = factory(scope);
completed = true;
} finally {
if (!completed) {
delete modules[id];
}
}
if (typeof returned !== "undefined") {
module.exports = returned;
}
} else {
throw new Error("require: Can't load " + enquote(id));
}
return module.exports;
};
// curries require for a module, so its baseId can be assumed
var Require = function (baseId) {
var _require = function (id) { return require(id, baseId); };
_require.async = function (id) { return require.async(id, baseId) };
_require.loader = loader;
_require.main = modules[options.main];
return _require;
};
var Define = function (require, exports, module) {
return function () {
var callback = arguments[arguments.length - 1];
var returned;
if (typeof callback === "function") {
returned = callback(require, exports, module);
} else {
returned = callback;
}
if (typeof returned !== "undefined")
module.exports = returned;
return returned;
};
};
// creates a module object
var Module = function (baseId, path) {
var module = {};
module.exports = {};
module.id = baseId;
module.path = path;
return module;
};
// asynchronously adds module factories to a factory list
var advanceFactories = function (id, factories) {
return Q.when(load(id), function (factory) {
return (factory.requirements || []).reduce(function (factories, requirement) {
requirement = resolve(requirement, id);
return Q.when(factories, function (factories) {
if (has.call(modules, requirement) || has.call(factories, requirement))
return factories;
return advanceFactories(requirement, factories);
});
}, factories);
});
};
require.reload = function (id) {
return Q.when(advanceFactories(id, {}), function (factories) {
return exports.Require({
"loader": loader,
"factories": factories
});
});
};
require.ensure = function (ids, callback) {
var _modules = copy(modules);
var _factories = ids.reduce(function (factories, id) {
return Q.when(factories, function (factories) {
return advanceFactories(id, factories);
});
}, copy(factories));
return Q.when(_factories, function (factories) {
callback(exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
}));
}, function (reason) {
throw new Error(reason.message || reason);
});
};
require.async = function (id, baseId) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
});
return _require(id, baseId);
});
};
require.exec = function (id, scope) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules,
"main": id,
"scope": sharedScope,
"supportDefine": supportDefine
});
return _require(id, undefined, {
"scope": scope
});
});
};
require.loader = loader;
return require;
};
exports.resolve = resolve;
function resolve(id, baseId) |
}).apply({},
typeof exports !== "undefined" ? [
require,
exports
] : [
(function (global) {
return function (id) {
return global["/" + id];
}
})(this),
this["/require"] = {}
]
);
| {
id = String(id);
var ids = id.split("/");
// assert ids.length >= 1 since "".split("") == [""]
var first = ids[0];
if (first === ".." || first === ".") {
var baseIds = baseId.split("/");
baseIds.pop();
ids.unshift.apply(ids, baseIds);
}
var parts = [];
while (ids.length) {
var part = ids.shift();
if (part === ".") {
} else if (part === "..") {
parts.pop();
} else {
parts.push(part);
}
}
return parts.join("/");
} | identifier_body |
require.js | // -- kriskowal Kris Kowal Copyright (C) 2009-2010 MIT License
(function (require, exports) {
/**
* @module
*/
/*whatsupdoc*/
var Q = require("q");
var has = Object.prototype.hasOwnProperty;
var update = function (_object, object) {
for (var key in object) {
if (has.call(object, key)) {
_object[key] = object[key];
}
}
};
var copy = function (object) {
var _object = {};
update(_object, object);
return _object;
}
var enquote = typeof JSON !== "undefined" && JSON.stringify || function (text) {
return text;
};
/**
* Creates a `require` function, and arranges for modules
* to be executed and their exports memoized, in a lexical
* scope that includes:
*
* * `require(id)` with support for identifiers relative to
* the calling module.
* * `require.loader` for direct access to the module
* loader, which can be used in nested requirers.
* * `require.force(id)`
* * `require.once(id, scope)` to execute but not memoize
* a module, with an optional object that owns additional
* free variables to inject into the module's lexical
* scope.
* * `module`
* * `id`
* * `path`
* * `exports`
*
* @param {{loader, modules, debug}} options
* @constructor
* @returns {require(id)}
*/
exports.Require = function (options) {
options = options || {};
var loader = options.loader;
var factories = options.factories || {};
var modules = options.modules || {};
var apis = options.exports || {};
var supportDefine = options.supportDefine;
var sharedScope = options.scope || {};
for (var id in apis)
if (has.call(apis, id))
modules[id] = {"exports": apis[id]};
var load = function (id) {
if (!factories[id]) {
if (!loader) {
return Q.reject("require: Can't load " + enquote(id));
} else {
factories[id] = loader.load(id);
}
}
return factories[id];
};
var require = function (id, baseId, options) {
var module, factory, exports, completed, require;
options = options || {};
id = resolve(id, baseId);
if (has.call(modules, id)) {
module = modules[id];
} else if (has.call(factories, id)) {
factory = factories[id];
module = Module(id, factory.path);
modules[id] = module;
exports = modules[id].exports;
require = Require(id);
scope = {};
update(scope, sharedScope);
update(scope, options.scope || {});
update(scope, {
"require": require,
"exports": exports,
"module": module
});
if (supportDefine)
scope.define = Define(require, exports, module);
try {
var returned = factory(scope);
completed = true;
} finally {
if (!completed) {
delete modules[id];
}
}
if (typeof returned !== "undefined") {
module.exports = returned;
}
} else |
return module.exports;
};
// curries require for a module, so its baseId can be assumed
var Require = function (baseId) {
var _require = function (id) { return require(id, baseId); };
_require.async = function (id) { return require.async(id, baseId) };
_require.loader = loader;
_require.main = modules[options.main];
return _require;
};
var Define = function (require, exports, module) {
return function () {
var callback = arguments[arguments.length - 1];
var returned;
if (typeof callback === "function") {
returned = callback(require, exports, module);
} else {
returned = callback;
}
if (typeof returned !== "undefined")
module.exports = returned;
return returned;
};
};
// creates a module object
var Module = function (baseId, path) {
var module = {};
module.exports = {};
module.id = baseId;
module.path = path;
return module;
};
// asynchronously adds module factories to a factory list
var advanceFactories = function (id, factories) {
return Q.when(load(id), function (factory) {
return (factory.requirements || []).reduce(function (factories, requirement) {
requirement = resolve(requirement, id);
return Q.when(factories, function (factories) {
if (has.call(modules, requirement) || has.call(factories, requirement))
return factories;
return advanceFactories(requirement, factories);
});
}, factories);
});
};
require.reload = function (id) {
return Q.when(advanceFactories(id, {}), function (factories) {
return exports.Require({
"loader": loader,
"factories": factories
});
});
};
require.ensure = function (ids, callback) {
var _modules = copy(modules);
var _factories = ids.reduce(function (factories, id) {
return Q.when(factories, function (factories) {
return advanceFactories(id, factories);
});
}, copy(factories));
return Q.when(_factories, function (factories) {
callback(exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
}));
}, function (reason) {
throw new Error(reason.message || reason);
});
};
require.async = function (id, baseId) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules
});
return _require(id, baseId);
});
};
require.exec = function (id, scope) {
var _factories = copy(factories);
var _modules = copy(modules);
return Q.when(advanceFactories(id, _factories), function (factories) {
var _require = exports.Require({
"loader": loader,
"factories": factories,
"modules": _modules,
"main": id,
"scope": sharedScope,
"supportDefine": supportDefine
});
return _require(id, undefined, {
"scope": scope
});
});
};
require.loader = loader;
return require;
};
exports.resolve = resolve;
function resolve(id, baseId) {
id = String(id);
var ids = id.split("/");
// assert ids.length >= 1 since "".split("") == [""]
var first = ids[0];
if (first === ".." || first === ".") {
var baseIds = baseId.split("/");
baseIds.pop();
ids.unshift.apply(ids, baseIds);
}
var parts = [];
while (ids.length) {
var part = ids.shift();
if (part === ".") {
} else if (part === "..") {
parts.pop();
} else {
parts.push(part);
}
}
return parts.join("/");
}
}).apply({},
typeof exports !== "undefined" ? [
require,
exports
] : [
(function (global) {
return function (id) {
return global["/" + id];
}
})(this),
this["/require"] = {}
]
);
| {
throw new Error("require: Can't load " + enquote(id));
} | conditional_block |
fasta.rs | // Copyright 2014-2016 Johannes Köster, Christopher Schröder.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! FASTA format reading and writing.
//!
//! # Example
//!
//! ```
//! use std::io;
//! use bio::io::fasta;
//! let reader = fasta::Reader::new(io::stdin());
//! ```
use std::io;
use std::io::prelude::*;
use std::ascii::AsciiExt;
use std::collections;
use std::fs;
use std::path::Path;
use std::convert::AsRef;
use csv;
use utils::{TextSlice, Text};
/// A FASTA reader.
pub struct Reader<R: io::Read> {
reader: io::BufReader<R>,
line: String,
}
impl Reader<fs::File> {
/// Read FASTA from given file path.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::open(path).map(Reader::new)
}
}
impl<R: io::Read> Reader<R> {
/// Create a new Fasta reader given an instance of `io::Read`.
pub fn new(reader: R) -> Self {
Reader {
reader: io::BufReader::new(reader),
line: String::new(),
}
}
/// Read next FASTA record into the given `Record`.
pub fn read(&mut self, record: &mut Record) -> io::Result<()> {
record.clear();
if self.line.is_empty() {
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() {
return Ok(());
}
}
if !self.line.starts_with('>') {
return Err(io::Error::new(io::ErrorKind::Other, "Expected > at record start."));
}
record.header.push_str(&self.line);
loop {
self.line.clear();
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() || self.line.starts_with('>') {
break;
}
record.seq.push_str(self.line.trim_right());
}
Ok(())
}
/// Return an iterator over the records of this FastQ file.
pub fn records(self) -> Records<R> {
Records { reader: self }
}
}
/// A FASTA index as created by SAMtools (.fai).
pub struct Index {
inner: collections::HashMap<String, IndexRecord>,
seqs: Vec<String>,
}
impl Index {
/// Open a FASTA index from a given `io::Read` instance.
pub fn new<R: io::Read>(fai: R) -> csv::Result<Self> {
let mut inner = collections::HashMap::new();
let mut seqs = vec![];
let mut fai_reader = csv::Reader::from_reader(fai).delimiter(b'\t').has_headers(false);
for row in fai_reader.decode() {
let (name, record): (String, IndexRecord) = try!(row);
seqs.push(name.clone());
inner.insert(name, record);
}
Ok(Index {
inner: inner,
seqs: seqs,
})
}
/// Open a FASTA index from a given file path.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
match fs::File::open(path) {
Ok(fai) => Self::new(fai),
Err(e) => Err(csv::Error::Io(e)),
}
}
/// Open a FASTA index given the corresponding FASTA file path (e.g. for ref.fasta we expect ref.fasta.fai).
pub fn with_fasta_file<P: AsRef<Path>>(fasta_path: &P) -> csv::Result<Self> {
let mut ext = fasta_path.as_ref().extension().unwrap().to_str().unwrap().to_owned();
ext.push_str(".fai");
let fai_path = fasta_path.as_ref().with_extension(ext);
Self::from_file(&fai_path)
}
/// Return a vector of sequences described in the index.
pub fn sequences(&self) -> Vec<Sequence> {
self.seqs
.iter()
.map(|name| {
Sequence {
name: name.clone(),
len: self.inner.get(name).unwrap().len,
}
})
.collect()
}
}
/// A FASTA reader with an index as created by SAMtools (.fai).
pub struct IndexedReader<R: io::Read + io::Seek> {
reader: io::BufReader<R>,
pub index: Index,
}
impl IndexedReader<fs::File> {
/// Read from a given file path. This assumes the index ref.fasta.fai to be present for FASTA ref.fasta.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
let index = try!(Index::with_fasta_file(path));
match fs::File::open(path) {
Ok(fasta) => Ok(IndexedReader::with_index(fasta, index)),
Err(e) => Err(csv::Error::Io(e)),
}
}
}
impl<R: io::Read + io::Seek> IndexedReader<R> {
/// Read from a FASTA and its index, both given as `io::Read`. FASTA has to be `io::Seek` in addition.
pub fn new<I: io::Read>(fasta: R, fai: I) -> csv::Result<Self> {
let index = try!(Index::new(fai));
Ok(IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
})
}
/// Read from a FASTA and its index, the first given as `io::Read`, the second given as index object.
pub fn with_index(fasta: R, index: Index) -> Self {
IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
}
}
/// For a given seqname, read the whole sequence into the given vector.
pub fn read_all(&mut self, seqname: &str, seq: &mut Text) -> io::Result<()> {
match self.index.inner.get(seqname) {
Some(&idx) => self.read(seqname, 0, idx.len, seq),
None => Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name.")),
}
}
/// Read the given interval of the given seqname into the given vector (stop position is exclusive).
pub fn read(&mut self,
seqname: &str,
start: u64,
stop: u64,
seq: &mut Text)
-> io::Result<()> {
if let Some(idx) = self.index.inner.get(seqname) {
seq.clear();
if stop > idx.len {
return Err(io::Error::new(io::ErrorKind::Other, "FASTA read interval was out of bounds"));
}
if start > stop {
return Err(io::Error::new(io::ErrorKind::Other, "Invalid query interval"));
}
let mut line_offset = start % idx.line_bases;
let line_start = start / idx.line_bases * idx.line_bytes;
let offset = idx.offset + line_start + line_offset;
try!(self.reader.seek(io::SeekFrom::Start(offset)));
let length = stop - start as u64;
let mut buf = vec![0u8; idx.line_bytes as usize];
while (seq.len() as u64) < length {
let bases_left = length - seq.len() as u64;
let bases_on_line = idx.line_bases - line_offset;
let (bytes_to_read, bytes_to_keep) = if bases_on_line < bases_left {
(idx.line_bytes - line_offset, bases_on_line)
} else {
(bases_left, bases_left)
};
try!(self.reader.read_exact(&mut buf[..bytes_to_read as usize]));
seq.extend_from_slice(&buf[..bytes_to_keep as usize]);
line_offset = 0;
}
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name."))
}
}
}
/// Record of a FASTA index.
#[derive(RustcDecodable, Debug, Copy, Clone)]
struct IndexRecord {
len: u64,
offset: u64,
line_bases: u64,
line_bytes: u64,
}
/// A sequence record returned by the FASTA index.
pub struct Sequence {
pub name: String,
pub len: u64,
}
/// A Fasta writer.
pub struct Writer<W: io::Write> {
writer: io::BufWriter<W>,
}
impl Writer<fs::File> {
/// Write to the given file path.
pub fn to_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::create(path).map(Writer::new)
}
}
impl<W: io::Write> Writer<W> {
/// Create a new Fasta writer.
pub fn new(writer: W) -> Self {
Writer { writer: io::BufWriter::new(writer) }
}
/// Directly write a Fasta record.
pub fn write_record(&mut self, record: &Record) -> io::Result<()> {
self.write(record.id().unwrap_or(""), record.desc(), record.seq())
}
/// Write a Fasta record with given id, optional description and sequence.
pub fn write(&mut self, id: &str, desc: Option<&str>, seq: TextSlice) -> io::Result<()> {
try!(self.writer.write(b">"));
try!(self.writer.write(id.as_bytes()));
if desc.is_some() {
try!(self.writer.write(b" "));
try!(self.writer.write(desc.unwrap().as_bytes()));
}
try!(self.writer.write(b"\n"));
try!(self.writer.write(seq));
try!(self.writer.write(b"\n"));
Ok(())
}
/// Flush the writer, ensuring that everything is written.
pub fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}
/// A FASTA record.
#[derive(Default)]
pub struct Record {
header: String,
seq: String,
}
impl Record {
/// Create a new instance.
pub fn new() -> Self {
Record {
header: String::new(),
seq: String::new(),
}
}
/// Check if record is empty.
pub fn is_empty(&self) -> bool {
self.header.is_empty() && self.seq.is_empty()
}
/// Check validity of Fasta record.
pub fn check(&self) -> Result<(), &str> {
if self.id().is_none() {
return Err("Expecting id for FastQ record.");
}
if !self.seq.is_ascii() {
return Err("Non-ascii character found in sequence.");
}
Ok(())
}
/// Return the id of the record.
pub fn id(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2, ' ').next()
}
/// Return descriptions if present.
pub fn desc(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2, ' ').skip(1).next()
}
/// Return the sequence of the record.
pub fn seq(&self) -> TextSlice {
self.seq.as_bytes()
}
/// Clear the record.
fn clear(&mut self) {
self.header.clear();
self.seq.clear();
}
}
/// An iterator over the records of a Fasta file.
pub struct Records<R: io::Read> {
reader: Reader<R>,
}
impl<R: io::Read> Iterator for Records<R> {
type Item = io::Result<Record>;
fn next(&mut self) -> Option<io::Result<Record>> {
let mut record = Record::new();
match self.reader.read(&mut record) {
Ok(()) if record.is_empty() => None,
Ok(()) => Some(Ok(record)),
Err(err) => Some(Err(err)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io;
const FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
CCGTAGGCTGAA
CGTAGGCTGAAA
GTAGGCTGAAAA
CCCC
>id2
ATTGTTGTTTTA
ATTGTTGTTTTA
ATTGTTGTTTTA
GGGG
";
const FAI_FILE: &'static [u8] = b"id\t52\t9\t12\t13
id2\t40\t71\t12\t13
";
const FASTA_FILE_CRLF: &'static [u8] = b">id desc\r
ACCGTAGGCTGA\r
CCGTAGGCTGAA\r
CGTAGGCTGAAA\r
GTAGGCTGAAAA\r
CCCC\r
>id2\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
GGGG\r
";
const FAI_FILE_CRLF: &'static [u8] = b"id\t52\t10\t12\t14\r
id2\t40\t78\t12\t14\r
";
| CCCC";
const FAI_FILE_NO_TRAILING_LF: &'static [u8] = b"id\t16\t9\t12\t13";
const WRITE_FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
>id2
ATTGTTGTTTTA
";
#[test]
fn test_reader() {
let reader = Reader::new(FASTA_FILE);
let ids = [Some("id"), Some("id2")];
let descs = [Some("desc"), None];
let seqs: [&[u8]; 2] = [b"ACCGTAGGCTGACCGTAGGCTGAACGTAGGCTGAAAGTAGGCTGAAAACCCC",
b"ATTGTTGTTTTAATTGTTGTTTTAATTGTTGTTTTAGGGG"];
for (i, r) in reader.records().enumerate() {
let record = r.ok().expect("Error reading record");
assert_eq!(record.check(), Ok(()));
assert_eq!(record.id(), ids[i]);
assert_eq!(record.desc(), descs[i]);
assert_eq!(record.seq(), seqs[i]);
}
// let record = records.ok().nth(1).unwrap();
}
#[test]
fn test_indexed_reader() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE), FAI_FILE)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
#[test]
fn test_indexed_reader_crlf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_CRLF), FAI_FILE_CRLF)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
fn _test_indexed_reader<T: Seek + Read>(reader: &mut IndexedReader<T>) {
let mut seq = Vec::new();
// Test reading various substrings of the sequence
reader.read("id", 1, 5, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGT");
reader.read("id", 1, 31, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGTAGGCTGACCGTAGGCTGAACGTAGGC");
reader.read("id", 13, 23, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CGTAGGCTGA");
reader.read("id", 36, 52, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
reader.read("id2", 12, 40, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"ATTGTTGTTTTAATTGTTGTTTTAGGGG");
reader.read("id2", 12, 12, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"");
reader.read("id2", 12, 13, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"A");
assert!(reader.read("id2", 12, 11, &mut seq).is_err());
assert!(reader.read("id2", 12, 1000, &mut seq).is_err());
}
#[test]
fn test_indexed_reader_no_trailing_lf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_NO_TRAILING_LF),
FAI_FILE_NO_TRAILING_LF)
.ok()
.expect("Error reading index");
let mut seq = Vec::new();
reader.read("id", 0, 16, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
}
#[test]
fn test_writer() {
let mut writer = Writer::new(Vec::new());
writer.write("id", Some("desc"), b"ACCGTAGGCTGA").ok().expect("Expected successful write");
writer.write("id2", None, b"ATTGTTGTTTTA").ok().expect("Expected successful write");
writer.flush().ok().expect("Expected successful write");
assert_eq!(writer.writer.get_ref(), &WRITE_FASTA_FILE);
}
} | const FASTA_FILE_NO_TRAILING_LF : &'static [u8] = b">id desc
GTAGGCTGAAAA | random_line_split |
fasta.rs | // Copyright 2014-2016 Johannes Köster, Christopher Schröder.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! FASTA format reading and writing.
//!
//! # Example
//!
//! ```
//! use std::io;
//! use bio::io::fasta;
//! let reader = fasta::Reader::new(io::stdin());
//! ```
use std::io;
use std::io::prelude::*;
use std::ascii::AsciiExt;
use std::collections;
use std::fs;
use std::path::Path;
use std::convert::AsRef;
use csv;
use utils::{TextSlice, Text};
/// A FASTA reader.
pub struct Reader<R: io::Read> {
reader: io::BufReader<R>,
line: String,
}
impl Reader<fs::File> {
/// Read FASTA from given file path.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::open(path).map(Reader::new)
}
}
impl<R: io::Read> Reader<R> {
/// Create a new Fasta reader given an instance of `io::Read`.
pub fn new(reader: R) -> Self {
Reader {
reader: io::BufReader::new(reader),
line: String::new(),
}
}
/// Read next FASTA record into the given `Record`.
pub fn read(&mut self, record: &mut Record) -> io::Result<()> {
record.clear();
if self.line.is_empty() {
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() {
return Ok(());
}
}
if !self.line.starts_with('>') {
return Err(io::Error::new(io::ErrorKind::Other, "Expected > at record start."));
}
record.header.push_str(&self.line);
loop {
self.line.clear();
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() || self.line.starts_with('>') {
break;
}
record.seq.push_str(self.line.trim_right());
}
Ok(())
}
/// Return an iterator over the records of this FastQ file.
pub fn records(self) -> Records<R> {
Records { reader: self }
}
}
/// A FASTA index as created by SAMtools (.fai).
pub struct Index {
inner: collections::HashMap<String, IndexRecord>,
seqs: Vec<String>,
}
impl Index {
/// Open a FASTA index from a given `io::Read` instance.
pub fn new<R: io::Read>(fai: R) -> csv::Result<Self> {
let mut inner = collections::HashMap::new();
let mut seqs = vec![];
let mut fai_reader = csv::Reader::from_reader(fai).delimiter(b'\t').has_headers(false);
for row in fai_reader.decode() {
let (name, record): (String, IndexRecord) = try!(row);
seqs.push(name.clone());
inner.insert(name, record);
}
Ok(Index {
inner: inner,
seqs: seqs,
})
}
/// Open a FASTA index from a given file path.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
match fs::File::open(path) {
Ok(fai) => Self::new(fai),
Err(e) => Err(csv::Error::Io(e)),
}
}
/// Open a FASTA index given the corresponding FASTA file path (e.g. for ref.fasta we expect ref.fasta.fai).
pub fn with_fasta_file<P: AsRef<Path>>(fasta_path: &P) -> csv::Result<Self> {
let mut ext = fasta_path.as_ref().extension().unwrap().to_str().unwrap().to_owned();
ext.push_str(".fai");
let fai_path = fasta_path.as_ref().with_extension(ext);
Self::from_file(&fai_path)
}
/// Return a vector of sequences described in the index.
pub fn sequences(&self) -> Vec<Sequence> {
self.seqs
.iter()
.map(|name| {
Sequence {
name: name.clone(),
len: self.inner.get(name).unwrap().len,
}
})
.collect()
}
}
/// A FASTA reader with an index as created by SAMtools (.fai).
pub struct IndexedReader<R: io::Read + io::Seek> {
reader: io::BufReader<R>,
pub index: Index,
}
impl IndexedReader<fs::File> {
/// Read from a given file path. This assumes the index ref.fasta.fai to be present for FASTA ref.fasta.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
let index = try!(Index::with_fasta_file(path));
match fs::File::open(path) {
Ok(fasta) => Ok(IndexedReader::with_index(fasta, index)),
Err(e) => Err(csv::Error::Io(e)),
}
}
}
impl<R: io::Read + io::Seek> IndexedReader<R> {
/// Read from a FASTA and its index, both given as `io::Read`. FASTA has to be `io::Seek` in addition.
pub fn new<I: io::Read>(fasta: R, fai: I) -> csv::Result<Self> {
let index = try!(Index::new(fai));
Ok(IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
})
}
/// Read from a FASTA and its index, the first given as `io::Read`, the second given as index object.
pub fn wi | asta: R, index: Index) -> Self {
IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
}
}
/// For a given seqname, read the whole sequence into the given vector.
pub fn read_all(&mut self, seqname: &str, seq: &mut Text) -> io::Result<()> {
match self.index.inner.get(seqname) {
Some(&idx) => self.read(seqname, 0, idx.len, seq),
None => Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name.")),
}
}
/// Read the given interval of the given seqname into the given vector (stop position is exclusive).
pub fn read(&mut self,
seqname: &str,
start: u64,
stop: u64,
seq: &mut Text)
-> io::Result<()> {
if let Some(idx) = self.index.inner.get(seqname) {
seq.clear();
if stop > idx.len {
return Err(io::Error::new(io::ErrorKind::Other, "FASTA read interval was out of bounds"));
}
if start > stop {
return Err(io::Error::new(io::ErrorKind::Other, "Invalid query interval"));
}
let mut line_offset = start % idx.line_bases;
let line_start = start / idx.line_bases * idx.line_bytes;
let offset = idx.offset + line_start + line_offset;
try!(self.reader.seek(io::SeekFrom::Start(offset)));
let length = stop - start as u64;
let mut buf = vec![0u8; idx.line_bytes as usize];
while (seq.len() as u64) < length {
let bases_left = length - seq.len() as u64;
let bases_on_line = idx.line_bases - line_offset;
let (bytes_to_read, bytes_to_keep) = if bases_on_line < bases_left {
(idx.line_bytes - line_offset, bases_on_line)
} else {
(bases_left, bases_left)
};
try!(self.reader.read_exact(&mut buf[..bytes_to_read as usize]));
seq.extend_from_slice(&buf[..bytes_to_keep as usize]);
line_offset = 0;
}
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name."))
}
}
}
/// Record of a FASTA index.
#[derive(RustcDecodable, Debug, Copy, Clone)]
struct IndexRecord {
len: u64,
offset: u64,
line_bases: u64,
line_bytes: u64,
}
/// A sequence record returned by the FASTA index.
pub struct Sequence {
pub name: String,
pub len: u64,
}
/// A Fasta writer.
pub struct Writer<W: io::Write> {
writer: io::BufWriter<W>,
}
impl Writer<fs::File> {
/// Write to the given file path.
pub fn to_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::create(path).map(Writer::new)
}
}
impl<W: io::Write> Writer<W> {
/// Create a new Fasta writer.
pub fn new(writer: W) -> Self {
Writer { writer: io::BufWriter::new(writer) }
}
/// Directly write a Fasta record.
pub fn write_record(&mut self, record: &Record) -> io::Result<()> {
self.write(record.id().unwrap_or(""), record.desc(), record.seq())
}
/// Write a Fasta record with given id, optional description and sequence.
pub fn write(&mut self, id: &str, desc: Option<&str>, seq: TextSlice) -> io::Result<()> {
try!(self.writer.write(b">"));
try!(self.writer.write(id.as_bytes()));
if desc.is_some() {
try!(self.writer.write(b" "));
try!(self.writer.write(desc.unwrap().as_bytes()));
}
try!(self.writer.write(b"\n"));
try!(self.writer.write(seq));
try!(self.writer.write(b"\n"));
Ok(())
}
/// Flush the writer, ensuring that everything is written.
pub fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}
/// A FASTA record.
#[derive(Default)]
pub struct Record {
header: String,
seq: String,
}
impl Record {
/// Create a new instance.
pub fn new() -> Self {
Record {
header: String::new(),
seq: String::new(),
}
}
/// Check if record is empty.
pub fn is_empty(&self) -> bool {
self.header.is_empty() && self.seq.is_empty()
}
/// Check validity of Fasta record.
pub fn check(&self) -> Result<(), &str> {
if self.id().is_none() {
return Err("Expecting id for FastQ record.");
}
if !self.seq.is_ascii() {
return Err("Non-ascii character found in sequence.");
}
Ok(())
}
/// Return the id of the record.
pub fn id(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2, ' ').next()
}
/// Return descriptions if present.
pub fn desc(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2, ' ').skip(1).next()
}
/// Return the sequence of the record.
pub fn seq(&self) -> TextSlice {
self.seq.as_bytes()
}
/// Clear the record.
fn clear(&mut self) {
self.header.clear();
self.seq.clear();
}
}
/// An iterator over the records of a Fasta file.
pub struct Records<R: io::Read> {
reader: Reader<R>,
}
impl<R: io::Read> Iterator for Records<R> {
type Item = io::Result<Record>;
fn next(&mut self) -> Option<io::Result<Record>> {
let mut record = Record::new();
match self.reader.read(&mut record) {
Ok(()) if record.is_empty() => None,
Ok(()) => Some(Ok(record)),
Err(err) => Some(Err(err)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io;
const FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
CCGTAGGCTGAA
CGTAGGCTGAAA
GTAGGCTGAAAA
CCCC
>id2
ATTGTTGTTTTA
ATTGTTGTTTTA
ATTGTTGTTTTA
GGGG
";
const FAI_FILE: &'static [u8] = b"id\t52\t9\t12\t13
id2\t40\t71\t12\t13
";
const FASTA_FILE_CRLF: &'static [u8] = b">id desc\r
ACCGTAGGCTGA\r
CCGTAGGCTGAA\r
CGTAGGCTGAAA\r
GTAGGCTGAAAA\r
CCCC\r
>id2\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
GGGG\r
";
const FAI_FILE_CRLF: &'static [u8] = b"id\t52\t10\t12\t14\r
id2\t40\t78\t12\t14\r
";
const FASTA_FILE_NO_TRAILING_LF : &'static [u8] = b">id desc
GTAGGCTGAAAA
CCCC";
const FAI_FILE_NO_TRAILING_LF: &'static [u8] = b"id\t16\t9\t12\t13";
const WRITE_FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
>id2
ATTGTTGTTTTA
";
#[test]
fn test_reader() {
let reader = Reader::new(FASTA_FILE);
let ids = [Some("id"), Some("id2")];
let descs = [Some("desc"), None];
let seqs: [&[u8]; 2] = [b"ACCGTAGGCTGACCGTAGGCTGAACGTAGGCTGAAAGTAGGCTGAAAACCCC",
b"ATTGTTGTTTTAATTGTTGTTTTAATTGTTGTTTTAGGGG"];
for (i, r) in reader.records().enumerate() {
let record = r.ok().expect("Error reading record");
assert_eq!(record.check(), Ok(()));
assert_eq!(record.id(), ids[i]);
assert_eq!(record.desc(), descs[i]);
assert_eq!(record.seq(), seqs[i]);
}
// let record = records.ok().nth(1).unwrap();
}
#[test]
fn test_indexed_reader() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE), FAI_FILE)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
#[test]
fn test_indexed_reader_crlf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_CRLF), FAI_FILE_CRLF)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
fn _test_indexed_reader<T: Seek + Read>(reader: &mut IndexedReader<T>) {
let mut seq = Vec::new();
// Test reading various substrings of the sequence
reader.read("id", 1, 5, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGT");
reader.read("id", 1, 31, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGTAGGCTGACCGTAGGCTGAACGTAGGC");
reader.read("id", 13, 23, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CGTAGGCTGA");
reader.read("id", 36, 52, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
reader.read("id2", 12, 40, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"ATTGTTGTTTTAATTGTTGTTTTAGGGG");
reader.read("id2", 12, 12, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"");
reader.read("id2", 12, 13, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"A");
assert!(reader.read("id2", 12, 11, &mut seq).is_err());
assert!(reader.read("id2", 12, 1000, &mut seq).is_err());
}
#[test]
fn test_indexed_reader_no_trailing_lf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_NO_TRAILING_LF),
FAI_FILE_NO_TRAILING_LF)
.ok()
.expect("Error reading index");
let mut seq = Vec::new();
reader.read("id", 0, 16, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
}
#[test]
fn test_writer() {
let mut writer = Writer::new(Vec::new());
writer.write("id", Some("desc"), b"ACCGTAGGCTGA").ok().expect("Expected successful write");
writer.write("id2", None, b"ATTGTTGTTTTA").ok().expect("Expected successful write");
writer.flush().ok().expect("Expected successful write");
assert_eq!(writer.writer.get_ref(), &WRITE_FASTA_FILE);
}
}
| th_index(f | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.