| function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (list) |
|---|---|---|
def __init__(self, input_text, extensions):
    """Store the raw listing split into lines plus the extension filter."""
    self.input = input_text.splitlines()
    self.extensions = extensions | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def read_directories(self):
    """Parse directories from the listing (abstract hook; overridden by subclasses)."""
    pass | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def _check_if_directory(self, line):
    """Return whether ``line`` denotes a directory entry (abstract hook)."""
    pass | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def _parse_directory(self, line):
    """Extract the directory path from ``line`` (abstract hook)."""
    pass | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def _parse_line(line, num_word):
    """Return the ``num_word``-th whitespace-separated word of ``line``.

    Returns '' when the line has fewer than ``num_word`` words.
    NOTE(review): defined without ``self`` — presumably a static helper; confirm.
    """
    # split at most num_word-1 times so the remainder of the line stays intact
    words = line.split(None, num_word - 1)
    if len(words) < num_word:
        return ''
    else:
        return words[num_word - 1] | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def read_directories(self):
    """Read directories, taking the 9th word of each entry line.

    NOTE(review): delegates to ``self._read_directories`` (not visible here) —
    confirm the word index matches this listing format.
    """
    return self._read_directories(9) | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def _parse_directory(self, line):
    """Drop the trailing delimiter character and append '/'."""
    return line[0:-1] + '/' | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def __init__(self, input_text, extensions):
    """Initialise the base parser; the first directory starts out unknown."""
    super(ParserDIR, self).__init__(input_text, extensions)
    self.firstDir = '' | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def _check_if_directory(self, line):
    """Directory lines in this DIR output start with ' Katalog:'.

    NOTE(review): 'Katalog' is a locale-specific header — confirm for other locales.
    """
    return line.startswith(" Katalog:") | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def _set_first_dir(self):
    """Scan the first five input lines for a directory header and cache it.

    Sets ``self.firstDir`` to the parsed directory, or '' when no header
    appears within the first five lines (or the input is shorter).
    """
    i = 0
    for line in self.input:
        i += 1
        # only the first five lines are considered a valid header region
        if i > 5:
            self.firstDir = ''
            return
        elif self._check_if_directory(line):
            self.firstDir = self._parse_directory_zero(line)
            return
    self.firstDir = '' | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def _parse_directory_zero(line):
    """Strip the 10-char header prefix, normalise backslashes, append '/'.

    NOTE(review): defined without ``self`` — presumably a static helper; confirm.
    """
    return (line[10:] + '/').replace("\\", "/") | APMD/PMD | [
1,
4,
1,
1,
1448905660
] |
def create(kernel):
    """Build the backpack draft-schematic Intangible.

    NOTE(review): no ``return result`` visible in this excerpt — confirm.
    """
    result = Intangible()
    result.template = "object/draft_schematic/clothing/shared_clothing_ith_backpack_field_06.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def create(kernel):
    """Build the imperial-officer Creature template object (excerpt; no return visible)."""
    result = Creature()
    result.template = "object/mobile/shared_dressed_imperial_officer_m_4.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_male") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def __init__(self, dirName):
    """
    Args:
        dirName (string): directory where to load the corpus

    NOTE(review): ``dirName`` is immediately overwritten with a hardcoded
    path below, so the argument is effectively ignored — confirm intent.
    """
    self.lines = {}
    self.conversations = []
    # field layouts of the Cornell movie-dialogs corpus files
    MOVIE_LINES_FIELDS = ["lineID","characterID","movieID","character","text"]
    MOVIE_CONVERSATIONS_FIELDS = ["character1ID","character2ID","movieID","utteranceIDs"]
    dirName = "/usr/share/dragonfire/deepconv/"
    self.lines = self.loadLines(os.path.join(dirName, "movie_lines.txt"), MOVIE_LINES_FIELDS)
    self.conversations = self.loadConversations(os.path.join(dirName, "movie_conversations.txt"), MOVIE_CONVERSATIONS_FIELDS)
    # TODO: Cleaner program (merge copy-paste) !! | mertyildiran/Dragonfire | [
1321,
207,
1321,
47,
1461108487
] |
def loadConversations(self, fileName, fields):
    """
    Args:
        fileName (str): file to load
        fields (list<str>): fields to extract
    Return:
        list<dict>: one dict per conversation with the extracted fields,
        plus a "lines" list of the referenced line records from self.lines
    """
    conversations = []
    with open(fileName, 'r', encoding='iso-8859-1') as f:  # TODO: Solve Iso encoding pb !
        for line in f:
            values = line.split(" +++$+++ ")
            # Extract fields
            convObj = {}
            for i, field in enumerate(fields):
                convObj[field] = values[i]
            # Convert string to list (convObj["utteranceIDs"] == "['L598485', 'L598486', ...]")
            lineIds = ast.literal_eval(convObj["utteranceIDs"])
            # Reassemble lines
            convObj["lines"] = []
            for lineId in lineIds:
                convObj["lines"].append(self.lines[lineId])
            conversations.append(convObj)
    return conversations | mertyildiran/Dragonfire | [
1321,
207,
1321,
47,
1461108487
] |
def create(kernel):
    """Build the Sayama Edosun quest-item Tangible (excerpt; no return visible)."""
    result = Tangible()
    result.template = "object/tangible/mission/quest_item/shared_sayama_edosun_q2_needed.iff"
    result.attribute_template_id = -1
    result.stfName("loot_nboo_n","sayama_edosun_q2_needed") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def create(kernel):
    """Build the Dathomir freed-prisoners-camp Building (excerpt; no return visible)."""
    result = Building()
    result.template = "object/building/poi/shared_dathomir_freedprisonerscamp_large1.iff"
    result.attribute_template_id = -1
    result.stfName("poi_n","base_poi_building") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def create(kernel):
    """Build the throw-pillow furniture Tangible (excerpt; no return visible)."""
    result = Tangible()
    result.template = "object/tangible/furniture/all/shared_frn_all_throwpillow_med_s02.iff"
    result.attribute_template_id = 6
    result.stfName("frn_n","frn_throwpillow") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def __init__(self, plotly_name="annotations", parent_name="layout.scene", **kwargs):
super(AnnotationsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Annotation"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the `text`
within the box. Has an effect only if `text`
spans two or more lines (i.e. `text` contains
one or more <br> HTML tags) or if an explicit
width is set to override the text width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1
(default) gives a head about 3x as wide as the
line.
arrowwidth
Sets the width (in px) of annotation arrow
line.
ax
Sets the x component of the arrow tail about
the arrow head (in pixels).
ay
Sets the y component of the arrow tail about
the arrow head (in pixels).
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the
annotation `text`.
borderpad
Sets the padding (in px) between the `text` and
the enclosing border.
borderwidth
Sets the width (in px) of the border enclosing
the annotation `text`.
captureevents
Determines whether the annotation text box
captures mouse move and click events, or allows
those events to pass through to data points in
the plot that may be behind the annotation. By
default `captureevents` is False unless
`hovertext` is provided. If you use the event
`plotly_clickannotation` without `hovertext`
you must explicitly enable `captureevents`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height.
Taller text will be clipped.
hoverlabel
:class:`plotly.graph_objects.layout.scene.annot
ation.Hoverlabel` instance or dict with
compatible properties
hovertext
Sets text to appear when hovering over this
annotation. If omitted or blank, no hover label
will appear.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
opacity
Sets the opacity of the annotation (text +
arrow).
showarrow
Determines whether or not the annotation is
drawn with an arrow. If True, `text` is placed
near the arrow's tail. If False, `text` lines
up with the `x` and `y` provided.
standoff
Sets a distance, in pixels, to move the end
arrowhead away from the position it is pointing
at, for example to point at the edge of a
marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector,
in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow
head, relative to `arrowwidth`. A value of 1
(default) gives a head about 3x as wide as the
line.
startstandoff
Sets a distance, in pixels, to move the start
arrowhead away from the position it is pointing
at, for example to point at the edge of a
marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector,
in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
text
Sets the text associated with this annotation.
Plotly uses a subset of HTML tags to do things
like newline (<br>), bold (<b></b>), italics
(<i></i>), hyperlinks (<a href='...'></a>).
Tags <em>, <sup>, <sub> <span> are also
supported.
textangle
Sets the angle at which the `text` is drawn
with respect to the horizontal.
valign
Sets the vertical alignment of the `text`
within the box. Has an effect only if an
explicit height is set to override the text
height.
visible
Determines whether or not this annotation is
visible.
width
Sets an explicit width for the text box. null
(default) lets the text set the box width.
Wider text will be clipped. There is no
automatic wrapping; use <br> to start a new
line.
x
Sets the annotation's x position.
xanchor
Sets the text box's horizontal position anchor
This anchor binds the `x` position to the
"left", "center" or "right" of the annotation.
For example, if `x` is set to 1, `xref` to
"paper" and `xanchor` to "right" then the
right-most portion of the annotation lines up
with the right-most edge of the plotting area.
If "auto", the anchor is equivalent to "center"
for data-referenced annotations or if there is
an arrow, whereas for paper-referenced with no
arrow, the anchor picked corresponds to the
closest side.
xshift
Shifts the position of the whole annotation and
arrow to the right (positive) or left
(negative) by this many pixels.
y
Sets the annotation's y position.
yanchor
Sets the text box's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the annotation.
For example, if `y` is set to 1, `yref` to
"paper" and `yanchor` to "top" then the top-
most portion of the annotation lines up with
the top-most edge of the plotting area. If
"auto", the anchor is equivalent to "middle"
for data-referenced annotations or if there is
an arrow, whereas for paper-referenced with no
arrow, the anchor picked corresponds to the
closest side.
yshift
Shifts the position of the whole annotation and
arrow up (positive) or down (negative) by this
many pixels.
z
Sets the annotation's z position. | plotly/python-api | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, config, logHandlers=None):
    """Build a device client: validate config and wire it into the MQTT base client.

    Args:
        config: dict of settings validated by ``DeviceClientConfig``.
        logHandlers: optional logging handlers forwarded to the base client.
    """
    self._config = DeviceClientConfig(**config)
    AbstractClient.__init__(
        self,
        domain=self._config.domain,
        organization=self._config.orgId,
        clientId=self._config.clientId,
        username=self._config.username,
        password=self._config.password,
        port=self._config.port,
        transport=self._config.transport,
        cleanStart=self._config.cleanStart,
        sessionExpiry=self._config.sessionExpiry,
        keepAlive=self._config.keepAlive,
        caFile=self._config.caFile,
        logLevel=self._config.logLevel,
        logHandlers=logHandlers,
    )
    # Add handler for commands if not connected to QuickStart
    if not self._config.isQuickstart():
        self.client.message_callback_add("iot-2/cmd/+/fmt/+", self._onCommand)
    # Initialize user supplied callback
    self.commandCallback = None
    # Register startup subscription list (only for non-Quickstart)
    if not self._config.isQuickstart():
        self._subscriptions[self._COMMAND_TOPIC] = 1 | ibm-watson-iot/iot-python | [
186,
162,
186,
4,
1397657256
] |
def make_shell_context():
    """Expose the app, database handle, and core models to the Flask shell."""
    return {'app': app, 'db': db, 'User': User, 'Role': Role}
4,
2,
4,
1,
1437956977
] |
def test():
    """Run the unit tests."""
    import unittest
    suite = unittest.TestLoader().discover('tests')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
4,
2,
4,
1,
1437956977
] |
def __init__( self, *args, **kwargs ):
    """Window setup: remember the hashtag, standalone flag, and team cache.

    Required kwargs: ``hash`` (hashtag to follow) and ``standalone``
    (whether the window was opened outside the main menu).
    """
    self.isRunning = True
    self.hash = kwargs["hash"]
    self.standalone = kwargs["standalone"]
    self.teamObjs = {} | enen92/script.matchcenter | [
2,
3,
2,
3,
1455478036
] |
def getTweets(self):
    """Fetch tweets for the followed hashtag and render them in list control 32501.

    One ListItem per tweet (text, author, avatar, age); focuses the list
    when anything was found.
    """
    self.getControl(32500).setLabel("#"+self.hash)
    self.getControl(32503).setImage(os.path.join(addon_path,"resources","img","twitter_sm.png"))
    tweetitems = []
    tweets = tweet.get_hashtag_tweets(self.hash)
    if tweets:
        for _tweet in tweets:
            # human-readable age of the tweet relative to now (UTC)
            td = ssutils.get_timedelta_string(datetime.datetime.utcnow() - _tweet["date"])
            item = xbmcgui.ListItem(_tweet["text"].replace("\n",""))
            item.setProperty("profilepic",_tweet["profilepic"])
            item.setProperty("author","[B]" +"@" + _tweet["author"] + "[/B]")
            item.setProperty("timedelta", td)
            tweetitems.append(item)
        self.getControl(32501).reset()
        self.getControl(32501).addItems(tweetitems)
        if tweetitems:
            self.setFocusId(32501)
    return | enen92/script.matchcenter | [
2,
3,
2,
3,
1455478036
] |
def stopRunning(self):
    """Stop the refresh loop, close the window, and return to the main menu.

    The main menu is only reopened when the window was not started standalone.
    """
    self.isRunning = False
    self.close()
    if not self.standalone:
        mainmenu.start() | enen92/script.matchcenter | [
2,
3,
2,
3,
1455478036
] |
def onClick(self,controlId):
    """Handle clicks: open match history for a team row, or reset the view."""
    if controlId == 32501:
        # list control: drill into the selected team's match history
        teamid = self.getControl(controlId).getSelectedItem().getProperty("teamid")
        matchhistory.start(teamid)
    elif controlId == 32514:
        self.reset() | enen92/script.matchcenter | [
2,
3,
2,
3,
1455478036
] |
def copy_author_to_submission(user, book):
    """Create an Author record from ``user``'s profile and attach it to ``book``.

    Returns the saved Author instance.
    """
    author = Author(
        first_name=user.first_name,
        middle_name=user.profile.middle_name,
        last_name=user.last_name,
        salutation=user.profile.salutation,
        institution=user.profile.institution,
        department=user.profile.department,
        country=user.profile.country,
        author_email=user.email,
        biography=user.profile.biography,
        orcid=user.profile.orcid,
        twitter=user.profile.twitter,
        linkedin=user.profile.linkedin,
        facebook=user.profile.facebook,
    )
    author.save()
    book.author.add(author)
    return author | ubiquitypress/rua | [
10,
3,
10,
42,
1427716048
] |
def check_stage(book, check):
    """Raise PermissionDenied unless ``book`` has reached submission stage ``check``.

    The original branched on ``book.submission_date`` but raised the same
    exception either way; the dead differentiation is collapsed into one guard.
    """
    if book.submission_stage < check:
        raise PermissionDenied()
10,
3,
10,
42,
1427716048
] |
def u(x):
    """Decode backslash escape sequences in ``x`` (Python 2 text shim)."""
    decoded, _consumed = codecs.unicode_escape_decode(x)
    return decoded
14,
22,
14,
9,
1437005334
] |
def u(x):
    """Identity shim: on Python 3 strings are already unicode."""
    return x | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def __init__(self):
    """Regenerate addons.xml and its md5 checksum, then report completion."""
    # generate files
    self._generate_addons_file()
    self._generate_md5_file()
    # notify user
    print("Finished updating addons xml and md5 files\n") | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def _generate_addons_file(self):
    """Concatenate every addon's addon.xml into a master addons.xml.

    Walks the current directory, strips each addon.xml's XML declaration,
    and writes the combined UTF-8 document via _save_file. Addons with a
    missing or malformed addon.xml are reported and skipped.
    """
    # addon list
    addons = sorted(os.listdir("."))
    # final addons text
    addons_xml = u("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n")
    # loop thru and add each addons addon.xml file
    for addon in addons:
        try:
            # skip any file or .svn folder or .git folder
            if (not os.path.isdir(addon) or addon == ".svn" or addon == ".git" or addon == ".github" or addon == "download"): continue
            # create path
            _path = os.path.join(addon, "addon.xml")
            # split lines for stripping
            xml_lines = open(_path, "r").read().splitlines()
            # new addon
            addon_xml = ""
            # loop thru cleaning each line
            for line in xml_lines:
                # skip encoding format line
                if (line.find("<?xml") >= 0): continue
                # add line
                if sys.version < '3':
                    addon_xml += unicode(line.rstrip() + "\n", "UTF-8")
                else:
                    addon_xml += line.rstrip() + "\n"
            # we succeeded so add to our final addons.xml text
            addons_xml += addon_xml.rstrip() + "\n\n"
        except Exception as e:
            # missing or poorly formatted addon.xml
            print("Excluding %s for %s" % (_path, e))
    # clean and add closing tag
    addons_xml = addons_xml.strip() + u("\n</addons>\n")
    # save file
    self._save_file(addons_xml.encode("UTF-8"), file="addons.xml") | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def _generate_md5_file(self):
    """Compute the md5 of addons.xml (py2 ``md5`` module, py3 ``hashlib``).

    NOTE(review): the digest ``m`` is computed but not written in this
    excerpt — the save step is presumably outside this view; confirm.
    """
    # create a new md5 hash
    try:
        import md5
        m = md5.new(open("addons.xml", "r").read()).hexdigest()
    except ImportError:
        import hashlib
        m = hashlib.md5(open("addons.xml", "r", encoding="UTF-8").read().encode("UTF-8")).hexdigest() | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def _save_file(self, data, file):
    """Write ``data`` (bytes) to ``file``, logging any failure instead of raising.

    Uses a context manager so the handle is closed deterministically even
    when the write fails (the original relied on refcount finalization).
    """
    try:
        # binary mode: the caller passes pre-encoded bytes
        with open(file, "wb") as output:
            output.write(data)
    except Exception as e:
        # best-effort: report and continue, matching the original contract
        print("An error occurred saving %s file!\n%s" % (file, e))
14,
22,
14,
9,
1437005334
] |
def zipfolder(foldername, target_dir, zips_dir, addon_dir):
    """Zip the contents of ``target_dir`` into ``zips_dir + foldername``.

    Each file is stored under ``addon_dir/<path relative to target_dir>``.

    Args:
        foldername: name of the zip file to create (e.g. "plugin-1.0.zip").
        target_dir: directory tree to archive.
        zips_dir: destination directory; concatenated with ``foldername``,
            so it must end with a path separator.
        addon_dir: top-level folder name used inside the archive.
    """
    # +1 strips the path separator that follows target_dir in walked paths
    rootlen = len(target_dir) + 1
    # context manager finalizes and closes the archive even on error
    # (the original leaked the handle if os.walk or write raised)
    with zipfile.ZipFile(zips_dir + foldername, 'w', zipfile.ZIP_DEFLATED) as zipobj:
        for base, dirs, files in os.walk(target_dir):
            for f in files:
                fn = os.path.join(base, f)
                zipobj.write(fn, os.path.join(addon_dir, fn[rootlen:]))
14,
22,
14,
9,
1437005334
] |
def __init__(self):
    """No per-instance state; parsing is done by the class-level helpers."""
    pass | gromacs/copernicus | [
14,
4,
14,
4,
1421754399
] |
def parseGET(headers,path):
    """Parse query parameters from a GET request path (Python 2 idioms).

    NOTE(review): ``params`` is built but never returned in this excerpt —
    presumably a return / request construction follows; confirm.
    """
    #separate the request params from the path
    splittedPath = path.split('?')
    msg = splittedPath[1]
    parsedDict = urlparse.parse_qs(msg) #Note values here are stored in lists, this is so one can handle many inputs with same name, for now we dont want that as our multipart parsing does not support it
    params = dict()
    # keep only the first value supplied for each parameter name
    for k,v in parsedDict.iteritems():
        params[k] = v[0] | gromacs/copernicus | [
14,
4,
14,
4,
1421754399
] |
def parsePUT(headers,message):
    """Handle a PUT request body (not implemented)."""
    pass | gromacs/copernicus | [
14,
4,
14,
4,
1421754399
] |
def parsePOST(headers,message): | gromacs/copernicus | [
14,
4,
14,
4,
1421754399
] |
def handleSinglePart(headers,message):
    """Read a urlencoded request body and wrap it in a ServerRequest.

    Python 2 only (``long``, ``iteritems``). Reads exactly content-length
    bytes from ``message`` and keeps the first value per parameter name.
    """
    contentLength = long(headers['content-length'])
    if headers['content-type'] == 'application/x-www-form-urlencoded' or headers['content-type'] == 'application/x-www-form-urlencoded; charset=UTF-8': #TODO generalize
        msg = message.read(contentLength)
        parsedDict = urlparse.parse_qs(msg) #Note values here are stored in lists, this is so one can handle many inputs with same name, for now we dont want that as our multipart parsing does not support it
        params = dict()
        for k,v in parsedDict.iteritems():
            params[k] = v[0]
        log.log(cpc.util.log.TRACE,'msg is %s'%params)
        request = ServerRequest(headers,None,params)
        return request | gromacs/copernicus | [
14,
4,
14,
4,
1421754399
] |
def handleMultipart(mainHeaders,msgStream):
    """Begin parsing a multipart body: derive the boundary markers.

    NOTE(review): this excerpt ends after boundary setup — the part-reading
    loop is presumably outside this view.
    """
    files = dict()
    params = dict()
    # in the stream each boundary line is prefixed with "--"
    BOUNDARY = "--"+HttpMethodParser.extractBoundary(mainHeaders)
    stopBoundary = BOUNDARY+"--"
    terminateBoundary = '' | gromacs/copernicus | [
14,
4,
14,
4,
1421754399
] |
def extractBoundary(headers):
    """Pull the multipart boundary token out of the Content-Type header.

    Prefers the canonical 'Content-Type' spelling, falling back to the
    all-lowercase key; raises when no boundary parameter is present.
    """
    if 'Content-Type' in headers:
        content_type = headers['Content-Type']
    else:
        content_type = headers['content-type']
    found = re.search('boundary=(.*)', content_type)
    if found is None:
        raise Exception('Could not find a multipart message boundary')
    return found.group(1)
14,
4,
14,
4,
1421754399
] |
def runTest(self,realFile):
    """Ad-hoc manual test using developer-local file paths.

    NOTE(review): hardcoded machine-specific paths — not runnable elsewhere.
    """
    # For TESTING PURPOSES
    #referenceFilename = "/Users/iman/Desktop/snowleopard_10a432_userdvd.dmg"
    referenceFilename = "/Users/iman/Documents/workspace/copernicus/examples/single.tar.gz"
    resultFilename = "/Users/iman/Desktop/cpctemp/resfile" | gromacs/copernicus | [
14,
4,
14,
4,
1421754399
] |
def mgo_text_split(query_text):
    """Split text to support a MongoDB $text match on a phrase.

    Breaks the input on punctuation (whitespace is deliberately not a
    separator) and wraps every fragment in double quotes so $text treats
    each as an exact term.
    """
    sep = r'[`\-=~!@#$%^&*()_+\[\]{};\'\\:"|<,./<>?]'
    quoted_terms = ['\"{}\"'.format(token) for token in re.split(sep, query_text)]
    return ' '.join(quoted_terms)
3349,
1304,
3349,
68,
1482119796
] |
def create_arm( cls, bones ):
    """Start building the arm rig from the stored ORG bones.

    NOTE(review): excerpt is truncated — only the initial lookup is visible.
    """
    org_bones = cls.org_bones | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def phi_b(self):
    """RF phase half-extent of the bunch: omega_rf * tau / 2."""
    full_phase = self.omega_rf * self.tau
    return full_phase / 2.
10,
23,
10,
17,
1413184470
] |
def delta_b(self):
    """Relative momentum offset: dE_b normalised by beta^2 * energy."""
    scale = self.beta_sq * self.energy
    return self.dE_b / scale
10,
23,
10,
17,
1413184470
] |
def dE_b(self):
    """Bunch energy offset derived from the RF bucket parameters.

    Depends on ``phi_b`` and the voltage/harmonic/eta_0 attributes —
    see the companion properties for their definitions.
    """
    return np.sqrt(self.beta_sq*self.energy*self.voltage*(1 -
        np.cos(self.phi_b)) / (np.pi*self.harmonic*self.eta_0)) | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def integral(self):
    """Bucket integral from 0 to phi_b via adaptive quadrature (value only)."""
    return integrate.quad(lambda x: np.sqrt(2.*(np.cos(x) -
        np.cos(self.phi_b))), 0, self.phi_b)[0] | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def emittance(self):
    """Longitudinal emittance computed from the bucket ``integral`` property."""
    return 4.*self.energy*self.omega_s0*self.beta_sq*self.integral / \
        (self.omega_rf**2*self.eta_0) | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def relativistic_quantities(self): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def frequencies(self): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def tune(self): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def bucket_parameters(self): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def emittance_from_bunch_length(self, four_sigma_bunch_length): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def bunch_length_from_emittance(self, emittance): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def setupUi(self, mainWindow): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def retranslateUi(self, mainWindow): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def addactions(self, mainWindow): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def pbHandler(self): | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def cbEnergyHandler(self, text):
    """Toggle the custom-energy input row: visible only when 'Custom' is chosen."""
    if text == 'Custom':
        self.leCustom.show()
        self.lbEV1.show()
    else:
        self.leCustom.hide()
        self.lbEV1.hide() | blond-admin/BLonD | [
10,
23,
10,
17,
1413184470
] |
def cbGammaTHandler(self, text):
    """Reveal the custom gamma-T field only while 'Custom' is selected."""
    toggle = self.reCustom.show if text == 'Custom' else self.reCustom.hide
    toggle()
10,
23,
10,
17,
1413184470
] |
def __init__(self, community):
    """Register this cache under the community's 'signature-request' namespace."""
    super(SignatureRequestCache, self).__init__(community.request_cache, u'signature-request') | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def __init__(self, community, block_id):
    """Track an outstanding request for block ``block_id``."""
    super(BlockRequestCache, self).__init__(community.request_cache, u'block-request')
    self.community = community
    self.block_id = block_id | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def __init__(self, community, contract_id, contract_type, deferred, min_responses, max_responses):
    """Collect traversal responses until the min/max thresholds decide the outcome.

    ``responses`` maps a response tuple to how many peers sent it;
    ``public_keys`` tracks peers already counted (one response per peer).
    """
    super(TraversalRequestCache, self).__init__(community.request_cache, u'traversal-request')
    self.logger = community.logger
    self.contract_id = contract_id
    self.contract_type = contract_type
    self.deferred = deferred
    self.min_responses = min_responses
    self.max_responses = max_responses
    self.responses = {}
    self.public_keys = [] | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def add_response(self, public_key, response_tuple):
    """Record one peer's response; fire the callback once max_responses arrive.

    Duplicate responses from the same peer are ignored. Returns True when
    this response completed the collection, else False.
    """
    if public_key in self.public_keys:
        # one response per peer
        return False
    self.public_keys.append(public_key)
    tally = self.responses
    tally[response_tuple] = tally.get(response_tuple, 0) + 1
    # Short-circuit the timeout once every expected response is in.
    if sum(tally.values()) >= self.max_responses:
        self.callback()
        return True
    return False
2,
1,
2,
9,
1486976150
] |
def __init__(self, dispersy, master, my_member):
    """Set up community state: pending contracts/blocks and a lazy data manager."""
    super(BlockchainCommunity, self).__init__(dispersy, master, my_member)
    self.logger = logging.getLogger('BlockchainLogger')
    # contracts received but not yet included in a block, in arrival order
    self.incoming_contracts = OrderedDict()
    # orphan blocks waiting for their predecessor, keyed by block id
    self.incoming_blocks = {}
    self.data_manager = None | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def initialize_database(self, database_fn=''):
    """Create and initialise the data manager.

    When ``database_fn`` is given it is resolved inside the Dispersy
    working directory; an empty name yields the manager's default store.
    """
    if database_fn:
        database_fn = os.path.join(self.dispersy.working_directory, database_fn)
    self.data_manager = BlockchainDataManager(database_fn)
    self.data_manager.initialize() | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def get_master_members(cls, dispersy):
    """Return the hardcoded master member for this community.

    Python 2 only: uses ``str.decode('hex')``.
    """
    # generated: Fri Feb 24 11:22:22 2017
    # curve: None
    # len: 571 bits ~ 144 bytes signature
    # pub: 170 3081a7301006072a8648ce3d020106052b81040027038192000407b
    # acf5ae4d3fe94d49a7f94b7239e9c2d878b29f0fbdb7374d5b6a09d9d6fba80d
    # 3807affd0ba45ba1ac1c278ca59bec422d8a44b5fefaabcdd62c2778414c01da
    # 4578b304b104b00eec74de98dcda803b79fd1783d76cc1bd7aab75cfd8fff982
    # 7a9647ae3c59423c2a9a984700e7cb43b881a6455574032cc11dba806dba9699
    # f54f2d30b10eed5c7c0381a0915a5
    # pub-sha1 56553661e30b342b2fc39f1a425eb612ef8b8c33
    # -----BEGIN PUBLIC KEY-----
    # MIGnMBAGByqGSM49AgEGBSuBBAAnA4GSAAQHus9a5NP+lNSaf5S3I56cLYeLKfD7
    # 23N01bagnZ1vuoDTgHr/0LpFuhrBwnjKWb7EItikS1/vqrzdYsJ3hBTAHaRXizBL
    # EEsA7sdN6Y3NqAO3n9F4PXbMG9eqt1z9j/+YJ6lkeuPFlCPCqamEcA58tDuIGmRV
    # V0AyzBHbqAbbqWmfVPLTCxDu1cfAOBoJFaU=
    # -----END PUBLIC KEY-----
    master_key = '3081a7301006072a8648ce3d020106052b81040027038192000407bacf5ae4d3fe94d49a7f94b7239e9c2d878b29' + \
                 'f0fbdb7374d5b6a09d9d6fba80d3807affd0ba45ba1ac1c278ca59bec422d8a44b5fefaabcdd62c2778414c01da4' + \
                 '578b304b104b00eec74de98dcda803b79fd1783d76cc1bd7aab75cfd8fff9827a9647ae3c59423c2a9a984700e7c' + \
                 'b43b881a6455574032cc11dba806dba9699f54f2d30b10eed5c7c0381a0915a5'
    master = dispersy.get_member(public_key=master_key.decode('hex'))
    return [master] | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def initiate_conversions(self):
    """Return the wire-format conversions supported by this community."""
    return [DefaultConversion(self), BlockchainConversion(self)] | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def send_message(self, msg_type, candidates, payload_dict):
    """Build a signed community message of ``msg_type`` and send it to ``candidates``."""
    self.logger.debug('Sending %s message to %d candidate(s)', msg_type, len(candidates))
    meta = self.get_meta_message(msg_type)
    message = meta.impl(authentication=(self.my_member,),
                        distribution=(self.claim_global_time(),),
                        destination=candidates,
                        payload=(payload_dict,))
    # store=False, update=False, forward=True: just push to the candidates
    return self.dispersy.store_update_forward([message], False, False, True) | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def send_signature_request(self, contract, candidate):
    """Ask ``candidate`` to sign ``contract``; tracked via a new request cache."""
    cache = self.request_cache.add(SignatureRequestCache(self))
    return self.send_message(u'signature-request', (candidate,), {'identifier': cache.number,
                                                                  'contract': contract.to_dict()}) | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def send_signature_response(self, candidate, contract, identifier):
    """Reply to a signature request identified by ``identifier`` with the signed contract."""
    return self.send_message(u'signature-response', (candidate,), {'identifier': identifier,
                                                                   'contract': contract.to_dict()}) | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def on_contract(self, messages):
    """Handle incoming contract messages: validate, dedupe, cache, and gossip."""
    for message in messages:
        contract = Contract.from_dict(message.payload.dictionary['contract'])
        if contract is None:
            self.logger.warning('Dropping invalid contract from %s', message.candidate.sock_addr)
            continue
        elif self.incoming_contracts.get(contract.id) or self.data_manager.get_contract(contract.id):
            self.logger.debug('Dropping contract %s (duplicate)', b64encode(contract.id))
            continue
        # Preliminary check to see if contract is allowed. A final check will be performed in check_block.
        if not self.check_contract(contract, fail_without_parent=False):
            self.logger.warning('Dropping contract %s (check failed)', b64encode(contract.id))
            continue
        self.logger.debug('Got contract %s', b64encode(contract.id))
        # Forward if needed
        if contract.id not in self.incoming_contracts:
            self.incoming_contracts[contract.id] = contract
            self.multicast_message(u'contract', {'contract': contract.to_dict()}, exclude=message.candidate) | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def on_block_request(self, messages):
    """Serve block-request messages: reply with the block when we have it."""
    for message in messages:
        block_id = message.payload.dictionary['block_id']
        self.logger.debug('Got block-request for id %s', b64encode(block_id))
        block = self.data_manager.get_block(block_id)
        if block is not None:
            self.send_message(u'block', (message.candidate,), {'block': block.to_dict()}) | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def process_blocks_after(self, block):
    """Recursively process orphan blocks whose predecessor is ``block``.

    Each matching orphan is removed from ``incoming_blocks``, processed,
    and (on success) used as the new anchor for further orphans.
    """
    # Snapshot the values: the loop deletes from incoming_blocks while
    # iterating, which raises RuntimeError on Python 3 dict views.
    for orphan in list(self.incoming_blocks.values()):
        if orphan.previous_hash == block.id:
            del self.incoming_blocks[orphan.id]
            if self.process_block(orphan):
                self.logger.debug('Added postponed block with %s contract(s)', len(orphan.contracts))
                self.process_blocks_after(orphan)
2,
1,
2,
9,
1486976150
] |
def check_block(self, block):
    """Full validity check for an incoming block.

    Verifies packet size, proof, signature, uniqueness, clock drift,
    per-contract validity, contract duplicates, the merkle root, and the
    median-time rule over the previous 11 blocks. Returns True when the
    block may be added to the chain.
    """
    if self.get_block_packet_size(block) > MAX_PACKET_SIZE:
        self.logger.debug('Block failed check (block too large)')
        return False
    if not self.check_proof(block):
        # Don't log message when we created the block
        if block.creator != self.my_member.public_key:
            self.logger.debug('Block failed check (incorrect proof)')
        return False
    if not block.verify():
        self.logger.debug('Block failed check (invalid signature)')
        return False
    if self.data_manager.get_block(block.id):
        self.logger.debug('Block failed check (duplicate block)')
        return False
    if block.time > int(time.time()) + MAX_CLOCK_DRIFT:
        self.logger.debug('Block failed check (max clock drift exceeded)')
        return False
    for contract in block.contracts:
        if block.time < contract.time:
            self.logger.debug('Block failed check (block created before contract)')
            return False
        if not self.check_contract(contract):
            self.logger.warning('Block check failed (contract check failed)')
            # drop the bad contract so it is not retried in a future block
            self.incoming_contracts.pop(contract.id, None)
            return False
    if len(block.contracts) != len(set([contract.id for contract in block.contracts])):
        self.logger.debug('Block failed check (duplicate contracts)')
        return False
    if block.merkle_root_hash != block.merkle_tree.build():
        self.logger.debug('Block failed check (incorrect merkle root hash)')
        return False
    # median-time rule over the previous 11 blocks (when available)
    past_blocks = self.get_past_blocks(block, 11)
    if past_blocks and block.time < median([b.time for b in past_blocks]):
        self.logger.debug('Block failed check (block time smaller than median time of past 11 blocks)')
        return False
    return True | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def create_block(self):
    """Assemble, sign, and (when valid) broadcast a new block from pending contracts.

    Contracts whose predecessor is not yet on the blockchain are held back
    as dependencies; the block is capped at MAX_PACKET_SIZE. Python 2 only
    (``itervalues``). Returns the created block.
    """
    latest_index = self.data_manager.get_block_indexes(limit=1)[0]
    prev_block = self.data_manager.get_block(latest_index.block_id) if latest_index is not None else None
    block = Block()
    block.previous_hash = prev_block.id if prev_block is not None else BLOCK_GENESIS_HASH
    block.target_difficulty = self.get_next_difficulty(prev_block)
    block.time = int(time.time())
    # Placeholder information (for calculating packet size)
    block.merkle_root_hash = block.merkle_tree.build()
    block.sign(self.my_member)
    # Find dependencies
    contracts = []
    dependencies = defaultdict(list)
    for contract in self.incoming_contracts.itervalues():
        if contract.previous_hash:
            # Get the previous contract from memory or the database
            prev_contract = self.incoming_contracts.get(contract.previous_hash) or \
                            self.data_manager.get_contract(contract.previous_hash)
            on_blockchain = self.data_manager.contract_on_blockchain(prev_contract.id) if prev_contract else False
            # We need to wait until the previous contract is received and on the blockchain
            if not on_blockchain:
                dependencies[contract.id].append(prev_contract)
                continue
        contracts.append(contract)
    # Add contracts to block
    while contracts:
        contract = contracts.pop(0)
        block.contracts.append(contract)
        if self.get_block_packet_size(block) > MAX_PACKET_SIZE:
            block.contracts.pop()
            break
        if contract.id in dependencies:
            # Put dependencies at the front of the list, so they will be processed in the next iterations
            for index, dependency in enumerate(dependencies[contract.id]):
                contracts.insert(index, dependency)
    # Calculate final merkle root hash + sign block
    block.merkle_root_hash = block.merkle_tree.build()
    block.sign(self.my_member)
    if self.check_block(block):
        self.logger.debug('Created block with target difficulty 0x%064x', block.target_difficulty)
        if self.process_block(block):
            self.logger.debug('Added created block with %s contract(s)', len(block.contracts))
            self.multicast_message(u'block', {'block': block.to_dict()})
    return block | Tribler/decentralized-mortgage-market | [
2,
1,
2,
9,
1486976150
] |
def get_past_blocks(self, block, num_past):
    """Walk ``num_past`` predecessors of ``block``, newest first.

    Returns the list of ancestor blocks, or None when the chain is
    shorter than ``num_past`` (a predecessor could not be found).
    """
    chain = []
    cursor = block
    while len(chain) < num_past:
        cursor = self.data_manager.get_block(cursor.previous_hash)
        if cursor is None:
            return None
        chain.append(cursor)
    return chain
2,
1,
2,
9,
1486976150
] |
def check_contract(self, contract, fail_without_parent=True):
    """Validate *contract*.

    The signature must verify, and (when *fail_without_parent* is set) a
    contract that references a parent must have that parent available in
    memory or in the database. Returns True when all checks pass.
    """
    if not contract.verify():
        self.logger.debug('Contract failed check (invalid signature)')
        return False
    if contract.previous_hash and fail_without_parent:
        # Look for the parent in the incoming queue first, then on disk.
        parent = self.incoming_contracts.get(contract.previous_hash)
        if parent is None:
            parent = self.data_manager.get_contract(contract.previous_hash)
        if parent is None:
            self.logger.error('Contract failed check (parent is unknown)')
            return False
    return True
2,
1,
2,
9,
1486976150
] |
def finalize_contract(self, contract, sign=False):
    """Persist *contract*, signing it with our member first when *sign* is set.

    Returns True unconditionally.
    """
    if sign:
        contract.sign(self.my_member)
    self.data_manager.add_contract(contract)
    return True
2,
1,
2,
9,
1486976150
] |
def on_traversal_request(self, messages):
    """Answer contract-traversal requests.

    For each request, walk the contract chain starting at the requested
    contract id and reply with the contract that was found (plus its
    confirmation count, when known). A response is sent even when no
    contract is found, carrying only the request identifier.
    """
    for message in messages:
        payload = message.payload.dictionary
        response = {'identifier': payload['identifier']}
        try:
            requested_type = ObjectType(payload['contract_type'])
        except (ValueError, KeyError):
            # Missing or unknown type: traverse without a type filter.
            requested_type = None
        contract = self.traverse_contracts(payload['contract_id'], requested_type)
        if contract is not None:
            response['contract'] = contract.to_dict()
            # Attach how many confirmations this contract has, when known.
            confirmations = self.find_confirmation_count(payload['contract_id'])
            if confirmations is not None:
                response['confirmations'] = confirmations
        self.send_message(u'traversal-response', (message.candidate,), response)
2,
1,
2,
9,
1486976150
] |
def traverse_contracts(self, contract_id, contract_type):
    """Walk the on-blockchain contract chain starting at *contract_id*.

    Returns the last contract in the chain when *contract_type* is None,
    otherwise the most recent contract of that type seen along the way.
    Returns None when the starting contract is unknown or not on the
    blockchain, or when the chain forks (more than one successor).
    """
    last_of_type = None
    current = None
    if self.data_manager.contract_on_blockchain(contract_id):
        current = self.data_manager.get_contract(contract_id)
    while current:
        if current.type == contract_type:
            last_of_type = current
        # Only successors that made it onto the blockchain count.
        successors = [c for c in self.data_manager.find_contracts(Contract.previous_hash == current.id)
                      if self.data_manager.contract_on_blockchain(c.id)]
        if not successors:
            # Reached the end of the chain.
            return current if contract_type is None else last_of_type
        if len(successors) > 1:
            # Fork in the chain: give up.
            return None
        current = successors[0]
2,
1,
2,
9,
1486976150
] |
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
from . import ordered_dict
return ordered_dict.OrderedDict | SickGear/SickGear | [
574,
83,
574,
2,
1415773777
] |
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None | SickGear/SickGear | [
574,
83,
574,
2,
1415773777
] |
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
         allow_nan=True, cls=None, indent=None, separators=None,
         encoding='utf-8', default=None, use_decimal=True,
         namedtuple_as_object=True, tuple_as_array=True,
         bigint_as_string=False, sort_keys=False, item_sort_key=None,
         for_json=False, ignore_nan=False, int_as_string_bitcount=None,
         iterable_as_array=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    When every option is left at its default the shared module-level
    encoder is reused; otherwise a fresh encoder (``cls`` or
    ``JSONEncoder``) is built with the given options.

    Notable options:

    * *skipkeys* -- skip dict keys that are not basic types instead of
      raising ``TypeError``.
    * *ensure_ascii* -- when false, some chunks written to ``fp`` may be
      ``unicode`` instances; ``fp.write()`` must cope with that.
    * *check_circular* -- when false, the circular-reference check is
      skipped (a circular reference then raises ``OverflowError`` or worse).
    * *allow_nan* / *ignore_nan* -- control handling of out-of-range
      floats (``nan``, ``inf``, ``-inf``); *ignore_nan* serializes them
      as ``null`` per ECMA-262 and overrides *allow_nan*.
    * *indent* / *separators* -- pretty-printing and compactness; an
      integer *indent* is accepted for backwards compatibility.
    * *encoding* -- character encoding for ``str`` instances (UTF-8).
    * *default* -- callable returning a serializable version of otherwise
      unserializable objects (default raises ``TypeError``).
    * *use_decimal*, *namedtuple_as_object*, *tuple_as_array*,
      *iterable_as_array* -- native handling for those types.
    * *bigint_as_string* / *int_as_string_bitcount* -- encode large ints
      as strings to survive JavaScript rounding.
    * *sort_keys* / *item_sort_key* -- dictionary item ordering;
      *item_sort_key* takes precedence.
    * *for_json* -- objects with a ``for_json()`` method are encoded as
      that method's return value.

    NOTE: prefer *default* or *for_json* over subclassing ``JSONEncoder``.
    """
    # Fast path: every option at its default lets us reuse the shared,
    # pre-built module-level encoder instead of constructing a new one.
    all_defaults = (
        not skipkeys and ensure_ascii and check_circular and allow_nan
        and cls is None and indent is None and separators is None
        and encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array
        and not iterable_as_array and not bigint_as_string
        and not sort_keys and not item_sort_key and not for_json
        and not ignore_nan and int_as_string_bitcount is None
        and not kw
    )
    if all_defaults:
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder_cls = cls if cls is not None else JSONEncoder
        encoder = encoder_cls(
            skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal,
            namedtuple_as_object=namedtuple_as_object,
            tuple_as_array=tuple_as_array,
            iterable_as_array=iterable_as_array,
            bigint_as_string=bigint_as_string,
            sort_keys=sort_keys,
            item_sort_key=item_sort_key,
            for_json=for_json,
            ignore_nan=ignore_nan,
            int_as_string_bitcount=int_as_string_bitcount,
            **kw)
        chunks = encoder.iterencode(obj)
    # Write chunk by chunk; writelines() could be faster on some Pythons
    # but at a debuggability cost.
    for chunk in chunks:
        fp.write(chunk)
574,
83,
574,
2,
1415773777
] |
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
         parse_int=None, parse_constant=None, object_pairs_hook=None,
         use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
         **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    This reads the entire stream and delegates to :func:`loads`.

    * *encoding* -- encoding used to interpret decoded :class:`str`
      objects (``'utf-8'`` by default); must be a superset of ASCII.
    * *object_hook* -- called with every decoded JSON object; its return
      value replaces the :class:`dict` (e.g. JSON-RPC class hinting).
    * *object_pairs_hook* -- called with an ordered list of pairs for
      every object literal; takes priority over *object_hook* (e.g.
      :func:`collections.OrderedDict` preserves insertion order).
    * *parse_float* / *parse_int* -- alternative parsers for JSON
      numbers (defaults are :class:`float` / :class:`int`).
    * *parse_constant* -- called with ``'-Infinity'``, ``'Infinity'`` or
      ``'NaN'``; can be used to reject invalid JSON numbers.
    * *use_decimal* -- implies ``parse_float=decimal.Decimal`` for
      parity with ``dump``.

    NOTE: prefer *object_hook* / *object_pairs_hook* over subclassing
    ``JSONDecoder`` via *cls*.
    """
    document = fp.read()
    return loads(document,
                 encoding=encoding, cls=cls, object_hook=object_hook,
                 parse_float=parse_float, parse_int=parse_int,
                 parse_constant=parse_constant,
                 object_pairs_hook=object_pairs_hook,
                 use_decimal=use_decimal, **kw)
574,
83,
574,
2,
1415773777
] |
def _toggle_speedups(enabled):
    """Switch decoder/encoder/scanner between their C and pure-Python
    implementations, then rebuild the module-level default codec objects
    so they pick up the new functions."""
    from . import decoder as dec
    from . import encoder as enc
    from . import scanner as scan
    c_make_encoder = _import_c_make_encoder()
    # Select the implementations first, then install them in one place.
    if enabled:
        scanstring = dec.c_scanstring or dec.py_scanstring
        make_encoder = c_make_encoder
        encode_ascii = (enc.c_encode_basestring_ascii or
                        enc.py_encode_basestring_ascii)
        make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        scanstring = dec.py_scanstring
        make_encoder = None
        encode_ascii = enc.py_encode_basestring_ascii
        make_scanner = scan.py_make_scanner
    dec.scanstring = scanstring
    enc.c_make_encoder = make_encoder
    enc.encode_basestring_ascii = encode_ascii
    scan.make_scanner = make_scanner
    dec.make_scanner = make_scanner
    # Re-create the defaults so they bind the freshly-installed functions.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
574,
83,
574,
2,
1415773777
] |
def check_tags(self, server=None, rule=None):
    """Decide whether *rule* applies to *server* based on tags.

    Returns True unless both the rule and the server define tags and the
    rule's tags are not all present on the server.
    """
    server_tags = [str(t) for t in server.get('tags', [])]
    rule_tags = [str(t) for t in rule.get('tags', [])]
    if server_tags and rule_tags:
        return set(rule_tags).issubset(server_tags)
    # Either side untagged: the rule is considered valid.
    return True
1338,
115,
1338,
37,
1284053941
] |
def check(self, data, server):
    """Evaluate system and account-wide alert rules for *server*,
    persisting any triggered occurrences.

    Returns the result of the last evaluation (used by the test suite).
    """
    triggered = False
    account_id = server.get('account_id', None)
    # Per-server system rules.
    rules = alerts_model.get_alerts(type='system', server=server)
    if rules:
        triggered = system_alerts.check(data=data, rules=rules, server=server)
        if triggered:
            alerts_model.save_system_occurence(triggered, server_id=server['_id'])
    # Account-wide (global) rules.
    global_rules = alerts_model.get_global_alerts(account_id=account_id)
    if global_rules:
        triggered = system_alerts.check(data=data, rules=global_rules, server=server)
        if triggered:
            alerts_model.save_system_occurence(triggered, server_id=server['_id'])
    return triggered  # For the test suite
1338,
115,
1338,
37,
1284053941
] |
def check_rule_and_save(self, process_data_dict=None, rule=None, process_id=None, server_id=None):
    """Find the entry for *process_id* in the reported process data and,
    if *rule* triggers on it, persist the alert occurrence."""
    match = None
    for entry in process_data_dict:
        if entry["p"] == process_id:
            match = entry
            break
    if match is None:
        return
    alert = process_alerts.check(match, rule)
    if alert:
        alerts_model.save_occurence(alert, server_id=server_id)
1338,
115,
1338,
37,
1284053941
] |
def check(self, data=None, plugin=None, server=None):
    """Evaluate per-plugin and account-wide plugin alert rules against
    the gauge data reported for *plugin* on *server*."""
    plugin_data = data.get('gauges', None)
    # Rules attached directly to this plugin.
    for rule in alerts_model.get_alerts_for_plugin(plugin=plugin):
        alert = plugin_alerts.check(data=plugin_data, rule=rule)
        if alert:
            alerts_model.save_occurence(alert)
    # Account-wide plugin rules.
    global_rules = alerts_model.get_alerts(type='plugin_global')
    if len(global_rules) > 0:
        all_plugins = plugin_model.get_for_server(server_id=server['_id'])
        for rule in global_rules:
            if not self.check_tags(server=server, rule=rule):
                continue
            plugin_name = rule.get('plugin')
            # Does this server run a plugin with the rule's name?
            plugin_id = None
            for candidate in all_plugins.clone():
                if candidate.get('name') == plugin_name:
                    plugin_id = candidate.get('_id')
            if plugin_id:
                alert = plugin_alerts.check(data=plugin_data, rule=rule)
                if alert:
                    alerts_model.save_occurence(alert, server_id=server['_id'])
1338,
115,
1338,
37,
1284053941
] |
def check(self, data, server):
    """Evaluate uptime rules for *server*.

    A rule triggers when its process id is absent from the reported
    process data, in which case an uptime occurrence is saved.
    """
    # BUG FIX: data may lack the 'data' key (or carry None), which made
    # len(process_data_dict) below raise TypeError. Treat that as "no
    # process data reported" so every uptime rule triggers as down.
    process_data_dict = data.get('data') or []
    rules = alerts_model.get_alerts(type='uptime', server=server)
    if len(rules) + len(process_data_dict) > 0:
        for rule in rules:
            process_id = rule['process']
            process_data = next((item for item in process_data_dict if item["p"] == process_id), None)
            # Process is down
            if not process_data:
                alerts_model.save_uptime_occurence(rule, data=process_data)
1338,
115,
1338,
37,
1284053941
] |
def check(self):
    """Flag servers that have stopped sending data.

    For every 'not sending data' alert, compare each server's last
    check-in time against the alert period (plus a small buffer) and
    record an occurrence for servers that are overdue.
    """
    time_now = unix_utc_now()
    alerts = alerts_model.get_alerts_not_sending_data()
    for alert in alerts:
        period = alert.get('period')
        for server in alert.get('server_data'):
            last_check = server.get('last_check')
            # Servers with no agent installed never report a last_check;
            # skip them. (Idiom fix: `is not None` instead of `!= None`.)
            if last_check is not None:
                # Typical cycle is ~65s: 60s sleep + ~5s to collect.
                since_last_check = time_now - last_check
                # 10 second buffer avoids triggering on slow check-ins.
                if since_last_check > (period + 10):
                    alert['server'] = server
                    alerts_model.save_notsendingdata_occurence(alert=alert)
1338,
115,
1338,
37,
1284053941
] |
def check(self, data=None, server=None):
    """Run every health-check alert rule over each entry in *data* (a
    list), saving an occurrence for every rule that triggers."""
    for alert in alerts_model.get_alerts(type='health_check'):
        # The checker scans each entry for relevancy before evaluating it.
        for entry in data:
            trigger = healthcheck_alert_checker.check(data=entry, rule=alert)
            if trigger:
                alerts_model.save_healtcheck_occurence(trigger=trigger, server_id=server['_id'])
1338,
115,
1338,
37,
1284053941
] |
def visit_account_settings_page(self):
    """Open the account settings page for the current user and keep the
    page object available as ``self.account_settings_page``."""
    # pylint: disable=attribute-defined-outside-init
    page = AccountSettingsPage(self.browser)
    self.account_settings_page = page
    page.visit()
    page.wait_for_ajax()
10,
5,
10,
1,
1460987160
] |
def settings_changed_event_filter(self, event):
    """Accept only "settings changed" events."""
    return self.USER_SETTINGS_CHANGED_EVENT_NAME == event['event_type']
10,
5,
10,
1,
1460987160
] |
def settings_change_initiated_event_filter(self, event):
    """Accept only "settings change initiated" events."""
    return self.CHANGE_INITIATED_EVENT_NAME == event['event_type']
10,
5,
10,
1,
1460987160
] |
def get_settings_page_url(self):
    """Return the absolute URL of the account settings page for this
    test context."""
    referer = self.ACCOUNT_SETTINGS_REFERER
    return self.relative_path_to_absolute_uri(referer)
10,
5,
10,
1,
1460987160
] |
def test_link_on_dashboard_works(self):
    """
    Scenario: the "Account" link on the dashboard opens account settings.
        Given that I am a registered user
        And I visit my dashboard
        And I click on "Account" in the top drop down
        Then I should see my account settings page
    """
    self.log_in_as_unique_user()
    page = DashboardPage(self.browser)
    page.visit()
    page.click_username_dropdown()
    self.assertIn('Account', page.username_dropdown_link_text)
    page.click_account_settings_link()
10,
5,
10,
1,
1460987160
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.