repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
django-danceschool/django-danceschool | danceschool/core/models.py | Invoice.allocateFees | def allocateFees(self):
'''
Fees are allocated across invoice items based on their discounted
total price net of adjustments as a proportion of the overall
invoice's total price
'''
items = list(self.invoiceitem_set.all())
# Check that totals and adjusments match. If they do not, raise an error.
if self.total != sum([x.total for x in items]):
msg = _('Invoice item totals do not match invoice total. Unable to allocate fees.')
logger.error(str(msg))
raise ValidationError(msg)
if self.adjustments != sum([x.adjustments for x in items]):
msg = _('Invoice item adjustments do not match invoice adjustments. Unable to allocate fees.')
logger.error(str(msg))
raise ValidationError(msg)
for item in items:
saveFlag = False
if self.total - self.adjustments > 0:
item.fees = self.fees * ((item.total - item.adjustments) / (self.total - self.adjustments))
saveFlag = True
# In the case of full refunds, allocate fees according to the
# initial total price of the item only.
elif self.total - self.adjustments == 0 and self.total > 0:
item.fees = self.fees * (item.total / self.total)
saveFlag = True
# In the unexpected event of fees with no total, just divide
# the fees equally among the items.
elif self.fees:
item.fees = self.fees * (1 / len(items))
saveFlag = True
if saveFlag:
item.save() | python | def allocateFees(self):
'''
Fees are allocated across invoice items based on their discounted
total price net of adjustments as a proportion of the overall
invoice's total price
'''
items = list(self.invoiceitem_set.all())
# Check that totals and adjusments match. If they do not, raise an error.
if self.total != sum([x.total for x in items]):
msg = _('Invoice item totals do not match invoice total. Unable to allocate fees.')
logger.error(str(msg))
raise ValidationError(msg)
if self.adjustments != sum([x.adjustments for x in items]):
msg = _('Invoice item adjustments do not match invoice adjustments. Unable to allocate fees.')
logger.error(str(msg))
raise ValidationError(msg)
for item in items:
saveFlag = False
if self.total - self.adjustments > 0:
item.fees = self.fees * ((item.total - item.adjustments) / (self.total - self.adjustments))
saveFlag = True
# In the case of full refunds, allocate fees according to the
# initial total price of the item only.
elif self.total - self.adjustments == 0 and self.total > 0:
item.fees = self.fees * (item.total / self.total)
saveFlag = True
# In the unexpected event of fees with no total, just divide
# the fees equally among the items.
elif self.fees:
item.fees = self.fees * (1 / len(items))
saveFlag = True
if saveFlag:
item.save() | [
"def",
"allocateFees",
"(",
"self",
")",
":",
"items",
"=",
"list",
"(",
"self",
".",
"invoiceitem_set",
".",
"all",
"(",
")",
")",
"# Check that totals and adjusments match. If they do not, raise an error.",
"if",
"self",
".",
"total",
"!=",
"sum",
"(",
"[",
"x",
".",
"total",
"for",
"x",
"in",
"items",
"]",
")",
":",
"msg",
"=",
"_",
"(",
"'Invoice item totals do not match invoice total. Unable to allocate fees.'",
")",
"logger",
".",
"error",
"(",
"str",
"(",
"msg",
")",
")",
"raise",
"ValidationError",
"(",
"msg",
")",
"if",
"self",
".",
"adjustments",
"!=",
"sum",
"(",
"[",
"x",
".",
"adjustments",
"for",
"x",
"in",
"items",
"]",
")",
":",
"msg",
"=",
"_",
"(",
"'Invoice item adjustments do not match invoice adjustments. Unable to allocate fees.'",
")",
"logger",
".",
"error",
"(",
"str",
"(",
"msg",
")",
")",
"raise",
"ValidationError",
"(",
"msg",
")",
"for",
"item",
"in",
"items",
":",
"saveFlag",
"=",
"False",
"if",
"self",
".",
"total",
"-",
"self",
".",
"adjustments",
">",
"0",
":",
"item",
".",
"fees",
"=",
"self",
".",
"fees",
"*",
"(",
"(",
"item",
".",
"total",
"-",
"item",
".",
"adjustments",
")",
"/",
"(",
"self",
".",
"total",
"-",
"self",
".",
"adjustments",
")",
")",
"saveFlag",
"=",
"True",
"# In the case of full refunds, allocate fees according to the",
"# initial total price of the item only.",
"elif",
"self",
".",
"total",
"-",
"self",
".",
"adjustments",
"==",
"0",
"and",
"self",
".",
"total",
">",
"0",
":",
"item",
".",
"fees",
"=",
"self",
".",
"fees",
"*",
"(",
"item",
".",
"total",
"/",
"self",
".",
"total",
")",
"saveFlag",
"=",
"True",
"# In the unexpected event of fees with no total, just divide",
"# the fees equally among the items.",
"elif",
"self",
".",
"fees",
":",
"item",
".",
"fees",
"=",
"self",
".",
"fees",
"*",
"(",
"1",
"/",
"len",
"(",
"items",
")",
")",
"saveFlag",
"=",
"True",
"if",
"saveFlag",
":",
"item",
".",
"save",
"(",
")"
] | Fees are allocated across invoice items based on their discounted
total price net of adjustments as a proportion of the overall
invoice's total price | [
"Fees",
"are",
"allocated",
"across",
"invoice",
"items",
"based",
"on",
"their",
"discounted",
"total",
"price",
"net",
"of",
"adjustments",
"as",
"a",
"proportion",
"of",
"the",
"overall",
"invoice",
"s",
"total",
"price"
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L2865-L2903 | train | 236,500 |
django-danceschool/django-danceschool | danceschool/discounts/models.py | DiscountCombo.applyAndAllocate | def applyAndAllocate(self,allocatedPrices,tieredTuples,payAtDoor=False):
'''
This method takes an initial allocation of prices across events, and
an identical length list of allocation tuples. It applies the rule
specified by this discount, allocates the discount across the listed
items, and returns both the price and the allocation
'''
initial_net_price = sum([x for x in allocatedPrices])
if self.discountType == self.DiscountType.flatPrice:
# Flat-price for all applicable items (partial application for items which are
# only partially needed to apply the discount). Flat prices ignore any previous discounts
# in other categories which may have been the best, but they only are applied if they are
# lower than the price that would be feasible by applying those prior discounts alone.
applicable_price = self.getFlatPrice(payAtDoor) or 0
this_price = applicable_price \
+ sum([x[0].event.getBasePrice(payAtDoor=payAtDoor) * x[1] if x[1] != 1 else x[0].price for x in tieredTuples])
# Flat prices are allocated equally across all events
this_allocated_prices = [x * (this_price / initial_net_price) for x in allocatedPrices]
elif self.discountType == self.DiscountType.dollarDiscount:
# Discount the set of applicable items by a specific number of dollars (currency units)
# Dollar discounts are allocated equally across all events.
this_price = initial_net_price - self.dollarDiscount
this_allocated_prices = [x * (this_price / initial_net_price) for x in allocatedPrices]
elif self.discountType == DiscountCombo.DiscountType.percentDiscount:
# Percentage off discounts, which may be applied to all items in the cart,
# or just to the items that were needed to apply the discount
if self.percentUniversallyApplied:
this_price = \
initial_net_price * (1 - (max(min(self.percentDiscount or 0,100),0) / 100))
this_allocated_prices = [x * (this_price / initial_net_price) for x in allocatedPrices]
else:
# Allocate the percentage discount based on the prior allocation from the prior category
this_price = 0
this_allocated_prices = []
for idx, val in enumerate(tieredTuples):
this_val = (
allocatedPrices[idx] *
(1 - val[1]) * (1 - (max(min(self.percentDiscount or 0,100),0) / 100)) +
allocatedPrices[idx] * val[1]
)
this_allocated_prices.append(this_val)
this_price += this_val
else:
raise KeyError(_('Invalid discount type.'))
if this_price < initial_net_price:
# Ensure no negative prices
this_price = max(this_price, 0)
return self.DiscountInfo(self, this_price, initial_net_price - this_price, this_allocated_prices) | python | def applyAndAllocate(self,allocatedPrices,tieredTuples,payAtDoor=False):
'''
This method takes an initial allocation of prices across events, and
an identical length list of allocation tuples. It applies the rule
specified by this discount, allocates the discount across the listed
items, and returns both the price and the allocation
'''
initial_net_price = sum([x for x in allocatedPrices])
if self.discountType == self.DiscountType.flatPrice:
# Flat-price for all applicable items (partial application for items which are
# only partially needed to apply the discount). Flat prices ignore any previous discounts
# in other categories which may have been the best, but they only are applied if they are
# lower than the price that would be feasible by applying those prior discounts alone.
applicable_price = self.getFlatPrice(payAtDoor) or 0
this_price = applicable_price \
+ sum([x[0].event.getBasePrice(payAtDoor=payAtDoor) * x[1] if x[1] != 1 else x[0].price for x in tieredTuples])
# Flat prices are allocated equally across all events
this_allocated_prices = [x * (this_price / initial_net_price) for x in allocatedPrices]
elif self.discountType == self.DiscountType.dollarDiscount:
# Discount the set of applicable items by a specific number of dollars (currency units)
# Dollar discounts are allocated equally across all events.
this_price = initial_net_price - self.dollarDiscount
this_allocated_prices = [x * (this_price / initial_net_price) for x in allocatedPrices]
elif self.discountType == DiscountCombo.DiscountType.percentDiscount:
# Percentage off discounts, which may be applied to all items in the cart,
# or just to the items that were needed to apply the discount
if self.percentUniversallyApplied:
this_price = \
initial_net_price * (1 - (max(min(self.percentDiscount or 0,100),0) / 100))
this_allocated_prices = [x * (this_price / initial_net_price) for x in allocatedPrices]
else:
# Allocate the percentage discount based on the prior allocation from the prior category
this_price = 0
this_allocated_prices = []
for idx, val in enumerate(tieredTuples):
this_val = (
allocatedPrices[idx] *
(1 - val[1]) * (1 - (max(min(self.percentDiscount or 0,100),0) / 100)) +
allocatedPrices[idx] * val[1]
)
this_allocated_prices.append(this_val)
this_price += this_val
else:
raise KeyError(_('Invalid discount type.'))
if this_price < initial_net_price:
# Ensure no negative prices
this_price = max(this_price, 0)
return self.DiscountInfo(self, this_price, initial_net_price - this_price, this_allocated_prices) | [
"def",
"applyAndAllocate",
"(",
"self",
",",
"allocatedPrices",
",",
"tieredTuples",
",",
"payAtDoor",
"=",
"False",
")",
":",
"initial_net_price",
"=",
"sum",
"(",
"[",
"x",
"for",
"x",
"in",
"allocatedPrices",
"]",
")",
"if",
"self",
".",
"discountType",
"==",
"self",
".",
"DiscountType",
".",
"flatPrice",
":",
"# Flat-price for all applicable items (partial application for items which are",
"# only partially needed to apply the discount). Flat prices ignore any previous discounts",
"# in other categories which may have been the best, but they only are applied if they are",
"# lower than the price that would be feasible by applying those prior discounts alone.",
"applicable_price",
"=",
"self",
".",
"getFlatPrice",
"(",
"payAtDoor",
")",
"or",
"0",
"this_price",
"=",
"applicable_price",
"+",
"sum",
"(",
"[",
"x",
"[",
"0",
"]",
".",
"event",
".",
"getBasePrice",
"(",
"payAtDoor",
"=",
"payAtDoor",
")",
"*",
"x",
"[",
"1",
"]",
"if",
"x",
"[",
"1",
"]",
"!=",
"1",
"else",
"x",
"[",
"0",
"]",
".",
"price",
"for",
"x",
"in",
"tieredTuples",
"]",
")",
"# Flat prices are allocated equally across all events",
"this_allocated_prices",
"=",
"[",
"x",
"*",
"(",
"this_price",
"/",
"initial_net_price",
")",
"for",
"x",
"in",
"allocatedPrices",
"]",
"elif",
"self",
".",
"discountType",
"==",
"self",
".",
"DiscountType",
".",
"dollarDiscount",
":",
"# Discount the set of applicable items by a specific number of dollars (currency units)",
"# Dollar discounts are allocated equally across all events.",
"this_price",
"=",
"initial_net_price",
"-",
"self",
".",
"dollarDiscount",
"this_allocated_prices",
"=",
"[",
"x",
"*",
"(",
"this_price",
"/",
"initial_net_price",
")",
"for",
"x",
"in",
"allocatedPrices",
"]",
"elif",
"self",
".",
"discountType",
"==",
"DiscountCombo",
".",
"DiscountType",
".",
"percentDiscount",
":",
"# Percentage off discounts, which may be applied to all items in the cart,",
"# or just to the items that were needed to apply the discount",
"if",
"self",
".",
"percentUniversallyApplied",
":",
"this_price",
"=",
"initial_net_price",
"*",
"(",
"1",
"-",
"(",
"max",
"(",
"min",
"(",
"self",
".",
"percentDiscount",
"or",
"0",
",",
"100",
")",
",",
"0",
")",
"/",
"100",
")",
")",
"this_allocated_prices",
"=",
"[",
"x",
"*",
"(",
"this_price",
"/",
"initial_net_price",
")",
"for",
"x",
"in",
"allocatedPrices",
"]",
"else",
":",
"# Allocate the percentage discount based on the prior allocation from the prior category",
"this_price",
"=",
"0",
"this_allocated_prices",
"=",
"[",
"]",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"tieredTuples",
")",
":",
"this_val",
"=",
"(",
"allocatedPrices",
"[",
"idx",
"]",
"*",
"(",
"1",
"-",
"val",
"[",
"1",
"]",
")",
"*",
"(",
"1",
"-",
"(",
"max",
"(",
"min",
"(",
"self",
".",
"percentDiscount",
"or",
"0",
",",
"100",
")",
",",
"0",
")",
"/",
"100",
")",
")",
"+",
"allocatedPrices",
"[",
"idx",
"]",
"*",
"val",
"[",
"1",
"]",
")",
"this_allocated_prices",
".",
"append",
"(",
"this_val",
")",
"this_price",
"+=",
"this_val",
"else",
":",
"raise",
"KeyError",
"(",
"_",
"(",
"'Invalid discount type.'",
")",
")",
"if",
"this_price",
"<",
"initial_net_price",
":",
"# Ensure no negative prices",
"this_price",
"=",
"max",
"(",
"this_price",
",",
"0",
")",
"return",
"self",
".",
"DiscountInfo",
"(",
"self",
",",
"this_price",
",",
"initial_net_price",
"-",
"this_price",
",",
"this_allocated_prices",
")"
] | This method takes an initial allocation of prices across events, and
an identical length list of allocation tuples. It applies the rule
specified by this discount, allocates the discount across the listed
items, and returns both the price and the allocation | [
"This",
"method",
"takes",
"an",
"initial",
"allocation",
"of",
"prices",
"across",
"events",
"and",
"an",
"identical",
"length",
"list",
"of",
"allocation",
"tuples",
".",
"It",
"applies",
"the",
"rule",
"specified",
"by",
"this",
"discount",
"allocates",
"the",
"discount",
"across",
"the",
"listed",
"items",
"and",
"returns",
"both",
"the",
"price",
"and",
"the",
"allocation"
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/discounts/models.py#L146-L201 | train | 236,501 |
django-danceschool/django-danceschool | danceschool/discounts/models.py | DiscountCombo.getComponentList | def getComponentList(self):
'''
This function just returns a list with items that are supposed to
be present in the the list multiple times as multiple elements
of the list. It simplifies checking whether a discount's conditions
are satisfied.
'''
component_list = []
for x in self.discountcombocomponent_set.all():
for y in range(0,x.quantity):
component_list += [x]
component_list.sort(key=lambda x: x.quantity, reverse=True)
return component_list | python | def getComponentList(self):
'''
This function just returns a list with items that are supposed to
be present in the the list multiple times as multiple elements
of the list. It simplifies checking whether a discount's conditions
are satisfied.
'''
component_list = []
for x in self.discountcombocomponent_set.all():
for y in range(0,x.quantity):
component_list += [x]
component_list.sort(key=lambda x: x.quantity, reverse=True)
return component_list | [
"def",
"getComponentList",
"(",
"self",
")",
":",
"component_list",
"=",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"discountcombocomponent_set",
".",
"all",
"(",
")",
":",
"for",
"y",
"in",
"range",
"(",
"0",
",",
"x",
".",
"quantity",
")",
":",
"component_list",
"+=",
"[",
"x",
"]",
"component_list",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"quantity",
",",
"reverse",
"=",
"True",
")",
"return",
"component_list"
] | This function just returns a list with items that are supposed to
be present in the the list multiple times as multiple elements
of the list. It simplifies checking whether a discount's conditions
are satisfied. | [
"This",
"function",
"just",
"returns",
"a",
"list",
"with",
"items",
"that",
"are",
"supposed",
"to",
"be",
"present",
"in",
"the",
"the",
"list",
"multiple",
"times",
"as",
"multiple",
"elements",
"of",
"the",
"list",
".",
"It",
"simplifies",
"checking",
"whether",
"a",
"discount",
"s",
"conditions",
"are",
"satisfied",
"."
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/discounts/models.py#L215-L230 | train | 236,502 |
django-danceschool/django-danceschool | danceschool/discounts/models.py | DiscountCombo.save | def save(self, *args, **kwargs):
'''
Don't save any passed values related to a type of discount
that is not the specified type
'''
if self.discountType != self.DiscountType.flatPrice:
self.onlinePrice = None
self.doorPrice = None
if self.discountType != self.DiscountType.dollarDiscount:
self.dollarDiscount = None
if self.discountType != self.DiscountType.percentDiscount:
self.percentDiscount = None
self.percentUniversallyApplied = False
super(DiscountCombo, self).save(*args, **kwargs) | python | def save(self, *args, **kwargs):
'''
Don't save any passed values related to a type of discount
that is not the specified type
'''
if self.discountType != self.DiscountType.flatPrice:
self.onlinePrice = None
self.doorPrice = None
if self.discountType != self.DiscountType.dollarDiscount:
self.dollarDiscount = None
if self.discountType != self.DiscountType.percentDiscount:
self.percentDiscount = None
self.percentUniversallyApplied = False
super(DiscountCombo, self).save(*args, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"discountType",
"!=",
"self",
".",
"DiscountType",
".",
"flatPrice",
":",
"self",
".",
"onlinePrice",
"=",
"None",
"self",
".",
"doorPrice",
"=",
"None",
"if",
"self",
".",
"discountType",
"!=",
"self",
".",
"DiscountType",
".",
"dollarDiscount",
":",
"self",
".",
"dollarDiscount",
"=",
"None",
"if",
"self",
".",
"discountType",
"!=",
"self",
".",
"DiscountType",
".",
"percentDiscount",
":",
"self",
".",
"percentDiscount",
"=",
"None",
"self",
".",
"percentUniversallyApplied",
"=",
"False",
"super",
"(",
"DiscountCombo",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Don't save any passed values related to a type of discount
that is not the specified type | [
"Don",
"t",
"save",
"any",
"passed",
"values",
"related",
"to",
"a",
"type",
"of",
"discount",
"that",
"is",
"not",
"the",
"specified",
"type"
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/discounts/models.py#L232-L249 | train | 236,503 |
django-danceschool/django-danceschool | danceschool/vouchers/handlers.py | checkVoucherCode | def checkVoucherCode(sender,**kwargs):
'''
Check that the given voucher code is valid
'''
logger.debug('Signal to check RegistrationContactForm handled by vouchers app.')
formData = kwargs.get('formData',{})
request = kwargs.get('request',{})
registration = kwargs.get('registration',None)
session = getattr(request,'session',{}).get(REG_VALIDATION_STR,{})
id = formData.get('gift','')
first = formData.get('firstName')
last = formData.get('lastName')
email = formData.get('email')
# Clean out the session data relating to vouchers so that we can revalidate it.
session.pop('total_voucher_amount',0)
session.pop('voucher_names',None)
session.pop('gift',None)
if id == '':
return
if not getConstant('vouchers__enableVouchers'):
raise ValidationError({'gift': _('Vouchers are disabled.')})
if session.get('gift','') != '':
raise ValidationError({'gift': _('Can\'t have more than one voucher')})
eventids = [x.event.id for x in registration.temporaryeventregistration_set.exclude(dropIn=True)]
seriess = Series.objects.filter(id__in=eventids)
obj = Voucher.objects.filter(voucherId=id).first()
if not obj:
raise ValidationError({'gift':_('Invalid Voucher Id')})
else:
customer = Customer.objects.filter(
first_name=first,
last_name=last,
email=email).first()
# This will raise any other errors that may be relevant
try:
obj.validateForCustomerAndSeriess(customer,seriess)
except ValidationError as e:
# Ensures that the error is applied to the correct field
raise ValidationError({'gift': e})
# If we got this far, then the voucher is determined to be valid, so the registration
# can proceed with no errors.
return | python | def checkVoucherCode(sender,**kwargs):
'''
Check that the given voucher code is valid
'''
logger.debug('Signal to check RegistrationContactForm handled by vouchers app.')
formData = kwargs.get('formData',{})
request = kwargs.get('request',{})
registration = kwargs.get('registration',None)
session = getattr(request,'session',{}).get(REG_VALIDATION_STR,{})
id = formData.get('gift','')
first = formData.get('firstName')
last = formData.get('lastName')
email = formData.get('email')
# Clean out the session data relating to vouchers so that we can revalidate it.
session.pop('total_voucher_amount',0)
session.pop('voucher_names',None)
session.pop('gift',None)
if id == '':
return
if not getConstant('vouchers__enableVouchers'):
raise ValidationError({'gift': _('Vouchers are disabled.')})
if session.get('gift','') != '':
raise ValidationError({'gift': _('Can\'t have more than one voucher')})
eventids = [x.event.id for x in registration.temporaryeventregistration_set.exclude(dropIn=True)]
seriess = Series.objects.filter(id__in=eventids)
obj = Voucher.objects.filter(voucherId=id).first()
if not obj:
raise ValidationError({'gift':_('Invalid Voucher Id')})
else:
customer = Customer.objects.filter(
first_name=first,
last_name=last,
email=email).first()
# This will raise any other errors that may be relevant
try:
obj.validateForCustomerAndSeriess(customer,seriess)
except ValidationError as e:
# Ensures that the error is applied to the correct field
raise ValidationError({'gift': e})
# If we got this far, then the voucher is determined to be valid, so the registration
# can proceed with no errors.
return | [
"def",
"checkVoucherCode",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"'Signal to check RegistrationContactForm handled by vouchers app.'",
")",
"formData",
"=",
"kwargs",
".",
"get",
"(",
"'formData'",
",",
"{",
"}",
")",
"request",
"=",
"kwargs",
".",
"get",
"(",
"'request'",
",",
"{",
"}",
")",
"registration",
"=",
"kwargs",
".",
"get",
"(",
"'registration'",
",",
"None",
")",
"session",
"=",
"getattr",
"(",
"request",
",",
"'session'",
",",
"{",
"}",
")",
".",
"get",
"(",
"REG_VALIDATION_STR",
",",
"{",
"}",
")",
"id",
"=",
"formData",
".",
"get",
"(",
"'gift'",
",",
"''",
")",
"first",
"=",
"formData",
".",
"get",
"(",
"'firstName'",
")",
"last",
"=",
"formData",
".",
"get",
"(",
"'lastName'",
")",
"email",
"=",
"formData",
".",
"get",
"(",
"'email'",
")",
"# Clean out the session data relating to vouchers so that we can revalidate it.\r",
"session",
".",
"pop",
"(",
"'total_voucher_amount'",
",",
"0",
")",
"session",
".",
"pop",
"(",
"'voucher_names'",
",",
"None",
")",
"session",
".",
"pop",
"(",
"'gift'",
",",
"None",
")",
"if",
"id",
"==",
"''",
":",
"return",
"if",
"not",
"getConstant",
"(",
"'vouchers__enableVouchers'",
")",
":",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"_",
"(",
"'Vouchers are disabled.'",
")",
"}",
")",
"if",
"session",
".",
"get",
"(",
"'gift'",
",",
"''",
")",
"!=",
"''",
":",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"_",
"(",
"'Can\\'t have more than one voucher'",
")",
"}",
")",
"eventids",
"=",
"[",
"x",
".",
"event",
".",
"id",
"for",
"x",
"in",
"registration",
".",
"temporaryeventregistration_set",
".",
"exclude",
"(",
"dropIn",
"=",
"True",
")",
"]",
"seriess",
"=",
"Series",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"eventids",
")",
"obj",
"=",
"Voucher",
".",
"objects",
".",
"filter",
"(",
"voucherId",
"=",
"id",
")",
".",
"first",
"(",
")",
"if",
"not",
"obj",
":",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"_",
"(",
"'Invalid Voucher Id'",
")",
"}",
")",
"else",
":",
"customer",
"=",
"Customer",
".",
"objects",
".",
"filter",
"(",
"first_name",
"=",
"first",
",",
"last_name",
"=",
"last",
",",
"email",
"=",
"email",
")",
".",
"first",
"(",
")",
"# This will raise any other errors that may be relevant\r",
"try",
":",
"obj",
".",
"validateForCustomerAndSeriess",
"(",
"customer",
",",
"seriess",
")",
"except",
"ValidationError",
"as",
"e",
":",
"# Ensures that the error is applied to the correct field\r",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"e",
"}",
")",
"# If we got this far, then the voucher is determined to be valid, so the registration\r",
"# can proceed with no errors.\r",
"return"
] | Check that the given voucher code is valid | [
"Check",
"that",
"the",
"given",
"voucher",
"code",
"is",
"valid"
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/vouchers/handlers.py#L20-L71 | train | 236,504 |
django-danceschool/django-danceschool | danceschool/vouchers/handlers.py | applyVoucherCodeTemporarily | def applyVoucherCodeTemporarily(sender,**kwargs):
'''
When the core registration system creates a temporary registration with a voucher code,
the voucher app looks for vouchers that match that code and creates TemporaryVoucherUse
objects to keep track of the fact that the voucher may be used.
'''
logger.debug('Signal fired to apply temporary vouchers.')
reg = kwargs.pop('registration')
voucherId = reg.data.get('gift','')
try:
voucher = Voucher.objects.get(voucherId=voucherId)
except ObjectDoesNotExist:
logger.debug('No applicable vouchers found.')
return
tvu = TemporaryVoucherUse(voucher=voucher,registration=reg,amount=0)
tvu.save()
logger.debug('Temporary voucher use object created.') | python | def applyVoucherCodeTemporarily(sender,**kwargs):
'''
When the core registration system creates a temporary registration with a voucher code,
the voucher app looks for vouchers that match that code and creates TemporaryVoucherUse
objects to keep track of the fact that the voucher may be used.
'''
logger.debug('Signal fired to apply temporary vouchers.')
reg = kwargs.pop('registration')
voucherId = reg.data.get('gift','')
try:
voucher = Voucher.objects.get(voucherId=voucherId)
except ObjectDoesNotExist:
logger.debug('No applicable vouchers found.')
return
tvu = TemporaryVoucherUse(voucher=voucher,registration=reg,amount=0)
tvu.save()
logger.debug('Temporary voucher use object created.') | [
"def",
"applyVoucherCodeTemporarily",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"'Signal fired to apply temporary vouchers.'",
")",
"reg",
"=",
"kwargs",
".",
"pop",
"(",
"'registration'",
")",
"voucherId",
"=",
"reg",
".",
"data",
".",
"get",
"(",
"'gift'",
",",
"''",
")",
"try",
":",
"voucher",
"=",
"Voucher",
".",
"objects",
".",
"get",
"(",
"voucherId",
"=",
"voucherId",
")",
"except",
"ObjectDoesNotExist",
":",
"logger",
".",
"debug",
"(",
"'No applicable vouchers found.'",
")",
"return",
"tvu",
"=",
"TemporaryVoucherUse",
"(",
"voucher",
"=",
"voucher",
",",
"registration",
"=",
"reg",
",",
"amount",
"=",
"0",
")",
"tvu",
".",
"save",
"(",
")",
"logger",
".",
"debug",
"(",
"'Temporary voucher use object created.'",
")"
] | When the core registration system creates a temporary registration with a voucher code,
the voucher app looks for vouchers that match that code and creates TemporaryVoucherUse
objects to keep track of the fact that the voucher may be used. | [
"When",
"the",
"core",
"registration",
"system",
"creates",
"a",
"temporary",
"registration",
"with",
"a",
"voucher",
"code",
"the",
"voucher",
"app",
"looks",
"for",
"vouchers",
"that",
"match",
"that",
"code",
"and",
"creates",
"TemporaryVoucherUse",
"objects",
"to",
"keep",
"track",
"of",
"the",
"fact",
"that",
"the",
"voucher",
"may",
"be",
"used",
"."
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/vouchers/handlers.py#L75-L94 | train | 236,505 |
django-danceschool/django-danceschool | danceschool/vouchers/handlers.py | applyReferrerVouchersTemporarily | def applyReferrerVouchersTemporarily(sender,**kwargs):
'''
Unlike voucher codes which have to be manually supplied, referrer discounts are
automatically applied here, assuming that the referral program is enabled.
'''
# Only continue if the referral program is enabled
if not getConstant('referrals__enableReferralProgram'):
return
logger.debug('Signal fired to temporarily apply referrer vouchers.')
reg = kwargs.pop('registration')
# Email address is unique for users, so use that
try:
c = Customer.objects.get(user__email=reg.email)
vouchers = c.getReferralVouchers()
except ObjectDoesNotExist:
vouchers = None
if not vouchers:
logger.debug('No referral vouchers found.')
return
for v in vouchers:
TemporaryVoucherUse(voucher=v,registration=reg,amount=0).save() | python | def applyReferrerVouchersTemporarily(sender,**kwargs):
'''
Unlike voucher codes which have to be manually supplied, referrer discounts are
automatically applied here, assuming that the referral program is enabled.
'''
# Only continue if the referral program is enabled
if not getConstant('referrals__enableReferralProgram'):
return
logger.debug('Signal fired to temporarily apply referrer vouchers.')
reg = kwargs.pop('registration')
# Email address is unique for users, so use that
try:
c = Customer.objects.get(user__email=reg.email)
vouchers = c.getReferralVouchers()
except ObjectDoesNotExist:
vouchers = None
if not vouchers:
logger.debug('No referral vouchers found.')
return
for v in vouchers:
TemporaryVoucherUse(voucher=v,registration=reg,amount=0).save() | [
"def",
"applyReferrerVouchersTemporarily",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"# Only continue if the referral program is enabled\r",
"if",
"not",
"getConstant",
"(",
"'referrals__enableReferralProgram'",
")",
":",
"return",
"logger",
".",
"debug",
"(",
"'Signal fired to temporarily apply referrer vouchers.'",
")",
"reg",
"=",
"kwargs",
".",
"pop",
"(",
"'registration'",
")",
"# Email address is unique for users, so use that\r",
"try",
":",
"c",
"=",
"Customer",
".",
"objects",
".",
"get",
"(",
"user__email",
"=",
"reg",
".",
"email",
")",
"vouchers",
"=",
"c",
".",
"getReferralVouchers",
"(",
")",
"except",
"ObjectDoesNotExist",
":",
"vouchers",
"=",
"None",
"if",
"not",
"vouchers",
":",
"logger",
".",
"debug",
"(",
"'No referral vouchers found.'",
")",
"return",
"for",
"v",
"in",
"vouchers",
":",
"TemporaryVoucherUse",
"(",
"voucher",
"=",
"v",
",",
"registration",
"=",
"reg",
",",
"amount",
"=",
"0",
")",
".",
"save",
"(",
")"
] | Unlike voucher codes which have to be manually supplied, referrer discounts are
automatically applied here, assuming that the referral program is enabled. | [
"Unlike",
"voucher",
"codes",
"which",
"have",
"to",
"be",
"manually",
"supplied",
"referrer",
"discounts",
"are",
"automatically",
"applied",
"here",
"assuming",
"that",
"the",
"referral",
"program",
"is",
"enabled",
"."
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/vouchers/handlers.py#L98-L124 | train | 236,506 |
django-danceschool/django-danceschool | danceschool/vouchers/handlers.py | applyVoucherCodesFinal | def applyVoucherCodesFinal(sender,**kwargs):
'''
Once a registration has been completed, vouchers are used and referrers are awarded
'''
logger.debug('Signal fired to mark voucher codes as applied.')
finalReg = kwargs.pop('registration')
tr = finalReg.temporaryRegistration
tvus = TemporaryVoucherUse.objects.filter(registration=tr)
for tvu in tvus:
vu = VoucherUse(voucher=tvu.voucher,registration=finalReg,amount=tvu.amount)
vu.save()
if getConstant('referrals__enableReferralProgram'):
awardReferrers(vu) | python | def applyVoucherCodesFinal(sender,**kwargs):
'''
Once a registration has been completed, vouchers are used and referrers are awarded
'''
logger.debug('Signal fired to mark voucher codes as applied.')
finalReg = kwargs.pop('registration')
tr = finalReg.temporaryRegistration
tvus = TemporaryVoucherUse.objects.filter(registration=tr)
for tvu in tvus:
vu = VoucherUse(voucher=tvu.voucher,registration=finalReg,amount=tvu.amount)
vu.save()
if getConstant('referrals__enableReferralProgram'):
awardReferrers(vu) | [
"def",
"applyVoucherCodesFinal",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"'Signal fired to mark voucher codes as applied.'",
")",
"finalReg",
"=",
"kwargs",
".",
"pop",
"(",
"'registration'",
")",
"tr",
"=",
"finalReg",
".",
"temporaryRegistration",
"tvus",
"=",
"TemporaryVoucherUse",
".",
"objects",
".",
"filter",
"(",
"registration",
"=",
"tr",
")",
"for",
"tvu",
"in",
"tvus",
":",
"vu",
"=",
"VoucherUse",
"(",
"voucher",
"=",
"tvu",
".",
"voucher",
",",
"registration",
"=",
"finalReg",
",",
"amount",
"=",
"tvu",
".",
"amount",
")",
"vu",
".",
"save",
"(",
")",
"if",
"getConstant",
"(",
"'referrals__enableReferralProgram'",
")",
":",
"awardReferrers",
"(",
"vu",
")"
] | Once a registration has been completed, vouchers are used and referrers are awarded | [
"Once",
"a",
"registration",
"has",
"been",
"completed",
"vouchers",
"are",
"used",
"and",
"referrers",
"are",
"awarded"
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/vouchers/handlers.py#L170-L185 | train | 236,507 |
django-danceschool/django-danceschool | danceschool/vouchers/handlers.py | provideCustomerReferralCode | def provideCustomerReferralCode(sender,**kwargs):
'''
If the vouchers app is installed and referrals are enabled, then the customer's profile page can show their voucher referral code.
'''
customer = kwargs.pop('customer')
if getConstant('vouchers__enableVouchers') and getConstant('referrals__enableReferralProgram'):
vrd = ensureReferralVouchersExist(customer)
return {
'referralVoucherId': vrd.referreeVoucher.voucherId
} | python | def provideCustomerReferralCode(sender,**kwargs):
'''
If the vouchers app is installed and referrals are enabled, then the customer's profile page can show their voucher referral code.
'''
customer = kwargs.pop('customer')
if getConstant('vouchers__enableVouchers') and getConstant('referrals__enableReferralProgram'):
vrd = ensureReferralVouchersExist(customer)
return {
'referralVoucherId': vrd.referreeVoucher.voucherId
} | [
"def",
"provideCustomerReferralCode",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"customer",
"=",
"kwargs",
".",
"pop",
"(",
"'customer'",
")",
"if",
"getConstant",
"(",
"'vouchers__enableVouchers'",
")",
"and",
"getConstant",
"(",
"'referrals__enableReferralProgram'",
")",
":",
"vrd",
"=",
"ensureReferralVouchersExist",
"(",
"customer",
")",
"return",
"{",
"'referralVoucherId'",
":",
"vrd",
".",
"referreeVoucher",
".",
"voucherId",
"}"
] | If the vouchers app is installed and referrals are enabled, then the customer's profile page can show their voucher referral code. | [
"If",
"the",
"vouchers",
"app",
"is",
"installed",
"and",
"referrals",
"are",
"enabled",
"then",
"the",
"customer",
"s",
"profile",
"page",
"can",
"show",
"their",
"voucher",
"referral",
"code",
"."
] | bb08cbf39017a812a5a94bdb4ea34170bf1a30ba | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/vouchers/handlers.py#L189-L199 | train | 236,508 |
orcasgit/django-fernet-fields | fernet_fields/fields.py | get_prep_lookup | def get_prep_lookup(self):
"""Raise errors for unsupported lookups"""
raise FieldError("{} '{}' does not support lookups".format(
self.lhs.field.__class__.__name__, self.lookup_name)) | python | def get_prep_lookup(self):
"""Raise errors for unsupported lookups"""
raise FieldError("{} '{}' does not support lookups".format(
self.lhs.field.__class__.__name__, self.lookup_name)) | [
"def",
"get_prep_lookup",
"(",
"self",
")",
":",
"raise",
"FieldError",
"(",
"\"{} '{}' does not support lookups\"",
".",
"format",
"(",
"self",
".",
"lhs",
".",
"field",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"lookup_name",
")",
")"
] | Raise errors for unsupported lookups | [
"Raise",
"errors",
"for",
"unsupported",
"lookups"
] | 888777e5bdb93c72339663e7464f6ceaf4f5e7dd | https://github.com/orcasgit/django-fernet-fields/blob/888777e5bdb93c72339663e7464f6ceaf4f5e7dd/fernet_fields/fields.py#L93-L96 | train | 236,509 |
orcasgit/django-fernet-fields | fernet_fields/hkdf.py | derive_fernet_key | def derive_fernet_key(input_key):
"""Derive a 32-bit b64-encoded Fernet key from arbitrary input key."""
hkdf = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
info=info,
backend=backend,
)
return base64.urlsafe_b64encode(hkdf.derive(force_bytes(input_key))) | python | def derive_fernet_key(input_key):
"""Derive a 32-bit b64-encoded Fernet key from arbitrary input key."""
hkdf = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
info=info,
backend=backend,
)
return base64.urlsafe_b64encode(hkdf.derive(force_bytes(input_key))) | [
"def",
"derive_fernet_key",
"(",
"input_key",
")",
":",
"hkdf",
"=",
"HKDF",
"(",
"algorithm",
"=",
"hashes",
".",
"SHA256",
"(",
")",
",",
"length",
"=",
"32",
",",
"salt",
"=",
"salt",
",",
"info",
"=",
"info",
",",
"backend",
"=",
"backend",
",",
")",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"hkdf",
".",
"derive",
"(",
"force_bytes",
"(",
"input_key",
")",
")",
")"
] | Derive a 32-bit b64-encoded Fernet key from arbitrary input key. | [
"Derive",
"a",
"32",
"-",
"bit",
"b64",
"-",
"encoded",
"Fernet",
"key",
"from",
"arbitrary",
"input",
"key",
"."
] | 888777e5bdb93c72339663e7464f6ceaf4f5e7dd | https://github.com/orcasgit/django-fernet-fields/blob/888777e5bdb93c72339663e7464f6ceaf4f5e7dd/fernet_fields/hkdf.py#L14-L23 | train | 236,510 |
xnd-project/gumath | python/gumath/__init__.py | reduce_cpu | def reduce_cpu(f, x, axes, dtype):
"""NumPy's reduce in terms of fold."""
axes = _get_axes(axes, x.ndim)
if not axes:
return x
permute = [n for n in range(x.ndim) if n not in axes]
permute = axes + permute
T = x.transpose(permute=permute)
N = len(axes)
t = T.type.at(N, dtype=dtype)
acc = x.empty(t, device=x.device)
if f.identity is not None:
_copyto(acc, f.identity)
tl = T
elif N == 1 and T.type.shape[0] > 0:
hd, tl = T[0], T[1:]
acc[()] = hd
else:
raise ValueError(
"reduction not possible for function without an identity element")
return fold(f, acc, tl) | python | def reduce_cpu(f, x, axes, dtype):
"""NumPy's reduce in terms of fold."""
axes = _get_axes(axes, x.ndim)
if not axes:
return x
permute = [n for n in range(x.ndim) if n not in axes]
permute = axes + permute
T = x.transpose(permute=permute)
N = len(axes)
t = T.type.at(N, dtype=dtype)
acc = x.empty(t, device=x.device)
if f.identity is not None:
_copyto(acc, f.identity)
tl = T
elif N == 1 and T.type.shape[0] > 0:
hd, tl = T[0], T[1:]
acc[()] = hd
else:
raise ValueError(
"reduction not possible for function without an identity element")
return fold(f, acc, tl) | [
"def",
"reduce_cpu",
"(",
"f",
",",
"x",
",",
"axes",
",",
"dtype",
")",
":",
"axes",
"=",
"_get_axes",
"(",
"axes",
",",
"x",
".",
"ndim",
")",
"if",
"not",
"axes",
":",
"return",
"x",
"permute",
"=",
"[",
"n",
"for",
"n",
"in",
"range",
"(",
"x",
".",
"ndim",
")",
"if",
"n",
"not",
"in",
"axes",
"]",
"permute",
"=",
"axes",
"+",
"permute",
"T",
"=",
"x",
".",
"transpose",
"(",
"permute",
"=",
"permute",
")",
"N",
"=",
"len",
"(",
"axes",
")",
"t",
"=",
"T",
".",
"type",
".",
"at",
"(",
"N",
",",
"dtype",
"=",
"dtype",
")",
"acc",
"=",
"x",
".",
"empty",
"(",
"t",
",",
"device",
"=",
"x",
".",
"device",
")",
"if",
"f",
".",
"identity",
"is",
"not",
"None",
":",
"_copyto",
"(",
"acc",
",",
"f",
".",
"identity",
")",
"tl",
"=",
"T",
"elif",
"N",
"==",
"1",
"and",
"T",
".",
"type",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"hd",
",",
"tl",
"=",
"T",
"[",
"0",
"]",
",",
"T",
"[",
"1",
":",
"]",
"acc",
"[",
"(",
")",
"]",
"=",
"hd",
"else",
":",
"raise",
"ValueError",
"(",
"\"reduction not possible for function without an identity element\"",
")",
"return",
"fold",
"(",
"f",
",",
"acc",
",",
"tl",
")"
] | NumPy's reduce in terms of fold. | [
"NumPy",
"s",
"reduce",
"in",
"terms",
"of",
"fold",
"."
] | a20ed5621db566ef805b8fb27ba4d8487f48c6b5 | https://github.com/xnd-project/gumath/blob/a20ed5621db566ef805b8fb27ba4d8487f48c6b5/python/gumath/__init__.py#L93-L118 | train | 236,511 |
xnd-project/gumath | python/gumath/__init__.py | reduce_cuda | def reduce_cuda(g, x, axes, dtype):
"""Reductions in CUDA use the thrust library for speed and have limited
functionality."""
if axes != 0:
raise NotImplementedError("'axes' keyword is not implemented for CUDA")
return g(x, dtype=dtype) | python | def reduce_cuda(g, x, axes, dtype):
"""Reductions in CUDA use the thrust library for speed and have limited
functionality."""
if axes != 0:
raise NotImplementedError("'axes' keyword is not implemented for CUDA")
return g(x, dtype=dtype) | [
"def",
"reduce_cuda",
"(",
"g",
",",
"x",
",",
"axes",
",",
"dtype",
")",
":",
"if",
"axes",
"!=",
"0",
":",
"raise",
"NotImplementedError",
"(",
"\"'axes' keyword is not implemented for CUDA\"",
")",
"return",
"g",
"(",
"x",
",",
"dtype",
"=",
"dtype",
")"
] | Reductions in CUDA use the thrust library for speed and have limited
functionality. | [
"Reductions",
"in",
"CUDA",
"use",
"the",
"thrust",
"library",
"for",
"speed",
"and",
"have",
"limited",
"functionality",
"."
] | a20ed5621db566ef805b8fb27ba4d8487f48c6b5 | https://github.com/xnd-project/gumath/blob/a20ed5621db566ef805b8fb27ba4d8487f48c6b5/python/gumath/__init__.py#L120-L126 | train | 236,512 |
xnd-project/gumath | python/gumath_aux.py | maxlevel | def maxlevel(lst):
"""Return maximum nesting depth"""
maxlev = 0
def f(lst, level):
nonlocal maxlev
if isinstance(lst, list):
level += 1
maxlev = max(level, maxlev)
for item in lst:
f(item, level)
f(lst, 0)
return maxlev | python | def maxlevel(lst):
"""Return maximum nesting depth"""
maxlev = 0
def f(lst, level):
nonlocal maxlev
if isinstance(lst, list):
level += 1
maxlev = max(level, maxlev)
for item in lst:
f(item, level)
f(lst, 0)
return maxlev | [
"def",
"maxlevel",
"(",
"lst",
")",
":",
"maxlev",
"=",
"0",
"def",
"f",
"(",
"lst",
",",
"level",
")",
":",
"nonlocal",
"maxlev",
"if",
"isinstance",
"(",
"lst",
",",
"list",
")",
":",
"level",
"+=",
"1",
"maxlev",
"=",
"max",
"(",
"level",
",",
"maxlev",
")",
"for",
"item",
"in",
"lst",
":",
"f",
"(",
"item",
",",
"level",
")",
"f",
"(",
"lst",
",",
"0",
")",
"return",
"maxlev"
] | Return maximum nesting depth | [
"Return",
"maximum",
"nesting",
"depth"
] | a20ed5621db566ef805b8fb27ba4d8487f48c6b5 | https://github.com/xnd-project/gumath/blob/a20ed5621db566ef805b8fb27ba4d8487f48c6b5/python/gumath_aux.py#L101-L112 | train | 236,513 |
xnd-project/gumath | python/gumath_aux.py | getitem | def getitem(lst, indices):
"""Definition for multidimensional slicing and indexing on arbitrarily
shaped nested lists.
"""
if not indices:
return lst
i, indices = indices[0], indices[1:]
item = list.__getitem__(lst, i)
if isinstance(i, int):
return getitem(item, indices)
# Empty slice: check if all subsequent indices are in range for the
# full slice, raise IndexError otherwise. This is NumPy's behavior.
if not item:
if lst:
_ = getitem(lst, (slice(None),) + indices)
elif any(isinstance(k, int) for k in indices):
raise IndexError
return []
return [getitem(x, indices) for x in item] | python | def getitem(lst, indices):
"""Definition for multidimensional slicing and indexing on arbitrarily
shaped nested lists.
"""
if not indices:
return lst
i, indices = indices[0], indices[1:]
item = list.__getitem__(lst, i)
if isinstance(i, int):
return getitem(item, indices)
# Empty slice: check if all subsequent indices are in range for the
# full slice, raise IndexError otherwise. This is NumPy's behavior.
if not item:
if lst:
_ = getitem(lst, (slice(None),) + indices)
elif any(isinstance(k, int) for k in indices):
raise IndexError
return []
return [getitem(x, indices) for x in item] | [
"def",
"getitem",
"(",
"lst",
",",
"indices",
")",
":",
"if",
"not",
"indices",
":",
"return",
"lst",
"i",
",",
"indices",
"=",
"indices",
"[",
"0",
"]",
",",
"indices",
"[",
"1",
":",
"]",
"item",
"=",
"list",
".",
"__getitem__",
"(",
"lst",
",",
"i",
")",
"if",
"isinstance",
"(",
"i",
",",
"int",
")",
":",
"return",
"getitem",
"(",
"item",
",",
"indices",
")",
"# Empty slice: check if all subsequent indices are in range for the",
"# full slice, raise IndexError otherwise. This is NumPy's behavior.",
"if",
"not",
"item",
":",
"if",
"lst",
":",
"_",
"=",
"getitem",
"(",
"lst",
",",
"(",
"slice",
"(",
"None",
")",
",",
")",
"+",
"indices",
")",
"elif",
"any",
"(",
"isinstance",
"(",
"k",
",",
"int",
")",
"for",
"k",
"in",
"indices",
")",
":",
"raise",
"IndexError",
"return",
"[",
"]",
"return",
"[",
"getitem",
"(",
"x",
",",
"indices",
")",
"for",
"x",
"in",
"item",
"]"
] | Definition for multidimensional slicing and indexing on arbitrarily
shaped nested lists. | [
"Definition",
"for",
"multidimensional",
"slicing",
"and",
"indexing",
"on",
"arbitrarily",
"shaped",
"nested",
"lists",
"."
] | a20ed5621db566ef805b8fb27ba4d8487f48c6b5 | https://github.com/xnd-project/gumath/blob/a20ed5621db566ef805b8fb27ba4d8487f48c6b5/python/gumath_aux.py#L114-L136 | train | 236,514 |
xnd-project/gumath | python/gumath_aux.py | genslices | def genslices(n):
"""Generate all possible slices for a single dimension."""
def range_with_none():
yield None
yield from range(-n, n+1)
for t in product(range_with_none(), range_with_none(), range_with_none()):
s = slice(*t)
if s.step != 0:
yield s | python | def genslices(n):
"""Generate all possible slices for a single dimension."""
def range_with_none():
yield None
yield from range(-n, n+1)
for t in product(range_with_none(), range_with_none(), range_with_none()):
s = slice(*t)
if s.step != 0:
yield s | [
"def",
"genslices",
"(",
"n",
")",
":",
"def",
"range_with_none",
"(",
")",
":",
"yield",
"None",
"yield",
"from",
"range",
"(",
"-",
"n",
",",
"n",
"+",
"1",
")",
"for",
"t",
"in",
"product",
"(",
"range_with_none",
"(",
")",
",",
"range_with_none",
"(",
")",
",",
"range_with_none",
"(",
")",
")",
":",
"s",
"=",
"slice",
"(",
"*",
"t",
")",
"if",
"s",
".",
"step",
"!=",
"0",
":",
"yield",
"s"
] | Generate all possible slices for a single dimension. | [
"Generate",
"all",
"possible",
"slices",
"for",
"a",
"single",
"dimension",
"."
] | a20ed5621db566ef805b8fb27ba4d8487f48c6b5 | https://github.com/xnd-project/gumath/blob/a20ed5621db566ef805b8fb27ba4d8487f48c6b5/python/gumath_aux.py#L276-L285 | train | 236,515 |
xnd-project/gumath | python/gumath_aux.py | genslices_ndim | def genslices_ndim(ndim, shape):
"""Generate all possible slice tuples for 'shape'."""
iterables = [genslices(shape[n]) for n in range(ndim)]
yield from product(*iterables) | python | def genslices_ndim(ndim, shape):
"""Generate all possible slice tuples for 'shape'."""
iterables = [genslices(shape[n]) for n in range(ndim)]
yield from product(*iterables) | [
"def",
"genslices_ndim",
"(",
"ndim",
",",
"shape",
")",
":",
"iterables",
"=",
"[",
"genslices",
"(",
"shape",
"[",
"n",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"ndim",
")",
"]",
"yield",
"from",
"product",
"(",
"*",
"iterables",
")"
] | Generate all possible slice tuples for 'shape'. | [
"Generate",
"all",
"possible",
"slice",
"tuples",
"for",
"shape",
"."
] | a20ed5621db566ef805b8fb27ba4d8487f48c6b5 | https://github.com/xnd-project/gumath/blob/a20ed5621db566ef805b8fb27ba4d8487f48c6b5/python/gumath_aux.py#L287-L290 | train | 236,516 |
aarongarrett/inspyred | inspyred/ec/variators/mutators.py | mutator | def mutator(mutate):
"""Return an inspyred mutator function based on the given function.
This function generator takes a function that operates on only
one candidate to produce a single mutated candidate. The generator
handles the iteration over each candidate in the set to be mutated.
The given function ``mutate`` must have the following signature::
mutant = mutate(random, candidate, args)
This function is most commonly used as a function decorator with
the following usage::
@mutator
def mutate(random, candidate, args):
# Implementation of mutation
pass
The generated function also contains an attribute named
``single_mutation`` which holds the original mutation function.
In this way, the original single-candidate function can be
retrieved if necessary.
"""
@functools.wraps(mutate)
def inspyred_mutator(random, candidates, args):
mutants = []
for i, cs in enumerate(candidates):
mutants.append(mutate(random, cs, args))
return mutants
inspyred_mutator.single_mutation = mutate
return inspyred_mutator | python | def mutator(mutate):
"""Return an inspyred mutator function based on the given function.
This function generator takes a function that operates on only
one candidate to produce a single mutated candidate. The generator
handles the iteration over each candidate in the set to be mutated.
The given function ``mutate`` must have the following signature::
mutant = mutate(random, candidate, args)
This function is most commonly used as a function decorator with
the following usage::
@mutator
def mutate(random, candidate, args):
# Implementation of mutation
pass
The generated function also contains an attribute named
``single_mutation`` which holds the original mutation function.
In this way, the original single-candidate function can be
retrieved if necessary.
"""
@functools.wraps(mutate)
def inspyred_mutator(random, candidates, args):
mutants = []
for i, cs in enumerate(candidates):
mutants.append(mutate(random, cs, args))
return mutants
inspyred_mutator.single_mutation = mutate
return inspyred_mutator | [
"def",
"mutator",
"(",
"mutate",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"mutate",
")",
"def",
"inspyred_mutator",
"(",
"random",
",",
"candidates",
",",
"args",
")",
":",
"mutants",
"=",
"[",
"]",
"for",
"i",
",",
"cs",
"in",
"enumerate",
"(",
"candidates",
")",
":",
"mutants",
".",
"append",
"(",
"mutate",
"(",
"random",
",",
"cs",
",",
"args",
")",
")",
"return",
"mutants",
"inspyred_mutator",
".",
"single_mutation",
"=",
"mutate",
"return",
"inspyred_mutator"
] | Return an inspyred mutator function based on the given function.
This function generator takes a function that operates on only
one candidate to produce a single mutated candidate. The generator
handles the iteration over each candidate in the set to be mutated.
The given function ``mutate`` must have the following signature::
mutant = mutate(random, candidate, args)
This function is most commonly used as a function decorator with
the following usage::
@mutator
def mutate(random, candidate, args):
# Implementation of mutation
pass
The generated function also contains an attribute named
``single_mutation`` which holds the original mutation function.
In this way, the original single-candidate function can be
retrieved if necessary. | [
"Return",
"an",
"inspyred",
"mutator",
"function",
"based",
"on",
"the",
"given",
"function",
".",
"This",
"function",
"generator",
"takes",
"a",
"function",
"that",
"operates",
"on",
"only",
"one",
"candidate",
"to",
"produce",
"a",
"single",
"mutated",
"candidate",
".",
"The",
"generator",
"handles",
"the",
"iteration",
"over",
"each",
"candidate",
"in",
"the",
"set",
"to",
"be",
"mutated",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/mutators.py#L33-L65 | train | 236,517 |
aarongarrett/inspyred | inspyred/ec/variators/mutators.py | bit_flip_mutation | def bit_flip_mutation(random, candidate, args):
"""Return the mutants produced by bit-flip mutation on the candidates.
This function performs bit-flip mutation. If a candidate solution contains
non-binary values, this function leaves it unchanged.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on a bit by bit basis.
"""
rate = args.setdefault('mutation_rate', 0.1)
mutant = copy.copy(candidate)
if len(mutant) == len([x for x in mutant if x in [0, 1]]):
for i, m in enumerate(mutant):
if random.random() < rate:
mutant[i] = (m + 1) % 2
return mutant | python | def bit_flip_mutation(random, candidate, args):
"""Return the mutants produced by bit-flip mutation on the candidates.
This function performs bit-flip mutation. If a candidate solution contains
non-binary values, this function leaves it unchanged.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on a bit by bit basis.
"""
rate = args.setdefault('mutation_rate', 0.1)
mutant = copy.copy(candidate)
if len(mutant) == len([x for x in mutant if x in [0, 1]]):
for i, m in enumerate(mutant):
if random.random() < rate:
mutant[i] = (m + 1) % 2
return mutant | [
"def",
"bit_flip_mutation",
"(",
"random",
",",
"candidate",
",",
"args",
")",
":",
"rate",
"=",
"args",
".",
"setdefault",
"(",
"'mutation_rate'",
",",
"0.1",
")",
"mutant",
"=",
"copy",
".",
"copy",
"(",
"candidate",
")",
"if",
"len",
"(",
"mutant",
")",
"==",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"mutant",
"if",
"x",
"in",
"[",
"0",
",",
"1",
"]",
"]",
")",
":",
"for",
"i",
",",
"m",
"in",
"enumerate",
"(",
"mutant",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"rate",
":",
"mutant",
"[",
"i",
"]",
"=",
"(",
"m",
"+",
"1",
")",
"%",
"2",
"return",
"mutant"
] | Return the mutants produced by bit-flip mutation on the candidates.
This function performs bit-flip mutation. If a candidate solution contains
non-binary values, this function leaves it unchanged.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on a bit by bit basis. | [
"Return",
"the",
"mutants",
"produced",
"by",
"bit",
"-",
"flip",
"mutation",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/mutators.py#L69-L93 | train | 236,518 |
aarongarrett/inspyred | inspyred/ec/variators/mutators.py | random_reset_mutation | def random_reset_mutation(random, candidate, args):
"""Return the mutants produced by randomly choosing new values.
This function performs random-reset mutation. It assumes that
candidate solutions are composed of discrete values. This function
makes use of the bounder function as specified in the EC's
``evolve`` method, and it assumes that the bounder contains
an attribute called *values* (which is true for instances of
``DiscreteBounder``).
The mutation moves through a candidate solution and, with rate
equal to the *mutation_rate*, randomly chooses a value from the
set of allowed values to be used in that location. Note that this
value may be the same as the original value.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on an element by element basis.
"""
bounder = args['_ec'].bounder
try:
values = bounder.values
except AttributeError:
values = None
if values is not None:
rate = args.setdefault('mutation_rate', 0.1)
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
if random.random() < rate:
mutant[i] = random.choice(values)
return mutant
else:
return candidate | python | def random_reset_mutation(random, candidate, args):
"""Return the mutants produced by randomly choosing new values.
This function performs random-reset mutation. It assumes that
candidate solutions are composed of discrete values. This function
makes use of the bounder function as specified in the EC's
``evolve`` method, and it assumes that the bounder contains
an attribute called *values* (which is true for instances of
``DiscreteBounder``).
The mutation moves through a candidate solution and, with rate
equal to the *mutation_rate*, randomly chooses a value from the
set of allowed values to be used in that location. Note that this
value may be the same as the original value.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on an element by element basis.
"""
bounder = args['_ec'].bounder
try:
values = bounder.values
except AttributeError:
values = None
if values is not None:
rate = args.setdefault('mutation_rate', 0.1)
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
if random.random() < rate:
mutant[i] = random.choice(values)
return mutant
else:
return candidate | [
"def",
"random_reset_mutation",
"(",
"random",
",",
"candidate",
",",
"args",
")",
":",
"bounder",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"bounder",
"try",
":",
"values",
"=",
"bounder",
".",
"values",
"except",
"AttributeError",
":",
"values",
"=",
"None",
"if",
"values",
"is",
"not",
"None",
":",
"rate",
"=",
"args",
".",
"setdefault",
"(",
"'mutation_rate'",
",",
"0.1",
")",
"mutant",
"=",
"copy",
".",
"copy",
"(",
"candidate",
")",
"for",
"i",
",",
"m",
"in",
"enumerate",
"(",
"mutant",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"rate",
":",
"mutant",
"[",
"i",
"]",
"=",
"random",
".",
"choice",
"(",
"values",
")",
"return",
"mutant",
"else",
":",
"return",
"candidate"
] | Return the mutants produced by randomly choosing new values.
This function performs random-reset mutation. It assumes that
candidate solutions are composed of discrete values. This function
makes use of the bounder function as specified in the EC's
``evolve`` method, and it assumes that the bounder contains
an attribute called *values* (which is true for instances of
``DiscreteBounder``).
The mutation moves through a candidate solution and, with rate
equal to the *mutation_rate*, randomly chooses a value from the
set of allowed values to be used in that location. Note that this
value may be the same as the original value.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on an element by element basis. | [
"Return",
"the",
"mutants",
"produced",
"by",
"randomly",
"choosing",
"new",
"values",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/mutators.py#L97-L137 | train | 236,519 |
aarongarrett/inspyred | inspyred/ec/variators/mutators.py | scramble_mutation | def scramble_mutation(random, candidate, args):
"""Return the mutants created by scramble mutation on the candidates.
This function performs scramble mutation. It randomly chooses two
locations along the candidate and scrambles the values within that
slice.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied to the candidate as a whole (i.e., it
either mutates or it does not, based on the rate).
"""
rate = args.setdefault('mutation_rate', 0.1)
if random.random() < rate:
size = len(candidate)
p = random.randint(0, size-1)
q = random.randint(0, size-1)
p, q = min(p, q), max(p, q)
s = candidate[p:q+1]
random.shuffle(s)
return candidate[:p] + s[::-1] + candidate[q+1:]
else:
return candidate | python | def scramble_mutation(random, candidate, args):
"""Return the mutants created by scramble mutation on the candidates.
This function performs scramble mutation. It randomly chooses two
locations along the candidate and scrambles the values within that
slice.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied to the candidate as a whole (i.e., it
either mutates or it does not, based on the rate).
"""
rate = args.setdefault('mutation_rate', 0.1)
if random.random() < rate:
size = len(candidate)
p = random.randint(0, size-1)
q = random.randint(0, size-1)
p, q = min(p, q), max(p, q)
s = candidate[p:q+1]
random.shuffle(s)
return candidate[:p] + s[::-1] + candidate[q+1:]
else:
return candidate | [
"def",
"scramble_mutation",
"(",
"random",
",",
"candidate",
",",
"args",
")",
":",
"rate",
"=",
"args",
".",
"setdefault",
"(",
"'mutation_rate'",
",",
"0.1",
")",
"if",
"random",
".",
"random",
"(",
")",
"<",
"rate",
":",
"size",
"=",
"len",
"(",
"candidate",
")",
"p",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"size",
"-",
"1",
")",
"q",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"size",
"-",
"1",
")",
"p",
",",
"q",
"=",
"min",
"(",
"p",
",",
"q",
")",
",",
"max",
"(",
"p",
",",
"q",
")",
"s",
"=",
"candidate",
"[",
"p",
":",
"q",
"+",
"1",
"]",
"random",
".",
"shuffle",
"(",
"s",
")",
"return",
"candidate",
"[",
":",
"p",
"]",
"+",
"s",
"[",
":",
":",
"-",
"1",
"]",
"+",
"candidate",
"[",
"q",
"+",
"1",
":",
"]",
"else",
":",
"return",
"candidate"
] | Return the mutants created by scramble mutation on the candidates.
This function performs scramble mutation. It randomly chooses two
locations along the candidate and scrambles the values within that
slice.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied to the candidate as a whole (i.e., it
either mutates or it does not, based on the rate). | [
"Return",
"the",
"mutants",
"created",
"by",
"scramble",
"mutation",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/mutators.py#L141-L171 | train | 236,520 |
aarongarrett/inspyred | inspyred/ec/variators/mutators.py | gaussian_mutation | def gaussian_mutation(random, candidate, args):
"""Return the mutants created by Gaussian mutation on the candidates.
This function performs Gaussian mutation. This function
makes use of the bounder function as specified in the EC's
``evolve`` method.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
- *gaussian_mean* -- the mean used in the Gaussian function (default 0)
- *gaussian_stdev* -- the standard deviation used in the Gaussian function
(default 1)
The mutation rate is applied on an element by element basis.
"""
mut_rate = args.setdefault('mutation_rate', 0.1)
mean = args.setdefault('gaussian_mean', 0.0)
stdev = args.setdefault('gaussian_stdev', 1.0)
bounder = args['_ec'].bounder
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
if random.random() < mut_rate:
mutant[i] += random.gauss(mean, stdev)
mutant = bounder(mutant, args)
return mutant | python | def gaussian_mutation(random, candidate, args):
"""Return the mutants created by Gaussian mutation on the candidates.
This function performs Gaussian mutation. This function
makes use of the bounder function as specified in the EC's
``evolve`` method.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
- *gaussian_mean* -- the mean used in the Gaussian function (default 0)
- *gaussian_stdev* -- the standard deviation used in the Gaussian function
(default 1)
The mutation rate is applied on an element by element basis.
"""
mut_rate = args.setdefault('mutation_rate', 0.1)
mean = args.setdefault('gaussian_mean', 0.0)
stdev = args.setdefault('gaussian_stdev', 1.0)
bounder = args['_ec'].bounder
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
if random.random() < mut_rate:
mutant[i] += random.gauss(mean, stdev)
mutant = bounder(mutant, args)
return mutant | [
"def",
"gaussian_mutation",
"(",
"random",
",",
"candidate",
",",
"args",
")",
":",
"mut_rate",
"=",
"args",
".",
"setdefault",
"(",
"'mutation_rate'",
",",
"0.1",
")",
"mean",
"=",
"args",
".",
"setdefault",
"(",
"'gaussian_mean'",
",",
"0.0",
")",
"stdev",
"=",
"args",
".",
"setdefault",
"(",
"'gaussian_stdev'",
",",
"1.0",
")",
"bounder",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"bounder",
"mutant",
"=",
"copy",
".",
"copy",
"(",
"candidate",
")",
"for",
"i",
",",
"m",
"in",
"enumerate",
"(",
"mutant",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"mut_rate",
":",
"mutant",
"[",
"i",
"]",
"+=",
"random",
".",
"gauss",
"(",
"mean",
",",
"stdev",
")",
"mutant",
"=",
"bounder",
"(",
"mutant",
",",
"args",
")",
"return",
"mutant"
] | Return the mutants created by Gaussian mutation on the candidates.
This function performs Gaussian mutation. This function
makes use of the bounder function as specified in the EC's
``evolve`` method.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
- *gaussian_mean* -- the mean used in the Gaussian function (default 0)
- *gaussian_stdev* -- the standard deviation used in the Gaussian function
(default 1)
The mutation rate is applied on an element by element basis. | [
"Return",
"the",
"mutants",
"created",
"by",
"Gaussian",
"mutation",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/mutators.py#L208-L239 | train | 236,521 |
aarongarrett/inspyred | inspyred/ec/variators/mutators.py | nonuniform_mutation | def nonuniform_mutation(random, candidate, args):
"""Return the mutants produced by nonuniform mutation on the candidates.
The function performs nonuniform mutation as specified in
(Michalewicz, "Genetic Algorithms + Data Structures = Evolution
Programs," Springer, 1996). This function also makes use of the
bounder function as specified in the EC's ``evolve`` method.
.. note::
This function **requires** that *max_generations* be specified in
the *args* dictionary. Therefore, it is best to use this operator
in conjunction with the ``generation_termination`` terminator.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *max_generations* -- the maximum number of generations for which
evolution should take place
Optional keyword arguments in args:
- *mutation_strength* -- the strength of the mutation, where higher
values correspond to greater variation (default 1)
"""
bounder = args['_ec'].bounder
num_gens = args['_ec'].num_generations
max_gens = args['max_generations']
strength = args.setdefault('mutation_strength', 1)
exponent = (1.0 - num_gens / float(max_gens)) ** strength
mutant = copy.copy(candidate)
for i, (c, lo, hi) in enumerate(zip(candidate, bounder.lower_bound, bounder.upper_bound)):
if random.random() <= 0.5:
new_value = c + (hi - c) * (1.0 - random.random() ** exponent)
else:
new_value = c - (c - lo) * (1.0 - random.random() ** exponent)
mutant[i] = new_value
return mutant | python | def nonuniform_mutation(random, candidate, args):
"""Return the mutants produced by nonuniform mutation on the candidates.
The function performs nonuniform mutation as specified in
(Michalewicz, "Genetic Algorithms + Data Structures = Evolution
Programs," Springer, 1996). This function also makes use of the
bounder function as specified in the EC's ``evolve`` method.
.. note::
This function **requires** that *max_generations* be specified in
the *args* dictionary. Therefore, it is best to use this operator
in conjunction with the ``generation_termination`` terminator.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *max_generations* -- the maximum number of generations for which
evolution should take place
Optional keyword arguments in args:
- *mutation_strength* -- the strength of the mutation, where higher
values correspond to greater variation (default 1)
"""
bounder = args['_ec'].bounder
num_gens = args['_ec'].num_generations
max_gens = args['max_generations']
strength = args.setdefault('mutation_strength', 1)
exponent = (1.0 - num_gens / float(max_gens)) ** strength
mutant = copy.copy(candidate)
for i, (c, lo, hi) in enumerate(zip(candidate, bounder.lower_bound, bounder.upper_bound)):
if random.random() <= 0.5:
new_value = c + (hi - c) * (1.0 - random.random() ** exponent)
else:
new_value = c - (c - lo) * (1.0 - random.random() ** exponent)
mutant[i] = new_value
return mutant | [
"def",
"nonuniform_mutation",
"(",
"random",
",",
"candidate",
",",
"args",
")",
":",
"bounder",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"bounder",
"num_gens",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"num_generations",
"max_gens",
"=",
"args",
"[",
"'max_generations'",
"]",
"strength",
"=",
"args",
".",
"setdefault",
"(",
"'mutation_strength'",
",",
"1",
")",
"exponent",
"=",
"(",
"1.0",
"-",
"num_gens",
"/",
"float",
"(",
"max_gens",
")",
")",
"**",
"strength",
"mutant",
"=",
"copy",
".",
"copy",
"(",
"candidate",
")",
"for",
"i",
",",
"(",
"c",
",",
"lo",
",",
"hi",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"candidate",
",",
"bounder",
".",
"lower_bound",
",",
"bounder",
".",
"upper_bound",
")",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<=",
"0.5",
":",
"new_value",
"=",
"c",
"+",
"(",
"hi",
"-",
"c",
")",
"*",
"(",
"1.0",
"-",
"random",
".",
"random",
"(",
")",
"**",
"exponent",
")",
"else",
":",
"new_value",
"=",
"c",
"-",
"(",
"c",
"-",
"lo",
")",
"*",
"(",
"1.0",
"-",
"random",
".",
"random",
"(",
")",
"**",
"exponent",
")",
"mutant",
"[",
"i",
"]",
"=",
"new_value",
"return",
"mutant"
] | Return the mutants produced by nonuniform mutation on the candidates.
The function performs nonuniform mutation as specified in
(Michalewicz, "Genetic Algorithms + Data Structures = Evolution
Programs," Springer, 1996). This function also makes use of the
bounder function as specified in the EC's ``evolve`` method.
.. note::
This function **requires** that *max_generations* be specified in
the *args* dictionary. Therefore, it is best to use this operator
in conjunction with the ``generation_termination`` terminator.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *max_generations* -- the maximum number of generations for which
evolution should take place
Optional keyword arguments in args:
- *mutation_strength* -- the strength of the mutation, where higher
values correspond to greater variation (default 1) | [
"Return",
"the",
"mutants",
"produced",
"by",
"nonuniform",
"mutation",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/mutators.py#L243-L285 | train | 236,522 |
aarongarrett/inspyred | inspyred/ec/variators/crossovers.py | crossover | def crossover(cross):
"""Return an inspyred crossover function based on the given function.
This function generator takes a function that operates on only
two parent candidates to produce an iterable sequence of offspring
(typically two). The generator handles the pairing of selected
parents and collecting of all offspring.
The generated function chooses every odd candidate as a 'mom' and
every even as a 'dad' (discounting the last candidate if there is
an odd number). For each mom-dad pair, offspring are produced via
the `cross` function.
The given function ``cross`` must have the following signature::
offspring = cross(random, mom, dad, args)
This function is most commonly used as a function decorator with
the following usage::
@crossover
def cross(random, mom, dad, args):
# Implementation of paired crossing
pass
The generated function also contains an attribute named
``single_crossover`` which holds the original crossover function.
In this way, the original single-set-of-parents function can be
retrieved if necessary.
"""
@functools.wraps(cross)
def inspyred_crossover(random, candidates, args):
if len(candidates) % 2 == 1:
candidates = candidates[:-1]
moms = candidates[::2]
dads = candidates[1::2]
children = []
for i, (mom, dad) in enumerate(zip(moms, dads)):
cross.index = i
offspring = cross(random, mom, dad, args)
for o in offspring:
children.append(o)
return children
inspyred_crossover.single_crossover = cross
return inspyred_crossover | python | def crossover(cross):
"""Return an inspyred crossover function based on the given function.
This function generator takes a function that operates on only
two parent candidates to produce an iterable sequence of offspring
(typically two). The generator handles the pairing of selected
parents and collecting of all offspring.
The generated function chooses every odd candidate as a 'mom' and
every even as a 'dad' (discounting the last candidate if there is
an odd number). For each mom-dad pair, offspring are produced via
the `cross` function.
The given function ``cross`` must have the following signature::
offspring = cross(random, mom, dad, args)
This function is most commonly used as a function decorator with
the following usage::
@crossover
def cross(random, mom, dad, args):
# Implementation of paired crossing
pass
The generated function also contains an attribute named
``single_crossover`` which holds the original crossover function.
In this way, the original single-set-of-parents function can be
retrieved if necessary.
"""
@functools.wraps(cross)
def inspyred_crossover(random, candidates, args):
if len(candidates) % 2 == 1:
candidates = candidates[:-1]
moms = candidates[::2]
dads = candidates[1::2]
children = []
for i, (mom, dad) in enumerate(zip(moms, dads)):
cross.index = i
offspring = cross(random, mom, dad, args)
for o in offspring:
children.append(o)
return children
inspyred_crossover.single_crossover = cross
return inspyred_crossover | [
"def",
"crossover",
"(",
"cross",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"cross",
")",
"def",
"inspyred_crossover",
"(",
"random",
",",
"candidates",
",",
"args",
")",
":",
"if",
"len",
"(",
"candidates",
")",
"%",
"2",
"==",
"1",
":",
"candidates",
"=",
"candidates",
"[",
":",
"-",
"1",
"]",
"moms",
"=",
"candidates",
"[",
":",
":",
"2",
"]",
"dads",
"=",
"candidates",
"[",
"1",
":",
":",
"2",
"]",
"children",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"mom",
",",
"dad",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"moms",
",",
"dads",
")",
")",
":",
"cross",
".",
"index",
"=",
"i",
"offspring",
"=",
"cross",
"(",
"random",
",",
"mom",
",",
"dad",
",",
"args",
")",
"for",
"o",
"in",
"offspring",
":",
"children",
".",
"append",
"(",
"o",
")",
"return",
"children",
"inspyred_crossover",
".",
"single_crossover",
"=",
"cross",
"return",
"inspyred_crossover"
] | Return an inspyred crossover function based on the given function.
This function generator takes a function that operates on only
two parent candidates to produce an iterable sequence of offspring
(typically two). The generator handles the pairing of selected
parents and collecting of all offspring.
The generated function chooses every odd candidate as a 'mom' and
every even as a 'dad' (discounting the last candidate if there is
an odd number). For each mom-dad pair, offspring are produced via
the `cross` function.
The given function ``cross`` must have the following signature::
offspring = cross(random, mom, dad, args)
This function is most commonly used as a function decorator with
the following usage::
@crossover
def cross(random, mom, dad, args):
# Implementation of paired crossing
pass
The generated function also contains an attribute named
``single_crossover`` which holds the original crossover function.
In this way, the original single-set-of-parents function can be
retrieved if necessary. | [
"Return",
"an",
"inspyred",
"crossover",
"function",
"based",
"on",
"the",
"given",
"function",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L38-L83 | train | 236,523 |
aarongarrett/inspyred | inspyred/ec/variators/crossovers.py | n_point_crossover | def n_point_crossover(random, mom, dad, args):
"""Return the offspring of n-point crossover on the candidates.
This function performs n-point crossover (NPX). It selects *n*
random points without replacement at which to 'cut' the candidate
solutions and recombine them.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *num_crossover_points* -- the number of crossover points used (default 1)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
num_crossover_points = args.setdefault('num_crossover_points', 1)
children = []
if random.random() < crossover_rate:
num_cuts = min(len(mom)-1, num_crossover_points)
cut_points = random.sample(range(1, len(mom)), num_cuts)
cut_points.sort()
bro = copy.copy(dad)
sis = copy.copy(mom)
normal = True
for i, (m, d) in enumerate(zip(mom, dad)):
if i in cut_points:
normal = not normal
if not normal:
bro[i] = m
sis[i] = d
normal = not normal
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | python | def n_point_crossover(random, mom, dad, args):
"""Return the offspring of n-point crossover on the candidates.
This function performs n-point crossover (NPX). It selects *n*
random points without replacement at which to 'cut' the candidate
solutions and recombine them.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *num_crossover_points* -- the number of crossover points used (default 1)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
num_crossover_points = args.setdefault('num_crossover_points', 1)
children = []
if random.random() < crossover_rate:
num_cuts = min(len(mom)-1, num_crossover_points)
cut_points = random.sample(range(1, len(mom)), num_cuts)
cut_points.sort()
bro = copy.copy(dad)
sis = copy.copy(mom)
normal = True
for i, (m, d) in enumerate(zip(mom, dad)):
if i in cut_points:
normal = not normal
if not normal:
bro[i] = m
sis[i] = d
normal = not normal
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | [
"def",
"n_point_crossover",
"(",
"random",
",",
"mom",
",",
"dad",
",",
"args",
")",
":",
"crossover_rate",
"=",
"args",
".",
"setdefault",
"(",
"'crossover_rate'",
",",
"1.0",
")",
"num_crossover_points",
"=",
"args",
".",
"setdefault",
"(",
"'num_crossover_points'",
",",
"1",
")",
"children",
"=",
"[",
"]",
"if",
"random",
".",
"random",
"(",
")",
"<",
"crossover_rate",
":",
"num_cuts",
"=",
"min",
"(",
"len",
"(",
"mom",
")",
"-",
"1",
",",
"num_crossover_points",
")",
"cut_points",
"=",
"random",
".",
"sample",
"(",
"range",
"(",
"1",
",",
"len",
"(",
"mom",
")",
")",
",",
"num_cuts",
")",
"cut_points",
".",
"sort",
"(",
")",
"bro",
"=",
"copy",
".",
"copy",
"(",
"dad",
")",
"sis",
"=",
"copy",
".",
"copy",
"(",
"mom",
")",
"normal",
"=",
"True",
"for",
"i",
",",
"(",
"m",
",",
"d",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"mom",
",",
"dad",
")",
")",
":",
"if",
"i",
"in",
"cut_points",
":",
"normal",
"=",
"not",
"normal",
"if",
"not",
"normal",
":",
"bro",
"[",
"i",
"]",
"=",
"m",
"sis",
"[",
"i",
"]",
"=",
"d",
"normal",
"=",
"not",
"normal",
"children",
".",
"append",
"(",
"bro",
")",
"children",
".",
"append",
"(",
"sis",
")",
"else",
":",
"children",
".",
"append",
"(",
"mom",
")",
"children",
".",
"append",
"(",
"dad",
")",
"return",
"children"
] | Return the offspring of n-point crossover on the candidates.
This function performs n-point crossover (NPX). It selects *n*
random points without replacement at which to 'cut' the candidate
solutions and recombine them.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *num_crossover_points* -- the number of crossover points used (default 1) | [
"Return",
"the",
"offspring",
"of",
"n",
"-",
"point",
"crossover",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L87-L129 | train | 236,524 |
aarongarrett/inspyred | inspyred/ec/variators/crossovers.py | uniform_crossover | def uniform_crossover(random, mom, dad, args):
"""Return the offspring of uniform crossover on the candidates.
This function performs uniform crossover (UX). For each element
of the parents, a biased coin is flipped to determine whether
the first offspring gets the 'mom' or the 'dad' element. An
optional keyword argument in args, ``ux_bias``, determines the bias.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ux_bias* -- the bias toward the first candidate in the crossover
(default 0.5)
"""
ux_bias = args.setdefault('ux_bias', 0.5)
crossover_rate = args.setdefault('crossover_rate', 1.0)
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
for i, (m, d) in enumerate(zip(mom, dad)):
if random.random() < ux_bias:
bro[i] = m
sis[i] = d
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | python | def uniform_crossover(random, mom, dad, args):
"""Return the offspring of uniform crossover on the candidates.
This function performs uniform crossover (UX). For each element
of the parents, a biased coin is flipped to determine whether
the first offspring gets the 'mom' or the 'dad' element. An
optional keyword argument in args, ``ux_bias``, determines the bias.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ux_bias* -- the bias toward the first candidate in the crossover
(default 0.5)
"""
ux_bias = args.setdefault('ux_bias', 0.5)
crossover_rate = args.setdefault('crossover_rate', 1.0)
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
for i, (m, d) in enumerate(zip(mom, dad)):
if random.random() < ux_bias:
bro[i] = m
sis[i] = d
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | [
"def",
"uniform_crossover",
"(",
"random",
",",
"mom",
",",
"dad",
",",
"args",
")",
":",
"ux_bias",
"=",
"args",
".",
"setdefault",
"(",
"'ux_bias'",
",",
"0.5",
")",
"crossover_rate",
"=",
"args",
".",
"setdefault",
"(",
"'crossover_rate'",
",",
"1.0",
")",
"children",
"=",
"[",
"]",
"if",
"random",
".",
"random",
"(",
")",
"<",
"crossover_rate",
":",
"bro",
"=",
"copy",
".",
"copy",
"(",
"dad",
")",
"sis",
"=",
"copy",
".",
"copy",
"(",
"mom",
")",
"for",
"i",
",",
"(",
"m",
",",
"d",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"mom",
",",
"dad",
")",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"ux_bias",
":",
"bro",
"[",
"i",
"]",
"=",
"m",
"sis",
"[",
"i",
"]",
"=",
"d",
"children",
".",
"append",
"(",
"bro",
")",
"children",
".",
"append",
"(",
"sis",
")",
"else",
":",
"children",
".",
"append",
"(",
"mom",
")",
"children",
".",
"append",
"(",
"dad",
")",
"return",
"children"
] | Return the offspring of uniform crossover on the candidates.
This function performs uniform crossover (UX). For each element
of the parents, a biased coin is flipped to determine whether
the first offspring gets the 'mom' or the 'dad' element. An
optional keyword argument in args, ``ux_bias``, determines the bias.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ux_bias* -- the bias toward the first candidate in the crossover
(default 0.5) | [
"Return",
"the",
"offspring",
"of",
"uniform",
"crossover",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L133-L170 | train | 236,525 |
aarongarrett/inspyred | inspyred/ec/variators/crossovers.py | partially_matched_crossover | def partially_matched_crossover(random, mom, dad, args):
"""Return the offspring of partially matched crossover on the candidates.
This function performs partially matched crossover (PMX). This type of
crossover assumes that candidates are composed of discrete values that
are permutations of a given set (typically integers). It produces offspring
that are themselves permutations of the set.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
size = len(mom)
points = random.sample(range(size), 2)
x, y = min(points), max(points)
bro = copy.copy(dad)
bro[x:y+1] = mom[x:y+1]
sis = copy.copy(mom)
sis[x:y+1] = dad[x:y+1]
for parent, child in zip([dad, mom], [bro, sis]):
for i in range(x, y+1):
if parent[i] not in child[x:y+1]:
spot = i
while x <= spot <= y:
spot = parent.index(child[spot])
child[spot] = parent[i]
return [bro, sis]
else:
return [mom, dad] | python | def partially_matched_crossover(random, mom, dad, args):
"""Return the offspring of partially matched crossover on the candidates.
This function performs partially matched crossover (PMX). This type of
crossover assumes that candidates are composed of discrete values that
are permutations of a given set (typically integers). It produces offspring
that are themselves permutations of the set.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
size = len(mom)
points = random.sample(range(size), 2)
x, y = min(points), max(points)
bro = copy.copy(dad)
bro[x:y+1] = mom[x:y+1]
sis = copy.copy(mom)
sis[x:y+1] = dad[x:y+1]
for parent, child in zip([dad, mom], [bro, sis]):
for i in range(x, y+1):
if parent[i] not in child[x:y+1]:
spot = i
while x <= spot <= y:
spot = parent.index(child[spot])
child[spot] = parent[i]
return [bro, sis]
else:
return [mom, dad] | [
"def",
"partially_matched_crossover",
"(",
"random",
",",
"mom",
",",
"dad",
",",
"args",
")",
":",
"crossover_rate",
"=",
"args",
".",
"setdefault",
"(",
"'crossover_rate'",
",",
"1.0",
")",
"if",
"random",
".",
"random",
"(",
")",
"<",
"crossover_rate",
":",
"size",
"=",
"len",
"(",
"mom",
")",
"points",
"=",
"random",
".",
"sample",
"(",
"range",
"(",
"size",
")",
",",
"2",
")",
"x",
",",
"y",
"=",
"min",
"(",
"points",
")",
",",
"max",
"(",
"points",
")",
"bro",
"=",
"copy",
".",
"copy",
"(",
"dad",
")",
"bro",
"[",
"x",
":",
"y",
"+",
"1",
"]",
"=",
"mom",
"[",
"x",
":",
"y",
"+",
"1",
"]",
"sis",
"=",
"copy",
".",
"copy",
"(",
"mom",
")",
"sis",
"[",
"x",
":",
"y",
"+",
"1",
"]",
"=",
"dad",
"[",
"x",
":",
"y",
"+",
"1",
"]",
"for",
"parent",
",",
"child",
"in",
"zip",
"(",
"[",
"dad",
",",
"mom",
"]",
",",
"[",
"bro",
",",
"sis",
"]",
")",
":",
"for",
"i",
"in",
"range",
"(",
"x",
",",
"y",
"+",
"1",
")",
":",
"if",
"parent",
"[",
"i",
"]",
"not",
"in",
"child",
"[",
"x",
":",
"y",
"+",
"1",
"]",
":",
"spot",
"=",
"i",
"while",
"x",
"<=",
"spot",
"<=",
"y",
":",
"spot",
"=",
"parent",
".",
"index",
"(",
"child",
"[",
"spot",
"]",
")",
"child",
"[",
"spot",
"]",
"=",
"parent",
"[",
"i",
"]",
"return",
"[",
"bro",
",",
"sis",
"]",
"else",
":",
"return",
"[",
"mom",
",",
"dad",
"]"
] | Return the offspring of partially matched crossover on the candidates.
This function performs partially matched crossover (PMX). This type of
crossover assumes that candidates are composed of discrete values that
are permutations of a given set (typically integers). It produces offspring
that are themselves permutations of the set.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0) | [
"Return",
"the",
"offspring",
"of",
"partially",
"matched",
"crossover",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L174-L212 | train | 236,526 |
aarongarrett/inspyred | inspyred/ec/variators/crossovers.py | arithmetic_crossover | def arithmetic_crossover(random, mom, dad, args):
"""Return the offspring of arithmetic crossover on the candidates.
This function performs arithmetic crossover (AX), which is similar to a
generalized weighted averaging of the candidate elements. The allele
of each parent is weighted by the *ax_alpha* keyword argument, and
the allele of the complement parent is weighted by 1 - *ax_alpha*.
This averaging is only done on the alleles listed in the *ax_points*
keyword argument. If this argument is ``None``, then all alleles
are used. This means that if this function is used with all default
values, then offspring are simple averages of their parents.
This function also makes use of the bounder function as specified
in the EC's ``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ax_alpha* -- the weight for the averaging (default 0.5)
- *ax_points* -- a list of points specifying the alleles to
recombine (default None)
"""
ax_alpha = args.setdefault('ax_alpha', 0.5)
ax_points = args.setdefault('ax_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if ax_points is None:
ax_points = list(range(min(len(bro), len(sis))))
for i in ax_points:
bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]
sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | python | def arithmetic_crossover(random, mom, dad, args):
"""Return the offspring of arithmetic crossover on the candidates.
This function performs arithmetic crossover (AX), which is similar to a
generalized weighted averaging of the candidate elements. The allele
of each parent is weighted by the *ax_alpha* keyword argument, and
the allele of the complement parent is weighted by 1 - *ax_alpha*.
This averaging is only done on the alleles listed in the *ax_points*
keyword argument. If this argument is ``None``, then all alleles
are used. This means that if this function is used with all default
values, then offspring are simple averages of their parents.
This function also makes use of the bounder function as specified
in the EC's ``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ax_alpha* -- the weight for the averaging (default 0.5)
- *ax_points* -- a list of points specifying the alleles to
recombine (default None)
"""
ax_alpha = args.setdefault('ax_alpha', 0.5)
ax_points = args.setdefault('ax_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if ax_points is None:
ax_points = list(range(min(len(bro), len(sis))))
for i in ax_points:
bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]
sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | [
"def",
"arithmetic_crossover",
"(",
"random",
",",
"mom",
",",
"dad",
",",
"args",
")",
":",
"ax_alpha",
"=",
"args",
".",
"setdefault",
"(",
"'ax_alpha'",
",",
"0.5",
")",
"ax_points",
"=",
"args",
".",
"setdefault",
"(",
"'ax_points'",
",",
"None",
")",
"crossover_rate",
"=",
"args",
".",
"setdefault",
"(",
"'crossover_rate'",
",",
"1.0",
")",
"bounder",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"bounder",
"children",
"=",
"[",
"]",
"if",
"random",
".",
"random",
"(",
")",
"<",
"crossover_rate",
":",
"bro",
"=",
"copy",
".",
"copy",
"(",
"dad",
")",
"sis",
"=",
"copy",
".",
"copy",
"(",
"mom",
")",
"if",
"ax_points",
"is",
"None",
":",
"ax_points",
"=",
"list",
"(",
"range",
"(",
"min",
"(",
"len",
"(",
"bro",
")",
",",
"len",
"(",
"sis",
")",
")",
")",
")",
"for",
"i",
"in",
"ax_points",
":",
"bro",
"[",
"i",
"]",
"=",
"ax_alpha",
"*",
"mom",
"[",
"i",
"]",
"+",
"(",
"1",
"-",
"ax_alpha",
")",
"*",
"dad",
"[",
"i",
"]",
"sis",
"[",
"i",
"]",
"=",
"ax_alpha",
"*",
"dad",
"[",
"i",
"]",
"+",
"(",
"1",
"-",
"ax_alpha",
")",
"*",
"mom",
"[",
"i",
"]",
"bro",
"=",
"bounder",
"(",
"bro",
",",
"args",
")",
"sis",
"=",
"bounder",
"(",
"sis",
",",
"args",
")",
"children",
".",
"append",
"(",
"bro",
")",
"children",
".",
"append",
"(",
"sis",
")",
"else",
":",
"children",
".",
"append",
"(",
"mom",
")",
"children",
".",
"append",
"(",
"dad",
")",
"return",
"children"
] | Return the offspring of arithmetic crossover on the candidates.
This function performs arithmetic crossover (AX), which is similar to a
generalized weighted averaging of the candidate elements. The allele
of each parent is weighted by the *ax_alpha* keyword argument, and
the allele of the complement parent is weighted by 1 - *ax_alpha*.
This averaging is only done on the alleles listed in the *ax_points*
keyword argument. If this argument is ``None``, then all alleles
are used. This means that if this function is used with all default
values, then offspring are simple averages of their parents.
This function also makes use of the bounder function as specified
in the EC's ``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ax_alpha* -- the weight for the averaging (default 0.5)
- *ax_points* -- a list of points specifying the alleles to
recombine (default None) | [
"Return",
"the",
"offspring",
"of",
"arithmetic",
"crossover",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L216-L265 | train | 236,527 |
aarongarrett/inspyred | inspyred/ec/variators/crossovers.py | blend_crossover | def blend_crossover(random, mom, dad, args):
"""Return the offspring of blend crossover on the candidates.
This function performs blend crossover (BLX), which is similar to
arithmetic crossover with a bit of mutation. It creates offspring
whose values are chosen randomly from a range bounded by the
parent alleles but that is also extended by some amount proportional
to the *blx_alpha* keyword argument. It is this extension of the
range that provides the additional exploration. This averaging is
only done on the alleles listed in the *blx_points* keyword argument.
If this argument is ``None``, then all alleles are used. This function
also makes use of the bounder function as specified in the EC's
``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *blx_alpha* -- the blending rate (default 0.1)
- *blx_points* -- a list of points specifying the alleles to
recombine (default None)
"""
blx_alpha = args.setdefault('blx_alpha', 0.1)
blx_points = args.setdefault('blx_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if blx_points is None:
blx_points = list(range(min(len(bro), len(sis))))
for i in blx_points:
smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])
delta = blx_alpha * (largest - smallest)
bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)
sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | python | def blend_crossover(random, mom, dad, args):
"""Return the offspring of blend crossover on the candidates.
This function performs blend crossover (BLX), which is similar to
arithmetic crossover with a bit of mutation. It creates offspring
whose values are chosen randomly from a range bounded by the
parent alleles but that is also extended by some amount proportional
to the *blx_alpha* keyword argument. It is this extension of the
range that provides the additional exploration. This averaging is
only done on the alleles listed in the *blx_points* keyword argument.
If this argument is ``None``, then all alleles are used. This function
also makes use of the bounder function as specified in the EC's
``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *blx_alpha* -- the blending rate (default 0.1)
- *blx_points* -- a list of points specifying the alleles to
recombine (default None)
"""
blx_alpha = args.setdefault('blx_alpha', 0.1)
blx_points = args.setdefault('blx_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if blx_points is None:
blx_points = list(range(min(len(bro), len(sis))))
for i in blx_points:
smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])
delta = blx_alpha * (largest - smallest)
bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)
sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | [
"def",
"blend_crossover",
"(",
"random",
",",
"mom",
",",
"dad",
",",
"args",
")",
":",
"blx_alpha",
"=",
"args",
".",
"setdefault",
"(",
"'blx_alpha'",
",",
"0.1",
")",
"blx_points",
"=",
"args",
".",
"setdefault",
"(",
"'blx_points'",
",",
"None",
")",
"crossover_rate",
"=",
"args",
".",
"setdefault",
"(",
"'crossover_rate'",
",",
"1.0",
")",
"bounder",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"bounder",
"children",
"=",
"[",
"]",
"if",
"random",
".",
"random",
"(",
")",
"<",
"crossover_rate",
":",
"bro",
"=",
"copy",
".",
"copy",
"(",
"dad",
")",
"sis",
"=",
"copy",
".",
"copy",
"(",
"mom",
")",
"if",
"blx_points",
"is",
"None",
":",
"blx_points",
"=",
"list",
"(",
"range",
"(",
"min",
"(",
"len",
"(",
"bro",
")",
",",
"len",
"(",
"sis",
")",
")",
")",
")",
"for",
"i",
"in",
"blx_points",
":",
"smallest",
",",
"largest",
"=",
"min",
"(",
"mom",
"[",
"i",
"]",
",",
"dad",
"[",
"i",
"]",
")",
",",
"max",
"(",
"mom",
"[",
"i",
"]",
",",
"dad",
"[",
"i",
"]",
")",
"delta",
"=",
"blx_alpha",
"*",
"(",
"largest",
"-",
"smallest",
")",
"bro",
"[",
"i",
"]",
"=",
"smallest",
"-",
"delta",
"+",
"random",
".",
"random",
"(",
")",
"*",
"(",
"largest",
"-",
"smallest",
"+",
"2",
"*",
"delta",
")",
"sis",
"[",
"i",
"]",
"=",
"smallest",
"-",
"delta",
"+",
"random",
".",
"random",
"(",
")",
"*",
"(",
"largest",
"-",
"smallest",
"+",
"2",
"*",
"delta",
")",
"bro",
"=",
"bounder",
"(",
"bro",
",",
"args",
")",
"sis",
"=",
"bounder",
"(",
"sis",
",",
"args",
")",
"children",
".",
"append",
"(",
"bro",
")",
"children",
".",
"append",
"(",
"sis",
")",
"else",
":",
"children",
".",
"append",
"(",
"mom",
")",
"children",
".",
"append",
"(",
"dad",
")",
"return",
"children"
] | Return the offspring of blend crossover on the candidates.
This function performs blend crossover (BLX), which is similar to
arithmetic crossover with a bit of mutation. It creates offspring
whose values are chosen randomly from a range bounded by the
parent alleles but that is also extended by some amount proportional
to the *blx_alpha* keyword argument. It is this extension of the
range that provides the additional exploration. This averaging is
only done on the alleles listed in the *blx_points* keyword argument.
If this argument is ``None``, then all alleles are used. This function
also makes use of the bounder function as specified in the EC's
``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *blx_alpha* -- the blending rate (default 0.1)
- *blx_points* -- a list of points specifying the alleles to
recombine (default None) | [
"Return",
"the",
"offspring",
"of",
"blend",
"crossover",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L269-L320 | train | 236,528 |
aarongarrett/inspyred | inspyred/ec/variators/crossovers.py | heuristic_crossover | def heuristic_crossover(random, candidates, args):
"""Return the offspring of heuristic crossover on the candidates.
It performs heuristic crossover (HX), which is similar to the
update rule used in particle swarm optimization. This function
also makes use of the bounder function as specified in the EC's
``evolve`` method.
.. note::
This function assumes that candidates can be pickled (for hashing
as keys to a dictionary).
.. Arguments:
random -- the random number generator object
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
if len(candidates) % 2 == 1:
candidates = candidates[:-1]
# Since we don't have fitness information in the candidates, we need
# to make a dictionary containing the candidate and its corresponding
# individual in the population.
population = list(args['_ec'].population)
lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))
moms = candidates[::2]
dads = candidates[1::2]
children = []
for mom, dad in zip(moms, dads):
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]
for i, (m, d) in enumerate(zip(mom, dad)):
negpos = 1 if mom_is_better else -1
val = d if mom_is_better else m
bro[i] = val + random.random() * negpos * (m - d)
sis[i] = val + random.random() * negpos * (m - d)
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | python | def heuristic_crossover(random, candidates, args):
"""Return the offspring of heuristic crossover on the candidates.
It performs heuristic crossover (HX), which is similar to the
update rule used in particle swarm optimization. This function
also makes use of the bounder function as specified in the EC's
``evolve`` method.
.. note::
This function assumes that candidates can be pickled (for hashing
as keys to a dictionary).
.. Arguments:
random -- the random number generator object
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
if len(candidates) % 2 == 1:
candidates = candidates[:-1]
# Since we don't have fitness information in the candidates, we need
# to make a dictionary containing the candidate and its corresponding
# individual in the population.
population = list(args['_ec'].population)
lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))
moms = candidates[::2]
dads = candidates[1::2]
children = []
for mom, dad in zip(moms, dads):
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]
for i, (m, d) in enumerate(zip(mom, dad)):
negpos = 1 if mom_is_better else -1
val = d if mom_is_better else m
bro[i] = val + random.random() * negpos * (m - d)
sis[i] = val + random.random() * negpos * (m - d)
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | [
"def",
"heuristic_crossover",
"(",
"random",
",",
"candidates",
",",
"args",
")",
":",
"crossover_rate",
"=",
"args",
".",
"setdefault",
"(",
"'crossover_rate'",
",",
"1.0",
")",
"bounder",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"bounder",
"if",
"len",
"(",
"candidates",
")",
"%",
"2",
"==",
"1",
":",
"candidates",
"=",
"candidates",
"[",
":",
"-",
"1",
"]",
"# Since we don't have fitness information in the candidates, we need ",
"# to make a dictionary containing the candidate and its corresponding ",
"# individual in the population.",
"population",
"=",
"list",
"(",
"args",
"[",
"'_ec'",
"]",
".",
"population",
")",
"lookup",
"=",
"dict",
"(",
"zip",
"(",
"[",
"pickle",
".",
"dumps",
"(",
"p",
".",
"candidate",
",",
"1",
")",
"for",
"p",
"in",
"population",
"]",
",",
"population",
")",
")",
"moms",
"=",
"candidates",
"[",
":",
":",
"2",
"]",
"dads",
"=",
"candidates",
"[",
"1",
":",
":",
"2",
"]",
"children",
"=",
"[",
"]",
"for",
"mom",
",",
"dad",
"in",
"zip",
"(",
"moms",
",",
"dads",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"crossover_rate",
":",
"bro",
"=",
"copy",
".",
"copy",
"(",
"dad",
")",
"sis",
"=",
"copy",
".",
"copy",
"(",
"mom",
")",
"mom_is_better",
"=",
"lookup",
"[",
"pickle",
".",
"dumps",
"(",
"mom",
",",
"1",
")",
"]",
">",
"lookup",
"[",
"pickle",
".",
"dumps",
"(",
"dad",
",",
"1",
")",
"]",
"for",
"i",
",",
"(",
"m",
",",
"d",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"mom",
",",
"dad",
")",
")",
":",
"negpos",
"=",
"1",
"if",
"mom_is_better",
"else",
"-",
"1",
"val",
"=",
"d",
"if",
"mom_is_better",
"else",
"m",
"bro",
"[",
"i",
"]",
"=",
"val",
"+",
"random",
".",
"random",
"(",
")",
"*",
"negpos",
"*",
"(",
"m",
"-",
"d",
")",
"sis",
"[",
"i",
"]",
"=",
"val",
"+",
"random",
".",
"random",
"(",
")",
"*",
"negpos",
"*",
"(",
"m",
"-",
"d",
")",
"bro",
"=",
"bounder",
"(",
"bro",
",",
"args",
")",
"sis",
"=",
"bounder",
"(",
"sis",
",",
"args",
")",
"children",
".",
"append",
"(",
"bro",
")",
"children",
".",
"append",
"(",
"sis",
")",
"else",
":",
"children",
".",
"append",
"(",
"mom",
")",
"children",
".",
"append",
"(",
"dad",
")",
"return",
"children"
] | Return the offspring of heuristic crossover on the candidates.
It performs heuristic crossover (HX), which is similar to the
update rule used in particle swarm optimization. This function
also makes use of the bounder function as specified in the EC's
``evolve`` method.
.. note::
This function assumes that candidates can be pickled (for hashing
as keys to a dictionary).
.. Arguments:
random -- the random number generator object
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0) | [
"Return",
"the",
"offspring",
"of",
"heuristic",
"crossover",
"on",
"the",
"candidates",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/variators/crossovers.py#L323-L379 | train | 236,529 |
aarongarrett/inspyred | docs/moonshot.py | gravitational_force | def gravitational_force(position_a, mass_a, position_b, mass_b):
"""Returns the gravitational force between the two bodies a and b."""
distance = distance_between(position_a, position_b)
# Calculate the direction and magnitude of the force.
angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0])
magnitude = G * mass_a * mass_b / (distance**2)
# Find the x and y components of the force.
# Determine sign based on which one is the larger body.
sign = -1 if mass_b > mass_a else 1
x_force = sign * magnitude * math.cos(angle)
y_force = sign * magnitude * math.sin(angle)
return x_force, y_force | python | def gravitational_force(position_a, mass_a, position_b, mass_b):
"""Returns the gravitational force between the two bodies a and b."""
distance = distance_between(position_a, position_b)
# Calculate the direction and magnitude of the force.
angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0])
magnitude = G * mass_a * mass_b / (distance**2)
# Find the x and y components of the force.
# Determine sign based on which one is the larger body.
sign = -1 if mass_b > mass_a else 1
x_force = sign * magnitude * math.cos(angle)
y_force = sign * magnitude * math.sin(angle)
return x_force, y_force | [
"def",
"gravitational_force",
"(",
"position_a",
",",
"mass_a",
",",
"position_b",
",",
"mass_b",
")",
":",
"distance",
"=",
"distance_between",
"(",
"position_a",
",",
"position_b",
")",
"# Calculate the direction and magnitude of the force.",
"angle",
"=",
"math",
".",
"atan2",
"(",
"position_a",
"[",
"1",
"]",
"-",
"position_b",
"[",
"1",
"]",
",",
"position_a",
"[",
"0",
"]",
"-",
"position_b",
"[",
"0",
"]",
")",
"magnitude",
"=",
"G",
"*",
"mass_a",
"*",
"mass_b",
"/",
"(",
"distance",
"**",
"2",
")",
"# Find the x and y components of the force.",
"# Determine sign based on which one is the larger body.",
"sign",
"=",
"-",
"1",
"if",
"mass_b",
">",
"mass_a",
"else",
"1",
"x_force",
"=",
"sign",
"*",
"magnitude",
"*",
"math",
".",
"cos",
"(",
"angle",
")",
"y_force",
"=",
"sign",
"*",
"magnitude",
"*",
"math",
".",
"sin",
"(",
"angle",
")",
"return",
"x_force",
",",
"y_force"
] | Returns the gravitational force between the two bodies a and b. | [
"Returns",
"the",
"gravitational",
"force",
"between",
"the",
"two",
"bodies",
"a",
"and",
"b",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/docs/moonshot.py#L37-L50 | train | 236,530 |
aarongarrett/inspyred | docs/moonshot.py | force_on_satellite | def force_on_satellite(position, mass):
"""Returns the total gravitational force acting on the body from the Earth and Moon."""
earth_grav_force = gravitational_force(position, mass, earth_position, earth_mass)
moon_grav_force = gravitational_force(position, mass, moon_position, moon_mass)
F_x = earth_grav_force[0] + moon_grav_force[0]
F_y = earth_grav_force[1] + moon_grav_force[1]
return F_x, F_y | python | def force_on_satellite(position, mass):
"""Returns the total gravitational force acting on the body from the Earth and Moon."""
earth_grav_force = gravitational_force(position, mass, earth_position, earth_mass)
moon_grav_force = gravitational_force(position, mass, moon_position, moon_mass)
F_x = earth_grav_force[0] + moon_grav_force[0]
F_y = earth_grav_force[1] + moon_grav_force[1]
return F_x, F_y | [
"def",
"force_on_satellite",
"(",
"position",
",",
"mass",
")",
":",
"earth_grav_force",
"=",
"gravitational_force",
"(",
"position",
",",
"mass",
",",
"earth_position",
",",
"earth_mass",
")",
"moon_grav_force",
"=",
"gravitational_force",
"(",
"position",
",",
"mass",
",",
"moon_position",
",",
"moon_mass",
")",
"F_x",
"=",
"earth_grav_force",
"[",
"0",
"]",
"+",
"moon_grav_force",
"[",
"0",
"]",
"F_y",
"=",
"earth_grav_force",
"[",
"1",
"]",
"+",
"moon_grav_force",
"[",
"1",
"]",
"return",
"F_x",
",",
"F_y"
] | Returns the total gravitational force acting on the body from the Earth and Moon. | [
"Returns",
"the",
"total",
"gravitational",
"force",
"acting",
"on",
"the",
"body",
"from",
"the",
"Earth",
"and",
"Moon",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/docs/moonshot.py#L52-L58 | train | 236,531 |
aarongarrett/inspyred | docs/moonshot.py | acceleration_of_satellite | def acceleration_of_satellite(position, mass):
"""Returns the acceleration based on all forces acting upon the body."""
F_x, F_y = force_on_satellite(position, mass)
return F_x / mass, F_y / mass | python | def acceleration_of_satellite(position, mass):
"""Returns the acceleration based on all forces acting upon the body."""
F_x, F_y = force_on_satellite(position, mass)
return F_x / mass, F_y / mass | [
"def",
"acceleration_of_satellite",
"(",
"position",
",",
"mass",
")",
":",
"F_x",
",",
"F_y",
"=",
"force_on_satellite",
"(",
"position",
",",
"mass",
")",
"return",
"F_x",
"/",
"mass",
",",
"F_y",
"/",
"mass"
] | Returns the acceleration based on all forces acting upon the body. | [
"Returns",
"the",
"acceleration",
"based",
"on",
"all",
"forces",
"acting",
"upon",
"the",
"body",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/docs/moonshot.py#L60-L63 | train | 236,532 |
aarongarrett/inspyred | inspyred/ec/evaluators.py | evaluator | def evaluator(evaluate):
"""Return an inspyred evaluator function based on the given function.
This function generator takes a function that evaluates only one
candidate. The generator handles the iteration over each candidate
to be evaluated.
The given function ``evaluate`` must have the following signature::
fitness = evaluate(candidate, args)
This function is most commonly used as a function decorator with
the following usage::
@evaluator
def evaluate(candidate, args):
# Implementation of evaluation
pass
The generated function also contains an attribute named
``single_evaluation`` which holds the original evaluation function.
In this way, the original single-candidate function can be
retrieved if necessary.
"""
@functools.wraps(evaluate)
def inspyred_evaluator(candidates, args):
fitness = []
for candidate in candidates:
fitness.append(evaluate(candidate, args))
return fitness
inspyred_evaluator.single_evaluation = evaluate
return inspyred_evaluator | python | def evaluator(evaluate):
"""Return an inspyred evaluator function based on the given function.
This function generator takes a function that evaluates only one
candidate. The generator handles the iteration over each candidate
to be evaluated.
The given function ``evaluate`` must have the following signature::
fitness = evaluate(candidate, args)
This function is most commonly used as a function decorator with
the following usage::
@evaluator
def evaluate(candidate, args):
# Implementation of evaluation
pass
The generated function also contains an attribute named
``single_evaluation`` which holds the original evaluation function.
In this way, the original single-candidate function can be
retrieved if necessary.
"""
@functools.wraps(evaluate)
def inspyred_evaluator(candidates, args):
fitness = []
for candidate in candidates:
fitness.append(evaluate(candidate, args))
return fitness
inspyred_evaluator.single_evaluation = evaluate
return inspyred_evaluator | [
"def",
"evaluator",
"(",
"evaluate",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"evaluate",
")",
"def",
"inspyred_evaluator",
"(",
"candidates",
",",
"args",
")",
":",
"fitness",
"=",
"[",
"]",
"for",
"candidate",
"in",
"candidates",
":",
"fitness",
".",
"append",
"(",
"evaluate",
"(",
"candidate",
",",
"args",
")",
")",
"return",
"fitness",
"inspyred_evaluator",
".",
"single_evaluation",
"=",
"evaluate",
"return",
"inspyred_evaluator"
] | Return an inspyred evaluator function based on the given function.
This function generator takes a function that evaluates only one
candidate. The generator handles the iteration over each candidate
to be evaluated.
The given function ``evaluate`` must have the following signature::
fitness = evaluate(candidate, args)
This function is most commonly used as a function decorator with
the following usage::
@evaluator
def evaluate(candidate, args):
# Implementation of evaluation
pass
The generated function also contains an attribute named
``single_evaluation`` which holds the original evaluation function.
In this way, the original single-candidate function can be
retrieved if necessary. | [
"Return",
"an",
"inspyred",
"evaluator",
"function",
"based",
"on",
"the",
"given",
"function",
".",
"This",
"function",
"generator",
"takes",
"a",
"function",
"that",
"evaluates",
"only",
"one",
"candidate",
".",
"The",
"generator",
"handles",
"the",
"iteration",
"over",
"each",
"candidate",
"to",
"be",
"evaluated",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/evaluators.py#L45-L77 | train | 236,533 |
aarongarrett/inspyred | inspyred/ec/evaluators.py | parallel_evaluation_pp | def parallel_evaluation_pp(candidates, args):
"""Evaluate the candidates in parallel using Parallel Python.
This function allows parallel evaluation of candidate solutions.
It uses the `Parallel Python <http://www.parallelpython.com>`_ (pp)
library to accomplish the parallelization. This library must already
be installed in order to use this function. The function assigns the
evaluation of each candidate to its own job, all of which are then
distributed to the available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *pp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *pp_dependencies* -- tuple of functional dependencies of the serial
evaluator (default ())
- *pp_modules* -- tuple of modules that must be imported for the
functional dependencies (default ())
- *pp_servers* -- tuple of servers (on a cluster) that will be used
for parallel processing (default ("*",))
- *pp_secret* -- string representing the secret key needed to authenticate
on a worker node (default "inspyred")
- *pp_nprocs* -- integer representing the number of worker processes to
start on the local machine (default "autodetect", which sets it to the
number of processors in the system)
For more information about these arguments, please consult the
documentation for `Parallel Python <http://www.parallelpython.com>`_.
"""
import pp
logger = args['_ec'].logger
try:
evaluator = args['pp_evaluator']
except KeyError:
logger.error('parallel_evaluation_pp requires \'pp_evaluator\' be defined in the keyword arguments list')
raise
secret_key = args.setdefault('pp_secret', 'inspyred')
try:
job_server = args['_pp_job_server']
except KeyError:
pp_servers = args.get('pp_servers', ("*",))
pp_nprocs = args.get('pp_nprocs', 'autodetect')
job_server = pp.Server(ncpus=pp_nprocs, ppservers=pp_servers, secret=secret_key)
args['_pp_job_server'] = job_server
pp_depends = args.setdefault('pp_dependencies', ())
pp_modules = args.setdefault('pp_modules', ())
pickled_args = {}
for key in args:
try:
pickle.dumps(args[key])
pickled_args[key] = args[key]
except (TypeError, pickle.PickleError, pickle.PicklingError):
logger.debug('unable to pickle args parameter {0} in parallel_evaluation_pp'.format(key))
pass
func_template = pp.Template(job_server, evaluator, pp_depends, pp_modules)
jobs = [func_template.submit([c], pickled_args) for c in candidates]
fitness = []
for i, job in enumerate(jobs):
r = job()
try:
fitness.append(r[0])
except TypeError:
logger.warning('parallel_evaluation_pp generated an invalid fitness for candidate {0}'.format(candidates[i]))
fitness.append(None)
return fitness | python | def parallel_evaluation_pp(candidates, args):
"""Evaluate the candidates in parallel using Parallel Python.
This function allows parallel evaluation of candidate solutions.
It uses the `Parallel Python <http://www.parallelpython.com>`_ (pp)
library to accomplish the parallelization. This library must already
be installed in order to use this function. The function assigns the
evaluation of each candidate to its own job, all of which are then
distributed to the available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *pp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *pp_dependencies* -- tuple of functional dependencies of the serial
evaluator (default ())
- *pp_modules* -- tuple of modules that must be imported for the
functional dependencies (default ())
- *pp_servers* -- tuple of servers (on a cluster) that will be used
for parallel processing (default ("*",))
- *pp_secret* -- string representing the secret key needed to authenticate
on a worker node (default "inspyred")
- *pp_nprocs* -- integer representing the number of worker processes to
start on the local machine (default "autodetect", which sets it to the
number of processors in the system)
For more information about these arguments, please consult the
documentation for `Parallel Python <http://www.parallelpython.com>`_.
"""
import pp
logger = args['_ec'].logger
try:
evaluator = args['pp_evaluator']
except KeyError:
logger.error('parallel_evaluation_pp requires \'pp_evaluator\' be defined in the keyword arguments list')
raise
secret_key = args.setdefault('pp_secret', 'inspyred')
try:
job_server = args['_pp_job_server']
except KeyError:
pp_servers = args.get('pp_servers', ("*",))
pp_nprocs = args.get('pp_nprocs', 'autodetect')
job_server = pp.Server(ncpus=pp_nprocs, ppservers=pp_servers, secret=secret_key)
args['_pp_job_server'] = job_server
pp_depends = args.setdefault('pp_dependencies', ())
pp_modules = args.setdefault('pp_modules', ())
pickled_args = {}
for key in args:
try:
pickle.dumps(args[key])
pickled_args[key] = args[key]
except (TypeError, pickle.PickleError, pickle.PicklingError):
logger.debug('unable to pickle args parameter {0} in parallel_evaluation_pp'.format(key))
pass
func_template = pp.Template(job_server, evaluator, pp_depends, pp_modules)
jobs = [func_template.submit([c], pickled_args) for c in candidates]
fitness = []
for i, job in enumerate(jobs):
r = job()
try:
fitness.append(r[0])
except TypeError:
logger.warning('parallel_evaluation_pp generated an invalid fitness for candidate {0}'.format(candidates[i]))
fitness.append(None)
return fitness | [
"def",
"parallel_evaluation_pp",
"(",
"candidates",
",",
"args",
")",
":",
"import",
"pp",
"logger",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"logger",
"try",
":",
"evaluator",
"=",
"args",
"[",
"'pp_evaluator'",
"]",
"except",
"KeyError",
":",
"logger",
".",
"error",
"(",
"'parallel_evaluation_pp requires \\'pp_evaluator\\' be defined in the keyword arguments list'",
")",
"raise",
"secret_key",
"=",
"args",
".",
"setdefault",
"(",
"'pp_secret'",
",",
"'inspyred'",
")",
"try",
":",
"job_server",
"=",
"args",
"[",
"'_pp_job_server'",
"]",
"except",
"KeyError",
":",
"pp_servers",
"=",
"args",
".",
"get",
"(",
"'pp_servers'",
",",
"(",
"\"*\"",
",",
")",
")",
"pp_nprocs",
"=",
"args",
".",
"get",
"(",
"'pp_nprocs'",
",",
"'autodetect'",
")",
"job_server",
"=",
"pp",
".",
"Server",
"(",
"ncpus",
"=",
"pp_nprocs",
",",
"ppservers",
"=",
"pp_servers",
",",
"secret",
"=",
"secret_key",
")",
"args",
"[",
"'_pp_job_server'",
"]",
"=",
"job_server",
"pp_depends",
"=",
"args",
".",
"setdefault",
"(",
"'pp_dependencies'",
",",
"(",
")",
")",
"pp_modules",
"=",
"args",
".",
"setdefault",
"(",
"'pp_modules'",
",",
"(",
")",
")",
"pickled_args",
"=",
"{",
"}",
"for",
"key",
"in",
"args",
":",
"try",
":",
"pickle",
".",
"dumps",
"(",
"args",
"[",
"key",
"]",
")",
"pickled_args",
"[",
"key",
"]",
"=",
"args",
"[",
"key",
"]",
"except",
"(",
"TypeError",
",",
"pickle",
".",
"PickleError",
",",
"pickle",
".",
"PicklingError",
")",
":",
"logger",
".",
"debug",
"(",
"'unable to pickle args parameter {0} in parallel_evaluation_pp'",
".",
"format",
"(",
"key",
")",
")",
"pass",
"func_template",
"=",
"pp",
".",
"Template",
"(",
"job_server",
",",
"evaluator",
",",
"pp_depends",
",",
"pp_modules",
")",
"jobs",
"=",
"[",
"func_template",
".",
"submit",
"(",
"[",
"c",
"]",
",",
"pickled_args",
")",
"for",
"c",
"in",
"candidates",
"]",
"fitness",
"=",
"[",
"]",
"for",
"i",
",",
"job",
"in",
"enumerate",
"(",
"jobs",
")",
":",
"r",
"=",
"job",
"(",
")",
"try",
":",
"fitness",
".",
"append",
"(",
"r",
"[",
"0",
"]",
")",
"except",
"TypeError",
":",
"logger",
".",
"warning",
"(",
"'parallel_evaluation_pp generated an invalid fitness for candidate {0}'",
".",
"format",
"(",
"candidates",
"[",
"i",
"]",
")",
")",
"fitness",
".",
"append",
"(",
"None",
")",
"return",
"fitness"
] | Evaluate the candidates in parallel using Parallel Python.
This function allows parallel evaluation of candidate solutions.
It uses the `Parallel Python <http://www.parallelpython.com>`_ (pp)
library to accomplish the parallelization. This library must already
be installed in order to use this function. The function assigns the
evaluation of each candidate to its own job, all of which are then
distributed to the available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *pp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *pp_dependencies* -- tuple of functional dependencies of the serial
evaluator (default ())
- *pp_modules* -- tuple of modules that must be imported for the
functional dependencies (default ())
- *pp_servers* -- tuple of servers (on a cluster) that will be used
for parallel processing (default ("*",))
- *pp_secret* -- string representing the secret key needed to authenticate
on a worker node (default "inspyred")
- *pp_nprocs* -- integer representing the number of worker processes to
start on the local machine (default "autodetect", which sets it to the
number of processors in the system)
For more information about these arguments, please consult the
documentation for `Parallel Python <http://www.parallelpython.com>`_. | [
"Evaluate",
"the",
"candidates",
"in",
"parallel",
"using",
"Parallel",
"Python",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/evaluators.py#L80-L162 | train | 236,534 |
aarongarrett/inspyred | inspyred/ec/evaluators.py | parallel_evaluation_mp | def parallel_evaluation_mp(candidates, args):
"""Evaluate the candidates in parallel using ``multiprocessing``.
This function allows parallel evaluation of candidate solutions.
It uses the standard multiprocessing library to accomplish the
parallelization. The function assigns the evaluation of each
candidate to its own job, all of which are then distributed to the
available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *mp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *mp_nprocs* -- number of processors that will be used (default machine
cpu count)
"""
import time
import multiprocessing
logger = args['_ec'].logger
try:
evaluator = args['mp_evaluator']
except KeyError:
logger.error('parallel_evaluation_mp requires \'mp_evaluator\' be defined in the keyword arguments list')
raise
try:
nprocs = args['mp_nprocs']
except KeyError:
nprocs = multiprocessing.cpu_count()
pickled_args = {}
for key in args:
try:
pickle.dumps(args[key])
pickled_args[key] = args[key]
except (TypeError, pickle.PickleError, pickle.PicklingError):
logger.debug('unable to pickle args parameter {0} in parallel_evaluation_mp'.format(key))
pass
start = time.time()
try:
pool = multiprocessing.Pool(processes=nprocs)
results = [pool.apply_async(evaluator, ([c], pickled_args)) for c in candidates]
pool.close()
pool.join()
return [r.get()[0] for r in results]
except (OSError, RuntimeError) as e:
logger.error('failed parallel_evaluation_mp: {0}'.format(str(e)))
raise
else:
end = time.time()
logger.debug('completed parallel_evaluation_mp in {0} seconds'.format(end - start)) | python | def parallel_evaluation_mp(candidates, args):
"""Evaluate the candidates in parallel using ``multiprocessing``.
This function allows parallel evaluation of candidate solutions.
It uses the standard multiprocessing library to accomplish the
parallelization. The function assigns the evaluation of each
candidate to its own job, all of which are then distributed to the
available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *mp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *mp_nprocs* -- number of processors that will be used (default machine
cpu count)
"""
import time
import multiprocessing
logger = args['_ec'].logger
try:
evaluator = args['mp_evaluator']
except KeyError:
logger.error('parallel_evaluation_mp requires \'mp_evaluator\' be defined in the keyword arguments list')
raise
try:
nprocs = args['mp_nprocs']
except KeyError:
nprocs = multiprocessing.cpu_count()
pickled_args = {}
for key in args:
try:
pickle.dumps(args[key])
pickled_args[key] = args[key]
except (TypeError, pickle.PickleError, pickle.PicklingError):
logger.debug('unable to pickle args parameter {0} in parallel_evaluation_mp'.format(key))
pass
start = time.time()
try:
pool = multiprocessing.Pool(processes=nprocs)
results = [pool.apply_async(evaluator, ([c], pickled_args)) for c in candidates]
pool.close()
pool.join()
return [r.get()[0] for r in results]
except (OSError, RuntimeError) as e:
logger.error('failed parallel_evaluation_mp: {0}'.format(str(e)))
raise
else:
end = time.time()
logger.debug('completed parallel_evaluation_mp in {0} seconds'.format(end - start)) | [
"def",
"parallel_evaluation_mp",
"(",
"candidates",
",",
"args",
")",
":",
"import",
"time",
"import",
"multiprocessing",
"logger",
"=",
"args",
"[",
"'_ec'",
"]",
".",
"logger",
"try",
":",
"evaluator",
"=",
"args",
"[",
"'mp_evaluator'",
"]",
"except",
"KeyError",
":",
"logger",
".",
"error",
"(",
"'parallel_evaluation_mp requires \\'mp_evaluator\\' be defined in the keyword arguments list'",
")",
"raise",
"try",
":",
"nprocs",
"=",
"args",
"[",
"'mp_nprocs'",
"]",
"except",
"KeyError",
":",
"nprocs",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"pickled_args",
"=",
"{",
"}",
"for",
"key",
"in",
"args",
":",
"try",
":",
"pickle",
".",
"dumps",
"(",
"args",
"[",
"key",
"]",
")",
"pickled_args",
"[",
"key",
"]",
"=",
"args",
"[",
"key",
"]",
"except",
"(",
"TypeError",
",",
"pickle",
".",
"PickleError",
",",
"pickle",
".",
"PicklingError",
")",
":",
"logger",
".",
"debug",
"(",
"'unable to pickle args parameter {0} in parallel_evaluation_mp'",
".",
"format",
"(",
"key",
")",
")",
"pass",
"start",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
"=",
"nprocs",
")",
"results",
"=",
"[",
"pool",
".",
"apply_async",
"(",
"evaluator",
",",
"(",
"[",
"c",
"]",
",",
"pickled_args",
")",
")",
"for",
"c",
"in",
"candidates",
"]",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"return",
"[",
"r",
".",
"get",
"(",
")",
"[",
"0",
"]",
"for",
"r",
"in",
"results",
"]",
"except",
"(",
"OSError",
",",
"RuntimeError",
")",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'failed parallel_evaluation_mp: {0}'",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"raise",
"else",
":",
"end",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"debug",
"(",
"'completed parallel_evaluation_mp in {0} seconds'",
".",
"format",
"(",
"end",
"-",
"start",
")",
")"
] | Evaluate the candidates in parallel using ``multiprocessing``.
This function allows parallel evaluation of candidate solutions.
It uses the standard multiprocessing library to accomplish the
parallelization. The function assigns the evaluation of each
candidate to its own job, all of which are then distributed to the
available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *mp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *mp_nprocs* -- number of processors that will be used (default machine
cpu count) | [
"Evaluate",
"the",
"candidates",
"in",
"parallel",
"using",
"multiprocessing",
"."
] | d5976ab503cc9d51c6f586cbb7bb601a38c01128 | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/evaluators.py#L165-L230 | train | 236,535 |
djsutho/django-debug-toolbar-request-history | ddt_request_history/panels/request_history.py | allow_ajax | def allow_ajax(request):
"""
Default function to determine whether to show the toolbar on a given page.
"""
if request.META.get('REMOTE_ADDR', None) not in settings.INTERNAL_IPS:
return False
if toolbar_version < LooseVersion('1.8') \
and request.get_full_path().startswith(DEBUG_TOOLBAR_URL_PREFIX) \
and request.GET.get('panel_id', None) != 'RequestHistoryPanel':
return False
return bool(settings.DEBUG) | python | def allow_ajax(request):
"""
Default function to determine whether to show the toolbar on a given page.
"""
if request.META.get('REMOTE_ADDR', None) not in settings.INTERNAL_IPS:
return False
if toolbar_version < LooseVersion('1.8') \
and request.get_full_path().startswith(DEBUG_TOOLBAR_URL_PREFIX) \
and request.GET.get('panel_id', None) != 'RequestHistoryPanel':
return False
return bool(settings.DEBUG) | [
"def",
"allow_ajax",
"(",
"request",
")",
":",
"if",
"request",
".",
"META",
".",
"get",
"(",
"'REMOTE_ADDR'",
",",
"None",
")",
"not",
"in",
"settings",
".",
"INTERNAL_IPS",
":",
"return",
"False",
"if",
"toolbar_version",
"<",
"LooseVersion",
"(",
"'1.8'",
")",
"and",
"request",
".",
"get_full_path",
"(",
")",
".",
"startswith",
"(",
"DEBUG_TOOLBAR_URL_PREFIX",
")",
"and",
"request",
".",
"GET",
".",
"get",
"(",
"'panel_id'",
",",
"None",
")",
"!=",
"'RequestHistoryPanel'",
":",
"return",
"False",
"return",
"bool",
"(",
"settings",
".",
"DEBUG",
")"
] | Default function to determine whether to show the toolbar on a given page. | [
"Default",
"function",
"to",
"determine",
"whether",
"to",
"show",
"the",
"toolbar",
"on",
"a",
"given",
"page",
"."
] | b3da3e12762d68c23a307ffb279e6047f80ba695 | https://github.com/djsutho/django-debug-toolbar-request-history/blob/b3da3e12762d68c23a307ffb279e6047f80ba695/ddt_request_history/panels/request_history.py#L104-L114 | train | 236,536 |
djsutho/django-debug-toolbar-request-history | ddt_request_history/panels/request_history.py | RequestHistoryPanel.content | def content(self):
""" Content of the panel when it's displayed in full screen. """
toolbars = OrderedDict()
for id, toolbar in DebugToolbar._store.items():
content = {}
for panel in toolbar.panels:
panel_id = None
nav_title = ''
nav_subtitle = ''
try:
panel_id = panel.panel_id
nav_title = panel.nav_title
nav_subtitle = panel.nav_subtitle() if isinstance(
panel.nav_subtitle, Callable) else panel.nav_subtitle
except Exception:
logger.debug('Error parsing panel info:', exc_info=True)
if panel_id is not None:
content.update({
panel_id: {
'panel_id': panel_id,
'nav_title': nav_title,
'nav_subtitle': nav_subtitle,
}
})
toolbars[id] = {
'toolbar': toolbar,
'content': content
}
return get_template().render(Context({
'toolbars': OrderedDict(reversed(list(toolbars.items()))),
'trunc_length': CONFIG.get('RH_POST_TRUNC_LENGTH', 0)
})) | python | def content(self):
""" Content of the panel when it's displayed in full screen. """
toolbars = OrderedDict()
for id, toolbar in DebugToolbar._store.items():
content = {}
for panel in toolbar.panels:
panel_id = None
nav_title = ''
nav_subtitle = ''
try:
panel_id = panel.panel_id
nav_title = panel.nav_title
nav_subtitle = panel.nav_subtitle() if isinstance(
panel.nav_subtitle, Callable) else panel.nav_subtitle
except Exception:
logger.debug('Error parsing panel info:', exc_info=True)
if panel_id is not None:
content.update({
panel_id: {
'panel_id': panel_id,
'nav_title': nav_title,
'nav_subtitle': nav_subtitle,
}
})
toolbars[id] = {
'toolbar': toolbar,
'content': content
}
return get_template().render(Context({
'toolbars': OrderedDict(reversed(list(toolbars.items()))),
'trunc_length': CONFIG.get('RH_POST_TRUNC_LENGTH', 0)
})) | [
"def",
"content",
"(",
"self",
")",
":",
"toolbars",
"=",
"OrderedDict",
"(",
")",
"for",
"id",
",",
"toolbar",
"in",
"DebugToolbar",
".",
"_store",
".",
"items",
"(",
")",
":",
"content",
"=",
"{",
"}",
"for",
"panel",
"in",
"toolbar",
".",
"panels",
":",
"panel_id",
"=",
"None",
"nav_title",
"=",
"''",
"nav_subtitle",
"=",
"''",
"try",
":",
"panel_id",
"=",
"panel",
".",
"panel_id",
"nav_title",
"=",
"panel",
".",
"nav_title",
"nav_subtitle",
"=",
"panel",
".",
"nav_subtitle",
"(",
")",
"if",
"isinstance",
"(",
"panel",
".",
"nav_subtitle",
",",
"Callable",
")",
"else",
"panel",
".",
"nav_subtitle",
"except",
"Exception",
":",
"logger",
".",
"debug",
"(",
"'Error parsing panel info:'",
",",
"exc_info",
"=",
"True",
")",
"if",
"panel_id",
"is",
"not",
"None",
":",
"content",
".",
"update",
"(",
"{",
"panel_id",
":",
"{",
"'panel_id'",
":",
"panel_id",
",",
"'nav_title'",
":",
"nav_title",
",",
"'nav_subtitle'",
":",
"nav_subtitle",
",",
"}",
"}",
")",
"toolbars",
"[",
"id",
"]",
"=",
"{",
"'toolbar'",
":",
"toolbar",
",",
"'content'",
":",
"content",
"}",
"return",
"get_template",
"(",
")",
".",
"render",
"(",
"Context",
"(",
"{",
"'toolbars'",
":",
"OrderedDict",
"(",
"reversed",
"(",
"list",
"(",
"toolbars",
".",
"items",
"(",
")",
")",
")",
")",
",",
"'trunc_length'",
":",
"CONFIG",
".",
"get",
"(",
"'RH_POST_TRUNC_LENGTH'",
",",
"0",
")",
"}",
")",
")"
] | Content of the panel when it's displayed in full screen. | [
"Content",
"of",
"the",
"panel",
"when",
"it",
"s",
"displayed",
"in",
"full",
"screen",
"."
] | b3da3e12762d68c23a307ffb279e6047f80ba695 | https://github.com/djsutho/django-debug-toolbar-request-history/blob/b3da3e12762d68c23a307ffb279e6047f80ba695/ddt_request_history/panels/request_history.py#L184-L215 | train | 236,537 |
AlexMathew/scrapple | scrapple/selectors/selector.py | Selector.extract_content | def extract_content(self, selector='', attr='', default='', connector='', *args, **kwargs):
"""
Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content
"""
try:
if selector.lower() == "url":
return self.url
if attr.lower() == "text":
tag = self.get_tree_tag(selector=selector, get_one=True)
content = connector.join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
tag = self.get_tree_tag(selector=selector, get_one=True)
content = tag.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except IndexError:
if default is not "":
return default
raise Exception("There is no content for the %s selector - %s" % (self.__selector_type__, selector))
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector)) | python | def extract_content(self, selector='', attr='', default='', connector='', *args, **kwargs):
"""
Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content
"""
try:
if selector.lower() == "url":
return self.url
if attr.lower() == "text":
tag = self.get_tree_tag(selector=selector, get_one=True)
content = connector.join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
tag = self.get_tree_tag(selector=selector, get_one=True)
content = tag.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except IndexError:
if default is not "":
return default
raise Exception("There is no content for the %s selector - %s" % (self.__selector_type__, selector))
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector)) | [
"def",
"extract_content",
"(",
"self",
",",
"selector",
"=",
"''",
",",
"attr",
"=",
"''",
",",
"default",
"=",
"''",
",",
"connector",
"=",
"''",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"if",
"selector",
".",
"lower",
"(",
")",
"==",
"\"url\"",
":",
"return",
"self",
".",
"url",
"if",
"attr",
".",
"lower",
"(",
")",
"==",
"\"text\"",
":",
"tag",
"=",
"self",
".",
"get_tree_tag",
"(",
"selector",
"=",
"selector",
",",
"get_one",
"=",
"True",
")",
"content",
"=",
"connector",
".",
"join",
"(",
"[",
"make_ascii",
"(",
"x",
")",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"tag",
".",
"itertext",
"(",
")",
"]",
")",
"content",
"=",
"content",
".",
"replace",
"(",
"\"\\n\"",
",",
"\" \"",
")",
".",
"strip",
"(",
")",
"else",
":",
"tag",
"=",
"self",
".",
"get_tree_tag",
"(",
"selector",
"=",
"selector",
",",
"get_one",
"=",
"True",
")",
"content",
"=",
"tag",
".",
"get",
"(",
"attr",
")",
"if",
"attr",
"in",
"[",
"\"href\"",
",",
"\"src\"",
"]",
":",
"content",
"=",
"urljoin",
"(",
"self",
".",
"url",
",",
"content",
")",
"return",
"content",
"except",
"IndexError",
":",
"if",
"default",
"is",
"not",
"\"\"",
":",
"return",
"default",
"raise",
"Exception",
"(",
"\"There is no content for the %s selector - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"selector",
")",
")",
"except",
"XPathError",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"selector",
")",
")"
] | Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content | [
"Method",
"for",
"performing",
"the",
"content",
"extraction",
"for",
"the",
"particular",
"selector",
"type",
".",
"\\"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L81-L119 | train | 236,538 |
AlexMathew/scrapple | scrapple/selectors/selector.py | Selector.extract_links | def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector)) | python | def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector)) | [
"def",
"extract_links",
"(",
"self",
",",
"selector",
"=",
"''",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"links",
"=",
"self",
".",
"get_tree_tag",
"(",
"selector",
"=",
"selector",
")",
"for",
"link",
"in",
"links",
":",
"next_url",
"=",
"urljoin",
"(",
"self",
".",
"url",
",",
"link",
".",
"get",
"(",
"'href'",
")",
")",
"yield",
"type",
"(",
"self",
")",
"(",
"next_url",
")",
"except",
"XPathError",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"selector",
")",
")",
"except",
"Exception",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"selector",
")",
")"
] | Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through | [
"Method",
"for",
"performing",
"the",
"link",
"extraction",
"for",
"the",
"crawler",
".",
"\\"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L122-L147 | train | 236,539 |
AlexMathew/scrapple | scrapple/selectors/selector.py | Selector.extract_tabular | def extract_tabular(self, header='', prefix='', suffix='', table_type='', *args, **kwargs):
"""
Method for performing the tabular data extraction. \
:param result: A dictionary containing the extracted data so far
:param table_type: Can be "rows" or "columns". This determines the type of table to be extracted. \
A row extraction is when there is a single row to be extracted and mapped to a set of headers. \
A column extraction is when a set of rows have to be extracted, giving a list of header-value mappings.
:param header: The headers to be used for the table. This can be a list of headers, or a selector that gives the list of headers
:param prefix: A prefix to be added to each header
:param suffix: A suffix to be added to each header
:param selector: For row extraction, this is a selector that gives the row to be extracted. \
For column extraction, this is a list of selectors for each column.
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param verbosity: The verbosity set as the argument for scrapple run
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs
"""
if type(header) in [str, unicode]:
try:
header_list = self.get_tree_tag(header)
table_headers = [prefix + h.text + suffix for h in header_list]
except XPathError:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
except Exception:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
else:
table_headers = [prefix + h + suffix for h in header]
if len(table_headers) == 0:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
if table_type not in ["rows", "columns"]:
raise Exception("Specify 'rows' or 'columns' in table_type")
if table_type == "rows":
result_list = self.extract_rows(table_headers=table_headers, *args, **kwargs)
else:
result_list = self.extract_columns(table_headers=table_headers, *args, **kwargs)
def extract_tabular(self, header='', prefix='', suffix='', table_type='', *args, **kwargs):
    """
    Method for performing the tabular data extraction.

    :param table_type: Can be "rows" or "columns". Determines the type of table extraction:
        a row extraction maps a single extracted row onto the headers, while a column
        extraction extracts a set of rows, giving a list of header-value mappings
    :param header: The headers for the table - either a list of header strings, or a
        selector expression that yields the list of header tags
    :param prefix: A prefix added to each header
    :param suffix: A suffix added to each header
    :return: A 2-tuple containing the list of column headers and the list of
        dictionaries holding the (header, content) pairs
    """
    # `unicode` exists only on Python 2; fall back to `str` alone on Python 3.
    # (The original `type(header) in [str, unicode]` raised NameError on py3.)
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if isinstance(header, string_types):
        # The header argument is a selector expression for the header tags.
        try:
            header_list = self.get_tree_tag(header)
            table_headers = [prefix + h.text + suffix for h in header_list]
        except Exception:
            # Covers both selector errors (XPathError) and any failure while
            # reading the header tags' text -- the original raised the same
            # message from two separate handlers.
            raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
    else:
        # The header argument is already a list of header strings.
        table_headers = [prefix + h + suffix for h in header]
    if len(table_headers) == 0:
        raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
    if table_type not in ["rows", "columns"]:
        raise Exception("Specify 'rows' or 'columns' in table_type")
    if table_type == "rows":
        result_list = self.extract_rows(table_headers=table_headers, *args, **kwargs)
    else:
        result_list = self.extract_columns(table_headers=table_headers, *args, **kwargs)
    return table_headers, result_list
"def",
"extract_tabular",
"(",
"self",
",",
"header",
"=",
"''",
",",
"prefix",
"=",
"''",
",",
"suffix",
"=",
"''",
",",
"table_type",
"=",
"''",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"type",
"(",
"header",
")",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"try",
":",
"header_list",
"=",
"self",
".",
"get_tree_tag",
"(",
"header",
")",
"table_headers",
"=",
"[",
"prefix",
"+",
"h",
".",
"text",
"+",
"suffix",
"for",
"h",
"in",
"header_list",
"]",
"except",
"XPathError",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector for table header - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"header",
")",
")",
"except",
"Exception",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector for table header - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"header",
")",
")",
"else",
":",
"table_headers",
"=",
"[",
"prefix",
"+",
"h",
"+",
"suffix",
"for",
"h",
"in",
"header",
"]",
"if",
"len",
"(",
"table_headers",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector for table header - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"header",
")",
")",
"if",
"table_type",
"not",
"in",
"[",
"\"rows\"",
",",
"\"columns\"",
"]",
":",
"raise",
"Exception",
"(",
"\"Specify 'rows' or 'columns' in table_type\"",
")",
"if",
"table_type",
"==",
"\"rows\"",
":",
"result_list",
"=",
"self",
".",
"extract_rows",
"(",
"table_headers",
"=",
"table_headers",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"result_list",
"=",
"self",
".",
"extract_columns",
"(",
"table_headers",
"=",
"table_headers",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"table_headers",
",",
"result_list"
] | Method for performing the tabular data extraction. \
:param result: A dictionary containing the extracted data so far
:param table_type: Can be "rows" or "columns". This determines the type of table to be extracted. \
A row extraction is when there is a single row to be extracted and mapped to a set of headers. \
A column extraction is when a set of rows have to be extracted, giving a list of header-value mappings.
:param header: The headers to be used for the table. This can be a list of headers, or a selector that gives the list of headers
:param prefix: A prefix to be added to each header
:param suffix: A suffix to be added to each header
:param selector: For row extraction, this is a selector that gives the row to be extracted. \
For column extraction, this is a list of selectors for each column.
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param verbosity: The verbosity set as the argument for scrapple run
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs | [
"Method",
"for",
"performing",
"the",
"tabular",
"data",
"extraction",
".",
"\\"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L150-L187 | train | 236,540 |
AlexMathew/scrapple | scrapple/selectors/selector.py | Selector.extract_rows | def extract_rows(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Row data extraction for extract_tabular
"""
result_list = []
try:
values = self.get_tree_tag(selector)
if len(table_headers) >= len(values):
from itertools import izip_longest
pairs = izip_longest(table_headers, values, fillvalue=default)
else:
from itertools import izip
pairs = izip(table_headers, values)
for head, val in pairs:
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
if attr.lower() == "text":
try:
content = connector.join([make_ascii(x).strip() for x in val.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = val.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
result[head] = content
result_list.append(result)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
def extract_rows(self, result=None, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
    """
    Row data extraction for extract_tabular.

    :param result: A dictionary containing the extracted data so far
    :param selector: Selector expression that yields the cells of the single row
    :param table_headers: List of column headers to map the cells onto
    :param attr: The attribute to extract from each selected tag ("text" for text content)
    :param connector: String used to join the text fragments of a tag
    :param default: Default value used when a cell yields no content
    :param verbosity: The verbosity set as the argument for scrapple run
    :return: A list containing the single (header -> content) mapping for the row
    """
    # Fresh dict per call: the original mutable default ({}) was mutated
    # below and leaked extracted values between successive calls.
    if result is None:
        result = {}
    # itertools.izip/izip_longest exist only on Python 2; fall back to py3 builtins.
    try:
        from itertools import izip_longest as zip_longest
        from itertools import izip as zip_pairs
    except ImportError:
        from itertools import zip_longest
        zip_pairs = zip
    result_list = []
    try:
        values = self.get_tree_tag(selector)
        if len(table_headers) >= len(values):
            # Pad missing cells with the default value.
            pairs = zip_longest(table_headers, values, fillvalue=default)
        else:
            # More cells than headers: the extra cells are ignored.
            pairs = zip_pairs(table_headers, values)
        for head, val in pairs:
            if verbosity > 1:
                print("\nExtracting", head, "attribute", sep=' ', end='')
            if attr.lower() == "text":
                try:
                    content = connector.join([make_ascii(x).strip() for x in val.itertext()])
                except Exception:
                    content = default
                content = content.replace("\n", " ").strip()
            else:
                content = val.get(attr)
                if attr in ["href", "src"]:
                    # Resolve relative links against the page URL.
                    content = urljoin(self.url, content)
            result[head] = content
        # Append once, after every cell is filled: the original appended inside
        # the loop, producing len(table_headers) references to the same dict.
        result_list.append(result)
    except XPathError:
        raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
    except TypeError:
        raise Exception("Selector expression string to be provided. Got " + selector)
    return result_list
"def",
"extract_rows",
"(",
"self",
",",
"result",
"=",
"{",
"}",
",",
"selector",
"=",
"''",
",",
"table_headers",
"=",
"[",
"]",
",",
"attr",
"=",
"''",
",",
"connector",
"=",
"''",
",",
"default",
"=",
"''",
",",
"verbosity",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result_list",
"=",
"[",
"]",
"try",
":",
"values",
"=",
"self",
".",
"get_tree_tag",
"(",
"selector",
")",
"if",
"len",
"(",
"table_headers",
")",
">=",
"len",
"(",
"values",
")",
":",
"from",
"itertools",
"import",
"izip_longest",
"pairs",
"=",
"izip_longest",
"(",
"table_headers",
",",
"values",
",",
"fillvalue",
"=",
"default",
")",
"else",
":",
"from",
"itertools",
"import",
"izip",
"pairs",
"=",
"izip",
"(",
"table_headers",
",",
"values",
")",
"for",
"head",
",",
"val",
"in",
"pairs",
":",
"if",
"verbosity",
">",
"1",
":",
"print",
"(",
"\"\\nExtracting\"",
",",
"head",
",",
"\"attribute\"",
",",
"sep",
"=",
"' '",
",",
"end",
"=",
"''",
")",
"if",
"attr",
".",
"lower",
"(",
")",
"==",
"\"text\"",
":",
"try",
":",
"content",
"=",
"connector",
".",
"join",
"(",
"[",
"make_ascii",
"(",
"x",
")",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"val",
".",
"itertext",
"(",
")",
"]",
")",
"except",
"Exception",
":",
"content",
"=",
"default",
"content",
"=",
"content",
".",
"replace",
"(",
"\"\\n\"",
",",
"\" \"",
")",
".",
"strip",
"(",
")",
"else",
":",
"content",
"=",
"val",
".",
"get",
"(",
"attr",
")",
"if",
"attr",
"in",
"[",
"\"href\"",
",",
"\"src\"",
"]",
":",
"content",
"=",
"urljoin",
"(",
"self",
".",
"url",
",",
"content",
")",
"result",
"[",
"head",
"]",
"=",
"content",
"result_list",
".",
"append",
"(",
"result",
")",
"except",
"XPathError",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"selector",
")",
")",
"except",
"TypeError",
":",
"raise",
"Exception",
"(",
"\"Selector expression string to be provided. Got \"",
"+",
"selector",
")",
"return",
"result_list"
] | Row data extraction for extract_tabular | [
"Row",
"data",
"extraction",
"for",
"extract_tabular"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L190-L224 | train | 236,541 |
AlexMathew/scrapple | scrapple/selectors/selector.py | Selector.extract_columns | def extract_columns(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Column data extraction for extract_tabular
"""
result_list = []
try:
if type(selector) in [str, unicode]:
selectors = [selector]
elif type(selector) == list:
selectors = selector[:]
else:
raise Exception("Use a list of selector expressions for the various columns")
from itertools import izip, count
pairs = izip(table_headers, selectors)
columns = {}
for head, selector in pairs:
columns[head] = self.get_tree_tag(selector)
try:
for i in count(start=0):
r = result.copy()
for head in columns.keys():
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
col = columns[head][i]
if attr == "text":
try:
content = connector.join([make_ascii(x).strip() for x in col.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = col.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
r[head] = content
result_list.append(r)
except IndexError:
pass
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
def extract_columns(self, result=None, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
    """
    Column data extraction for extract_tabular.

    :param result: A dictionary containing the extracted data so far; copied into every row
    :param selector: A selector expression, or a list of expressions (one per column)
    :param table_headers: List of column headers, parallel to the selectors
    :param attr: The attribute to extract from each selected tag ("text" for text content)
    :param connector: String used to join the text fragments of a tag
    :param default: Default value used when a tag yields no text content
    :param verbosity: The verbosity set as the argument for scrapple run
    :return: A list of (header -> content) dictionaries, one per table row
    """
    if result is None:
        result = {}
    # Python 2/3 compatibility: `unicode` and itertools.izip exist only on py2.
    # (The original `type(selector) in [str, unicode]` raised NameError on py3.)
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    try:
        from itertools import izip as zip_pairs
    except ImportError:
        zip_pairs = zip
    from itertools import count
    result_list = []
    try:
        if isinstance(selector, string_types):
            selectors = [selector]
        elif isinstance(selector, list):
            selectors = selector[:]
        else:
            raise Exception("Use a list of selector expressions for the various columns")
        pairs = zip_pairs(table_headers, selectors)
        columns = {}
        for head, selector in pairs:
            columns[head] = self.get_tree_tag(selector)
        try:
            # Walk row indices until the shortest column runs out of cells.
            for i in count(start=0):
                r = result.copy()
                for head in columns.keys():
                    if verbosity > 1:
                        print("\nExtracting", head, "attribute", sep=' ', end='')
                    col = columns[head][i]
                    if attr == "text":
                        try:
                            content = connector.join([make_ascii(x).strip() for x in col.itertext()])
                        except Exception:
                            content = default
                        content = content.replace("\n", " ").strip()
                    else:
                        content = col.get(attr)
                        if attr in ["href", "src"]:
                            # Resolve relative links against the page URL.
                            content = urljoin(self.url, content)
                    r[head] = content
                result_list.append(r)
        except IndexError:
            # Raised by columns[head][i] once a column is exhausted -- this is
            # the intended loop-termination condition.
            pass
    except XPathError:
        raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
    except TypeError:
        raise Exception("Selector expression string to be provided. Got " + selector)
    return result_list
"def",
"extract_columns",
"(",
"self",
",",
"result",
"=",
"{",
"}",
",",
"selector",
"=",
"''",
",",
"table_headers",
"=",
"[",
"]",
",",
"attr",
"=",
"''",
",",
"connector",
"=",
"''",
",",
"default",
"=",
"''",
",",
"verbosity",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result_list",
"=",
"[",
"]",
"try",
":",
"if",
"type",
"(",
"selector",
")",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"selectors",
"=",
"[",
"selector",
"]",
"elif",
"type",
"(",
"selector",
")",
"==",
"list",
":",
"selectors",
"=",
"selector",
"[",
":",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"Use a list of selector expressions for the various columns\"",
")",
"from",
"itertools",
"import",
"izip",
",",
"count",
"pairs",
"=",
"izip",
"(",
"table_headers",
",",
"selectors",
")",
"columns",
"=",
"{",
"}",
"for",
"head",
",",
"selector",
"in",
"pairs",
":",
"columns",
"[",
"head",
"]",
"=",
"self",
".",
"get_tree_tag",
"(",
"selector",
")",
"try",
":",
"for",
"i",
"in",
"count",
"(",
"start",
"=",
"0",
")",
":",
"r",
"=",
"result",
".",
"copy",
"(",
")",
"for",
"head",
"in",
"columns",
".",
"keys",
"(",
")",
":",
"if",
"verbosity",
">",
"1",
":",
"print",
"(",
"\"\\nExtracting\"",
",",
"head",
",",
"\"attribute\"",
",",
"sep",
"=",
"' '",
",",
"end",
"=",
"''",
")",
"col",
"=",
"columns",
"[",
"head",
"]",
"[",
"i",
"]",
"if",
"attr",
"==",
"\"text\"",
":",
"try",
":",
"content",
"=",
"connector",
".",
"join",
"(",
"[",
"make_ascii",
"(",
"x",
")",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"col",
".",
"itertext",
"(",
")",
"]",
")",
"except",
"Exception",
":",
"content",
"=",
"default",
"content",
"=",
"content",
".",
"replace",
"(",
"\"\\n\"",
",",
"\" \"",
")",
".",
"strip",
"(",
")",
"else",
":",
"content",
"=",
"col",
".",
"get",
"(",
"attr",
")",
"if",
"attr",
"in",
"[",
"\"href\"",
",",
"\"src\"",
"]",
":",
"content",
"=",
"urljoin",
"(",
"self",
".",
"url",
",",
"content",
")",
"r",
"[",
"head",
"]",
"=",
"content",
"result_list",
".",
"append",
"(",
"r",
")",
"except",
"IndexError",
":",
"pass",
"except",
"XPathError",
":",
"raise",
"Exception",
"(",
"\"Invalid %s selector - %s\"",
"%",
"(",
"self",
".",
"__selector_type__",
",",
"selector",
")",
")",
"except",
"TypeError",
":",
"raise",
"Exception",
"(",
"\"Selector expression string to be provided. Got \"",
"+",
"selector",
")",
"return",
"result_list"
] | Column data extraction for extract_tabular | [
"Column",
"data",
"extraction",
"for",
"extract_tabular"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L227-L271 | train | 236,542 |
AlexMathew/scrapple | scrapple/cmd.py | runCLI | def runCLI():
"""
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class.
"""
args = docopt(__doc__, version='0.3.0')
try:
check_arguments(args)
command_list = ['genconfig', 'run', 'generate']
select = itemgetter('genconfig', 'run', 'generate')
selectedCommand = command_list[select(args).index(True)]
cmdClass = get_command_class(selectedCommand)
obj = cmdClass(args)
obj.execute_command()
except POSSIBLE_EXCEPTIONS as e:
def runCLI():
    """
    The starting point for the execution of the Scrapple command line tool.

    runCLI uses the docstring as the usage description for the scrapple command. \
    The class for the required command is selected by a dynamic dispatch, and the \
    command is executed through the execute_command() method of the command class.
    """
    arguments = docopt(__doc__, version='0.3.0')
    try:
        check_arguments(arguments)
        # Pick the (single) command flag that docopt marked True.
        commands = ('genconfig', 'run', 'generate')
        flags = [arguments[name] for name in commands]
        chosen = commands[flags.index(True)]
        command_class = get_command_class(chosen)
        command = command_class(arguments)
        command.execute_command()
    except POSSIBLE_EXCEPTIONS as e:
        print('\n', e, '\n')
print('\n', e, '\n') | [
"def",
"runCLI",
"(",
")",
":",
"args",
"=",
"docopt",
"(",
"__doc__",
",",
"version",
"=",
"'0.3.0'",
")",
"try",
":",
"check_arguments",
"(",
"args",
")",
"command_list",
"=",
"[",
"'genconfig'",
",",
"'run'",
",",
"'generate'",
"]",
"select",
"=",
"itemgetter",
"(",
"'genconfig'",
",",
"'run'",
",",
"'generate'",
")",
"selectedCommand",
"=",
"command_list",
"[",
"select",
"(",
"args",
")",
".",
"index",
"(",
"True",
")",
"]",
"cmdClass",
"=",
"get_command_class",
"(",
"selectedCommand",
")",
"obj",
"=",
"cmdClass",
"(",
"args",
")",
"obj",
".",
"execute_command",
"(",
")",
"except",
"POSSIBLE_EXCEPTIONS",
"as",
"e",
":",
"print",
"(",
"'\\n'",
",",
"e",
",",
"'\\n'",
")"
] | The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class. | [
"The",
"starting",
"point",
"for",
"the",
"execution",
"of",
"the",
"Scrapple",
"command",
"line",
"tool",
"."
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/cmd.py#L49-L67 | train | 236,543 |
AlexMathew/scrapple | scrapple/utils/exceptions.py | check_arguments | def check_arguments(args):
"""
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
"""
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise InvalidType("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise InvalidSelector("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise InvalidOutputType("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
message = "<projectname> should consist of letters, digits or _"
raise InvalidProjectName(message)
try:
if int(args['--levels']) < 1:
message = "--levels should be greater than, or equal to 1"
raise InvalidLevels(message)
except (TypeError, ValueError):
message = " ".join([
"--levels should be an integer and not of type",
"{}".format(type(args['--levels']))
])
def check_arguments(args):
    """
    Validates the arguments passed through the CLI commands.

    :param args: The arguments passed in the CLI, parsed by the docopt module
    :return: None
    """
    # Project names may only contain word characters (letters, digits, underscore).
    invalid_name_pattern = re.compile(r'[^a-zA-Z0-9_]')
    if args['genconfig']:
        if args['--type'] not in ('scraper', 'crawler'):
            raise InvalidType("--type has to be 'scraper' or 'crawler'")
        if args['--selector'] not in ('xpath', 'css'):
            raise InvalidSelector("--selector has to be 'xpath' or 'css'")
    if args['generate'] or args['run']:
        if args['--output_type'] not in ('json', 'csv'):
            raise InvalidOutputType("--output_type has to be 'json' or 'csv'")
    if any(args[command] for command in ('genconfig', 'generate', 'run')):
        if invalid_name_pattern.search(args['<projectname>']) is not None:
            raise InvalidProjectName("<projectname> should consist of letters, digits or _")
        try:
            if int(args['--levels']) < 1:
                raise InvalidLevels("--levels should be greater than, or equal to 1")
        except (TypeError, ValueError):
            # --levels was missing (None) or not parseable as an integer.
            raise InvalidLevels(
                "--levels should be an integer and not of type {}".format(type(args['--levels']))
            )
"def",
"check_arguments",
"(",
"args",
")",
":",
"projectname_re",
"=",
"re",
".",
"compile",
"(",
"r'[^a-zA-Z0-9_]'",
")",
"if",
"args",
"[",
"'genconfig'",
"]",
":",
"if",
"args",
"[",
"'--type'",
"]",
"not",
"in",
"[",
"'scraper'",
",",
"'crawler'",
"]",
":",
"raise",
"InvalidType",
"(",
"\"--type has to be 'scraper' or 'crawler'\"",
")",
"if",
"args",
"[",
"'--selector'",
"]",
"not",
"in",
"[",
"'xpath'",
",",
"'css'",
"]",
":",
"raise",
"InvalidSelector",
"(",
"\"--selector has to be 'xpath' or 'css'\"",
")",
"if",
"args",
"[",
"'generate'",
"]",
"or",
"args",
"[",
"'run'",
"]",
":",
"if",
"args",
"[",
"'--output_type'",
"]",
"not",
"in",
"[",
"'json'",
",",
"'csv'",
"]",
":",
"raise",
"InvalidOutputType",
"(",
"\"--output_type has to be 'json' or 'csv'\"",
")",
"if",
"args",
"[",
"'genconfig'",
"]",
"or",
"args",
"[",
"'generate'",
"]",
"or",
"args",
"[",
"'run'",
"]",
":",
"if",
"projectname_re",
".",
"search",
"(",
"args",
"[",
"'<projectname>'",
"]",
")",
"is",
"not",
"None",
":",
"message",
"=",
"\"<projectname> should consist of letters, digits or _\"",
"raise",
"InvalidProjectName",
"(",
"message",
")",
"try",
":",
"if",
"int",
"(",
"args",
"[",
"'--levels'",
"]",
")",
"<",
"1",
":",
"message",
"=",
"\"--levels should be greater than, or equal to 1\"",
"raise",
"InvalidLevels",
"(",
"message",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"message",
"=",
"\" \"",
".",
"join",
"(",
"[",
"\"--levels should be an integer and not of type\"",
",",
"\"{}\"",
".",
"format",
"(",
"type",
"(",
"args",
"[",
"'--levels'",
"]",
")",
")",
"]",
")",
"raise",
"InvalidLevels",
"(",
"message",
")"
] | Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None | [
"Validates",
"the",
"arguments",
"passed",
"through",
"the",
"CLI",
"commands",
"."
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/exceptions.py#L36-L66 | train | 236,544 |
AlexMathew/scrapple | scrapple/utils/form.py | form_to_json | def form_to_json(form):
"""
Takes the form from the POST request in the web interface, and generates the JSON config\
file
:param form: The form from the POST request
:return: None
"""
config = dict()
if form['project_name'] == "":
raise Exception('Project name cannot be empty.')
if form['selector_type'] not in ["css", "xpath"]:
raise Exception('Selector type has to css or xpath')
config['project_name'] = form['project_name']
config['selector_type'] = form['selector_type']
config['scraping'] = dict()
if form['url'] == "":
raise Exception('URL cannot be empty')
config['scraping']['url'] = form['url']
config['scraping']['data'] = list()
for i in itertools.count(start=1):
try:
data = {
'field': form['field_' + str(i)],
'selector': form['selector_' + str(i)],
'attr': form['attribute_' + str(i)],
'default': form['default_' + str(i)]
}
config['scraping']['data'].append(data)
except KeyError:
break
# TODO : Crawler 'next' parameter handling
with open(os.path.join(os.getcwd(), form['project_name'] + '.json'), 'w') as f:
json.dump(config, f)
def form_to_json(form):
    """
    Takes the form from the POST request in the web interface, and generates the JSON config\
    file

    :param form: The form from the POST request
    :return: None
    """
    if form['project_name'] == "":
        raise Exception('Project name cannot be empty.')
    if form['selector_type'] not in ["css", "xpath"]:
        raise Exception('Selector type has to css or xpath')
    config = dict()
    config['project_name'] = form['project_name']
    config['selector_type'] = form['selector_type']
    config['scraping'] = dict()
    if form['url'] == "":
        raise Exception('URL cannot be empty')
    config['scraping']['url'] = form['url']
    config['scraping']['data'] = list()
    # Collect numbered field groups (field_1, selector_1, ...) until one is missing.
    index = 1
    while True:
        try:
            config['scraping']['data'].append({
                'field': form['field_' + str(index)],
                'selector': form['selector_' + str(index)],
                'attr': form['attribute_' + str(index)],
                'default': form['default_' + str(index)],
            })
        except KeyError:
            break
        index += 1
    # TODO : Crawler 'next' parameter handling
    with open(os.path.join(os.getcwd(), form['project_name'] + '.json'), 'w') as f:
        json.dump(config, f)
    return
"def",
"form_to_json",
"(",
"form",
")",
":",
"config",
"=",
"dict",
"(",
")",
"if",
"form",
"[",
"'project_name'",
"]",
"==",
"\"\"",
":",
"raise",
"Exception",
"(",
"'Project name cannot be empty.'",
")",
"if",
"form",
"[",
"'selector_type'",
"]",
"not",
"in",
"[",
"\"css\"",
",",
"\"xpath\"",
"]",
":",
"raise",
"Exception",
"(",
"'Selector type has to css or xpath'",
")",
"config",
"[",
"'project_name'",
"]",
"=",
"form",
"[",
"'project_name'",
"]",
"config",
"[",
"'selector_type'",
"]",
"=",
"form",
"[",
"'selector_type'",
"]",
"config",
"[",
"'scraping'",
"]",
"=",
"dict",
"(",
")",
"if",
"form",
"[",
"'url'",
"]",
"==",
"\"\"",
":",
"raise",
"Exception",
"(",
"'URL cannot be empty'",
")",
"config",
"[",
"'scraping'",
"]",
"[",
"'url'",
"]",
"=",
"form",
"[",
"'url'",
"]",
"config",
"[",
"'scraping'",
"]",
"[",
"'data'",
"]",
"=",
"list",
"(",
")",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
"start",
"=",
"1",
")",
":",
"try",
":",
"data",
"=",
"{",
"'field'",
":",
"form",
"[",
"'field_'",
"+",
"str",
"(",
"i",
")",
"]",
",",
"'selector'",
":",
"form",
"[",
"'selector_'",
"+",
"str",
"(",
"i",
")",
"]",
",",
"'attr'",
":",
"form",
"[",
"'attribute_'",
"+",
"str",
"(",
"i",
")",
"]",
",",
"'default'",
":",
"form",
"[",
"'default_'",
"+",
"str",
"(",
"i",
")",
"]",
"}",
"config",
"[",
"'scraping'",
"]",
"[",
"'data'",
"]",
".",
"append",
"(",
"data",
")",
"except",
"KeyError",
":",
"break",
"# TODO : Crawler 'next' parameter handling",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"form",
"[",
"'project_name'",
"]",
"+",
"'.json'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"config",
",",
"f",
")",
"return"
] | Takes the form from the POST request in the web interface, and generates the JSON config\
file
:param form: The form from the POST request
:return: None | [
"Takes",
"the",
"form",
"from",
"the",
"POST",
"request",
"in",
"the",
"web",
"interface",
"and",
"generates",
"the",
"JSON",
"config",
"\\",
"file"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/form.py#L13-L48 | train | 236,545 |
AlexMathew/scrapple | scrapple/commands/run.py | RunCommand.execute_command | def execute_command(self):
"""
The run command implements the web content extractor corresponding to the given \
configuration file.
The execute_command() validates the input project name and opens the JSON \
configuration file. The run() method handles the execution of the extractor run.
The extractor implementation follows these primary steps :
1. Selects the appropriate :ref:`selector class <implementation-selectors>` through \
a dynamic dispatch, with the selector_type argument from the CLI input.
#. Iterate through the data section in level-0 of the configuration file. \
On each data item, call the extract_content() method from the selector class to \
extract the content according to the specified extractor rule.
#. If there are multiple levels of the extractor, i.e, if there is a 'next' \
attribute in the configuration file, call the traverse_next() \
:ref:`utility function <implementation-utils>` and parse through successive levels \
of the configuration file.
#. According to the --output_type argument, the result data is saved in a JSON \
document or a CSV document.
"""
try:
self.args['--verbosity'] = int(self.args['--verbosity'])
if self.args['--verbosity'] not in [0, 1, 2]:
raise ValueError
if self.args['--verbosity'] > 0:
print(Back.GREEN + Fore.BLACK + "Scrapple Run")
print(Back.RESET + Fore.RESET)
import json
with open(self.args['<projectname>'] + '.json', 'r') as f:
self.config = json.load(f)
validate_config(self.config)
self.run()
except ValueError:
print(Back.WHITE + Fore.RED + "Use 0, 1 or 2 for verbosity." \
+ Back.RESET + Fore.RESET, sep="")
except IOError:
print(Back.WHITE + Fore.RED + self.args['<projectname>'], ".json does not ", \
"exist. Use ``scrapple genconfig``." + Back.RESET + Fore.RESET, sep="")
except InvalidConfigException as e:
def execute_command(self):
    """
    The run command implements the web content extractor corresponding to the given \
    configuration file.

    The execute_command() validates the input project name and opens the JSON \
    configuration file. The run() method handles the execution of the extractor run.

    The extractor implementation follows these primary steps :

    1. Selects the appropriate :ref:`selector class <implementation-selectors>` through \
    a dynamic dispatch, with the selector_type argument from the CLI input.

    #. Iterate through the data section in level-0 of the configuration file. \
    On each data item, call the extract_content() method from the selector class to \
    extract the content according to the specified extractor rule.

    #. If there are multiple levels of the extractor, i.e, if there is a 'next' \
    attribute in the configuration file, call the traverse_next() \
    :ref:`utility function <implementation-utils>` and parse through successive levels \
    of the configuration file.

    #. According to the --output_type argument, the result data is saved in a JSON \
    document or a CSV document.
    """
    try:
        self.args['--verbosity'] = int(self.args['--verbosity'])
        if self.args['--verbosity'] not in [0, 1, 2]:
            raise ValueError
        if self.args['--verbosity'] > 0:
            print(Back.GREEN + Fore.BLACK + "Scrapple Run")
            print(Back.RESET + Fore.RESET)
        import json
        with open(self.args['<projectname>'] + '.json', 'r') as f:
            self.config = json.load(f)
        validate_config(self.config)
        self.run()
    except ValueError:
        # NOTE(review): json.JSONDecodeError subclasses ValueError, so a malformed
        # config file also lands here and prints the verbosity message -- confirm
        # whether that is intended before narrowing this handler.
        print(Back.WHITE + Fore.RED + "Use 0, 1 or 2 for verbosity." \
            + Back.RESET + Fore.RESET, sep="")
    except IOError:
        print(Back.WHITE + Fore.RED + self.args['<projectname>'], ".json does not ", \
            "exist. Use ``scrapple genconfig``." + Back.RESET + Fore.RESET, sep="")
    except InvalidConfigException as e:
        # str(e): the original concatenated the exception instance itself to a
        # str (`Fore.RED + e`), which raises TypeError on Python 3.
        print(Back.WHITE + Fore.RED + str(e) + Back.RESET + Fore.RESET, sep="")
"def",
"execute_command",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
"=",
"int",
"(",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
")",
"if",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
"not",
"in",
"[",
"0",
",",
"1",
",",
"2",
"]",
":",
"raise",
"ValueError",
"if",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
">",
"0",
":",
"print",
"(",
"Back",
".",
"GREEN",
"+",
"Fore",
".",
"BLACK",
"+",
"\"Scrapple Run\"",
")",
"print",
"(",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
")",
"import",
"json",
"with",
"open",
"(",
"self",
".",
"args",
"[",
"'<projectname>'",
"]",
"+",
"'.json'",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"config",
"=",
"json",
".",
"load",
"(",
"f",
")",
"validate_config",
"(",
"self",
".",
"config",
")",
"self",
".",
"run",
"(",
")",
"except",
"ValueError",
":",
"print",
"(",
"Back",
".",
"WHITE",
"+",
"Fore",
".",
"RED",
"+",
"\"Use 0, 1 or 2 for verbosity.\"",
"+",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
",",
"sep",
"=",
"\"\"",
")",
"except",
"IOError",
":",
"print",
"(",
"Back",
".",
"WHITE",
"+",
"Fore",
".",
"RED",
"+",
"self",
".",
"args",
"[",
"'<projectname>'",
"]",
",",
"\".json does not \"",
",",
"\"exist. Use ``scrapple genconfig``.\"",
"+",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
",",
"sep",
"=",
"\"\"",
")",
"except",
"InvalidConfigException",
"as",
"e",
":",
"print",
"(",
"Back",
".",
"WHITE",
"+",
"Fore",
".",
"RED",
"+",
"e",
"+",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
",",
"sep",
"=",
"\"\"",
")"
] | The run command implements the web content extractor corresponding to the given \
configuration file.
The execute_command() validates the input project name and opens the JSON \
configuration file. The run() method handles the execution of the extractor run.
The extractor implementation follows these primary steps :
1. Selects the appropriate :ref:`selector class <implementation-selectors>` through \
a dynamic dispatch, with the selector_type argument from the CLI input.
#. Iterate through the data section in level-0 of the configuration file. \
On each data item, call the extract_content() method from the selector class to \
extract the content according to the specified extractor rule.
#. If there are multiple levels of the extractor, i.e, if there is a 'next' \
attribute in the configuration file, call the traverse_next() \
:ref:`utility function <implementation-utils>` and parse through successive levels \
of the configuration file.
#. According to the --output_type argument, the result data is saved in a JSON \
document or a CSV document. | [
"The",
"run",
"command",
"implements",
"the",
"web",
"content",
"extractor",
"corresponding",
"to",
"the",
"given",
"\\",
"configuration",
"file",
"."
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/commands/run.py#L29-L74 | train | 236,546 |
AlexMathew/scrapple | scrapple/utils/config.py | traverse_next | def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):
"""
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
"""
for link in page.extract_links(selector=nextx['follow_link']):
if verbosity > 0:
print('\n')
print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
r = results.copy()
for attribute in nextx['scraping'].get('data'):
if attribute['field'] != "":
if verbosity > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
r[attribute['field']] = link.extract_content(**attribute)
if not nextx['scraping'].get('table'):
result_list = [r]
else:
tables = nextx['scraping'].get('table', [])
for table in tables:
table.update({
'result': r,
'verbosity': verbosity
})
table_headers, result_list = link.extract_tabular(**table)
tabular_data_headers.extend(table_headers)
if not nextx['scraping'].get('next'):
for r in result_list:
yield (tabular_data_headers, r)
else:
for nextx2 in nextx['scraping'].get('next'):
for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
yield (tdh, result) | python | def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):
"""
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
"""
for link in page.extract_links(selector=nextx['follow_link']):
if verbosity > 0:
print('\n')
print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
r = results.copy()
for attribute in nextx['scraping'].get('data'):
if attribute['field'] != "":
if verbosity > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
r[attribute['field']] = link.extract_content(**attribute)
if not nextx['scraping'].get('table'):
result_list = [r]
else:
tables = nextx['scraping'].get('table', [])
for table in tables:
table.update({
'result': r,
'verbosity': verbosity
})
table_headers, result_list = link.extract_tabular(**table)
tabular_data_headers.extend(table_headers)
if not nextx['scraping'].get('next'):
for r in result_list:
yield (tabular_data_headers, r)
else:
for nextx2 in nextx['scraping'].get('next'):
for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
yield (tdh, result) | [
"def",
"traverse_next",
"(",
"page",
",",
"nextx",
",",
"results",
",",
"tabular_data_headers",
"=",
"[",
"]",
",",
"verbosity",
"=",
"0",
")",
":",
"for",
"link",
"in",
"page",
".",
"extract_links",
"(",
"selector",
"=",
"nextx",
"[",
"'follow_link'",
"]",
")",
":",
"if",
"verbosity",
">",
"0",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"Back",
".",
"YELLOW",
"+",
"Fore",
".",
"BLUE",
"+",
"\"Loading page \"",
",",
"link",
".",
"url",
"+",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
",",
"end",
"=",
"''",
")",
"r",
"=",
"results",
".",
"copy",
"(",
")",
"for",
"attribute",
"in",
"nextx",
"[",
"'scraping'",
"]",
".",
"get",
"(",
"'data'",
")",
":",
"if",
"attribute",
"[",
"'field'",
"]",
"!=",
"\"\"",
":",
"if",
"verbosity",
">",
"1",
":",
"print",
"(",
"\"\\nExtracting\"",
",",
"attribute",
"[",
"'field'",
"]",
",",
"\"attribute\"",
",",
"sep",
"=",
"' '",
",",
"end",
"=",
"''",
")",
"r",
"[",
"attribute",
"[",
"'field'",
"]",
"]",
"=",
"link",
".",
"extract_content",
"(",
"*",
"*",
"attribute",
")",
"if",
"not",
"nextx",
"[",
"'scraping'",
"]",
".",
"get",
"(",
"'table'",
")",
":",
"result_list",
"=",
"[",
"r",
"]",
"else",
":",
"tables",
"=",
"nextx",
"[",
"'scraping'",
"]",
".",
"get",
"(",
"'table'",
",",
"[",
"]",
")",
"for",
"table",
"in",
"tables",
":",
"table",
".",
"update",
"(",
"{",
"'result'",
":",
"r",
",",
"'verbosity'",
":",
"verbosity",
"}",
")",
"table_headers",
",",
"result_list",
"=",
"link",
".",
"extract_tabular",
"(",
"*",
"*",
"table",
")",
"tabular_data_headers",
".",
"extend",
"(",
"table_headers",
")",
"if",
"not",
"nextx",
"[",
"'scraping'",
"]",
".",
"get",
"(",
"'next'",
")",
":",
"for",
"r",
"in",
"result_list",
":",
"yield",
"(",
"tabular_data_headers",
",",
"r",
")",
"else",
":",
"for",
"nextx2",
"in",
"nextx",
"[",
"'scraping'",
"]",
".",
"get",
"(",
"'next'",
")",
":",
"for",
"tdh",
",",
"result",
"in",
"traverse_next",
"(",
"link",
",",
"nextx2",
",",
"r",
",",
"tabular_data_headers",
"=",
"tabular_data_headers",
",",
"verbosity",
"=",
"verbosity",
")",
":",
"yield",
"(",
"tdh",
",",
"result",
")"
] | Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator | [
"Recursive",
"generator",
"to",
"traverse",
"through",
"the",
"next",
"attribute",
"and",
"\\",
"crawl",
"through",
"the",
"links",
"to",
"be",
"followed",
"."
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L20-L58 | train | 236,547 |
AlexMathew/scrapple | scrapple/utils/config.py | validate_config | def validate_config(config):
"""
Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.
:param config: The configuration file that contains the specification of the extractor
:return: True if config is valid, else raises a exception that specifies the correction to be made
"""
fields = [f for f in get_fields(config)]
if len(fields) != len(set(fields)):
raise InvalidConfigException(
"Invalid configuration file - %d duplicate field names" % len(fields) - len(set(fields))
)
return True | python | def validate_config(config):
"""
Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.
:param config: The configuration file that contains the specification of the extractor
:return: True if config is valid, else raises a exception that specifies the correction to be made
"""
fields = [f for f in get_fields(config)]
if len(fields) != len(set(fields)):
raise InvalidConfigException(
"Invalid configuration file - %d duplicate field names" % len(fields) - len(set(fields))
)
return True | [
"def",
"validate_config",
"(",
"config",
")",
":",
"fields",
"=",
"[",
"f",
"for",
"f",
"in",
"get_fields",
"(",
"config",
")",
"]",
"if",
"len",
"(",
"fields",
")",
"!=",
"len",
"(",
"set",
"(",
"fields",
")",
")",
":",
"raise",
"InvalidConfigException",
"(",
"\"Invalid configuration file - %d duplicate field names\"",
"%",
"len",
"(",
"fields",
")",
"-",
"len",
"(",
"set",
"(",
"fields",
")",
")",
")",
"return",
"True"
] | Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.
:param config: The configuration file that contains the specification of the extractor
:return: True if config is valid, else raises a exception that specifies the correction to be made | [
"Validates",
"the",
"extractor",
"configuration",
"file",
".",
"Ensures",
"that",
"there",
"are",
"no",
"duplicate",
"field",
"names",
"etc",
"."
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L61-L74 | train | 236,548 |
AlexMathew/scrapple | scrapple/utils/config.py | get_fields | def get_fields(config):
"""
Recursive generator that yields the field names in the config file
:param config: The configuration file that contains the specification of the extractor
:return: The field names in the config file, through a generator
"""
for data in config['scraping']['data']:
if data['field'] != '':
yield data['field']
if 'next' in config['scraping']:
for n in config['scraping']['next']:
for f in get_fields(n):
yield f | python | def get_fields(config):
"""
Recursive generator that yields the field names in the config file
:param config: The configuration file that contains the specification of the extractor
:return: The field names in the config file, through a generator
"""
for data in config['scraping']['data']:
if data['field'] != '':
yield data['field']
if 'next' in config['scraping']:
for n in config['scraping']['next']:
for f in get_fields(n):
yield f | [
"def",
"get_fields",
"(",
"config",
")",
":",
"for",
"data",
"in",
"config",
"[",
"'scraping'",
"]",
"[",
"'data'",
"]",
":",
"if",
"data",
"[",
"'field'",
"]",
"!=",
"''",
":",
"yield",
"data",
"[",
"'field'",
"]",
"if",
"'next'",
"in",
"config",
"[",
"'scraping'",
"]",
":",
"for",
"n",
"in",
"config",
"[",
"'scraping'",
"]",
"[",
"'next'",
"]",
":",
"for",
"f",
"in",
"get_fields",
"(",
"n",
")",
":",
"yield",
"f"
] | Recursive generator that yields the field names in the config file
:param config: The configuration file that contains the specification of the extractor
:return: The field names in the config file, through a generator | [
"Recursive",
"generator",
"that",
"yields",
"the",
"field",
"names",
"in",
"the",
"config",
"file"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L77-L91 | train | 236,549 |
AlexMathew/scrapple | scrapple/utils/config.py | extract_fieldnames | def extract_fieldnames(config):
"""
Function to return a list of unique field names from the config file
:param config: The configuration file that contains the specification of the extractor
:return: A list of field names from the config file
"""
fields = []
for x in get_fields(config):
if x in fields:
fields.append(x + '_' + str(fields.count(x) + 1))
else:
fields.append(x)
return fields | python | def extract_fieldnames(config):
"""
Function to return a list of unique field names from the config file
:param config: The configuration file that contains the specification of the extractor
:return: A list of field names from the config file
"""
fields = []
for x in get_fields(config):
if x in fields:
fields.append(x + '_' + str(fields.count(x) + 1))
else:
fields.append(x)
return fields | [
"def",
"extract_fieldnames",
"(",
"config",
")",
":",
"fields",
"=",
"[",
"]",
"for",
"x",
"in",
"get_fields",
"(",
"config",
")",
":",
"if",
"x",
"in",
"fields",
":",
"fields",
".",
"append",
"(",
"x",
"+",
"'_'",
"+",
"str",
"(",
"fields",
".",
"count",
"(",
"x",
")",
"+",
"1",
")",
")",
"else",
":",
"fields",
".",
"append",
"(",
"x",
")",
"return",
"fields"
] | Function to return a list of unique field names from the config file
:param config: The configuration file that contains the specification of the extractor
:return: A list of field names from the config file | [
"Function",
"to",
"return",
"a",
"list",
"of",
"unique",
"field",
"names",
"from",
"the",
"config",
"file"
] | eeb604601b155d6cc7e035855ff4d3f48f8bed74 | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L94-L108 | train | 236,550 |
CQCL/pytket | pytket/qiskit/tket_pass.py | TketPass.run | def run(self, dag:DAGCircuit) -> DAGCircuit:
"""
Run one pass of optimisation on the circuit and route for the given backend.
:param dag: The circuit to optimise and route
:return: The modified circuit
"""
circ = dagcircuit_to_tk(dag, _DROP_CONDS=self.DROP_CONDS,_BOX_UNKNOWN=self.BOX_UNKNOWN)
circ, circlay = self.process_circ(circ)
newdag = tk_to_dagcircuit(circ)
newdag.name = dag.name
finlay = dict()
for i, qi in enumerate(circlay):
finlay[('q', i)] = ('q', qi)
newdag.final_layout = finlay
return newdag | python | def run(self, dag:DAGCircuit) -> DAGCircuit:
"""
Run one pass of optimisation on the circuit and route for the given backend.
:param dag: The circuit to optimise and route
:return: The modified circuit
"""
circ = dagcircuit_to_tk(dag, _DROP_CONDS=self.DROP_CONDS,_BOX_UNKNOWN=self.BOX_UNKNOWN)
circ, circlay = self.process_circ(circ)
newdag = tk_to_dagcircuit(circ)
newdag.name = dag.name
finlay = dict()
for i, qi in enumerate(circlay):
finlay[('q', i)] = ('q', qi)
newdag.final_layout = finlay
return newdag | [
"def",
"run",
"(",
"self",
",",
"dag",
":",
"DAGCircuit",
")",
"->",
"DAGCircuit",
":",
"circ",
"=",
"dagcircuit_to_tk",
"(",
"dag",
",",
"_DROP_CONDS",
"=",
"self",
".",
"DROP_CONDS",
",",
"_BOX_UNKNOWN",
"=",
"self",
".",
"BOX_UNKNOWN",
")",
"circ",
",",
"circlay",
"=",
"self",
".",
"process_circ",
"(",
"circ",
")",
"newdag",
"=",
"tk_to_dagcircuit",
"(",
"circ",
")",
"newdag",
".",
"name",
"=",
"dag",
".",
"name",
"finlay",
"=",
"dict",
"(",
")",
"for",
"i",
",",
"qi",
"in",
"enumerate",
"(",
"circlay",
")",
":",
"finlay",
"[",
"(",
"'q'",
",",
"i",
")",
"]",
"=",
"(",
"'q'",
",",
"qi",
")",
"newdag",
".",
"final_layout",
"=",
"finlay",
"return",
"newdag"
] | Run one pass of optimisation on the circuit and route for the given backend.
:param dag: The circuit to optimise and route
:return: The modified circuit | [
"Run",
"one",
"pass",
"of",
"optimisation",
"on",
"the",
"circuit",
"and",
"route",
"for",
"the",
"given",
"backend",
"."
] | ae68f7402dcb5fb45221832cc6185d267bdd7a71 | https://github.com/CQCL/pytket/blob/ae68f7402dcb5fb45221832cc6185d267bdd7a71/pytket/qiskit/tket_pass.py#L51-L68 | train | 236,551 |
CQCL/pytket | pytket/cirq/qubits.py | _sort_row_col | def _sort_row_col(qubits: Iterator[GridQubit]) -> List[GridQubit]:
"""Sort grid qubits first by row then by column"""
return sorted(qubits, key=lambda x: (x.row, x.col)) | python | def _sort_row_col(qubits: Iterator[GridQubit]) -> List[GridQubit]:
"""Sort grid qubits first by row then by column"""
return sorted(qubits, key=lambda x: (x.row, x.col)) | [
"def",
"_sort_row_col",
"(",
"qubits",
":",
"Iterator",
"[",
"GridQubit",
"]",
")",
"->",
"List",
"[",
"GridQubit",
"]",
":",
"return",
"sorted",
"(",
"qubits",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"row",
",",
"x",
".",
"col",
")",
")"
] | Sort grid qubits first by row then by column | [
"Sort",
"grid",
"qubits",
"first",
"by",
"row",
"then",
"by",
"column"
] | ae68f7402dcb5fb45221832cc6185d267bdd7a71 | https://github.com/CQCL/pytket/blob/ae68f7402dcb5fb45221832cc6185d267bdd7a71/pytket/cirq/qubits.py#L51-L54 | train | 236,552 |
CQCL/pytket | pytket/chemistry/aqua/qse.py | QSE.print_setting | def print_setting(self) -> str:
"""
Presents the QSE settings as a string.
:return: The formatted settings of the QSE instance
"""
ret = "\n"
ret += "==================== Setting of {} ============================\n".format(self.configuration['name'])
ret += "{}".format(self.setting)
ret += "===============================================================\n"
ret += "{}".format(self._var_form.setting)
ret += "===============================================================\n"
return ret | python | def print_setting(self) -> str:
"""
Presents the QSE settings as a string.
:return: The formatted settings of the QSE instance
"""
ret = "\n"
ret += "==================== Setting of {} ============================\n".format(self.configuration['name'])
ret += "{}".format(self.setting)
ret += "===============================================================\n"
ret += "{}".format(self._var_form.setting)
ret += "===============================================================\n"
return ret | [
"def",
"print_setting",
"(",
"self",
")",
"->",
"str",
":",
"ret",
"=",
"\"\\n\"",
"ret",
"+=",
"\"==================== Setting of {} ============================\\n\"",
".",
"format",
"(",
"self",
".",
"configuration",
"[",
"'name'",
"]",
")",
"ret",
"+=",
"\"{}\"",
".",
"format",
"(",
"self",
".",
"setting",
")",
"ret",
"+=",
"\"===============================================================\\n\"",
"ret",
"+=",
"\"{}\"",
".",
"format",
"(",
"self",
".",
"_var_form",
".",
"setting",
")",
"ret",
"+=",
"\"===============================================================\\n\"",
"return",
"ret"
] | Presents the QSE settings as a string.
:return: The formatted settings of the QSE instance | [
"Presents",
"the",
"QSE",
"settings",
"as",
"a",
"string",
"."
] | ae68f7402dcb5fb45221832cc6185d267bdd7a71 | https://github.com/CQCL/pytket/blob/ae68f7402dcb5fb45221832cc6185d267bdd7a71/pytket/chemistry/aqua/qse.py#L117-L129 | train | 236,553 |
CQCL/pytket | pytket/chemistry/aqua/qse.py | QSE._energy_evaluation | def _energy_evaluation(self, operator):
"""
Evaluate the energy of the current input circuit with respect to the given operator.
:param operator: Hamiltonian of the system
:return: Energy of the Hamiltonian
"""
if self._quantum_state is not None:
input_circuit = self._quantum_state
else:
input_circuit = [self.opt_circuit]
if operator._paulis:
mean_energy, std_energy = operator.evaluate_with_result(self._operator_mode, input_circuit,
self._quantum_instance.backend, self.ret)
else:
mean_energy = 0.0
std_energy = 0.0
operator.disable_summarize_circuits()
logger.debug('Energy evaluation {} returned {}'.format(self._eval_count, np.real(mean_energy)))
return np.real(mean_energy), np.real(std_energy) | python | def _energy_evaluation(self, operator):
"""
Evaluate the energy of the current input circuit with respect to the given operator.
:param operator: Hamiltonian of the system
:return: Energy of the Hamiltonian
"""
if self._quantum_state is not None:
input_circuit = self._quantum_state
else:
input_circuit = [self.opt_circuit]
if operator._paulis:
mean_energy, std_energy = operator.evaluate_with_result(self._operator_mode, input_circuit,
self._quantum_instance.backend, self.ret)
else:
mean_energy = 0.0
std_energy = 0.0
operator.disable_summarize_circuits()
logger.debug('Energy evaluation {} returned {}'.format(self._eval_count, np.real(mean_energy)))
return np.real(mean_energy), np.real(std_energy) | [
"def",
"_energy_evaluation",
"(",
"self",
",",
"operator",
")",
":",
"if",
"self",
".",
"_quantum_state",
"is",
"not",
"None",
":",
"input_circuit",
"=",
"self",
".",
"_quantum_state",
"else",
":",
"input_circuit",
"=",
"[",
"self",
".",
"opt_circuit",
"]",
"if",
"operator",
".",
"_paulis",
":",
"mean_energy",
",",
"std_energy",
"=",
"operator",
".",
"evaluate_with_result",
"(",
"self",
".",
"_operator_mode",
",",
"input_circuit",
",",
"self",
".",
"_quantum_instance",
".",
"backend",
",",
"self",
".",
"ret",
")",
"else",
":",
"mean_energy",
"=",
"0.0",
"std_energy",
"=",
"0.0",
"operator",
".",
"disable_summarize_circuits",
"(",
")",
"logger",
".",
"debug",
"(",
"'Energy evaluation {} returned {}'",
".",
"format",
"(",
"self",
".",
"_eval_count",
",",
"np",
".",
"real",
"(",
"mean_energy",
")",
")",
")",
"return",
"np",
".",
"real",
"(",
"mean_energy",
")",
",",
"np",
".",
"real",
"(",
"std_energy",
")"
] | Evaluate the energy of the current input circuit with respect to the given operator.
:param operator: Hamiltonian of the system
:return: Energy of the Hamiltonian | [
"Evaluate",
"the",
"energy",
"of",
"the",
"current",
"input",
"circuit",
"with",
"respect",
"to",
"the",
"given",
"operator",
"."
] | ae68f7402dcb5fb45221832cc6185d267bdd7a71 | https://github.com/CQCL/pytket/blob/ae68f7402dcb5fb45221832cc6185d267bdd7a71/pytket/chemistry/aqua/qse.py#L131-L151 | train | 236,554 |
CQCL/pytket | pytket/chemistry/aqua/qse.py | QSE._run | def _run(self) -> dict:
"""
Runs the QSE algorithm to compute the eigenvalues of the Hamiltonian.
:return: Dictionary of results
"""
if not self._quantum_instance.is_statevector:
raise AquaError("Can only calculate state for QSE with statevector backends")
ret = self._quantum_instance.execute(self.opt_circuit)
self.ret = ret
self._eval_count = 0
self._solve()
self._ret['eval_count'] = self._eval_count
self._ret['eval_time'] = self._eval_time
return self._ret | python | def _run(self) -> dict:
"""
Runs the QSE algorithm to compute the eigenvalues of the Hamiltonian.
:return: Dictionary of results
"""
if not self._quantum_instance.is_statevector:
raise AquaError("Can only calculate state for QSE with statevector backends")
ret = self._quantum_instance.execute(self.opt_circuit)
self.ret = ret
self._eval_count = 0
self._solve()
self._ret['eval_count'] = self._eval_count
self._ret['eval_time'] = self._eval_time
return self._ret | [
"def",
"_run",
"(",
"self",
")",
"->",
"dict",
":",
"if",
"not",
"self",
".",
"_quantum_instance",
".",
"is_statevector",
":",
"raise",
"AquaError",
"(",
"\"Can only calculate state for QSE with statevector backends\"",
")",
"ret",
"=",
"self",
".",
"_quantum_instance",
".",
"execute",
"(",
"self",
".",
"opt_circuit",
")",
"self",
".",
"ret",
"=",
"ret",
"self",
".",
"_eval_count",
"=",
"0",
"self",
".",
"_solve",
"(",
")",
"self",
".",
"_ret",
"[",
"'eval_count'",
"]",
"=",
"self",
".",
"_eval_count",
"self",
".",
"_ret",
"[",
"'eval_time'",
"]",
"=",
"self",
".",
"_eval_time",
"return",
"self",
".",
"_ret"
] | Runs the QSE algorithm to compute the eigenvalues of the Hamiltonian.
:return: Dictionary of results | [
"Runs",
"the",
"QSE",
"algorithm",
"to",
"compute",
"the",
"eigenvalues",
"of",
"the",
"Hamiltonian",
"."
] | ae68f7402dcb5fb45221832cc6185d267bdd7a71 | https://github.com/CQCL/pytket/blob/ae68f7402dcb5fb45221832cc6185d267bdd7a71/pytket/chemistry/aqua/qse.py#L260-L274 | train | 236,555 |
bkabrda/flask-whooshee | flask_whooshee.py | WhoosheeQuery.whooshee_search | def whooshee_search(self, search_string, group=whoosh.qparser.OrGroup, whoosheer=None,
match_substrings=True, limit=None, order_by_relevance=10):
"""Do a fulltext search on the query.
Returns a query filtered with results of the fulltext search.
:param search_string: The string to search for.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
"""
if not whoosheer:
### inspiration taken from flask-WhooshAlchemy
# find out all entities in join
entities = set()
# directly queried entities
for cd in self.column_descriptions:
entities.add(cd['type'])
# joined entities
if self._join_entities and isinstance(self._join_entities[0], Mapper):
# SQLAlchemy >= 0.8.0
entities.update(set([x.entity for x in self._join_entities]))
else:
# SQLAlchemy < 0.8.0
entities.update(set(self._join_entities))
# make sure we can work with aliased entities
unaliased = set()
for entity in entities:
if isinstance(entity, (AliasedClass, AliasedInsp)):
unaliased.add(inspect(entity).mapper.class_)
else:
unaliased.add(entity)
whoosheer = next(w for w in _get_config(self)['whoosheers']
if set(w.models) == unaliased)
# TODO what if unique field doesn't exist or there are multiple?
for fname, field in list(whoosheer.schema._fields.items()):
if field.unique:
uniq = fname
# TODO: use something more general than id
res = whoosheer.search(search_string=search_string,
values_of=uniq,
group=group,
match_substrings=match_substrings,
limit=limit)
if not res:
return self.filter(text('null'))
# transform unique field name into model attribute field
attr = None
if hasattr(whoosheer, '_is_model_whoosheer'):
attr = getattr(whoosheer.models[0], uniq)
else:
# non-model whoosheers must have unique field named
# model.__name__.lower + '_' + attr
for m in whoosheer.models:
if m.__name__.lower() == uniq.split('_')[0]:
attr = getattr(m, uniq.split('_')[1])
search_query = self.filter(attr.in_(res))
if order_by_relevance < 0: # we want all returned rows ordered
search_query = search_query.order_by(sqlalchemy.sql.expression.case(
[(attr == uniq_val, index) for index, uniq_val in enumerate(res)],
))
elif order_by_relevance > 0: # we want only number of specified rows ordered
search_query = search_query.order_by(sqlalchemy.sql.expression.case(
[(attr == uniq_val, index) for index, uniq_val in enumerate(res) if index < order_by_relevance],
else_=order_by_relevance
))
else: # no ordering
pass
return search_query | python | def whooshee_search(self, search_string, group=whoosh.qparser.OrGroup, whoosheer=None,
match_substrings=True, limit=None, order_by_relevance=10):
"""Do a fulltext search on the query.
Returns a query filtered with results of the fulltext search.
:param search_string: The string to search for.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
"""
if not whoosheer:
### inspiration taken from flask-WhooshAlchemy
# find out all entities in join
entities = set()
# directly queried entities
for cd in self.column_descriptions:
entities.add(cd['type'])
# joined entities
if self._join_entities and isinstance(self._join_entities[0], Mapper):
# SQLAlchemy >= 0.8.0
entities.update(set([x.entity for x in self._join_entities]))
else:
# SQLAlchemy < 0.8.0
entities.update(set(self._join_entities))
# make sure we can work with aliased entities
unaliased = set()
for entity in entities:
if isinstance(entity, (AliasedClass, AliasedInsp)):
unaliased.add(inspect(entity).mapper.class_)
else:
unaliased.add(entity)
whoosheer = next(w for w in _get_config(self)['whoosheers']
if set(w.models) == unaliased)
# TODO what if unique field doesn't exist or there are multiple?
for fname, field in list(whoosheer.schema._fields.items()):
if field.unique:
uniq = fname
# TODO: use something more general than id
res = whoosheer.search(search_string=search_string,
values_of=uniq,
group=group,
match_substrings=match_substrings,
limit=limit)
if not res:
return self.filter(text('null'))
# transform unique field name into model attribute field
attr = None
if hasattr(whoosheer, '_is_model_whoosheer'):
attr = getattr(whoosheer.models[0], uniq)
else:
# non-model whoosheers must have unique field named
# model.__name__.lower + '_' + attr
for m in whoosheer.models:
if m.__name__.lower() == uniq.split('_')[0]:
attr = getattr(m, uniq.split('_')[1])
search_query = self.filter(attr.in_(res))
if order_by_relevance < 0: # we want all returned rows ordered
search_query = search_query.order_by(sqlalchemy.sql.expression.case(
[(attr == uniq_val, index) for index, uniq_val in enumerate(res)],
))
elif order_by_relevance > 0: # we want only number of specified rows ordered
search_query = search_query.order_by(sqlalchemy.sql.expression.case(
[(attr == uniq_val, index) for index, uniq_val in enumerate(res) if index < order_by_relevance],
else_=order_by_relevance
))
else: # no ordering
pass
return search_query | [
"def",
"whooshee_search",
"(",
"self",
",",
"search_string",
",",
"group",
"=",
"whoosh",
".",
"qparser",
".",
"OrGroup",
",",
"whoosheer",
"=",
"None",
",",
"match_substrings",
"=",
"True",
",",
"limit",
"=",
"None",
",",
"order_by_relevance",
"=",
"10",
")",
":",
"if",
"not",
"whoosheer",
":",
"### inspiration taken from flask-WhooshAlchemy",
"# find out all entities in join",
"entities",
"=",
"set",
"(",
")",
"# directly queried entities",
"for",
"cd",
"in",
"self",
".",
"column_descriptions",
":",
"entities",
".",
"add",
"(",
"cd",
"[",
"'type'",
"]",
")",
"# joined entities",
"if",
"self",
".",
"_join_entities",
"and",
"isinstance",
"(",
"self",
".",
"_join_entities",
"[",
"0",
"]",
",",
"Mapper",
")",
":",
"# SQLAlchemy >= 0.8.0",
"entities",
".",
"update",
"(",
"set",
"(",
"[",
"x",
".",
"entity",
"for",
"x",
"in",
"self",
".",
"_join_entities",
"]",
")",
")",
"else",
":",
"# SQLAlchemy < 0.8.0",
"entities",
".",
"update",
"(",
"set",
"(",
"self",
".",
"_join_entities",
")",
")",
"# make sure we can work with aliased entities",
"unaliased",
"=",
"set",
"(",
")",
"for",
"entity",
"in",
"entities",
":",
"if",
"isinstance",
"(",
"entity",
",",
"(",
"AliasedClass",
",",
"AliasedInsp",
")",
")",
":",
"unaliased",
".",
"add",
"(",
"inspect",
"(",
"entity",
")",
".",
"mapper",
".",
"class_",
")",
"else",
":",
"unaliased",
".",
"add",
"(",
"entity",
")",
"whoosheer",
"=",
"next",
"(",
"w",
"for",
"w",
"in",
"_get_config",
"(",
"self",
")",
"[",
"'whoosheers'",
"]",
"if",
"set",
"(",
"w",
".",
"models",
")",
"==",
"unaliased",
")",
"# TODO what if unique field doesn't exist or there are multiple?",
"for",
"fname",
",",
"field",
"in",
"list",
"(",
"whoosheer",
".",
"schema",
".",
"_fields",
".",
"items",
"(",
")",
")",
":",
"if",
"field",
".",
"unique",
":",
"uniq",
"=",
"fname",
"# TODO: use something more general than id",
"res",
"=",
"whoosheer",
".",
"search",
"(",
"search_string",
"=",
"search_string",
",",
"values_of",
"=",
"uniq",
",",
"group",
"=",
"group",
",",
"match_substrings",
"=",
"match_substrings",
",",
"limit",
"=",
"limit",
")",
"if",
"not",
"res",
":",
"return",
"self",
".",
"filter",
"(",
"text",
"(",
"'null'",
")",
")",
"# transform unique field name into model attribute field",
"attr",
"=",
"None",
"if",
"hasattr",
"(",
"whoosheer",
",",
"'_is_model_whoosheer'",
")",
":",
"attr",
"=",
"getattr",
"(",
"whoosheer",
".",
"models",
"[",
"0",
"]",
",",
"uniq",
")",
"else",
":",
"# non-model whoosheers must have unique field named",
"# model.__name__.lower + '_' + attr",
"for",
"m",
"in",
"whoosheer",
".",
"models",
":",
"if",
"m",
".",
"__name__",
".",
"lower",
"(",
")",
"==",
"uniq",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
":",
"attr",
"=",
"getattr",
"(",
"m",
",",
"uniq",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
")",
"search_query",
"=",
"self",
".",
"filter",
"(",
"attr",
".",
"in_",
"(",
"res",
")",
")",
"if",
"order_by_relevance",
"<",
"0",
":",
"# we want all returned rows ordered",
"search_query",
"=",
"search_query",
".",
"order_by",
"(",
"sqlalchemy",
".",
"sql",
".",
"expression",
".",
"case",
"(",
"[",
"(",
"attr",
"==",
"uniq_val",
",",
"index",
")",
"for",
"index",
",",
"uniq_val",
"in",
"enumerate",
"(",
"res",
")",
"]",
",",
")",
")",
"elif",
"order_by_relevance",
">",
"0",
":",
"# we want only number of specified rows ordered",
"search_query",
"=",
"search_query",
".",
"order_by",
"(",
"sqlalchemy",
".",
"sql",
".",
"expression",
".",
"case",
"(",
"[",
"(",
"attr",
"==",
"uniq_val",
",",
"index",
")",
"for",
"index",
",",
"uniq_val",
"in",
"enumerate",
"(",
"res",
")",
"if",
"index",
"<",
"order_by_relevance",
"]",
",",
"else_",
"=",
"order_by_relevance",
")",
")",
"else",
":",
"# no ordering",
"pass",
"return",
"search_query"
] | Do a fulltext search on the query.
Returns a query filtered with results of the fulltext search.
:param search_string: The string to search for.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records. | [
"Do",
"a",
"fulltext",
"search",
"on",
"the",
"query",
".",
"Returns",
"a",
"query",
"filtered",
"with",
"results",
"of",
"the",
"fulltext",
"search",
"."
] | 773fc51ed53043bd5e92c65eadef5663845ae8c4 | https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L44-L123 | train | 236,556 |
bkabrda/flask-whooshee | flask_whooshee.py | AbstractWhoosheer.search | def search(cls, search_string, values_of='', group=whoosh.qparser.OrGroup, match_substrings=True, limit=None):
"""Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
"""
index = Whooshee.get_or_create_index(_get_app(cls), cls)
prepped_string = cls.prep_search_string(search_string, match_substrings)
with index.searcher() as searcher:
parser = whoosh.qparser.MultifieldParser(cls.schema.names(), index.schema, group=group)
query = parser.parse(prepped_string)
results = searcher.search(query, limit=limit)
if values_of:
return [x[values_of] for x in results]
return results | python | def search(cls, search_string, values_of='', group=whoosh.qparser.OrGroup, match_substrings=True, limit=None):
"""Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
"""
index = Whooshee.get_or_create_index(_get_app(cls), cls)
prepped_string = cls.prep_search_string(search_string, match_substrings)
with index.searcher() as searcher:
parser = whoosh.qparser.MultifieldParser(cls.schema.names(), index.schema, group=group)
query = parser.parse(prepped_string)
results = searcher.search(query, limit=limit)
if values_of:
return [x[values_of] for x in results]
return results | [
"def",
"search",
"(",
"cls",
",",
"search_string",
",",
"values_of",
"=",
"''",
",",
"group",
"=",
"whoosh",
".",
"qparser",
".",
"OrGroup",
",",
"match_substrings",
"=",
"True",
",",
"limit",
"=",
"None",
")",
":",
"index",
"=",
"Whooshee",
".",
"get_or_create_index",
"(",
"_get_app",
"(",
"cls",
")",
",",
"cls",
")",
"prepped_string",
"=",
"cls",
".",
"prep_search_string",
"(",
"search_string",
",",
"match_substrings",
")",
"with",
"index",
".",
"searcher",
"(",
")",
"as",
"searcher",
":",
"parser",
"=",
"whoosh",
".",
"qparser",
".",
"MultifieldParser",
"(",
"cls",
".",
"schema",
".",
"names",
"(",
")",
",",
"index",
".",
"schema",
",",
"group",
"=",
"group",
")",
"query",
"=",
"parser",
".",
"parse",
"(",
"prepped_string",
")",
"results",
"=",
"searcher",
".",
"search",
"(",
"query",
",",
"limit",
"=",
"limit",
")",
"if",
"values_of",
":",
"return",
"[",
"x",
"[",
"values_of",
"]",
"for",
"x",
"in",
"results",
"]",
"return",
"results"
] | Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records. | [
"Searches",
"the",
"fields",
"for",
"given",
"search_string",
".",
"Returns",
"the",
"found",
"records",
"if",
"values_of",
"is",
"left",
"empty",
"else",
"the",
"values",
"of",
"the",
"given",
"columns",
"."
] | 773fc51ed53043bd5e92c65eadef5663845ae8c4 | https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L138-L163 | train | 236,557 |
bkabrda/flask-whooshee | flask_whooshee.py | Whooshee.create_index | def create_index(cls, app, wh):
"""Creates and opens an index for the given whoosheer and app.
If the index already exists, it just opens it, otherwise it creates
it first.
:param app: The application instance.
:param wh: The whoosheer instance for which a index should be created.
"""
# TODO: do we really want/need to use camel casing?
# everywhere else, there is just .lower()
if app.extensions['whooshee']['memory_storage']:
storage = RamStorage()
index = storage.create_index(wh.schema)
assert index
return index
else:
index_path = os.path.join(app.extensions['whooshee']['index_path_root'],
getattr(wh, 'index_subdir', cls.camel_to_snake(wh.__name__)))
if whoosh.index.exists_in(index_path):
index = whoosh.index.open_dir(index_path)
else:
if not os.path.exists(index_path):
os.makedirs(index_path)
index = whoosh.index.create_in(index_path, wh.schema)
return index | python | def create_index(cls, app, wh):
"""Creates and opens an index for the given whoosheer and app.
If the index already exists, it just opens it, otherwise it creates
it first.
:param app: The application instance.
:param wh: The whoosheer instance for which a index should be created.
"""
# TODO: do we really want/need to use camel casing?
# everywhere else, there is just .lower()
if app.extensions['whooshee']['memory_storage']:
storage = RamStorage()
index = storage.create_index(wh.schema)
assert index
return index
else:
index_path = os.path.join(app.extensions['whooshee']['index_path_root'],
getattr(wh, 'index_subdir', cls.camel_to_snake(wh.__name__)))
if whoosh.index.exists_in(index_path):
index = whoosh.index.open_dir(index_path)
else:
if not os.path.exists(index_path):
os.makedirs(index_path)
index = whoosh.index.create_in(index_path, wh.schema)
return index | [
"def",
"create_index",
"(",
"cls",
",",
"app",
",",
"wh",
")",
":",
"# TODO: do we really want/need to use camel casing?",
"# everywhere else, there is just .lower()",
"if",
"app",
".",
"extensions",
"[",
"'whooshee'",
"]",
"[",
"'memory_storage'",
"]",
":",
"storage",
"=",
"RamStorage",
"(",
")",
"index",
"=",
"storage",
".",
"create_index",
"(",
"wh",
".",
"schema",
")",
"assert",
"index",
"return",
"index",
"else",
":",
"index_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"extensions",
"[",
"'whooshee'",
"]",
"[",
"'index_path_root'",
"]",
",",
"getattr",
"(",
"wh",
",",
"'index_subdir'",
",",
"cls",
".",
"camel_to_snake",
"(",
"wh",
".",
"__name__",
")",
")",
")",
"if",
"whoosh",
".",
"index",
".",
"exists_in",
"(",
"index_path",
")",
":",
"index",
"=",
"whoosh",
".",
"index",
".",
"open_dir",
"(",
"index_path",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"index_path",
")",
":",
"os",
".",
"makedirs",
"(",
"index_path",
")",
"index",
"=",
"whoosh",
".",
"index",
".",
"create_in",
"(",
"index_path",
",",
"wh",
".",
"schema",
")",
"return",
"index"
] | Creates and opens an index for the given whoosheer and app.
If the index already exists, it just opens it, otherwise it creates
it first.
:param app: The application instance.
:param wh: The whoosheer instance for which a index should be created. | [
"Creates",
"and",
"opens",
"an",
"index",
"for",
"the",
"given",
"whoosheer",
"and",
"app",
".",
"If",
"the",
"index",
"already",
"exists",
"it",
"just",
"opens",
"it",
"otherwise",
"it",
"creates",
"it",
"first",
"."
] | 773fc51ed53043bd5e92c65eadef5663845ae8c4 | https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L375-L399 | train | 236,558 |
bkabrda/flask-whooshee | flask_whooshee.py | Whooshee.get_or_create_index | def get_or_create_index(cls, app, wh):
"""Gets a previously cached index or creates a new one for the
given app and whoosheer.
:param app: The application instance.
:param wh: The whoosheer instance for which the index should be
retrieved or created.
"""
if wh in app.extensions['whooshee']['whoosheers_indexes']:
return app.extensions['whooshee']['whoosheers_indexes'][wh]
index = cls.create_index(app, wh)
app.extensions['whooshee']['whoosheers_indexes'][wh] = index
return index | python | def get_or_create_index(cls, app, wh):
"""Gets a previously cached index or creates a new one for the
given app and whoosheer.
:param app: The application instance.
:param wh: The whoosheer instance for which the index should be
retrieved or created.
"""
if wh in app.extensions['whooshee']['whoosheers_indexes']:
return app.extensions['whooshee']['whoosheers_indexes'][wh]
index = cls.create_index(app, wh)
app.extensions['whooshee']['whoosheers_indexes'][wh] = index
return index | [
"def",
"get_or_create_index",
"(",
"cls",
",",
"app",
",",
"wh",
")",
":",
"if",
"wh",
"in",
"app",
".",
"extensions",
"[",
"'whooshee'",
"]",
"[",
"'whoosheers_indexes'",
"]",
":",
"return",
"app",
".",
"extensions",
"[",
"'whooshee'",
"]",
"[",
"'whoosheers_indexes'",
"]",
"[",
"wh",
"]",
"index",
"=",
"cls",
".",
"create_index",
"(",
"app",
",",
"wh",
")",
"app",
".",
"extensions",
"[",
"'whooshee'",
"]",
"[",
"'whoosheers_indexes'",
"]",
"[",
"wh",
"]",
"=",
"index",
"return",
"index"
] | Gets a previously cached index or creates a new one for the
given app and whoosheer.
:param app: The application instance.
:param wh: The whoosheer instance for which the index should be
retrieved or created. | [
"Gets",
"a",
"previously",
"cached",
"index",
"or",
"creates",
"a",
"new",
"one",
"for",
"the",
"given",
"app",
"and",
"whoosheer",
"."
] | 773fc51ed53043bd5e92c65eadef5663845ae8c4 | https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L410-L422 | train | 236,559 |
bkabrda/flask-whooshee | flask_whooshee.py | Whooshee.on_commit | def on_commit(self, changes):
"""Method that gets called when a model is changed. This serves
to do the actual index writing.
"""
if _get_config(self)['enable_indexing'] is False:
return None
for wh in self.whoosheers:
if not wh.auto_update:
continue
writer = None
for change in changes:
if change[0].__class__ in wh.models:
method_name = '{0}_{1}'.format(change[1], change[0].__class__.__name__.lower())
method = getattr(wh, method_name, None)
if method:
if not writer:
writer = type(self).get_or_create_index(_get_app(self), wh).\
writer(timeout=_get_config(self)['writer_timeout'])
method(writer, change[0])
if writer:
writer.commit() | python | def on_commit(self, changes):
"""Method that gets called when a model is changed. This serves
to do the actual index writing.
"""
if _get_config(self)['enable_indexing'] is False:
return None
for wh in self.whoosheers:
if not wh.auto_update:
continue
writer = None
for change in changes:
if change[0].__class__ in wh.models:
method_name = '{0}_{1}'.format(change[1], change[0].__class__.__name__.lower())
method = getattr(wh, method_name, None)
if method:
if not writer:
writer = type(self).get_or_create_index(_get_app(self), wh).\
writer(timeout=_get_config(self)['writer_timeout'])
method(writer, change[0])
if writer:
writer.commit() | [
"def",
"on_commit",
"(",
"self",
",",
"changes",
")",
":",
"if",
"_get_config",
"(",
"self",
")",
"[",
"'enable_indexing'",
"]",
"is",
"False",
":",
"return",
"None",
"for",
"wh",
"in",
"self",
".",
"whoosheers",
":",
"if",
"not",
"wh",
".",
"auto_update",
":",
"continue",
"writer",
"=",
"None",
"for",
"change",
"in",
"changes",
":",
"if",
"change",
"[",
"0",
"]",
".",
"__class__",
"in",
"wh",
".",
"models",
":",
"method_name",
"=",
"'{0}_{1}'",
".",
"format",
"(",
"change",
"[",
"1",
"]",
",",
"change",
"[",
"0",
"]",
".",
"__class__",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"method",
"=",
"getattr",
"(",
"wh",
",",
"method_name",
",",
"None",
")",
"if",
"method",
":",
"if",
"not",
"writer",
":",
"writer",
"=",
"type",
"(",
"self",
")",
".",
"get_or_create_index",
"(",
"_get_app",
"(",
"self",
")",
",",
"wh",
")",
".",
"writer",
"(",
"timeout",
"=",
"_get_config",
"(",
"self",
")",
"[",
"'writer_timeout'",
"]",
")",
"method",
"(",
"writer",
",",
"change",
"[",
"0",
"]",
")",
"if",
"writer",
":",
"writer",
".",
"commit",
"(",
")"
] | Method that gets called when a model is changed. This serves
to do the actual index writing. | [
"Method",
"that",
"gets",
"called",
"when",
"a",
"model",
"is",
"changed",
".",
"This",
"serves",
"to",
"do",
"the",
"actual",
"index",
"writing",
"."
] | 773fc51ed53043bd5e92c65eadef5663845ae8c4 | https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L433-L454 | train | 236,560 |
bkabrda/flask-whooshee | flask_whooshee.py | Whooshee.reindex | def reindex(self):
"""Reindex all data
This method retrieves all the data from the registered models and
calls the ``update_<model>()`` function for every instance of such
model.
"""
for wh in self.whoosheers:
index = type(self).get_or_create_index(_get_app(self), wh)
writer = index.writer(timeout=_get_config(self)['writer_timeout'])
for model in wh.models:
method_name = "{0}_{1}".format(UPDATE_KWD, model.__name__.lower())
for item in model.query.all():
getattr(wh, method_name)(writer, item)
writer.commit() | python | def reindex(self):
"""Reindex all data
This method retrieves all the data from the registered models and
calls the ``update_<model>()`` function for every instance of such
model.
"""
for wh in self.whoosheers:
index = type(self).get_or_create_index(_get_app(self), wh)
writer = index.writer(timeout=_get_config(self)['writer_timeout'])
for model in wh.models:
method_name = "{0}_{1}".format(UPDATE_KWD, model.__name__.lower())
for item in model.query.all():
getattr(wh, method_name)(writer, item)
writer.commit() | [
"def",
"reindex",
"(",
"self",
")",
":",
"for",
"wh",
"in",
"self",
".",
"whoosheers",
":",
"index",
"=",
"type",
"(",
"self",
")",
".",
"get_or_create_index",
"(",
"_get_app",
"(",
"self",
")",
",",
"wh",
")",
"writer",
"=",
"index",
".",
"writer",
"(",
"timeout",
"=",
"_get_config",
"(",
"self",
")",
"[",
"'writer_timeout'",
"]",
")",
"for",
"model",
"in",
"wh",
".",
"models",
":",
"method_name",
"=",
"\"{0}_{1}\"",
".",
"format",
"(",
"UPDATE_KWD",
",",
"model",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"for",
"item",
"in",
"model",
".",
"query",
".",
"all",
"(",
")",
":",
"getattr",
"(",
"wh",
",",
"method_name",
")",
"(",
"writer",
",",
"item",
")",
"writer",
".",
"commit",
"(",
")"
] | Reindex all data
This method retrieves all the data from the registered models and
calls the ``update_<model>()`` function for every instance of such
model. | [
"Reindex",
"all",
"data"
] | 773fc51ed53043bd5e92c65eadef5663845ae8c4 | https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L456-L470 | train | 236,561 |
spry-group/python-vultr | examples/basic_list.py | dump_info | def dump_info():
'''Shows various details about the account & servers'''
vultr = Vultr(API_KEY)
try:
logging.info('Listing account info:\n%s', dumps(
vultr.account.info(), indent=2
))
logging.info('Listing apps:\n%s', dumps(
vultr.app.list(), indent=2
))
logging.info('Listing backups:\n%s', dumps(
vultr.backup.list(), indent=2
))
logging.info('Listing DNS:\n%s', dumps(
vultr.dns.list(), indent=2
))
logging.info('Listing ISOs:\n%s', dumps(
vultr.iso.list(), indent=2
))
logging.info('Listing OSs:\n%s', dumps(
vultr.os.list(), indent=2
))
logging.info('Listing plans:\n%s', dumps(
vultr.plans.list(), indent=2
))
logging.info('Listing regions:\n%s', dumps(
vultr.regions.list(), indent=2
))
logging.info('Listing servers:\n%s', dumps(
vultr.server.list(), indent=2
))
logging.info('Listing snapshots:\n%s', dumps(
vultr.snapshot.list(), indent=2
))
logging.info('Listing SSH keys:\n%s', dumps(
vultr.sshkey.list(), indent=2
))
logging.info('Listing startup scripts:\n%s', dumps(
vultr.startupscript.list(), indent=2
))
except VultrError as ex:
logging.error('VultrError: %s', ex) | python | def dump_info():
'''Shows various details about the account & servers'''
vultr = Vultr(API_KEY)
try:
logging.info('Listing account info:\n%s', dumps(
vultr.account.info(), indent=2
))
logging.info('Listing apps:\n%s', dumps(
vultr.app.list(), indent=2
))
logging.info('Listing backups:\n%s', dumps(
vultr.backup.list(), indent=2
))
logging.info('Listing DNS:\n%s', dumps(
vultr.dns.list(), indent=2
))
logging.info('Listing ISOs:\n%s', dumps(
vultr.iso.list(), indent=2
))
logging.info('Listing OSs:\n%s', dumps(
vultr.os.list(), indent=2
))
logging.info('Listing plans:\n%s', dumps(
vultr.plans.list(), indent=2
))
logging.info('Listing regions:\n%s', dumps(
vultr.regions.list(), indent=2
))
logging.info('Listing servers:\n%s', dumps(
vultr.server.list(), indent=2
))
logging.info('Listing snapshots:\n%s', dumps(
vultr.snapshot.list(), indent=2
))
logging.info('Listing SSH keys:\n%s', dumps(
vultr.sshkey.list(), indent=2
))
logging.info('Listing startup scripts:\n%s', dumps(
vultr.startupscript.list(), indent=2
))
except VultrError as ex:
logging.error('VultrError: %s', ex) | [
"def",
"dump_info",
"(",
")",
":",
"vultr",
"=",
"Vultr",
"(",
"API_KEY",
")",
"try",
":",
"logging",
".",
"info",
"(",
"'Listing account info:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"account",
".",
"info",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing apps:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"app",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing backups:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"backup",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing DNS:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"dns",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing ISOs:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"iso",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing OSs:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"os",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing plans:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"plans",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing regions:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"regions",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing servers:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"server",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing snapshots:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"snapshot",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing SSH keys:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"sshkey",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"logging",
".",
"info",
"(",
"'Listing startup scripts:\\n%s'",
",",
"dumps",
"(",
"vultr",
".",
"startupscript",
".",
"list",
"(",
")",
",",
"indent",
"=",
"2",
")",
")",
"except",
"VultrError",
"as",
"ex",
":",
"logging",
".",
"error",
"(",
"'VultrError: %s'",
",",
"ex",
")"
] | Shows various details about the account & servers | [
"Shows",
"various",
"details",
"about",
"the",
"account",
"&",
"servers"
] | bad1448f1df7b5dba70fd3d11434f32580f0b850 | https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/examples/basic_list.py#L19-L72 | train | 236,562 |
spry-group/python-vultr | vultr/utils.py | update_params | def update_params(params, updates):
'''Merges updates into params'''
params = params.copy() if isinstance(params, dict) else dict()
params.update(updates)
return params | python | def update_params(params, updates):
'''Merges updates into params'''
params = params.copy() if isinstance(params, dict) else dict()
params.update(updates)
return params | [
"def",
"update_params",
"(",
"params",
",",
"updates",
")",
":",
"params",
"=",
"params",
".",
"copy",
"(",
")",
"if",
"isinstance",
"(",
"params",
",",
"dict",
")",
"else",
"dict",
"(",
")",
"params",
".",
"update",
"(",
"updates",
")",
"return",
"params"
] | Merges updates into params | [
"Merges",
"updates",
"into",
"params"
] | bad1448f1df7b5dba70fd3d11434f32580f0b850 | https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L94-L98 | train | 236,563 |
spry-group/python-vultr | vultr/utils.py | VultrBase._request_get_helper | def _request_get_helper(self, url, params=None):
'''API GET request helper'''
if not isinstance(params, dict):
params = dict()
if self.api_key:
params['api_key'] = self.api_key
return requests.get(url, params=params, timeout=60) | python | def _request_get_helper(self, url, params=None):
'''API GET request helper'''
if not isinstance(params, dict):
params = dict()
if self.api_key:
params['api_key'] = self.api_key
return requests.get(url, params=params, timeout=60) | [
"def",
"_request_get_helper",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"params",
"=",
"dict",
"(",
")",
"if",
"self",
".",
"api_key",
":",
"params",
"[",
"'api_key'",
"]",
"=",
"self",
".",
"api_key",
"return",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
",",
"timeout",
"=",
"60",
")"
] | API GET request helper | [
"API",
"GET",
"request",
"helper"
] | bad1448f1df7b5dba70fd3d11434f32580f0b850 | https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L26-L33 | train | 236,564 |
spry-group/python-vultr | vultr/utils.py | VultrBase._request_post_helper | def _request_post_helper(self, url, params=None):
'''API POST helper'''
if self.api_key:
query = {'api_key': self.api_key}
return requests.post(url, params=query, data=params, timeout=60) | python | def _request_post_helper(self, url, params=None):
'''API POST helper'''
if self.api_key:
query = {'api_key': self.api_key}
return requests.post(url, params=query, data=params, timeout=60) | [
"def",
"_request_post_helper",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
")",
":",
"if",
"self",
".",
"api_key",
":",
"query",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
"}",
"return",
"requests",
".",
"post",
"(",
"url",
",",
"params",
"=",
"query",
",",
"data",
"=",
"params",
",",
"timeout",
"=",
"60",
")"
] | API POST helper | [
"API",
"POST",
"helper"
] | bad1448f1df7b5dba70fd3d11434f32580f0b850 | https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L35-L39 | train | 236,565 |
spry-group/python-vultr | vultr/utils.py | VultrBase._request_helper | def _request_helper(self, url, params, method):
'''API request helper method'''
try:
if method == 'POST':
return self._request_post_helper(url, params)
elif method == 'GET':
return self._request_get_helper(url, params)
raise VultrError('Unsupported method %s' % method)
except requests.RequestException as ex:
raise RuntimeError(ex) | python | def _request_helper(self, url, params, method):
'''API request helper method'''
try:
if method == 'POST':
return self._request_post_helper(url, params)
elif method == 'GET':
return self._request_get_helper(url, params)
raise VultrError('Unsupported method %s' % method)
except requests.RequestException as ex:
raise RuntimeError(ex) | [
"def",
"_request_helper",
"(",
"self",
",",
"url",
",",
"params",
",",
"method",
")",
":",
"try",
":",
"if",
"method",
"==",
"'POST'",
":",
"return",
"self",
".",
"_request_post_helper",
"(",
"url",
",",
"params",
")",
"elif",
"method",
"==",
"'GET'",
":",
"return",
"self",
".",
"_request_get_helper",
"(",
"url",
",",
"params",
")",
"raise",
"VultrError",
"(",
"'Unsupported method %s'",
"%",
"method",
")",
"except",
"requests",
".",
"RequestException",
"as",
"ex",
":",
"raise",
"RuntimeError",
"(",
"ex",
")"
] | API request helper method | [
"API",
"request",
"helper",
"method"
] | bad1448f1df7b5dba70fd3d11434f32580f0b850 | https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/utils.py#L41-L50 | train | 236,566 |
spry-group/python-vultr | examples/basic_haltRunning.py | halt_running | def halt_running():
'''Halts all running servers'''
vultr = Vultr(API_KEY)
try:
serverList = vultr.server.list()
#logging.info('Listing servers:\n%s', dumps(
#serverList, indent=2
#))
except VultrError as ex:
logging.error('VultrError: %s', ex)
for serverID in serverList:
if serverList[serverID]['power_status'] == 'running':
logging.info(serverList[serverID]['label'] + " will be gracefully shutdown.")
vultr.server.halt(serverID) | python | def halt_running():
'''Halts all running servers'''
vultr = Vultr(API_KEY)
try:
serverList = vultr.server.list()
#logging.info('Listing servers:\n%s', dumps(
#serverList, indent=2
#))
except VultrError as ex:
logging.error('VultrError: %s', ex)
for serverID in serverList:
if serverList[serverID]['power_status'] == 'running':
logging.info(serverList[serverID]['label'] + " will be gracefully shutdown.")
vultr.server.halt(serverID) | [
"def",
"halt_running",
"(",
")",
":",
"vultr",
"=",
"Vultr",
"(",
"API_KEY",
")",
"try",
":",
"serverList",
"=",
"vultr",
".",
"server",
".",
"list",
"(",
")",
"#logging.info('Listing servers:\\n%s', dumps(",
"#serverList, indent=2",
"#))",
"except",
"VultrError",
"as",
"ex",
":",
"logging",
".",
"error",
"(",
"'VultrError: %s'",
",",
"ex",
")",
"for",
"serverID",
"in",
"serverList",
":",
"if",
"serverList",
"[",
"serverID",
"]",
"[",
"'power_status'",
"]",
"==",
"'running'",
":",
"logging",
".",
"info",
"(",
"serverList",
"[",
"serverID",
"]",
"[",
"'label'",
"]",
"+",
"\" will be gracefully shutdown.\"",
")",
"vultr",
".",
"server",
".",
"halt",
"(",
"serverID",
")"
] | Halts all running servers | [
"Halts",
"all",
"running",
"servers"
] | bad1448f1df7b5dba70fd3d11434f32580f0b850 | https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/examples/basic_haltRunning.py#L18-L33 | train | 236,567 |
inspirehep/refextract | refextract/references/tag.py | tag_arxiv | def tag_arxiv(line):
"""Tag arxiv report numbers
We handle arXiv in 2 ways:
* starting with arXiv:1022.1111
* this format exactly 9999.9999
We also format the output to the standard arxiv notation:
* arXiv:2007.12.1111
* arXiv:2007.12.1111v2
"""
def tagger(match):
groups = match.groupdict()
if match.group('suffix'):
groups['suffix'] = ' ' + groups['suffix']
else:
groups['suffix'] = ''
return u'<cds.REPORTNUMBER>arXiv:%(year)s'\
u'%(month)s.%(num)s%(suffix)s' \
u'</cds.REPORTNUMBER>' % groups
line = re_arxiv_5digits.sub(tagger, line)
line = re_arxiv.sub(tagger, line)
line = re_new_arxiv_5digits.sub(tagger, line)
line = re_new_arxiv.sub(tagger, line)
return line | python | def tag_arxiv(line):
"""Tag arxiv report numbers
We handle arXiv in 2 ways:
* starting with arXiv:1022.1111
* this format exactly 9999.9999
We also format the output to the standard arxiv notation:
* arXiv:2007.12.1111
* arXiv:2007.12.1111v2
"""
def tagger(match):
groups = match.groupdict()
if match.group('suffix'):
groups['suffix'] = ' ' + groups['suffix']
else:
groups['suffix'] = ''
return u'<cds.REPORTNUMBER>arXiv:%(year)s'\
u'%(month)s.%(num)s%(suffix)s' \
u'</cds.REPORTNUMBER>' % groups
line = re_arxiv_5digits.sub(tagger, line)
line = re_arxiv.sub(tagger, line)
line = re_new_arxiv_5digits.sub(tagger, line)
line = re_new_arxiv.sub(tagger, line)
return line | [
"def",
"tag_arxiv",
"(",
"line",
")",
":",
"def",
"tagger",
"(",
"match",
")",
":",
"groups",
"=",
"match",
".",
"groupdict",
"(",
")",
"if",
"match",
".",
"group",
"(",
"'suffix'",
")",
":",
"groups",
"[",
"'suffix'",
"]",
"=",
"' '",
"+",
"groups",
"[",
"'suffix'",
"]",
"else",
":",
"groups",
"[",
"'suffix'",
"]",
"=",
"''",
"return",
"u'<cds.REPORTNUMBER>arXiv:%(year)s'",
"u'%(month)s.%(num)s%(suffix)s'",
"u'</cds.REPORTNUMBER>'",
"%",
"groups",
"line",
"=",
"re_arxiv_5digits",
".",
"sub",
"(",
"tagger",
",",
"line",
")",
"line",
"=",
"re_arxiv",
".",
"sub",
"(",
"tagger",
",",
"line",
")",
"line",
"=",
"re_new_arxiv_5digits",
".",
"sub",
"(",
"tagger",
",",
"line",
")",
"line",
"=",
"re_new_arxiv",
".",
"sub",
"(",
"tagger",
",",
"line",
")",
"return",
"line"
] | Tag arxiv report numbers
We handle arXiv in 2 ways:
* starting with arXiv:1022.1111
* this format exactly 9999.9999
We also format the output to the standard arxiv notation:
* arXiv:2007.12.1111
* arXiv:2007.12.1111v2 | [
"Tag",
"arxiv",
"report",
"numbers"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L360-L384 | train | 236,568 |
inspirehep/refextract | refextract/references/tag.py | tag_arxiv_more | def tag_arxiv_more(line):
"""Tag old arxiv report numbers
Either formats:
* hep-th/1234567
* arXiv:1022111 [hep-ph] which transforms to hep-ph/1022111
"""
line = RE_ARXIV_CATCHUP.sub(ur"\g<suffix>/\g<year>\g<month>\g<num>", line)
for report_re, report_repl in RE_OLD_ARXIV:
report_number = report_repl + ur"/\g<num>"
line = report_re.sub(
u'<cds.REPORTNUMBER>' + report_number + u'</cds.REPORTNUMBER>',
line
)
return line | python | def tag_arxiv_more(line):
"""Tag old arxiv report numbers
Either formats:
* hep-th/1234567
* arXiv:1022111 [hep-ph] which transforms to hep-ph/1022111
"""
line = RE_ARXIV_CATCHUP.sub(ur"\g<suffix>/\g<year>\g<month>\g<num>", line)
for report_re, report_repl in RE_OLD_ARXIV:
report_number = report_repl + ur"/\g<num>"
line = report_re.sub(
u'<cds.REPORTNUMBER>' + report_number + u'</cds.REPORTNUMBER>',
line
)
return line | [
"def",
"tag_arxiv_more",
"(",
"line",
")",
":",
"line",
"=",
"RE_ARXIV_CATCHUP",
".",
"sub",
"(",
"ur\"\\g<suffix>/\\g<year>\\g<month>\\g<num>\"",
",",
"line",
")",
"for",
"report_re",
",",
"report_repl",
"in",
"RE_OLD_ARXIV",
":",
"report_number",
"=",
"report_repl",
"+",
"ur\"/\\g<num>\"",
"line",
"=",
"report_re",
".",
"sub",
"(",
"u'<cds.REPORTNUMBER>'",
"+",
"report_number",
"+",
"u'</cds.REPORTNUMBER>'",
",",
"line",
")",
"return",
"line"
] | Tag old arxiv report numbers
Either formats:
* hep-th/1234567
* arXiv:1022111 [hep-ph] which transforms to hep-ph/1022111 | [
"Tag",
"old",
"arxiv",
"report",
"numbers"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L387-L402 | train | 236,569 |
inspirehep/refextract | refextract/references/tag.py | tag_pos_volume | def tag_pos_volume(line):
"""Tag POS volume number
POS is journal that has special volume numbers
e.g. PoS LAT2007 (2007) 369
"""
def tagger(match):
groups = match.groupdict()
try:
year = match.group('year')
except IndexError:
# Extract year from volume name
# which should always include the year
g = re.search(re_pos_year_num, match.group(
'volume_num'), re.UNICODE)
year = g.group(0)
if year:
groups[
'year'] = ' <cds.YR>(%s)</cds.YR>' % year.strip().strip('()')
else:
groups['year'] = ''
return '<cds.JOURNAL>PoS</cds.JOURNAL>' \
' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>' \
'%(year)s' \
' <cds.PG>%(page)s</cds.PG>' % groups
for p in re_pos:
line = p.sub(tagger, line)
return line | python | def tag_pos_volume(line):
"""Tag POS volume number
POS is journal that has special volume numbers
e.g. PoS LAT2007 (2007) 369
"""
def tagger(match):
groups = match.groupdict()
try:
year = match.group('year')
except IndexError:
# Extract year from volume name
# which should always include the year
g = re.search(re_pos_year_num, match.group(
'volume_num'), re.UNICODE)
year = g.group(0)
if year:
groups[
'year'] = ' <cds.YR>(%s)</cds.YR>' % year.strip().strip('()')
else:
groups['year'] = ''
return '<cds.JOURNAL>PoS</cds.JOURNAL>' \
' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>' \
'%(year)s' \
' <cds.PG>%(page)s</cds.PG>' % groups
for p in re_pos:
line = p.sub(tagger, line)
return line | [
"def",
"tag_pos_volume",
"(",
"line",
")",
":",
"def",
"tagger",
"(",
"match",
")",
":",
"groups",
"=",
"match",
".",
"groupdict",
"(",
")",
"try",
":",
"year",
"=",
"match",
".",
"group",
"(",
"'year'",
")",
"except",
"IndexError",
":",
"# Extract year from volume name",
"# which should always include the year",
"g",
"=",
"re",
".",
"search",
"(",
"re_pos_year_num",
",",
"match",
".",
"group",
"(",
"'volume_num'",
")",
",",
"re",
".",
"UNICODE",
")",
"year",
"=",
"g",
".",
"group",
"(",
"0",
")",
"if",
"year",
":",
"groups",
"[",
"'year'",
"]",
"=",
"' <cds.YR>(%s)</cds.YR>'",
"%",
"year",
".",
"strip",
"(",
")",
".",
"strip",
"(",
"'()'",
")",
"else",
":",
"groups",
"[",
"'year'",
"]",
"=",
"''",
"return",
"'<cds.JOURNAL>PoS</cds.JOURNAL>'",
"' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'",
"'%(year)s'",
"' <cds.PG>%(page)s</cds.PG>'",
"%",
"groups",
"for",
"p",
"in",
"re_pos",
":",
"line",
"=",
"p",
".",
"sub",
"(",
"tagger",
",",
"line",
")",
"return",
"line"
] | Tag POS volume number
POS is journal that has special volume numbers
e.g. PoS LAT2007 (2007) 369 | [
"Tag",
"POS",
"volume",
"number"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L405-L436 | train | 236,570 |
inspirehep/refextract | refextract/references/tag.py | find_numeration_more | def find_numeration_more(line):
"""Look for other numeration in line."""
# First, attempt to use marked-up titles
patterns = (
re_correct_numeration_2nd_try_ptn1,
re_correct_numeration_2nd_try_ptn2,
re_correct_numeration_2nd_try_ptn3,
re_correct_numeration_2nd_try_ptn4,
)
for pattern in patterns:
match = pattern.search(line)
if match:
info = match.groupdict()
series = extract_series_from_volume(info['vol'])
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt']
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt2']
return {'year': info.get('year', None),
'series': series,
'volume': info['vol_num'],
'page': info['page'] or info['jinst_page'],
'page_end': info['page_end'],
'len': len(info['aftertitle'])}
return None | python | def find_numeration_more(line):
"""Look for other numeration in line."""
# First, attempt to use marked-up titles
patterns = (
re_correct_numeration_2nd_try_ptn1,
re_correct_numeration_2nd_try_ptn2,
re_correct_numeration_2nd_try_ptn3,
re_correct_numeration_2nd_try_ptn4,
)
for pattern in patterns:
match = pattern.search(line)
if match:
info = match.groupdict()
series = extract_series_from_volume(info['vol'])
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt']
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt2']
return {'year': info.get('year', None),
'series': series,
'volume': info['vol_num'],
'page': info['page'] or info['jinst_page'],
'page_end': info['page_end'],
'len': len(info['aftertitle'])}
return None | [
"def",
"find_numeration_more",
"(",
"line",
")",
":",
"# First, attempt to use marked-up titles",
"patterns",
"=",
"(",
"re_correct_numeration_2nd_try_ptn1",
",",
"re_correct_numeration_2nd_try_ptn2",
",",
"re_correct_numeration_2nd_try_ptn3",
",",
"re_correct_numeration_2nd_try_ptn4",
",",
")",
"for",
"pattern",
"in",
"patterns",
":",
"match",
"=",
"pattern",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"info",
"=",
"match",
".",
"groupdict",
"(",
")",
"series",
"=",
"extract_series_from_volume",
"(",
"info",
"[",
"'vol'",
"]",
")",
"if",
"not",
"info",
"[",
"'vol_num'",
"]",
":",
"info",
"[",
"'vol_num'",
"]",
"=",
"info",
"[",
"'vol_num_alt'",
"]",
"if",
"not",
"info",
"[",
"'vol_num'",
"]",
":",
"info",
"[",
"'vol_num'",
"]",
"=",
"info",
"[",
"'vol_num_alt2'",
"]",
"return",
"{",
"'year'",
":",
"info",
".",
"get",
"(",
"'year'",
",",
"None",
")",
",",
"'series'",
":",
"series",
",",
"'volume'",
":",
"info",
"[",
"'vol_num'",
"]",
",",
"'page'",
":",
"info",
"[",
"'page'",
"]",
"or",
"info",
"[",
"'jinst_page'",
"]",
",",
"'page_end'",
":",
"info",
"[",
"'page_end'",
"]",
",",
"'len'",
":",
"len",
"(",
"info",
"[",
"'aftertitle'",
"]",
")",
"}",
"return",
"None"
] | Look for other numeration in line. | [
"Look",
"for",
"other",
"numeration",
"in",
"line",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L456-L481 | train | 236,571 |
inspirehep/refextract | refextract/references/tag.py | identify_ibids | def identify_ibids(line):
"""Find IBIDs within the line, record their position and length,
and replace them with underscores.
@param line: (string) the working reference line
@return: (tuple) containing 2 dictionaries and a string:
Dictionary: matched IBID text: (Key: position of IBID in
line; Value: matched IBID text)
String: working line with matched IBIDs removed
"""
ibid_match_txt = {}
# Record details of each matched ibid:
for m_ibid in re_ibid.finditer(line):
ibid_match_txt[m_ibid.start()] = m_ibid.group(0)
# Replace matched text in line with underscores:
line = line[0:m_ibid.start()] + \
"_" * len(m_ibid.group(0)) + \
line[m_ibid.end():]
return ibid_match_txt, line | python | def identify_ibids(line):
"""Find IBIDs within the line, record their position and length,
and replace them with underscores.
@param line: (string) the working reference line
@return: (tuple) containing 2 dictionaries and a string:
Dictionary: matched IBID text: (Key: position of IBID in
line; Value: matched IBID text)
String: working line with matched IBIDs removed
"""
ibid_match_txt = {}
# Record details of each matched ibid:
for m_ibid in re_ibid.finditer(line):
ibid_match_txt[m_ibid.start()] = m_ibid.group(0)
# Replace matched text in line with underscores:
line = line[0:m_ibid.start()] + \
"_" * len(m_ibid.group(0)) + \
line[m_ibid.end():]
return ibid_match_txt, line | [
"def",
"identify_ibids",
"(",
"line",
")",
":",
"ibid_match_txt",
"=",
"{",
"}",
"# Record details of each matched ibid:",
"for",
"m_ibid",
"in",
"re_ibid",
".",
"finditer",
"(",
"line",
")",
":",
"ibid_match_txt",
"[",
"m_ibid",
".",
"start",
"(",
")",
"]",
"=",
"m_ibid",
".",
"group",
"(",
"0",
")",
"# Replace matched text in line with underscores:",
"line",
"=",
"line",
"[",
"0",
":",
"m_ibid",
".",
"start",
"(",
")",
"]",
"+",
"\"_\"",
"*",
"len",
"(",
"m_ibid",
".",
"group",
"(",
"0",
")",
")",
"+",
"line",
"[",
"m_ibid",
".",
"end",
"(",
")",
":",
"]",
"return",
"ibid_match_txt",
",",
"line"
] | Find IBIDs within the line, record their position and length,
and replace them with underscores.
@param line: (string) the working reference line
@return: (tuple) containing 2 dictionaries and a string:
Dictionary: matched IBID text: (Key: position of IBID in
line; Value: matched IBID text)
String: working line with matched IBIDs removed | [
"Find",
"IBIDs",
"within",
"the",
"line",
"record",
"their",
"position",
"and",
"length",
"and",
"replace",
"them",
"with",
"underscores",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1052-L1070 | train | 236,572 |
inspirehep/refextract | refextract/references/tag.py | find_numeration | def find_numeration(line):
"""Given a reference line, attempt to locate instances of citation
'numeration' in the line.
@param line: (string) the reference line.
@return: (string) the reference line after numeration has been checked
and possibly recognized/marked-up.
"""
patterns = (
# vol,page,year
re_numeration_vol_page_yr,
re_numeration_vol_nucphys_page_yr,
re_numeration_nucphys_vol_page_yr,
# With sub volume
re_numeration_vol_subvol_nucphys_yr_page,
re_numeration_vol_nucphys_yr_subvol_page,
# vol,year,page
re_numeration_vol_yr_page,
re_numeration_nucphys_vol_yr_page,
re_numeration_vol_nucphys_series_yr_page,
# vol,page,year
re_numeration_vol_series_nucphys_page_yr,
re_numeration_vol_nucphys_series_page_yr,
# year,vol,page
re_numeration_yr_vol_page,
)
for pattern in patterns:
match = pattern.match(line)
if match:
info = match.groupdict()
series = info.get('series', None)
if not series:
series = extract_series_from_volume(info['vol'])
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt']
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt2']
return {'year': info.get('year', None),
'series': series,
'volume': info['vol_num'],
'page': info['page'] or info['jinst_page'],
'page_end': info['page_end'],
'len': match.end()}
return None | python | def find_numeration(line):
"""Given a reference line, attempt to locate instances of citation
'numeration' in the line.
@param line: (string) the reference line.
@return: (string) the reference line after numeration has been checked
and possibly recognized/marked-up.
"""
patterns = (
# vol,page,year
re_numeration_vol_page_yr,
re_numeration_vol_nucphys_page_yr,
re_numeration_nucphys_vol_page_yr,
# With sub volume
re_numeration_vol_subvol_nucphys_yr_page,
re_numeration_vol_nucphys_yr_subvol_page,
# vol,year,page
re_numeration_vol_yr_page,
re_numeration_nucphys_vol_yr_page,
re_numeration_vol_nucphys_series_yr_page,
# vol,page,year
re_numeration_vol_series_nucphys_page_yr,
re_numeration_vol_nucphys_series_page_yr,
# year,vol,page
re_numeration_yr_vol_page,
)
for pattern in patterns:
match = pattern.match(line)
if match:
info = match.groupdict()
series = info.get('series', None)
if not series:
series = extract_series_from_volume(info['vol'])
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt']
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt2']
return {'year': info.get('year', None),
'series': series,
'volume': info['vol_num'],
'page': info['page'] or info['jinst_page'],
'page_end': info['page_end'],
'len': match.end()}
return None | [
"def",
"find_numeration",
"(",
"line",
")",
":",
"patterns",
"=",
"(",
"# vol,page,year",
"re_numeration_vol_page_yr",
",",
"re_numeration_vol_nucphys_page_yr",
",",
"re_numeration_nucphys_vol_page_yr",
",",
"# With sub volume",
"re_numeration_vol_subvol_nucphys_yr_page",
",",
"re_numeration_vol_nucphys_yr_subvol_page",
",",
"# vol,year,page",
"re_numeration_vol_yr_page",
",",
"re_numeration_nucphys_vol_yr_page",
",",
"re_numeration_vol_nucphys_series_yr_page",
",",
"# vol,page,year",
"re_numeration_vol_series_nucphys_page_yr",
",",
"re_numeration_vol_nucphys_series_page_yr",
",",
"# year,vol,page",
"re_numeration_yr_vol_page",
",",
")",
"for",
"pattern",
"in",
"patterns",
":",
"match",
"=",
"pattern",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"info",
"=",
"match",
".",
"groupdict",
"(",
")",
"series",
"=",
"info",
".",
"get",
"(",
"'series'",
",",
"None",
")",
"if",
"not",
"series",
":",
"series",
"=",
"extract_series_from_volume",
"(",
"info",
"[",
"'vol'",
"]",
")",
"if",
"not",
"info",
"[",
"'vol_num'",
"]",
":",
"info",
"[",
"'vol_num'",
"]",
"=",
"info",
"[",
"'vol_num_alt'",
"]",
"if",
"not",
"info",
"[",
"'vol_num'",
"]",
":",
"info",
"[",
"'vol_num'",
"]",
"=",
"info",
"[",
"'vol_num_alt2'",
"]",
"return",
"{",
"'year'",
":",
"info",
".",
"get",
"(",
"'year'",
",",
"None",
")",
",",
"'series'",
":",
"series",
",",
"'volume'",
":",
"info",
"[",
"'vol_num'",
"]",
",",
"'page'",
":",
"info",
"[",
"'page'",
"]",
"or",
"info",
"[",
"'jinst_page'",
"]",
",",
"'page_end'",
":",
"info",
"[",
"'page_end'",
"]",
",",
"'len'",
":",
"match",
".",
"end",
"(",
")",
"}",
"return",
"None"
] | Given a reference line, attempt to locate instances of citation
'numeration' in the line.
@param line: (string) the reference line.
@return: (string) the reference line after numeration has been checked
and possibly recognized/marked-up. | [
"Given",
"a",
"reference",
"line",
"attempt",
"to",
"locate",
"instances",
"of",
"citation",
"numeration",
"in",
"the",
"line",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1083-L1127 | train | 236,573 |
inspirehep/refextract | refextract/references/engine.py | remove_reference_line_marker | def remove_reference_line_marker(line):
"""Trim a reference line's 'marker' from the beginning of the line.
@param line: (string) - the reference line.
@return: (tuple) containing two strings:
+ The reference line's marker (or if there was not one,
a 'space' character.
+ The reference line with it's marker removed from the
beginning.
"""
# Get patterns to identify reference-line marker patterns:
marker_patterns = get_reference_line_numeration_marker_patterns()
line = line.lstrip()
marker_match = regex_match_list(line, marker_patterns)
if marker_match is not None:
# found a marker:
marker_val = marker_match.group(u'mark')
# trim the marker from the start of the line:
line = line[marker_match.end():].lstrip()
else:
marker_val = u" "
return (marker_val, line) | python | def remove_reference_line_marker(line):
"""Trim a reference line's 'marker' from the beginning of the line.
@param line: (string) - the reference line.
@return: (tuple) containing two strings:
+ The reference line's marker (or if there was not one,
a 'space' character.
+ The reference line with it's marker removed from the
beginning.
"""
# Get patterns to identify reference-line marker patterns:
marker_patterns = get_reference_line_numeration_marker_patterns()
line = line.lstrip()
marker_match = regex_match_list(line, marker_patterns)
if marker_match is not None:
# found a marker:
marker_val = marker_match.group(u'mark')
# trim the marker from the start of the line:
line = line[marker_match.end():].lstrip()
else:
marker_val = u" "
return (marker_val, line) | [
"def",
"remove_reference_line_marker",
"(",
"line",
")",
":",
"# Get patterns to identify reference-line marker patterns:",
"marker_patterns",
"=",
"get_reference_line_numeration_marker_patterns",
"(",
")",
"line",
"=",
"line",
".",
"lstrip",
"(",
")",
"marker_match",
"=",
"regex_match_list",
"(",
"line",
",",
"marker_patterns",
")",
"if",
"marker_match",
"is",
"not",
"None",
":",
"# found a marker:",
"marker_val",
"=",
"marker_match",
".",
"group",
"(",
"u'mark'",
")",
"# trim the marker from the start of the line:",
"line",
"=",
"line",
"[",
"marker_match",
".",
"end",
"(",
")",
":",
"]",
".",
"lstrip",
"(",
")",
"else",
":",
"marker_val",
"=",
"u\" \"",
"return",
"(",
"marker_val",
",",
"line",
")"
] | Trim a reference line's 'marker' from the beginning of the line.
@param line: (string) - the reference line.
@return: (tuple) containing two strings:
+ The reference line's marker (or if there was not one,
a 'space' character.
+ The reference line with it's marker removed from the
beginning. | [
"Trim",
"a",
"reference",
"line",
"s",
"marker",
"from",
"the",
"beginning",
"of",
"the",
"line",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L92-L114 | train | 236,574 |
inspirehep/refextract | refextract/references/engine.py | roman2arabic | def roman2arabic(num):
"""Convert numbers from roman to arabic
This function expects a string like XXII
and outputs an integer
"""
t = 0
p = 0
for r in num:
n = 10 ** (205558 % ord(r) % 7) % 9995
t += n - 2 * p % n
p = n
return t | python | def roman2arabic(num):
"""Convert numbers from roman to arabic
This function expects a string like XXII
and outputs an integer
"""
t = 0
p = 0
for r in num:
n = 10 ** (205558 % ord(r) % 7) % 9995
t += n - 2 * p % n
p = n
return t | [
"def",
"roman2arabic",
"(",
"num",
")",
":",
"t",
"=",
"0",
"p",
"=",
"0",
"for",
"r",
"in",
"num",
":",
"n",
"=",
"10",
"**",
"(",
"205558",
"%",
"ord",
"(",
"r",
")",
"%",
"7",
")",
"%",
"9995",
"t",
"+=",
"n",
"-",
"2",
"*",
"p",
"%",
"n",
"p",
"=",
"n",
"return",
"t"
] | Convert numbers from roman to arabic
This function expects a string like XXII
and outputs an integer | [
"Convert",
"numbers",
"from",
"roman",
"to",
"arabic"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L117-L129 | train | 236,575 |
inspirehep/refextract | refextract/references/engine.py | format_report_number | def format_report_number(citation_elements):
"""Format report numbers that are missing a dash
e.g. CERN-LCHH2003-01 to CERN-LHCC-2003-01
"""
re_report = re.compile(ur'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$', re.UNICODE)
for el in citation_elements:
if el['type'] == 'REPORTNUMBER':
m = re_report.match(el['report_num'])
if m:
name = m.group('name')
if not name.endswith('-'):
el['report_num'] = m.group('name') + '-' + m.group('nums')
return citation_elements | python | def format_report_number(citation_elements):
"""Format report numbers that are missing a dash
e.g. CERN-LCHH2003-01 to CERN-LHCC-2003-01
"""
re_report = re.compile(ur'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$', re.UNICODE)
for el in citation_elements:
if el['type'] == 'REPORTNUMBER':
m = re_report.match(el['report_num'])
if m:
name = m.group('name')
if not name.endswith('-'):
el['report_num'] = m.group('name') + '-' + m.group('nums')
return citation_elements | [
"def",
"format_report_number",
"(",
"citation_elements",
")",
":",
"re_report",
"=",
"re",
".",
"compile",
"(",
"ur'^(?P<name>[A-Z-]+)(?P<nums>[\\d-]+)$'",
",",
"re",
".",
"UNICODE",
")",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'REPORTNUMBER'",
":",
"m",
"=",
"re_report",
".",
"match",
"(",
"el",
"[",
"'report_num'",
"]",
")",
"if",
"m",
":",
"name",
"=",
"m",
".",
"group",
"(",
"'name'",
")",
"if",
"not",
"name",
".",
"endswith",
"(",
"'-'",
")",
":",
"el",
"[",
"'report_num'",
"]",
"=",
"m",
".",
"group",
"(",
"'name'",
")",
"+",
"'-'",
"+",
"m",
".",
"group",
"(",
"'nums'",
")",
"return",
"citation_elements"
] | Format report numbers that are missing a dash
e.g. CERN-LCHH2003-01 to CERN-LHCC-2003-01 | [
"Format",
"report",
"numbers",
"that",
"are",
"missing",
"a",
"dash"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L171-L184 | train | 236,576 |
inspirehep/refextract | refextract/references/engine.py | format_hep | def format_hep(citation_elements):
"""Format hep-th report numbers with a dash
e.g. replaces hep-th-9711200 with hep-th/9711200
"""
prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-',
'math-ph-')
for el in citation_elements:
if el['type'] == 'REPORTNUMBER':
for p in prefixes:
if el['report_num'].startswith(p):
el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \
el['report_num'][len(p):]
return citation_elements | python | def format_hep(citation_elements):
"""Format hep-th report numbers with a dash
e.g. replaces hep-th-9711200 with hep-th/9711200
"""
prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-',
'math-ph-')
for el in citation_elements:
if el['type'] == 'REPORTNUMBER':
for p in prefixes:
if el['report_num'].startswith(p):
el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \
el['report_num'][len(p):]
return citation_elements | [
"def",
"format_hep",
"(",
"citation_elements",
")",
":",
"prefixes",
"=",
"(",
"'astro-ph-'",
",",
"'hep-th-'",
",",
"'hep-ph-'",
",",
"'hep-ex-'",
",",
"'hep-lat-'",
",",
"'math-ph-'",
")",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'REPORTNUMBER'",
":",
"for",
"p",
"in",
"prefixes",
":",
"if",
"el",
"[",
"'report_num'",
"]",
".",
"startswith",
"(",
"p",
")",
":",
"el",
"[",
"'report_num'",
"]",
"=",
"el",
"[",
"'report_num'",
"]",
"[",
":",
"len",
"(",
"p",
")",
"-",
"1",
"]",
"+",
"'/'",
"+",
"el",
"[",
"'report_num'",
"]",
"[",
"len",
"(",
"p",
")",
":",
"]",
"return",
"citation_elements"
] | Format hep-th report numbers with a dash
e.g. replaces hep-th-9711200 with hep-th/9711200 | [
"Format",
"hep",
"-",
"th",
"report",
"numbers",
"with",
"a",
"dash"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L187-L200 | train | 236,577 |
inspirehep/refextract | refextract/references/engine.py | look_for_books | def look_for_books(citation_elements, kbs):
"""Look for books in our kb
Create book tags by using the authors and the title to find books
in our knowledge base
"""
title = None
for el in citation_elements:
if el['type'] == 'QUOTED':
title = el
break
if title:
normalized_title = title['title'].upper()
if normalized_title in kbs['books']:
line = kbs['books'][normalized_title]
el = {'type': 'BOOK',
'misc_txt': '',
'authors': line[0],
'title': line[1],
'year': line[2].strip(';')}
citation_elements.append(el)
citation_elements.remove(title)
return citation_elements | python | def look_for_books(citation_elements, kbs):
"""Look for books in our kb
Create book tags by using the authors and the title to find books
in our knowledge base
"""
title = None
for el in citation_elements:
if el['type'] == 'QUOTED':
title = el
break
if title:
normalized_title = title['title'].upper()
if normalized_title in kbs['books']:
line = kbs['books'][normalized_title]
el = {'type': 'BOOK',
'misc_txt': '',
'authors': line[0],
'title': line[1],
'year': line[2].strip(';')}
citation_elements.append(el)
citation_elements.remove(title)
return citation_elements | [
"def",
"look_for_books",
"(",
"citation_elements",
",",
"kbs",
")",
":",
"title",
"=",
"None",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'QUOTED'",
":",
"title",
"=",
"el",
"break",
"if",
"title",
":",
"normalized_title",
"=",
"title",
"[",
"'title'",
"]",
".",
"upper",
"(",
")",
"if",
"normalized_title",
"in",
"kbs",
"[",
"'books'",
"]",
":",
"line",
"=",
"kbs",
"[",
"'books'",
"]",
"[",
"normalized_title",
"]",
"el",
"=",
"{",
"'type'",
":",
"'BOOK'",
",",
"'misc_txt'",
":",
"''",
",",
"'authors'",
":",
"line",
"[",
"0",
"]",
",",
"'title'",
":",
"line",
"[",
"1",
"]",
",",
"'year'",
":",
"line",
"[",
"2",
"]",
".",
"strip",
"(",
"';'",
")",
"}",
"citation_elements",
".",
"append",
"(",
"el",
")",
"citation_elements",
".",
"remove",
"(",
"title",
")",
"return",
"citation_elements"
] | Look for books in our kb
Create book tags by using the authors and the title to find books
in our knowledge base | [
"Look",
"for",
"books",
"in",
"our",
"kb"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L215-L239 | train | 236,578 |
inspirehep/refextract | refextract/references/engine.py | split_volume_from_journal | def split_volume_from_journal(citation_elements):
"""Split volume from journal title
We need this because sometimes the volume is attached to the journal title
instead of the volume. In those cases we move it here from the title to the
volume
"""
for el in citation_elements:
if el['type'] == 'JOURNAL' and ';' in el['title']:
el['title'], series = el['title'].rsplit(';', 1)
el['volume'] = series + el['volume']
return citation_elements | python | def split_volume_from_journal(citation_elements):
"""Split volume from journal title
We need this because sometimes the volume is attached to the journal title
instead of the volume. In those cases we move it here from the title to the
volume
"""
for el in citation_elements:
if el['type'] == 'JOURNAL' and ';' in el['title']:
el['title'], series = el['title'].rsplit(';', 1)
el['volume'] = series + el['volume']
return citation_elements | [
"def",
"split_volume_from_journal",
"(",
"citation_elements",
")",
":",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'JOURNAL'",
"and",
"';'",
"in",
"el",
"[",
"'title'",
"]",
":",
"el",
"[",
"'title'",
"]",
",",
"series",
"=",
"el",
"[",
"'title'",
"]",
".",
"rsplit",
"(",
"';'",
",",
"1",
")",
"el",
"[",
"'volume'",
"]",
"=",
"series",
"+",
"el",
"[",
"'volume'",
"]",
"return",
"citation_elements"
] | Split volume from journal title
We need this because sometimes the volume is attached to the journal title
instead of the volume. In those cases we move it here from the title to the
volume | [
"Split",
"volume",
"from",
"journal",
"title"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L242-L253 | train | 236,579 |
inspirehep/refextract | refextract/references/engine.py | remove_b_for_nucl_phys | def remove_b_for_nucl_phys(citation_elements):
"""Removes b from the volume of some journals
Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in INSPIRE
that journal is handled differently.
"""
for el in citation_elements:
if el['type'] == 'JOURNAL' and el['title'] == 'Nucl.Phys.Proc.Suppl.' \
and 'volume' in el \
and (el['volume'].startswith('b') or el['volume'].startswith('B')):
el['volume'] = el['volume'][1:]
return citation_elements | python | def remove_b_for_nucl_phys(citation_elements):
"""Removes b from the volume of some journals
Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in INSPIRE
that journal is handled differently.
"""
for el in citation_elements:
if el['type'] == 'JOURNAL' and el['title'] == 'Nucl.Phys.Proc.Suppl.' \
and 'volume' in el \
and (el['volume'].startswith('b') or el['volume'].startswith('B')):
el['volume'] = el['volume'][1:]
return citation_elements | [
"def",
"remove_b_for_nucl_phys",
"(",
"citation_elements",
")",
":",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'JOURNAL'",
"and",
"el",
"[",
"'title'",
"]",
"==",
"'Nucl.Phys.Proc.Suppl.'",
"and",
"'volume'",
"in",
"el",
"and",
"(",
"el",
"[",
"'volume'",
"]",
".",
"startswith",
"(",
"'b'",
")",
"or",
"el",
"[",
"'volume'",
"]",
".",
"startswith",
"(",
"'B'",
")",
")",
":",
"el",
"[",
"'volume'",
"]",
"=",
"el",
"[",
"'volume'",
"]",
"[",
"1",
":",
"]",
"return",
"citation_elements"
] | Removes b from the volume of some journals
Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in INSPIRE
that journal is handled differently. | [
"Removes",
"b",
"from",
"the",
"volume",
"of",
"some",
"journals"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L256-L267 | train | 236,580 |
inspirehep/refextract | refextract/references/engine.py | mangle_volume | def mangle_volume(citation_elements):
"""Make sure the volume letter is before the volume number
e.g. transforms 100B to B100
"""
volume_re = re.compile(ur"(\d+)([A-Z])", re.U | re.I)
for el in citation_elements:
if el['type'] == 'JOURNAL':
matches = volume_re.match(el['volume'])
if matches:
el['volume'] = matches.group(2) + matches.group(1)
return citation_elements | python | def mangle_volume(citation_elements):
"""Make sure the volume letter is before the volume number
e.g. transforms 100B to B100
"""
volume_re = re.compile(ur"(\d+)([A-Z])", re.U | re.I)
for el in citation_elements:
if el['type'] == 'JOURNAL':
matches = volume_re.match(el['volume'])
if matches:
el['volume'] = matches.group(2) + matches.group(1)
return citation_elements | [
"def",
"mangle_volume",
"(",
"citation_elements",
")",
":",
"volume_re",
"=",
"re",
".",
"compile",
"(",
"ur\"(\\d+)([A-Z])\"",
",",
"re",
".",
"U",
"|",
"re",
".",
"I",
")",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'JOURNAL'",
":",
"matches",
"=",
"volume_re",
".",
"match",
"(",
"el",
"[",
"'volume'",
"]",
")",
"if",
"matches",
":",
"el",
"[",
"'volume'",
"]",
"=",
"matches",
".",
"group",
"(",
"2",
")",
"+",
"matches",
".",
"group",
"(",
"1",
")",
"return",
"citation_elements"
] | Make sure the volume letter is before the volume number
e.g. transforms 100B to B100 | [
"Make",
"sure",
"the",
"volume",
"letter",
"is",
"before",
"the",
"volume",
"number"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L270-L282 | train | 236,581 |
inspirehep/refextract | refextract/references/engine.py | split_citations | def split_citations(citation_elements):
"""Split a citation line in multiple citations
We handle the case where the author has put 2 citations in the same line
but split with ; or some other method.
"""
splitted_citations = []
new_elements = []
current_recid = None
current_doi = None
def check_ibid(current_elements, trigger_el):
for el in new_elements:
if el['type'] == 'AUTH':
return
# Check for ibid
if trigger_el.get('is_ibid', False):
if splitted_citations:
els = chain(reversed(current_elements),
reversed(splitted_citations[-1]))
else:
els = reversed(current_elements)
for el in els:
if el['type'] == 'AUTH':
new_elements.append(el.copy())
break
def start_new_citation():
"""Start new citation"""
splitted_citations.append(new_elements[:])
del new_elements[:]
for el in citation_elements:
try:
el_recid = el['recid']
except KeyError:
el_recid = None
if current_recid and el_recid and current_recid == el_recid:
# Do not start a new citation
pass
elif current_recid and el_recid and current_recid != el_recid \
or current_doi and el['type'] == 'DOI' and \
current_doi != el['doi_string']:
start_new_citation()
# Some authors may be found in the previous citation
balance_authors(splitted_citations, new_elements)
elif ';' in el['misc_txt']:
misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
if misc_txt:
new_elements.append({'type': 'MISC',
'misc_txt': misc_txt})
start_new_citation()
# In case el['recid'] is None, we want to reset it
# because we are starting a new reference
current_recid = el_recid
while ';' in el['misc_txt']:
misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
if misc_txt:
new_elements.append({'type': 'MISC',
'misc_txt': misc_txt})
start_new_citation()
current_recid = None
if el_recid:
current_recid = el_recid
if el['type'] == 'DOI':
current_doi = el['doi_string']
check_ibid(new_elements, el)
new_elements.append(el)
splitted_citations.append(new_elements)
return [el for el in splitted_citations if not empty_citation(el)] | python | def split_citations(citation_elements):
"""Split a citation line in multiple citations
We handle the case where the author has put 2 citations in the same line
but split with ; or some other method.
"""
splitted_citations = []
new_elements = []
current_recid = None
current_doi = None
def check_ibid(current_elements, trigger_el):
for el in new_elements:
if el['type'] == 'AUTH':
return
# Check for ibid
if trigger_el.get('is_ibid', False):
if splitted_citations:
els = chain(reversed(current_elements),
reversed(splitted_citations[-1]))
else:
els = reversed(current_elements)
for el in els:
if el['type'] == 'AUTH':
new_elements.append(el.copy())
break
def start_new_citation():
"""Start new citation"""
splitted_citations.append(new_elements[:])
del new_elements[:]
for el in citation_elements:
try:
el_recid = el['recid']
except KeyError:
el_recid = None
if current_recid and el_recid and current_recid == el_recid:
# Do not start a new citation
pass
elif current_recid and el_recid and current_recid != el_recid \
or current_doi and el['type'] == 'DOI' and \
current_doi != el['doi_string']:
start_new_citation()
# Some authors may be found in the previous citation
balance_authors(splitted_citations, new_elements)
elif ';' in el['misc_txt']:
misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
if misc_txt:
new_elements.append({'type': 'MISC',
'misc_txt': misc_txt})
start_new_citation()
# In case el['recid'] is None, we want to reset it
# because we are starting a new reference
current_recid = el_recid
while ';' in el['misc_txt']:
misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
if misc_txt:
new_elements.append({'type': 'MISC',
'misc_txt': misc_txt})
start_new_citation()
current_recid = None
if el_recid:
current_recid = el_recid
if el['type'] == 'DOI':
current_doi = el['doi_string']
check_ibid(new_elements, el)
new_elements.append(el)
splitted_citations.append(new_elements)
return [el for el in splitted_citations if not empty_citation(el)] | [
"def",
"split_citations",
"(",
"citation_elements",
")",
":",
"splitted_citations",
"=",
"[",
"]",
"new_elements",
"=",
"[",
"]",
"current_recid",
"=",
"None",
"current_doi",
"=",
"None",
"def",
"check_ibid",
"(",
"current_elements",
",",
"trigger_el",
")",
":",
"for",
"el",
"in",
"new_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'AUTH'",
":",
"return",
"# Check for ibid",
"if",
"trigger_el",
".",
"get",
"(",
"'is_ibid'",
",",
"False",
")",
":",
"if",
"splitted_citations",
":",
"els",
"=",
"chain",
"(",
"reversed",
"(",
"current_elements",
")",
",",
"reversed",
"(",
"splitted_citations",
"[",
"-",
"1",
"]",
")",
")",
"else",
":",
"els",
"=",
"reversed",
"(",
"current_elements",
")",
"for",
"el",
"in",
"els",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'AUTH'",
":",
"new_elements",
".",
"append",
"(",
"el",
".",
"copy",
"(",
")",
")",
"break",
"def",
"start_new_citation",
"(",
")",
":",
"\"\"\"Start new citation\"\"\"",
"splitted_citations",
".",
"append",
"(",
"new_elements",
"[",
":",
"]",
")",
"del",
"new_elements",
"[",
":",
"]",
"for",
"el",
"in",
"citation_elements",
":",
"try",
":",
"el_recid",
"=",
"el",
"[",
"'recid'",
"]",
"except",
"KeyError",
":",
"el_recid",
"=",
"None",
"if",
"current_recid",
"and",
"el_recid",
"and",
"current_recid",
"==",
"el_recid",
":",
"# Do not start a new citation",
"pass",
"elif",
"current_recid",
"and",
"el_recid",
"and",
"current_recid",
"!=",
"el_recid",
"or",
"current_doi",
"and",
"el",
"[",
"'type'",
"]",
"==",
"'DOI'",
"and",
"current_doi",
"!=",
"el",
"[",
"'doi_string'",
"]",
":",
"start_new_citation",
"(",
")",
"# Some authors may be found in the previous citation",
"balance_authors",
"(",
"splitted_citations",
",",
"new_elements",
")",
"elif",
"';'",
"in",
"el",
"[",
"'misc_txt'",
"]",
":",
"misc_txt",
",",
"el",
"[",
"'misc_txt'",
"]",
"=",
"el",
"[",
"'misc_txt'",
"]",
".",
"split",
"(",
"';'",
",",
"1",
")",
"if",
"misc_txt",
":",
"new_elements",
".",
"append",
"(",
"{",
"'type'",
":",
"'MISC'",
",",
"'misc_txt'",
":",
"misc_txt",
"}",
")",
"start_new_citation",
"(",
")",
"# In case el['recid'] is None, we want to reset it",
"# because we are starting a new reference",
"current_recid",
"=",
"el_recid",
"while",
"';'",
"in",
"el",
"[",
"'misc_txt'",
"]",
":",
"misc_txt",
",",
"el",
"[",
"'misc_txt'",
"]",
"=",
"el",
"[",
"'misc_txt'",
"]",
".",
"split",
"(",
"';'",
",",
"1",
")",
"if",
"misc_txt",
":",
"new_elements",
".",
"append",
"(",
"{",
"'type'",
":",
"'MISC'",
",",
"'misc_txt'",
":",
"misc_txt",
"}",
")",
"start_new_citation",
"(",
")",
"current_recid",
"=",
"None",
"if",
"el_recid",
":",
"current_recid",
"=",
"el_recid",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'DOI'",
":",
"current_doi",
"=",
"el",
"[",
"'doi_string'",
"]",
"check_ibid",
"(",
"new_elements",
",",
"el",
")",
"new_elements",
".",
"append",
"(",
"el",
")",
"splitted_citations",
".",
"append",
"(",
"new_elements",
")",
"return",
"[",
"el",
"for",
"el",
"in",
"splitted_citations",
"if",
"not",
"empty_citation",
"(",
"el",
")",
"]"
] | Split a citation line in multiple citations
We handle the case where the author has put 2 citations in the same line
but split with ; or some other method. | [
"Split",
"a",
"citation",
"line",
"in",
"multiple",
"citations"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L307-L383 | train | 236,582 |
inspirehep/refextract | refextract/references/engine.py | look_for_hdl | def look_for_hdl(citation_elements):
"""Looks for handle identifiers in the misc txt of the citation elements
When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process
"""
for el in list(citation_elements):
matched_hdl = re_hdl.finditer(el['misc_txt'])
for match in reversed(list(matched_hdl)):
hdl_el = {'type': 'HDL',
'hdl_id': match.group('hdl_id'),
'misc_txt': el['misc_txt'][match.end():]}
el['misc_txt'] = el['misc_txt'][0:match.start()]
citation_elements.insert(citation_elements.index(el) + 1, hdl_el) | python | def look_for_hdl(citation_elements):
"""Looks for handle identifiers in the misc txt of the citation elements
When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process
"""
for el in list(citation_elements):
matched_hdl = re_hdl.finditer(el['misc_txt'])
for match in reversed(list(matched_hdl)):
hdl_el = {'type': 'HDL',
'hdl_id': match.group('hdl_id'),
'misc_txt': el['misc_txt'][match.end():]}
el['misc_txt'] = el['misc_txt'][0:match.start()]
citation_elements.insert(citation_elements.index(el) + 1, hdl_el) | [
"def",
"look_for_hdl",
"(",
"citation_elements",
")",
":",
"for",
"el",
"in",
"list",
"(",
"citation_elements",
")",
":",
"matched_hdl",
"=",
"re_hdl",
".",
"finditer",
"(",
"el",
"[",
"'misc_txt'",
"]",
")",
"for",
"match",
"in",
"reversed",
"(",
"list",
"(",
"matched_hdl",
")",
")",
":",
"hdl_el",
"=",
"{",
"'type'",
":",
"'HDL'",
",",
"'hdl_id'",
":",
"match",
".",
"group",
"(",
"'hdl_id'",
")",
",",
"'misc_txt'",
":",
"el",
"[",
"'misc_txt'",
"]",
"[",
"match",
".",
"end",
"(",
")",
":",
"]",
"}",
"el",
"[",
"'misc_txt'",
"]",
"=",
"el",
"[",
"'misc_txt'",
"]",
"[",
"0",
":",
"match",
".",
"start",
"(",
")",
"]",
"citation_elements",
".",
"insert",
"(",
"citation_elements",
".",
"index",
"(",
"el",
")",
"+",
"1",
",",
"hdl_el",
")"
] | Looks for handle identifiers in the misc txt of the citation elements
When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process | [
"Looks",
"for",
"handle",
"identifiers",
"in",
"the",
"misc",
"txt",
"of",
"the",
"citation",
"elements"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L596-L609 | train | 236,583 |
inspirehep/refextract | refextract/references/engine.py | look_for_hdl_urls | def look_for_hdl_urls(citation_elements):
"""Looks for handle identifiers that have already been identified as urls
When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process
"""
for el in citation_elements:
if el['type'] == 'URL':
match = re_hdl.match(el['url_string'])
if match:
el['type'] = 'HDL'
el['hdl_id'] = match.group('hdl_id')
del el['url_desc']
del el['url_string'] | python | def look_for_hdl_urls(citation_elements):
"""Looks for handle identifiers that have already been identified as urls
When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process
"""
for el in citation_elements:
if el['type'] == 'URL':
match = re_hdl.match(el['url_string'])
if match:
el['type'] = 'HDL'
el['hdl_id'] = match.group('hdl_id')
del el['url_desc']
del el['url_string'] | [
"def",
"look_for_hdl_urls",
"(",
"citation_elements",
")",
":",
"for",
"el",
"in",
"citation_elements",
":",
"if",
"el",
"[",
"'type'",
"]",
"==",
"'URL'",
":",
"match",
"=",
"re_hdl",
".",
"match",
"(",
"el",
"[",
"'url_string'",
"]",
")",
"if",
"match",
":",
"el",
"[",
"'type'",
"]",
"=",
"'HDL'",
"el",
"[",
"'hdl_id'",
"]",
"=",
"match",
".",
"group",
"(",
"'hdl_id'",
")",
"del",
"el",
"[",
"'url_desc'",
"]",
"del",
"el",
"[",
"'url_string'",
"]"
] | Looks for handle identifiers that have already been identified as urls
When finding an hdl, creates a new HDL element.
@param citation_elements: (list) elements to process | [
"Looks",
"for",
"handle",
"identifiers",
"that",
"have",
"already",
"been",
"identified",
"as",
"urls"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L612-L625 | train | 236,584 |
inspirehep/refextract | refextract/references/engine.py | parse_reference_line | def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
"""Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects)
"""
# Strip the 'marker' (e.g. [1]) from this reference line:
line_marker, ref_line = remove_reference_line_marker(ref_line)
# Find DOI sections in citation
ref_line, identified_dois = identify_and_tag_DOI(ref_line)
# Identify and replace URLs in the line:
ref_line, identified_urls = identify_and_tag_URLs(ref_line)
# Tag <cds.JOURNAL>, etc.
tagged_line, bad_titles_count = tag_reference_line(ref_line,
kbs,
bad_titles_count)
# Debug print tagging (authors, titles, volumes, etc.)
LOGGER.debug("tags %r", tagged_line)
# Using the recorded information, create a MARC XML representation
# of the rebuilt line:
# At the same time, get stats of citations found in the reference line
# (titles, urls, etc):
citation_elements, line_marker, counts = \
parse_tagged_reference_line(line_marker,
tagged_line,
identified_dois,
identified_urls)
# Transformations on elements
split_volume_from_journal(citation_elements)
format_volume(citation_elements)
handle_special_journals(citation_elements, kbs)
format_report_number(citation_elements)
format_author_ed(citation_elements)
look_for_books(citation_elements, kbs)
format_hep(citation_elements)
remove_b_for_nucl_phys(citation_elements)
mangle_volume(citation_elements)
arxiv_urls_to_report_numbers(citation_elements)
look_for_hdl(citation_elements)
look_for_hdl_urls(citation_elements)
# Link references if desired
if linker_callback:
associate_recids(citation_elements, linker_callback)
# Split the reference in multiple ones if needed
splitted_citations = split_citations(citation_elements)
# Look for implied ibids
look_for_implied_ibids(splitted_citations)
# Find year
add_year_elements(splitted_citations)
# Look for books in misc field
look_for_undetected_books(splitted_citations, kbs)
if linker_callback:
# Link references with the newly added ibids/books information
for citations in splitted_citations:
associate_recids(citations, linker_callback)
# FIXME: Needed?
# Remove references with only misc text
# splitted_citations = remove_invalid_references(splitted_citations)
# Merge references with only misc text
# splitted_citations = merge_invalid_references(splitted_citations)
remove_duplicated_authors(splitted_citations)
remove_duplicated_dois(splitted_citations)
remove_duplicated_collaborations(splitted_citations)
add_recid_elements(splitted_citations)
# For debugging purposes
print_citations(splitted_citations, line_marker)
return splitted_citations, line_marker, counts, bad_titles_count | python | def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
"""Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects)
"""
# Strip the 'marker' (e.g. [1]) from this reference line:
line_marker, ref_line = remove_reference_line_marker(ref_line)
# Find DOI sections in citation
ref_line, identified_dois = identify_and_tag_DOI(ref_line)
# Identify and replace URLs in the line:
ref_line, identified_urls = identify_and_tag_URLs(ref_line)
# Tag <cds.JOURNAL>, etc.
tagged_line, bad_titles_count = tag_reference_line(ref_line,
kbs,
bad_titles_count)
# Debug print tagging (authors, titles, volumes, etc.)
LOGGER.debug("tags %r", tagged_line)
# Using the recorded information, create a MARC XML representation
# of the rebuilt line:
# At the same time, get stats of citations found in the reference line
# (titles, urls, etc):
citation_elements, line_marker, counts = \
parse_tagged_reference_line(line_marker,
tagged_line,
identified_dois,
identified_urls)
# Transformations on elements
split_volume_from_journal(citation_elements)
format_volume(citation_elements)
handle_special_journals(citation_elements, kbs)
format_report_number(citation_elements)
format_author_ed(citation_elements)
look_for_books(citation_elements, kbs)
format_hep(citation_elements)
remove_b_for_nucl_phys(citation_elements)
mangle_volume(citation_elements)
arxiv_urls_to_report_numbers(citation_elements)
look_for_hdl(citation_elements)
look_for_hdl_urls(citation_elements)
# Link references if desired
if linker_callback:
associate_recids(citation_elements, linker_callback)
# Split the reference in multiple ones if needed
splitted_citations = split_citations(citation_elements)
# Look for implied ibids
look_for_implied_ibids(splitted_citations)
# Find year
add_year_elements(splitted_citations)
# Look for books in misc field
look_for_undetected_books(splitted_citations, kbs)
if linker_callback:
# Link references with the newly added ibids/books information
for citations in splitted_citations:
associate_recids(citations, linker_callback)
# FIXME: Needed?
# Remove references with only misc text
# splitted_citations = remove_invalid_references(splitted_citations)
# Merge references with only misc text
# splitted_citations = merge_invalid_references(splitted_citations)
remove_duplicated_authors(splitted_citations)
remove_duplicated_dois(splitted_citations)
remove_duplicated_collaborations(splitted_citations)
add_recid_elements(splitted_citations)
# For debugging purposes
print_citations(splitted_citations, line_marker)
return splitted_citations, line_marker, counts, bad_titles_count | [
"def",
"parse_reference_line",
"(",
"ref_line",
",",
"kbs",
",",
"bad_titles_count",
"=",
"{",
"}",
",",
"linker_callback",
"=",
"None",
")",
":",
"# Strip the 'marker' (e.g. [1]) from this reference line:",
"line_marker",
",",
"ref_line",
"=",
"remove_reference_line_marker",
"(",
"ref_line",
")",
"# Find DOI sections in citation",
"ref_line",
",",
"identified_dois",
"=",
"identify_and_tag_DOI",
"(",
"ref_line",
")",
"# Identify and replace URLs in the line:",
"ref_line",
",",
"identified_urls",
"=",
"identify_and_tag_URLs",
"(",
"ref_line",
")",
"# Tag <cds.JOURNAL>, etc.",
"tagged_line",
",",
"bad_titles_count",
"=",
"tag_reference_line",
"(",
"ref_line",
",",
"kbs",
",",
"bad_titles_count",
")",
"# Debug print tagging (authors, titles, volumes, etc.)",
"LOGGER",
".",
"debug",
"(",
"\"tags %r\"",
",",
"tagged_line",
")",
"# Using the recorded information, create a MARC XML representation",
"# of the rebuilt line:",
"# At the same time, get stats of citations found in the reference line",
"# (titles, urls, etc):",
"citation_elements",
",",
"line_marker",
",",
"counts",
"=",
"parse_tagged_reference_line",
"(",
"line_marker",
",",
"tagged_line",
",",
"identified_dois",
",",
"identified_urls",
")",
"# Transformations on elements",
"split_volume_from_journal",
"(",
"citation_elements",
")",
"format_volume",
"(",
"citation_elements",
")",
"handle_special_journals",
"(",
"citation_elements",
",",
"kbs",
")",
"format_report_number",
"(",
"citation_elements",
")",
"format_author_ed",
"(",
"citation_elements",
")",
"look_for_books",
"(",
"citation_elements",
",",
"kbs",
")",
"format_hep",
"(",
"citation_elements",
")",
"remove_b_for_nucl_phys",
"(",
"citation_elements",
")",
"mangle_volume",
"(",
"citation_elements",
")",
"arxiv_urls_to_report_numbers",
"(",
"citation_elements",
")",
"look_for_hdl",
"(",
"citation_elements",
")",
"look_for_hdl_urls",
"(",
"citation_elements",
")",
"# Link references if desired",
"if",
"linker_callback",
":",
"associate_recids",
"(",
"citation_elements",
",",
"linker_callback",
")",
"# Split the reference in multiple ones if needed",
"splitted_citations",
"=",
"split_citations",
"(",
"citation_elements",
")",
"# Look for implied ibids",
"look_for_implied_ibids",
"(",
"splitted_citations",
")",
"# Find year",
"add_year_elements",
"(",
"splitted_citations",
")",
"# Look for books in misc field",
"look_for_undetected_books",
"(",
"splitted_citations",
",",
"kbs",
")",
"if",
"linker_callback",
":",
"# Link references with the newly added ibids/books information",
"for",
"citations",
"in",
"splitted_citations",
":",
"associate_recids",
"(",
"citations",
",",
"linker_callback",
")",
"# FIXME: Needed?",
"# Remove references with only misc text",
"# splitted_citations = remove_invalid_references(splitted_citations)",
"# Merge references with only misc text",
"# splitted_citations = merge_invalid_references(splitted_citations)",
"remove_duplicated_authors",
"(",
"splitted_citations",
")",
"remove_duplicated_dois",
"(",
"splitted_citations",
")",
"remove_duplicated_collaborations",
"(",
"splitted_citations",
")",
"add_recid_elements",
"(",
"splitted_citations",
")",
"# For debugging purposes",
"print_citations",
"(",
"splitted_citations",
",",
"line_marker",
")",
"return",
"splitted_citations",
",",
"line_marker",
",",
"counts",
",",
"bad_titles_count"
] | Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects) | [
"Parse",
"one",
"reference",
"line"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L639-L716 | train | 236,585 |
inspirehep/refextract | refextract/references/engine.py | search_for_book_in_misc | def search_for_book_in_misc(citation, kbs):
"""Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc.
"""
citation_year = year_from_citation(citation)
for citation_element in citation:
LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt'])
for title in kbs['books']:
startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title)
if startIndex != -1:
line = kbs['books'][title.upper()]
book_year = line[2].strip(';')
book_authors = line[0]
book_found = False
if citation_year == book_year:
# For now consider the citation as valid, we are using
# an exact search, we don't need to check the authors
# However, the code below will be useful if we decide
# to introduce fuzzy matching.
book_found = True
for author in get_possible_author_names(citation):
if find_substring_ignore_special_chars(book_authors, author) != -1:
book_found = True
for author in re.findall('[a-zA-Z]{4,}', book_authors):
if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1:
book_found = True
if book_found:
LOGGER.debug(u"Book found: %s", title)
book_element = {'type': 'BOOK',
'misc_txt': '',
'authors': book_authors,
'title': line[1],
'year': book_year}
citation.append(book_element)
citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex)
# Remove year from misc txt
citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year)
return True
LOGGER.debug("Book not found!")
return False | python | def search_for_book_in_misc(citation, kbs):
"""Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc.
"""
citation_year = year_from_citation(citation)
for citation_element in citation:
LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt'])
for title in kbs['books']:
startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title)
if startIndex != -1:
line = kbs['books'][title.upper()]
book_year = line[2].strip(';')
book_authors = line[0]
book_found = False
if citation_year == book_year:
# For now consider the citation as valid, we are using
# an exact search, we don't need to check the authors
# However, the code below will be useful if we decide
# to introduce fuzzy matching.
book_found = True
for author in get_possible_author_names(citation):
if find_substring_ignore_special_chars(book_authors, author) != -1:
book_found = True
for author in re.findall('[a-zA-Z]{4,}', book_authors):
if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1:
book_found = True
if book_found:
LOGGER.debug(u"Book found: %s", title)
book_element = {'type': 'BOOK',
'misc_txt': '',
'authors': book_authors,
'title': line[1],
'year': book_year}
citation.append(book_element)
citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex)
# Remove year from misc txt
citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year)
return True
LOGGER.debug("Book not found!")
return False | [
"def",
"search_for_book_in_misc",
"(",
"citation",
",",
"kbs",
")",
":",
"citation_year",
"=",
"year_from_citation",
"(",
"citation",
")",
"for",
"citation_element",
"in",
"citation",
":",
"LOGGER",
".",
"debug",
"(",
"u\"Searching for book title in: %s\"",
",",
"citation_element",
"[",
"'misc_txt'",
"]",
")",
"for",
"title",
"in",
"kbs",
"[",
"'books'",
"]",
":",
"startIndex",
"=",
"find_substring_ignore_special_chars",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"title",
")",
"if",
"startIndex",
"!=",
"-",
"1",
":",
"line",
"=",
"kbs",
"[",
"'books'",
"]",
"[",
"title",
".",
"upper",
"(",
")",
"]",
"book_year",
"=",
"line",
"[",
"2",
"]",
".",
"strip",
"(",
"';'",
")",
"book_authors",
"=",
"line",
"[",
"0",
"]",
"book_found",
"=",
"False",
"if",
"citation_year",
"==",
"book_year",
":",
"# For now consider the citation as valid, we are using",
"# an exact search, we don't need to check the authors",
"# However, the code below will be useful if we decide",
"# to introduce fuzzy matching.",
"book_found",
"=",
"True",
"for",
"author",
"in",
"get_possible_author_names",
"(",
"citation",
")",
":",
"if",
"find_substring_ignore_special_chars",
"(",
"book_authors",
",",
"author",
")",
"!=",
"-",
"1",
":",
"book_found",
"=",
"True",
"for",
"author",
"in",
"re",
".",
"findall",
"(",
"'[a-zA-Z]{4,}'",
",",
"book_authors",
")",
":",
"if",
"find_substring_ignore_special_chars",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"author",
")",
"!=",
"-",
"1",
":",
"book_found",
"=",
"True",
"if",
"book_found",
":",
"LOGGER",
".",
"debug",
"(",
"u\"Book found: %s\"",
",",
"title",
")",
"book_element",
"=",
"{",
"'type'",
":",
"'BOOK'",
",",
"'misc_txt'",
":",
"''",
",",
"'authors'",
":",
"book_authors",
",",
"'title'",
":",
"line",
"[",
"1",
"]",
",",
"'year'",
":",
"book_year",
"}",
"citation",
".",
"append",
"(",
"book_element",
")",
"citation_element",
"[",
"'misc_txt'",
"]",
"=",
"cut_substring_with_special_chars",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"title",
",",
"startIndex",
")",
"# Remove year from misc txt",
"citation_element",
"[",
"'misc_txt'",
"]",
"=",
"remove_year",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"book_year",
")",
"return",
"True",
"LOGGER",
".",
"debug",
"(",
"\"Book not found!\"",
")",
"return",
"False"
] | Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc. | [
"Searches",
"for",
"books",
"in",
"the",
"misc_txt",
"field",
"if",
"the",
"citation",
"is",
"not",
"recognized",
"as",
"anything",
"like",
"a",
"journal",
"book",
"etc",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L736-L779 | train | 236,586 |
inspirehep/refextract | refextract/references/engine.py | map_tag_to_subfield | def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest):
"""Create a new reference element"""
closing_tag = '</cds.%s>' % tag_type
# extract the institutional report-number from the line:
idx_closing_tag = line.find(closing_tag)
# Sanity check - did we find a closing tag?
if idx_closing_tag == -1:
# no closing </cds.TAG> tag found - strip the opening tag and move past this
# recognised reportnumber as it is unreliable:
identified_citation_element = None
line = line[len('<cds.%s>' % tag_type):]
else:
tag_content = line[:idx_closing_tag]
identified_citation_element = {'type': tag_type,
'misc_txt': cur_misc_txt,
dest: tag_content}
ending_tag_pos = idx_closing_tag + len(closing_tag)
line = line[ending_tag_pos:]
cur_misc_txt = u""
return identified_citation_element, line, cur_misc_txt | python | def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest):
"""Create a new reference element"""
closing_tag = '</cds.%s>' % tag_type
# extract the institutional report-number from the line:
idx_closing_tag = line.find(closing_tag)
# Sanity check - did we find a closing tag?
if idx_closing_tag == -1:
# no closing </cds.TAG> tag found - strip the opening tag and move past this
# recognised reportnumber as it is unreliable:
identified_citation_element = None
line = line[len('<cds.%s>' % tag_type):]
else:
tag_content = line[:idx_closing_tag]
identified_citation_element = {'type': tag_type,
'misc_txt': cur_misc_txt,
dest: tag_content}
ending_tag_pos = idx_closing_tag + len(closing_tag)
line = line[ending_tag_pos:]
cur_misc_txt = u""
return identified_citation_element, line, cur_misc_txt | [
"def",
"map_tag_to_subfield",
"(",
"tag_type",
",",
"line",
",",
"cur_misc_txt",
",",
"dest",
")",
":",
"closing_tag",
"=",
"'</cds.%s>'",
"%",
"tag_type",
"# extract the institutional report-number from the line:",
"idx_closing_tag",
"=",
"line",
".",
"find",
"(",
"closing_tag",
")",
"# Sanity check - did we find a closing tag?",
"if",
"idx_closing_tag",
"==",
"-",
"1",
":",
"# no closing </cds.TAG> tag found - strip the opening tag and move past this",
"# recognised reportnumber as it is unreliable:",
"identified_citation_element",
"=",
"None",
"line",
"=",
"line",
"[",
"len",
"(",
"'<cds.%s>'",
"%",
"tag_type",
")",
":",
"]",
"else",
":",
"tag_content",
"=",
"line",
"[",
":",
"idx_closing_tag",
"]",
"identified_citation_element",
"=",
"{",
"'type'",
":",
"tag_type",
",",
"'misc_txt'",
":",
"cur_misc_txt",
",",
"dest",
":",
"tag_content",
"}",
"ending_tag_pos",
"=",
"idx_closing_tag",
"+",
"len",
"(",
"closing_tag",
")",
"line",
"=",
"line",
"[",
"ending_tag_pos",
":",
"]",
"cur_misc_txt",
"=",
"u\"\"",
"return",
"identified_citation_element",
",",
"line",
",",
"cur_misc_txt"
] | Create a new reference element | [
"Create",
"a",
"new",
"reference",
"element"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1292-L1312 | train | 236,587 |
inspirehep/refextract | refextract/references/engine.py | remove_leading_garbage_lines_from_reference_section | def remove_leading_garbage_lines_from_reference_section(ref_sectn):
"""Sometimes, the first lines of the extracted references are completely
blank or email addresses. These must be removed as they are not
references.
@param ref_sectn: (list) of strings - the reference section lines
@return: (list) of strings - the reference section without leading
blank lines or email addresses.
"""
p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE)
while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])):
ref_sectn.pop(0)
return ref_sectn | python | def remove_leading_garbage_lines_from_reference_section(ref_sectn):
"""Sometimes, the first lines of the extracted references are completely
blank or email addresses. These must be removed as they are not
references.
@param ref_sectn: (list) of strings - the reference section lines
@return: (list) of strings - the reference section without leading
blank lines or email addresses.
"""
p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE)
while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])):
ref_sectn.pop(0)
return ref_sectn | [
"def",
"remove_leading_garbage_lines_from_reference_section",
"(",
"ref_sectn",
")",
":",
"p_email",
"=",
"re",
".",
"compile",
"(",
"ur'^\\s*e\\-?mail'",
",",
"re",
".",
"UNICODE",
")",
"while",
"ref_sectn",
"and",
"(",
"ref_sectn",
"[",
"0",
"]",
".",
"isspace",
"(",
")",
"or",
"p_email",
".",
"match",
"(",
"ref_sectn",
"[",
"0",
"]",
")",
")",
":",
"ref_sectn",
".",
"pop",
"(",
"0",
")",
"return",
"ref_sectn"
] | Sometimes, the first lines of the extracted references are completely
blank or email addresses. These must be removed as they are not
references.
@param ref_sectn: (list) of strings - the reference section lines
@return: (list) of strings - the reference section without leading
blank lines or email addresses. | [
"Sometimes",
"the",
"first",
"lines",
"of",
"the",
"extracted",
"references",
"are",
"completely",
"blank",
"or",
"email",
"addresses",
".",
"These",
"must",
"be",
"removed",
"as",
"they",
"are",
"not",
"references",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1364-L1375 | train | 236,588 |
inspirehep/refextract | refextract/references/engine.py | get_plaintext_document_body | def get_plaintext_document_body(fpath, keep_layout=False):
"""Given a file-path to a full-text, return a list of unicode strings
whereby each string is a line of the fulltext.
In the case of a plain-text document, this simply means reading the
contents in from the file. In the case of a PDF however,
this means converting the document to plaintext.
It raises UnknownDocumentTypeError if the document is not a PDF or
plain text.
@param fpath: (string) - the path to the fulltext file
@return: (list) of strings - each string being a line in the document.
"""
textbody = []
mime_type = magic.from_file(fpath, mime=True)
if mime_type == "text/plain":
with open(fpath, "r") as f:
textbody = [line.decode("utf-8") for line in f.readlines()]
elif mime_type == "application/pdf":
textbody = convert_PDF_to_plaintext(fpath, keep_layout)
else:
raise UnknownDocumentTypeError(mime_type)
return textbody | python | def get_plaintext_document_body(fpath, keep_layout=False):
"""Given a file-path to a full-text, return a list of unicode strings
whereby each string is a line of the fulltext.
In the case of a plain-text document, this simply means reading the
contents in from the file. In the case of a PDF however,
this means converting the document to plaintext.
It raises UnknownDocumentTypeError if the document is not a PDF or
plain text.
@param fpath: (string) - the path to the fulltext file
@return: (list) of strings - each string being a line in the document.
"""
textbody = []
mime_type = magic.from_file(fpath, mime=True)
if mime_type == "text/plain":
with open(fpath, "r") as f:
textbody = [line.decode("utf-8") for line in f.readlines()]
elif mime_type == "application/pdf":
textbody = convert_PDF_to_plaintext(fpath, keep_layout)
else:
raise UnknownDocumentTypeError(mime_type)
return textbody | [
"def",
"get_plaintext_document_body",
"(",
"fpath",
",",
"keep_layout",
"=",
"False",
")",
":",
"textbody",
"=",
"[",
"]",
"mime_type",
"=",
"magic",
".",
"from_file",
"(",
"fpath",
",",
"mime",
"=",
"True",
")",
"if",
"mime_type",
"==",
"\"text/plain\"",
":",
"with",
"open",
"(",
"fpath",
",",
"\"r\"",
")",
"as",
"f",
":",
"textbody",
"=",
"[",
"line",
".",
"decode",
"(",
"\"utf-8\"",
")",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
"]",
"elif",
"mime_type",
"==",
"\"application/pdf\"",
":",
"textbody",
"=",
"convert_PDF_to_plaintext",
"(",
"fpath",
",",
"keep_layout",
")",
"else",
":",
"raise",
"UnknownDocumentTypeError",
"(",
"mime_type",
")",
"return",
"textbody"
] | Given a file-path to a full-text, return a list of unicode strings
whereby each string is a line of the fulltext.
In the case of a plain-text document, this simply means reading the
contents in from the file. In the case of a PDF however,
this means converting the document to plaintext.
It raises UnknownDocumentTypeError if the document is not a PDF or
plain text.
@param fpath: (string) - the path to the fulltext file
@return: (list) of strings - each string being a line in the document. | [
"Given",
"a",
"file",
"-",
"path",
"to",
"a",
"full",
"-",
"text",
"return",
"a",
"list",
"of",
"unicode",
"strings",
"whereby",
"each",
"string",
"is",
"a",
"line",
"of",
"the",
"fulltext",
".",
"In",
"the",
"case",
"of",
"a",
"plain",
"-",
"text",
"document",
"this",
"simply",
"means",
"reading",
"the",
"contents",
"in",
"from",
"the",
"file",
".",
"In",
"the",
"case",
"of",
"a",
"PDF",
"however",
"this",
"means",
"converting",
"the",
"document",
"to",
"plaintext",
".",
"It",
"raises",
"UnknownDocumentTypeError",
"if",
"the",
"document",
"is",
"not",
"a",
"PDF",
"or",
"plain",
"text",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1384-L1408 | train | 236,589 |
inspirehep/refextract | refextract/references/engine.py | parse_references | def parse_references(reference_lines,
recid=None,
override_kbs_files=None,
reference_format=u"{title} {volume} ({year}) {page}",
linker_callback=None):
"""Parse a list of references
Given a list of raw reference lines (list of strings),
output a list of dictionaries containing the parsed references
"""
# RefExtract knowledge bases
kbs = get_kbs(custom_kbs_files=override_kbs_files)
# Identify journal titles, report numbers, URLs, DOIs, and authors...
processed_references, counts, dummy_bad_titles_count = \
parse_references_elements(reference_lines, kbs, linker_callback)
return (build_references(processed_references, reference_format),
build_stats(counts)) | python | def parse_references(reference_lines,
recid=None,
override_kbs_files=None,
reference_format=u"{title} {volume} ({year}) {page}",
linker_callback=None):
"""Parse a list of references
Given a list of raw reference lines (list of strings),
output a list of dictionaries containing the parsed references
"""
# RefExtract knowledge bases
kbs = get_kbs(custom_kbs_files=override_kbs_files)
# Identify journal titles, report numbers, URLs, DOIs, and authors...
processed_references, counts, dummy_bad_titles_count = \
parse_references_elements(reference_lines, kbs, linker_callback)
return (build_references(processed_references, reference_format),
build_stats(counts)) | [
"def",
"parse_references",
"(",
"reference_lines",
",",
"recid",
"=",
"None",
",",
"override_kbs_files",
"=",
"None",
",",
"reference_format",
"=",
"u\"{title} {volume} ({year}) {page}\"",
",",
"linker_callback",
"=",
"None",
")",
":",
"# RefExtract knowledge bases",
"kbs",
"=",
"get_kbs",
"(",
"custom_kbs_files",
"=",
"override_kbs_files",
")",
"# Identify journal titles, report numbers, URLs, DOIs, and authors...",
"processed_references",
",",
"counts",
",",
"dummy_bad_titles_count",
"=",
"parse_references_elements",
"(",
"reference_lines",
",",
"kbs",
",",
"linker_callback",
")",
"return",
"(",
"build_references",
"(",
"processed_references",
",",
"reference_format",
")",
",",
"build_stats",
"(",
"counts",
")",
")"
] | Parse a list of references
Given a list of raw reference lines (list of strings),
output a list of dictionaries containing the parsed references | [
"Parse",
"a",
"list",
"of",
"references"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1411-L1428 | train | 236,590 |
inspirehep/refextract | refextract/references/engine.py | build_stats | def build_stats(counts):
"""Return stats information from counts structure."""
stats = {
'status': 0,
'reportnum': counts['reportnum'],
'title': counts['title'],
'author': counts['auth_group'],
'url': counts['url'],
'doi': counts['doi'],
'misc': counts['misc'],
}
stats_str = "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats
stats["old_stats_str"] = stats_str
stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
stats["version"] = version
return stats | python | def build_stats(counts):
"""Return stats information from counts structure."""
stats = {
'status': 0,
'reportnum': counts['reportnum'],
'title': counts['title'],
'author': counts['auth_group'],
'url': counts['url'],
'doi': counts['doi'],
'misc': counts['misc'],
}
stats_str = "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s" % stats
stats["old_stats_str"] = stats_str
stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
stats["version"] = version
return stats | [
"def",
"build_stats",
"(",
"counts",
")",
":",
"stats",
"=",
"{",
"'status'",
":",
"0",
",",
"'reportnum'",
":",
"counts",
"[",
"'reportnum'",
"]",
",",
"'title'",
":",
"counts",
"[",
"'title'",
"]",
",",
"'author'",
":",
"counts",
"[",
"'auth_group'",
"]",
",",
"'url'",
":",
"counts",
"[",
"'url'",
"]",
",",
"'doi'",
":",
"counts",
"[",
"'doi'",
"]",
",",
"'misc'",
":",
"counts",
"[",
"'misc'",
"]",
",",
"}",
"stats_str",
"=",
"\"%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s\"",
"%",
"stats",
"stats",
"[",
"\"old_stats_str\"",
"]",
"=",
"stats_str",
"stats",
"[",
"\"date\"",
"]",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"stats",
"[",
"\"version\"",
"]",
"=",
"version",
"return",
"stats"
] | Return stats information from counts structure. | [
"Return",
"stats",
"information",
"from",
"counts",
"structure",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1431-L1446 | train | 236,591 |
inspirehep/refextract | refextract/documents/pdf.py | replace_undesirable_characters | def replace_undesirable_characters(line):
"""
Replace certain bad characters in a text line.
@param line: (string) the text line in which bad characters are to
be replaced.
@return: (string) the text line after the bad characters have been
replaced.
"""
# These are separate because we want a particular order
for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS:
line = line.replace(bad_string, replacement)
for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS):
line = line.replace(bad_char, replacement)
return line | python | def replace_undesirable_characters(line):
"""
Replace certain bad characters in a text line.
@param line: (string) the text line in which bad characters are to
be replaced.
@return: (string) the text line after the bad characters have been
replaced.
"""
# These are separate because we want a particular order
for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS:
line = line.replace(bad_string, replacement)
for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS):
line = line.replace(bad_char, replacement)
return line | [
"def",
"replace_undesirable_characters",
"(",
"line",
")",
":",
"# These are separate because we want a particular order",
"for",
"bad_string",
",",
"replacement",
"in",
"UNDESIRABLE_STRING_REPLACEMENTS",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"bad_string",
",",
"replacement",
")",
"for",
"bad_char",
",",
"replacement",
"in",
"iteritems",
"(",
"UNDESIRABLE_CHAR_REPLACEMENTS",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"bad_char",
",",
"replacement",
")",
"return",
"line"
] | Replace certain bad characters in a text line.
@param line: (string) the text line in which bad characters are to
be replaced.
@return: (string) the text line after the bad characters have been
replaced. | [
"Replace",
"certain",
"bad",
"characters",
"in",
"a",
"text",
"line",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/pdf.py#L434-L449 | train | 236,592 |
inspirehep/refextract | refextract/documents/pdf.py | convert_PDF_to_plaintext | def convert_PDF_to_plaintext(fpath, keep_layout=False):
""" Convert PDF to txt using pdftotext
Take the path to a PDF file and run pdftotext for this file, capturing
the output.
@param fpath: (string) path to the PDF file
@return: (list) of unicode strings (contents of the PDF file translated
into plaintext; each string is a line in the document.)
"""
if not os.path.isfile(CFG_PATH_PDFTOTEXT):
raise IOError('Missing pdftotext executable')
if keep_layout:
layout_option = "-layout"
else:
layout_option = "-raw"
doclines = []
# Pattern to check for lines with a leading page-break character.
# If this pattern is matched, we want to split the page-break into
# its own line because we rely upon this for trying to strip headers
# and footers, and for some other pattern matching.
p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE)
# build pdftotext command:
cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q",
"-enc", "UTF-8", fpath, "-"]
LOGGER.debug(u"%s", ' '.join(cmd_pdftotext))
# open pipe to pdftotext:
pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE)
# read back results:
for docline in pipe_pdftotext.stdout:
unicodeline = docline.decode("utf-8")
# Check for a page-break in this line:
m_break_in_line = p_break_in_line.match(unicodeline)
if m_break_in_line is None:
# There was no page-break in this line. Just add the line:
doclines.append(unicodeline)
else:
# If there was a page-break character in the same line as some
# text, split it out into its own line so that we can later
# try to find headers and footers:
doclines.append(u"\f")
doclines.append(m_break_in_line.group(1))
LOGGER.debug(u"convert_PDF_to_plaintext found: %s lines of text", len(doclines))
return doclines | python | def convert_PDF_to_plaintext(fpath, keep_layout=False):
""" Convert PDF to txt using pdftotext
Take the path to a PDF file and run pdftotext for this file, capturing
the output.
@param fpath: (string) path to the PDF file
@return: (list) of unicode strings (contents of the PDF file translated
into plaintext; each string is a line in the document.)
"""
if not os.path.isfile(CFG_PATH_PDFTOTEXT):
raise IOError('Missing pdftotext executable')
if keep_layout:
layout_option = "-layout"
else:
layout_option = "-raw"
doclines = []
# Pattern to check for lines with a leading page-break character.
# If this pattern is matched, we want to split the page-break into
# its own line because we rely upon this for trying to strip headers
# and footers, and for some other pattern matching.
p_break_in_line = re.compile(ur'^\s*\f(.+)$', re.UNICODE)
# build pdftotext command:
cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, "-q",
"-enc", "UTF-8", fpath, "-"]
LOGGER.debug(u"%s", ' '.join(cmd_pdftotext))
# open pipe to pdftotext:
pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE)
# read back results:
for docline in pipe_pdftotext.stdout:
unicodeline = docline.decode("utf-8")
# Check for a page-break in this line:
m_break_in_line = p_break_in_line.match(unicodeline)
if m_break_in_line is None:
# There was no page-break in this line. Just add the line:
doclines.append(unicodeline)
else:
# If there was a page-break character in the same line as some
# text, split it out into its own line so that we can later
# try to find headers and footers:
doclines.append(u"\f")
doclines.append(m_break_in_line.group(1))
LOGGER.debug(u"convert_PDF_to_plaintext found: %s lines of text", len(doclines))
return doclines | [
"def",
"convert_PDF_to_plaintext",
"(",
"fpath",
",",
"keep_layout",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"CFG_PATH_PDFTOTEXT",
")",
":",
"raise",
"IOError",
"(",
"'Missing pdftotext executable'",
")",
"if",
"keep_layout",
":",
"layout_option",
"=",
"\"-layout\"",
"else",
":",
"layout_option",
"=",
"\"-raw\"",
"doclines",
"=",
"[",
"]",
"# Pattern to check for lines with a leading page-break character.",
"# If this pattern is matched, we want to split the page-break into",
"# its own line because we rely upon this for trying to strip headers",
"# and footers, and for some other pattern matching.",
"p_break_in_line",
"=",
"re",
".",
"compile",
"(",
"ur'^\\s*\\f(.+)$'",
",",
"re",
".",
"UNICODE",
")",
"# build pdftotext command:",
"cmd_pdftotext",
"=",
"[",
"CFG_PATH_PDFTOTEXT",
",",
"layout_option",
",",
"\"-q\"",
",",
"\"-enc\"",
",",
"\"UTF-8\"",
",",
"fpath",
",",
"\"-\"",
"]",
"LOGGER",
".",
"debug",
"(",
"u\"%s\"",
",",
"' '",
".",
"join",
"(",
"cmd_pdftotext",
")",
")",
"# open pipe to pdftotext:",
"pipe_pdftotext",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd_pdftotext",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"# read back results:",
"for",
"docline",
"in",
"pipe_pdftotext",
".",
"stdout",
":",
"unicodeline",
"=",
"docline",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# Check for a page-break in this line:",
"m_break_in_line",
"=",
"p_break_in_line",
".",
"match",
"(",
"unicodeline",
")",
"if",
"m_break_in_line",
"is",
"None",
":",
"# There was no page-break in this line. Just add the line:",
"doclines",
".",
"append",
"(",
"unicodeline",
")",
"else",
":",
"# If there was a page-break character in the same line as some",
"# text, split it out into its own line so that we can later",
"# try to find headers and footers:",
"doclines",
".",
"append",
"(",
"u\"\\f\"",
")",
"doclines",
".",
"append",
"(",
"m_break_in_line",
".",
"group",
"(",
"1",
")",
")",
"LOGGER",
".",
"debug",
"(",
"u\"convert_PDF_to_plaintext found: %s lines of text\"",
",",
"len",
"(",
"doclines",
")",
")",
"return",
"doclines"
] | Convert PDF to txt using pdftotext
Take the path to a PDF file and run pdftotext for this file, capturing
the output.
@param fpath: (string) path to the PDF file
@return: (list) of unicode strings (contents of the PDF file translated
into plaintext; each string is a line in the document.) | [
"Convert",
"PDF",
"to",
"txt",
"using",
"pdftotext"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/pdf.py#L452-L499 | train | 236,593 |
inspirehep/refextract | refextract/authors/regexs.py | get_author_affiliation_numeration_str | def get_author_affiliation_numeration_str(punct=None):
"""The numeration which can be applied to author names. Numeration
is sometimes found next to authors of papers.
@return: (string), which can be compiled into a regex; identifies
numeration next to an author name.
"""
# FIXME cater for start or end numeration (ie two puncs)
# Number to look for, either general or specific
re_number = r'(?:\d\d?)'
re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number
# Punctuation surrounding the number, either general or specific again
if punct is None:
re_punct = r"(?:[\{\(\[]?)"
else:
re_punct = re.escape(punct)
# Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!)
numeration_str = r"""
(?:\s*(%(punct)s)\s* ## Left numeration punctuation
(%(num)s\s* ## Core numeration item, either specific or generic
%(num_chain)s ## Extra numeration, either generic or empty
)
(?:(%(punct)s)) ## Right numeration punctuation
)""" % {'num': re_number,
'num_chain': re_chained_numbers,
'punct': re_punct}
return numeration_str | python | def get_author_affiliation_numeration_str(punct=None):
"""The numeration which can be applied to author names. Numeration
is sometimes found next to authors of papers.
@return: (string), which can be compiled into a regex; identifies
numeration next to an author name.
"""
# FIXME cater for start or end numeration (ie two puncs)
# Number to look for, either general or specific
re_number = r'(?:\d\d?)'
re_chained_numbers = r"(?:(?:[,;]\s*%s\.?\s*))*" % re_number
# Punctuation surrounding the number, either general or specific again
if punct is None:
re_punct = r"(?:[\{\(\[]?)"
else:
re_punct = re.escape(punct)
# Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!)
numeration_str = r"""
(?:\s*(%(punct)s)\s* ## Left numeration punctuation
(%(num)s\s* ## Core numeration item, either specific or generic
%(num_chain)s ## Extra numeration, either generic or empty
)
(?:(%(punct)s)) ## Right numeration punctuation
)""" % {'num': re_number,
'num_chain': re_chained_numbers,
'punct': re_punct}
return numeration_str | [
"def",
"get_author_affiliation_numeration_str",
"(",
"punct",
"=",
"None",
")",
":",
"# FIXME cater for start or end numeration (ie two puncs)",
"# Number to look for, either general or specific",
"re_number",
"=",
"r'(?:\\d\\d?)'",
"re_chained_numbers",
"=",
"r\"(?:(?:[,;]\\s*%s\\.?\\s*))*\"",
"%",
"re_number",
"# Punctuation surrounding the number, either general or specific again",
"if",
"punct",
"is",
"None",
":",
"re_punct",
"=",
"r\"(?:[\\{\\(\\[]?)\"",
"else",
":",
"re_punct",
"=",
"re",
".",
"escape",
"(",
"punct",
")",
"# Generic number finder (MUST NOT INCLUDE NAMED GROUPS!!!)",
"numeration_str",
"=",
"r\"\"\"\n (?:\\s*(%(punct)s)\\s* ## Left numeration punctuation\n (%(num)s\\s* ## Core numeration item, either specific or generic\n %(num_chain)s ## Extra numeration, either generic or empty\n )\n (?:(%(punct)s)) ## Right numeration punctuation\n )\"\"\"",
"%",
"{",
"'num'",
":",
"re_number",
",",
"'num_chain'",
":",
"re_chained_numbers",
",",
"'punct'",
":",
"re_punct",
"}",
"return",
"numeration_str"
] | The numeration which can be applied to author names. Numeration
is sometimes found next to authors of papers.
@return: (string), which can be compiled into a regex; identifies
numeration next to an author name. | [
"The",
"numeration",
"which",
"can",
"be",
"applied",
"to",
"author",
"names",
".",
"Numeration",
"is",
"sometimes",
"found",
"next",
"to",
"authors",
"of",
"papers",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L36-L64 | train | 236,594 |
inspirehep/refextract | refextract/authors/regexs.py | make_auth_regex_str | def make_auth_regex_str(etal, initial_surname_author=None, surname_initial_author=None):
"""
Returns a regular expression to be used to identify groups of author names in a citation.
This method contains patterns for default authors, so no arguments are needed for the
most reliable form of matching.
The returned author pattern is capable of:
1. Identifying single authors, with at least one initial, of the form:
'Initial. [surname prefix...] Surname'
2. Identifying multiple authors, each with at least one initial, of the form:
'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]'
***(Note that a full stop, hyphen or apostrophe after each initial is
absolutely vital in identifying authors for both of these above methods.
Initials must also be uppercase.)***
3. Capture 'et al' statements at the end of author groups (allows for authors with et al
to be processed differently from 'standard' authors)
4. Identifying a single author surname name positioned before the phrase 'et al',
with no initials: 'Surname et al'
5. Identifying two author surname name positioned before the phrase 'et al',
with no initials, but separated by 'and' or '&': 'Surname [and|&] Surname et al'
6. Identifying authors of the form:
'Surname Initials, Initials Surname [Initials Surname]...'. Some authors choose
to represent the most important cited author (in a list of authors) by listing first
their surname, and then their initials. Since this form has little distinguishing
characteristics which could be used to create a reliable a pattern, at least one
standard author must be present after it in order to improve the accuracy.
7. Capture editor notation, of which can take many forms e.g.
'eds. editors. edited by. etc.'. Authors captured in this way can be treated as
'editor groups', and hence processed differently if needed from standard authors
@param etal: (string) The regular expression used to identify 'etal' notation
@param author: (string) An optional argument, which replaces the default author
regex used to identify author groups (initials, surnames... etc)
@return: (string) The full author group identification regex, which will:
- detect groups of authors in a range of formats, e.g.:
C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al.,
- detect whether the author group has been marked up as editors of the doc.
(therefore they will NOT be marked up as authors) e.g.:
ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards
| L. Kelloggs (editors) | M. Jackson (eds.) | ...
-detect a maximum of two surnames only if the surname(s) is followed by 'et al'
(must be separated by 'and' if there are two), e.g.:
Amaldi et al., | Hayward and Yellow et al.,
"""
if not initial_surname_author:
# Standard author, with a maximum of 6 initials, and a surname.
# The Initials MUST be uppercase, and MUST have at least a dot, hypen
# or apostrophe between them.
initial_surname_author = get_initial_surname_author_pattern()
if not surname_initial_author:
# The author name of the form: 'surname initial(s)'
# This is sometimes the represention of the first author found inside an author group.
# This author pattern is only used to find a maximum of ONE author inside an author group.
# Authors of this form MUST have either a comma after the initials, or an 'and',
# which denotes the presence of other authors in the author group.
surname_initial_author = get_surname_initial_author_pattern()
# Pattern used to locate a GROUP of author names in a reference
# The format of an author can take many forms:
# J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al.,
# (the use of 'et. al' is a giveaway that the preceeding
# text was indeed an author name)
# This will also match authors which seem to be labeled as editors (with the phrase 'ed.')
# In which case, the author will be thrown away later on.
# The regex returned has around 100 named groups already (max), so any new groups must be
# started using '?:'
return ur"""
(?:^|\s+|\() ## Must be the start of the line, or a space (or an opening bracket in very few cases)
(?P<es> ## Look for editor notation before the author
(?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\.\s?)|(?:\.?\s))) ## 'eds?. ' | 'ed ' | 'ed.'
|(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\.\s?)|(?:\.?\s))by(?:\s|([:,]\s))) ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '
|(?:\(\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\.\s?)|(?:\.?\s))?\))) ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'
)?
## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)
(?P<author_names>
(?:
(?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)? ## Initials
[A-Z][^0-9_\.\s]{2,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+)) ## Surname
(?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)? ## Initials
(?P<multi_surs>
(?:(?:[Aa][Nn][Dd]|\&)\s+) ## Maybe 'and' or '&' tied with another name
[A-Z][^0-9_\.\s]{3,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+)) ## More surnames
(?:[A-Z](?:[ -][A-Z])?\s+)? ## with initials
)?
(?: # Look for editor notation after the author group...
\s*[,\s]?\s* # Eventually a coma/space
%(ed)s
)?
(?P<et2>
%(etal)s ## et al, MUST BE PRESENT however, for this author form
)
(?: # Look for editor notation after the author group...
\s*[,\s]?\s* # Eventually a coma/space
%(ed)s
)?
) |
(?:
## **** (2) , The standard author form.. (e.g. J. Bloggs)
## This author form can either start with a normal 'initial surname' author,
## or it can begin with a single 'surname initial' author
(?: ## The first author in the 'author group'
%(i_s_author)s |
(?P<sur_initial_auth>%(s_i_author)s)
)
(?P<multi_auth>
(?: ## Then 0 or more author names
\s*[,\s]\s*
(?:
%(i_s_author)s | %(s_i_author)s
)
)*
(?: ## Maybe 'and' or '&' tied with another name
(?:
\s*[,\s]\s* ## handle "J. Dan, and H. Pon"
(?:[Aa][Nn][DdsS]|\&)
\s+
)
(?P<mult_auth_sub>
%(i_s_author)s | %(s_i_author)s
)
)?
)
(?P<et> # 'et al' need not be present for either of
\s*[,\s]\s*
%(etal)s # 'initial surname' or 'surname initial' authors
)?
)
)
(?P<ee>
\s*[,\s]\s*
\(?
(?:[Ee][Dd]s|[Ee]ditors)\.?
\)?
[\.\,]{0,2}
)?
# End of all author name patterns
\)? # A possible closing bracket to finish the author group
(?=[\s,.;:]) # Consolidate by checking we are not partially matching
# something else
""" % {'etal': etal,
'i_s_author': initial_surname_author,
's_i_author': surname_initial_author,
'ed': re_ed_notation} | python | def make_auth_regex_str(etal, initial_surname_author=None, surname_initial_author=None):
"""
Returns a regular expression to be used to identify groups of author names in a citation.
This method contains patterns for default authors, so no arguments are needed for the
most reliable form of matching.
The returned author pattern is capable of:
1. Identifying single authors, with at least one initial, of the form:
'Initial. [surname prefix...] Surname'
2. Identifying multiple authors, each with at least one initial, of the form:
'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]'
***(Note that a full stop, hyphen or apostrophe after each initial is
absolutely vital in identifying authors for both of these above methods.
Initials must also be uppercase.)***
3. Capture 'et al' statements at the end of author groups (allows for authors with et al
to be processed differently from 'standard' authors)
4. Identifying a single author surname name positioned before the phrase 'et al',
with no initials: 'Surname et al'
5. Identifying two author surname name positioned before the phrase 'et al',
with no initials, but separated by 'and' or '&': 'Surname [and|&] Surname et al'
6. Identifying authors of the form:
'Surname Initials, Initials Surname [Initials Surname]...'. Some authors choose
to represent the most important cited author (in a list of authors) by listing first
their surname, and then their initials. Since this form has little distinguishing
characteristics which could be used to create a reliable a pattern, at least one
standard author must be present after it in order to improve the accuracy.
7. Capture editor notation, of which can take many forms e.g.
'eds. editors. edited by. etc.'. Authors captured in this way can be treated as
'editor groups', and hence processed differently if needed from standard authors
@param etal: (string) The regular expression used to identify 'etal' notation
@param author: (string) An optional argument, which replaces the default author
regex used to identify author groups (initials, surnames... etc)
@return: (string) The full author group identification regex, which will:
- detect groups of authors in a range of formats, e.g.:
C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al.,
- detect whether the author group has been marked up as editors of the doc.
(therefore they will NOT be marked up as authors) e.g.:
ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards
| L. Kelloggs (editors) | M. Jackson (eds.) | ...
-detect a maximum of two surnames only if the surname(s) is followed by 'et al'
(must be separated by 'and' if there are two), e.g.:
Amaldi et al., | Hayward and Yellow et al.,
"""
if not initial_surname_author:
# Standard author, with a maximum of 6 initials, and a surname.
# The Initials MUST be uppercase, and MUST have at least a dot, hypen
# or apostrophe between them.
initial_surname_author = get_initial_surname_author_pattern()
if not surname_initial_author:
# The author name of the form: 'surname initial(s)'
# This is sometimes the represention of the first author found inside an author group.
# This author pattern is only used to find a maximum of ONE author inside an author group.
# Authors of this form MUST have either a comma after the initials, or an 'and',
# which denotes the presence of other authors in the author group.
surname_initial_author = get_surname_initial_author_pattern()
# Pattern used to locate a GROUP of author names in a reference
# The format of an author can take many forms:
# J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al.,
# (the use of 'et. al' is a giveaway that the preceeding
# text was indeed an author name)
# This will also match authors which seem to be labeled as editors (with the phrase 'ed.')
# In which case, the author will be thrown away later on.
# The regex returned has around 100 named groups already (max), so any new groups must be
# started using '?:'
return ur"""
(?:^|\s+|\() ## Must be the start of the line, or a space (or an opening bracket in very few cases)
(?P<es> ## Look for editor notation before the author
(?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\.\s?)|(?:\.?\s))) ## 'eds?. ' | 'ed ' | 'ed.'
|(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\.\s?)|(?:\.?\s))by(?:\s|([:,]\s))) ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '
|(?:\(\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\.\s?)|(?:\.?\s))?\))) ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'
)?
## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)
(?P<author_names>
(?:
(?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)? ## Initials
[A-Z][^0-9_\.\s]{2,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+)) ## Surname
(?:[A-Z](?:\s*[.'’-]{1,2}\s*[A-Z]){0,4}[.\s]\s*)? ## Initials
(?P<multi_surs>
(?:(?:[Aa][Nn][Dd]|\&)\s+) ## Maybe 'and' or '&' tied with another name
[A-Z][^0-9_\.\s]{3,20}(?:(?:[,\.]\s*)|(?:[,\.]?\s+)) ## More surnames
(?:[A-Z](?:[ -][A-Z])?\s+)? ## with initials
)?
(?: # Look for editor notation after the author group...
\s*[,\s]?\s* # Eventually a coma/space
%(ed)s
)?
(?P<et2>
%(etal)s ## et al, MUST BE PRESENT however, for this author form
)
(?: # Look for editor notation after the author group...
\s*[,\s]?\s* # Eventually a coma/space
%(ed)s
)?
) |
(?:
## **** (2) , The standard author form.. (e.g. J. Bloggs)
## This author form can either start with a normal 'initial surname' author,
## or it can begin with a single 'surname initial' author
(?: ## The first author in the 'author group'
%(i_s_author)s |
(?P<sur_initial_auth>%(s_i_author)s)
)
(?P<multi_auth>
(?: ## Then 0 or more author names
\s*[,\s]\s*
(?:
%(i_s_author)s | %(s_i_author)s
)
)*
(?: ## Maybe 'and' or '&' tied with another name
(?:
\s*[,\s]\s* ## handle "J. Dan, and H. Pon"
(?:[Aa][Nn][DdsS]|\&)
\s+
)
(?P<mult_auth_sub>
%(i_s_author)s | %(s_i_author)s
)
)?
)
(?P<et> # 'et al' need not be present for either of
\s*[,\s]\s*
%(etal)s # 'initial surname' or 'surname initial' authors
)?
)
)
(?P<ee>
\s*[,\s]\s*
\(?
(?:[Ee][Dd]s|[Ee]ditors)\.?
\)?
[\.\,]{0,2}
)?
# End of all author name patterns
\)? # A possible closing bracket to finish the author group
(?=[\s,.;:]) # Consolidate by checking we are not partially matching
# something else
""" % {'etal': etal,
'i_s_author': initial_surname_author,
's_i_author': surname_initial_author,
'ed': re_ed_notation} | [
"def",
"make_auth_regex_str",
"(",
"etal",
",",
"initial_surname_author",
"=",
"None",
",",
"surname_initial_author",
"=",
"None",
")",
":",
"if",
"not",
"initial_surname_author",
":",
"# Standard author, with a maximum of 6 initials, and a surname.",
"# The Initials MUST be uppercase, and MUST have at least a dot, hypen",
"# or apostrophe between them.",
"initial_surname_author",
"=",
"get_initial_surname_author_pattern",
"(",
")",
"if",
"not",
"surname_initial_author",
":",
"# The author name of the form: 'surname initial(s)'",
"# This is sometimes the represention of the first author found inside an author group.",
"# This author pattern is only used to find a maximum of ONE author inside an author group.",
"# Authors of this form MUST have either a comma after the initials, or an 'and',",
"# which denotes the presence of other authors in the author group.",
"surname_initial_author",
"=",
"get_surname_initial_author_pattern",
"(",
")",
"# Pattern used to locate a GROUP of author names in a reference",
"# The format of an author can take many forms:",
"# J. Bloggs, W.-H. Smith, D. De Samuel, G.L. Bayetian, C. Hayward et al.,",
"# (the use of 'et. al' is a giveaway that the preceeding",
"# text was indeed an author name)",
"# This will also match authors which seem to be labeled as editors (with the phrase 'ed.')",
"# In which case, the author will be thrown away later on.",
"# The regex returned has around 100 named groups already (max), so any new groups must be",
"# started using '?:'",
"return",
"ur\"\"\"\n (?:^|\\s+|\\() ## Must be the start of the line, or a space (or an opening bracket in very few cases)\n (?P<es> ## Look for editor notation before the author\n (?:(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditors?)((?:\\.\\s?)|(?:\\.?\\s))) ## 'eds?. ' | 'ed ' | 'ed.'\n |(?:(?:[Ee][Dd]s?|[Ee]dited|[Ee]ditions?)(?:(?:\\.\\s?)|(?:\\.?\\s))by(?:\\s|([:,]\\s))) ## 'eds?. by, ' | 'ed. by: ' | 'ed by ' | 'ed. by '| 'ed by: '\n |(?:\\(\\s?([Ee][Dd]s?|[Ee]dited|[Ee]ditors?)(?:(?:\\.\\s?)|(?:\\.?\\s))?\\))) ## '( eds?. )' | '(ed.)' | '(ed )' | '( ed )' | '(ed)'\n )?\n\n ## **** (1) , one or two surnames which MUST end with 'et al' (e.g. Amaldi et al.,)\n (?P<author_names>\n (?:\n (?:[A-Z](?:\\s*[.'’-]{1,2}\\s*[A-Z]){0,4}[.\\s]\\s*)? ## Initials\n [A-Z][^0-9_\\.\\s]{2,20}(?:(?:[,\\.]\\s*)|(?:[,\\.]?\\s+)) ## Surname\n (?:[A-Z](?:\\s*[.'’-]{1,2}\\s*[A-Z]){0,4}[.\\s]\\s*)? ## Initials\n (?P<multi_surs>\n (?:(?:[Aa][Nn][Dd]|\\&)\\s+) ## Maybe 'and' or '&' tied with another name\n [A-Z][^0-9_\\.\\s]{3,20}(?:(?:[,\\.]\\s*)|(?:[,\\.]?\\s+)) ## More surnames\n (?:[A-Z](?:[ -][A-Z])?\\s+)? ## with initials\n )?\n (?: # Look for editor notation after the author group...\n \\s*[,\\s]?\\s* # Eventually a coma/space\n %(ed)s\n )?\n (?P<et2>\n %(etal)s ## et al, MUST BE PRESENT however, for this author form\n )\n (?: # Look for editor notation after the author group...\n \\s*[,\\s]?\\s* # Eventually a coma/space\n %(ed)s\n )?\n ) |\n\n (?:\n ## **** (2) , The standard author form.. (e.g. J. Bloggs)\n ## This author form can either start with a normal 'initial surname' author,\n ## or it can begin with a single 'surname initial' author\n\n (?: ## The first author in the 'author group'\n %(i_s_author)s |\n (?P<sur_initial_auth>%(s_i_author)s)\n )\n\n (?P<multi_auth>\n (?: ## Then 0 or more author names\n \\s*[,\\s]\\s*\n (?:\n %(i_s_author)s | %(s_i_author)s\n )\n )*\n\n (?: ## Maybe 'and' or '&' tied with another name\n (?:\n \\s*[,\\s]\\s* ## handle \"J. Dan, and H. 
Pon\"\n (?:[Aa][Nn][DdsS]|\\&)\n \\s+\n )\n (?P<mult_auth_sub>\n %(i_s_author)s | %(s_i_author)s\n )\n )?\n )\n (?P<et> # 'et al' need not be present for either of\n \\s*[,\\s]\\s*\n %(etal)s # 'initial surname' or 'surname initial' authors\n )?\n )\n )\n (?P<ee>\n \\s*[,\\s]\\s*\n \\(?\n (?:[Ee][Dd]s|[Ee]ditors)\\.?\n \\)?\n [\\.\\,]{0,2}\n )?\n # End of all author name patterns\n\n \\)? # A possible closing bracket to finish the author group\n (?=[\\s,.;:]) # Consolidate by checking we are not partially matching\n # something else\n\n \"\"\"",
"%",
"{",
"'etal'",
":",
"etal",
",",
"'i_s_author'",
":",
"initial_surname_author",
",",
"'s_i_author'",
":",
"surname_initial_author",
",",
"'ed'",
":",
"re_ed_notation",
"}"
] | Returns a regular expression to be used to identify groups of author names in a citation.
This method contains patterns for default authors, so no arguments are needed for the
most reliable form of matching.
The returned author pattern is capable of:
1. Identifying single authors, with at least one initial, of the form:
'Initial. [surname prefix...] Surname'
2. Identifying multiple authors, each with at least one initial, of the form:
'Initial. [surname prefix...] Surname, [and] [Initial. [surname prefix...] Surname ... ]'
***(Note that a full stop, hyphen or apostrophe after each initial is
absolutely vital in identifying authors for both of these above methods.
Initials must also be uppercase.)***
3. Capture 'et al' statements at the end of author groups (allows for authors with et al
to be processed differently from 'standard' authors)
4. Identifying a single author surname name positioned before the phrase 'et al',
with no initials: 'Surname et al'
5. Identifying two author surname name positioned before the phrase 'et al',
with no initials, but separated by 'and' or '&': 'Surname [and|&] Surname et al'
6. Identifying authors of the form:
'Surname Initials, Initials Surname [Initials Surname]...'. Some authors choose
to represent the most important cited author (in a list of authors) by listing first
their surname, and then their initials. Since this form has little distinguishing
characteristics which could be used to create a reliable a pattern, at least one
standard author must be present after it in order to improve the accuracy.
7. Capture editor notation, of which can take many forms e.g.
'eds. editors. edited by. etc.'. Authors captured in this way can be treated as
'editor groups', and hence processed differently if needed from standard authors
@param etal: (string) The regular expression used to identify 'etal' notation
@param author: (string) An optional argument, which replaces the default author
regex used to identify author groups (initials, surnames... etc)
@return: (string) The full author group identification regex, which will:
- detect groups of authors in a range of formats, e.g.:
C. Hayward, V van Edwards, M. J. Woodbridge, and L. Kelloggs et al.,
- detect whether the author group has been marked up as editors of the doc.
(therefore they will NOT be marked up as authors) e.g.:
ed. C Hayward | (ed) V van Edwards | ed by, M. J. Woodbridge and V van Edwards
| L. Kelloggs (editors) | M. Jackson (eds.) | ...
-detect a maximum of two surnames only if the surname(s) is followed by 'et al'
(must be separated by 'and' if there are two), e.g.:
Amaldi et al., | Hayward and Yellow et al., | [
"Returns",
"a",
"regular",
"expression",
"to",
"be",
"used",
"to",
"identify",
"groups",
"of",
"author",
"names",
"in",
"a",
"citation",
".",
"This",
"method",
"contains",
"patterns",
"for",
"default",
"authors",
"so",
"no",
"arguments",
"are",
"needed",
"for",
"the",
"most",
"reliable",
"form",
"of",
"matching",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/authors/regexs.py#L192-L350 | train | 236,595 |
inspirehep/refextract | refextract/references/find.py | find_reference_section | def find_reference_section(docbody):
"""Search in document body for its reference section.
More precisely, find
the first line of the reference section. Effectively, the function starts
at the end of a document and works backwards, line-by-line, looking for
the title of a reference section. It stops when (if) it finds something
that it considers to be the first line of a reference section.
@param docbody: (list) of strings - the full document body.
@return: (dictionary) :
{ 'start_line' : (integer) - index in docbody of 1st reference line,
'title_string' : (string) - title of the reference section.
'marker' : (string) - the marker of the first reference line,
'marker_pattern' : (string) - regexp string used to find the marker,
'title_marker_same_line' : (integer) - flag to indicate whether the
reference section title was on the same
line as the first reference line's
marker or not. 1 if it was; 0 if not.
}
Much of this information is used by later functions to rebuild
a reference section.
-- OR --
(None) - when the reference section could not be found.
"""
ref_details = None
title_patterns = get_reference_section_title_patterns()
# Try to find refs section title:
for title_pattern in title_patterns:
# Look for title pattern in docbody
for reversed_index, line in enumerate(reversed(docbody)):
title_match = title_pattern.match(line)
if title_match:
title = title_match.group('title')
index = len(docbody) - 1 - reversed_index
temp_ref_details, found_title = find_numeration(docbody[index:index + 6], title)
if temp_ref_details:
if ref_details and 'title' in ref_details and ref_details['title'] and not temp_ref_details['title']:
continue
if ref_details and 'marker' in ref_details and ref_details['marker'] and not temp_ref_details['marker']:
continue
ref_details = temp_ref_details
ref_details['start_line'] = index
ref_details['title_string'] = title
if found_title:
break
if ref_details:
break
return ref_details | python | def find_reference_section(docbody):
"""Search in document body for its reference section.
More precisely, find
the first line of the reference section. Effectively, the function starts
at the end of a document and works backwards, line-by-line, looking for
the title of a reference section. It stops when (if) it finds something
that it considers to be the first line of a reference section.
@param docbody: (list) of strings - the full document body.
@return: (dictionary) :
{ 'start_line' : (integer) - index in docbody of 1st reference line,
'title_string' : (string) - title of the reference section.
'marker' : (string) - the marker of the first reference line,
'marker_pattern' : (string) - regexp string used to find the marker,
'title_marker_same_line' : (integer) - flag to indicate whether the
reference section title was on the same
line as the first reference line's
marker or not. 1 if it was; 0 if not.
}
Much of this information is used by later functions to rebuild
a reference section.
-- OR --
(None) - when the reference section could not be found.
"""
ref_details = None
title_patterns = get_reference_section_title_patterns()
# Try to find refs section title:
for title_pattern in title_patterns:
# Look for title pattern in docbody
for reversed_index, line in enumerate(reversed(docbody)):
title_match = title_pattern.match(line)
if title_match:
title = title_match.group('title')
index = len(docbody) - 1 - reversed_index
temp_ref_details, found_title = find_numeration(docbody[index:index + 6], title)
if temp_ref_details:
if ref_details and 'title' in ref_details and ref_details['title'] and not temp_ref_details['title']:
continue
if ref_details and 'marker' in ref_details and ref_details['marker'] and not temp_ref_details['marker']:
continue
ref_details = temp_ref_details
ref_details['start_line'] = index
ref_details['title_string'] = title
if found_title:
break
if ref_details:
break
return ref_details | [
"def",
"find_reference_section",
"(",
"docbody",
")",
":",
"ref_details",
"=",
"None",
"title_patterns",
"=",
"get_reference_section_title_patterns",
"(",
")",
"# Try to find refs section title:",
"for",
"title_pattern",
"in",
"title_patterns",
":",
"# Look for title pattern in docbody",
"for",
"reversed_index",
",",
"line",
"in",
"enumerate",
"(",
"reversed",
"(",
"docbody",
")",
")",
":",
"title_match",
"=",
"title_pattern",
".",
"match",
"(",
"line",
")",
"if",
"title_match",
":",
"title",
"=",
"title_match",
".",
"group",
"(",
"'title'",
")",
"index",
"=",
"len",
"(",
"docbody",
")",
"-",
"1",
"-",
"reversed_index",
"temp_ref_details",
",",
"found_title",
"=",
"find_numeration",
"(",
"docbody",
"[",
"index",
":",
"index",
"+",
"6",
"]",
",",
"title",
")",
"if",
"temp_ref_details",
":",
"if",
"ref_details",
"and",
"'title'",
"in",
"ref_details",
"and",
"ref_details",
"[",
"'title'",
"]",
"and",
"not",
"temp_ref_details",
"[",
"'title'",
"]",
":",
"continue",
"if",
"ref_details",
"and",
"'marker'",
"in",
"ref_details",
"and",
"ref_details",
"[",
"'marker'",
"]",
"and",
"not",
"temp_ref_details",
"[",
"'marker'",
"]",
":",
"continue",
"ref_details",
"=",
"temp_ref_details",
"ref_details",
"[",
"'start_line'",
"]",
"=",
"index",
"ref_details",
"[",
"'title_string'",
"]",
"=",
"title",
"if",
"found_title",
":",
"break",
"if",
"ref_details",
":",
"break",
"return",
"ref_details"
] | Search in document body for its reference section.
More precisely, find
the first line of the reference section. Effectively, the function starts
at the end of a document and works backwards, line-by-line, looking for
the title of a reference section. It stops when (if) it finds something
that it considers to be the first line of a reference section.
@param docbody: (list) of strings - the full document body.
@return: (dictionary) :
{ 'start_line' : (integer) - index in docbody of 1st reference line,
'title_string' : (string) - title of the reference section.
'marker' : (string) - the marker of the first reference line,
'marker_pattern' : (string) - regexp string used to find the marker,
'title_marker_same_line' : (integer) - flag to indicate whether the
reference section title was on the same
line as the first reference line's
marker or not. 1 if it was; 0 if not.
}
Much of this information is used by later functions to rebuild
a reference section.
-- OR --
(None) - when the reference section could not be found. | [
"Search",
"in",
"document",
"body",
"for",
"its",
"reference",
"section",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/find.py#L45-L97 | train | 236,596 |
inspirehep/refextract | refextract/references/find.py | find_numeration | def find_numeration(docbody, title):
"""Find numeration pattern
1st try to find numeration in the title
e.g.
References [4] Riotto...
2nd find the numeration alone in the line after the title
e.g.
References
1
Riotto
3rnd find the numeration in the following line
e.g.
References
[1] Riotto
"""
ref_details, found_title = find_numeration_in_title(docbody, title)
if not ref_details:
ref_details, found_title = find_numeration_in_body(docbody)
return ref_details, found_title | python | def find_numeration(docbody, title):
"""Find numeration pattern
1st try to find numeration in the title
e.g.
References [4] Riotto...
2nd find the numeration alone in the line after the title
e.g.
References
1
Riotto
3rnd find the numeration in the following line
e.g.
References
[1] Riotto
"""
ref_details, found_title = find_numeration_in_title(docbody, title)
if not ref_details:
ref_details, found_title = find_numeration_in_body(docbody)
return ref_details, found_title | [
"def",
"find_numeration",
"(",
"docbody",
",",
"title",
")",
":",
"ref_details",
",",
"found_title",
"=",
"find_numeration_in_title",
"(",
"docbody",
",",
"title",
")",
"if",
"not",
"ref_details",
":",
"ref_details",
",",
"found_title",
"=",
"find_numeration_in_body",
"(",
"docbody",
")",
"return",
"ref_details",
",",
"found_title"
] | Find numeration pattern
1st try to find numeration in the title
e.g.
References [4] Riotto...
2nd find the numeration alone in the line after the title
e.g.
References
1
Riotto
3rnd find the numeration in the following line
e.g.
References
[1] Riotto | [
"Find",
"numeration",
"pattern"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/find.py#L181-L203 | train | 236,597 |
inspirehep/refextract | refextract/references/text.py | get_reference_lines | def get_reference_lines(docbody,
ref_sect_start_line,
ref_sect_end_line,
ref_sect_title,
ref_line_marker_ptn,
title_marker_same_line):
"""After the reference section of a document has been identified, and the
first and last lines of the reference section have been recorded, this
function is called to take the reference lines out of the document body.
The document's reference lines are returned in a list of strings whereby
each string is a reference line. Before this can be done however, the
reference section is passed to another function that rebuilds any broken
reference lines.
@param docbody: (list) of strings - the entire document body.
@param ref_sect_start_line: (integer) - the index in docbody of the first
reference line.
@param ref_sect_end_line: (integer) - the index in docbody of the last
reference line.
@param ref_sect_title: (string) - the title of the reference section
(e.g. "References").
@param ref_line_marker_ptn: (string) - the patern used to match the
marker for each reference line (e.g., could be used to match lines
with markers of the form [1], [2], etc.)
@param title_marker_same_line: (integer) - a flag to indicate whether
or not the reference section title was on the same line as the first
reference line's marker.
@return: (list) of strings. Each string is a reference line, extracted
from the document.
"""
start_idx = ref_sect_start_line
if title_marker_same_line:
# Title on same line as 1st ref- take title out!
title_start = docbody[start_idx].find(ref_sect_title)
if title_start != -1:
# Set the first line with no title
docbody[start_idx] = docbody[start_idx][title_start +
len(ref_sect_title):]
elif ref_sect_title is not None:
# Set the start of the reference section to be after the title line
start_idx += 1
if ref_sect_end_line is not None:
ref_lines = docbody[start_idx:ref_sect_end_line + 1]
else:
ref_lines = docbody[start_idx:]
if ref_sect_title:
ref_lines = strip_footer(ref_lines, ref_sect_title)
# Now rebuild reference lines:
# (Go through each raw reference line, and format them into a set
# of properly ordered lines based on markers)
return rebuild_reference_lines(ref_lines, ref_line_marker_ptn) | python | def get_reference_lines(docbody,
ref_sect_start_line,
ref_sect_end_line,
ref_sect_title,
ref_line_marker_ptn,
title_marker_same_line):
"""After the reference section of a document has been identified, and the
first and last lines of the reference section have been recorded, this
function is called to take the reference lines out of the document body.
The document's reference lines are returned in a list of strings whereby
each string is a reference line. Before this can be done however, the
reference section is passed to another function that rebuilds any broken
reference lines.
@param docbody: (list) of strings - the entire document body.
@param ref_sect_start_line: (integer) - the index in docbody of the first
reference line.
@param ref_sect_end_line: (integer) - the index in docbody of the last
reference line.
@param ref_sect_title: (string) - the title of the reference section
(e.g. "References").
@param ref_line_marker_ptn: (string) - the patern used to match the
marker for each reference line (e.g., could be used to match lines
with markers of the form [1], [2], etc.)
@param title_marker_same_line: (integer) - a flag to indicate whether
or not the reference section title was on the same line as the first
reference line's marker.
@return: (list) of strings. Each string is a reference line, extracted
from the document.
"""
start_idx = ref_sect_start_line
if title_marker_same_line:
# Title on same line as 1st ref- take title out!
title_start = docbody[start_idx].find(ref_sect_title)
if title_start != -1:
# Set the first line with no title
docbody[start_idx] = docbody[start_idx][title_start +
len(ref_sect_title):]
elif ref_sect_title is not None:
# Set the start of the reference section to be after the title line
start_idx += 1
if ref_sect_end_line is not None:
ref_lines = docbody[start_idx:ref_sect_end_line + 1]
else:
ref_lines = docbody[start_idx:]
if ref_sect_title:
ref_lines = strip_footer(ref_lines, ref_sect_title)
# Now rebuild reference lines:
# (Go through each raw reference line, and format them into a set
# of properly ordered lines based on markers)
return rebuild_reference_lines(ref_lines, ref_line_marker_ptn) | [
"def",
"get_reference_lines",
"(",
"docbody",
",",
"ref_sect_start_line",
",",
"ref_sect_end_line",
",",
"ref_sect_title",
",",
"ref_line_marker_ptn",
",",
"title_marker_same_line",
")",
":",
"start_idx",
"=",
"ref_sect_start_line",
"if",
"title_marker_same_line",
":",
"# Title on same line as 1st ref- take title out!",
"title_start",
"=",
"docbody",
"[",
"start_idx",
"]",
".",
"find",
"(",
"ref_sect_title",
")",
"if",
"title_start",
"!=",
"-",
"1",
":",
"# Set the first line with no title",
"docbody",
"[",
"start_idx",
"]",
"=",
"docbody",
"[",
"start_idx",
"]",
"[",
"title_start",
"+",
"len",
"(",
"ref_sect_title",
")",
":",
"]",
"elif",
"ref_sect_title",
"is",
"not",
"None",
":",
"# Set the start of the reference section to be after the title line",
"start_idx",
"+=",
"1",
"if",
"ref_sect_end_line",
"is",
"not",
"None",
":",
"ref_lines",
"=",
"docbody",
"[",
"start_idx",
":",
"ref_sect_end_line",
"+",
"1",
"]",
"else",
":",
"ref_lines",
"=",
"docbody",
"[",
"start_idx",
":",
"]",
"if",
"ref_sect_title",
":",
"ref_lines",
"=",
"strip_footer",
"(",
"ref_lines",
",",
"ref_sect_title",
")",
"# Now rebuild reference lines:",
"# (Go through each raw reference line, and format them into a set",
"# of properly ordered lines based on markers)",
"return",
"rebuild_reference_lines",
"(",
"ref_lines",
",",
"ref_line_marker_ptn",
")"
] | After the reference section of a document has been identified, and the
first and last lines of the reference section have been recorded, this
function is called to take the reference lines out of the document body.
The document's reference lines are returned in a list of strings whereby
each string is a reference line. Before this can be done however, the
reference section is passed to another function that rebuilds any broken
reference lines.
@param docbody: (list) of strings - the entire document body.
@param ref_sect_start_line: (integer) - the index in docbody of the first
reference line.
@param ref_sect_end_line: (integer) - the index in docbody of the last
reference line.
@param ref_sect_title: (string) - the title of the reference section
(e.g. "References").
@param ref_line_marker_ptn: (string) - the patern used to match the
marker for each reference line (e.g., could be used to match lines
with markers of the form [1], [2], etc.)
@param title_marker_same_line: (integer) - a flag to indicate whether
or not the reference section title was on the same line as the first
reference line's marker.
@return: (list) of strings. Each string is a reference line, extracted
from the document. | [
"After",
"the",
"reference",
"section",
"of",
"a",
"document",
"has",
"been",
"identified",
"and",
"the",
"first",
"and",
"last",
"lines",
"of",
"the",
"reference",
"section",
"have",
"been",
"recorded",
"this",
"function",
"is",
"called",
"to",
"take",
"the",
"reference",
"lines",
"out",
"of",
"the",
"document",
"body",
".",
"The",
"document",
"s",
"reference",
"lines",
"are",
"returned",
"in",
"a",
"list",
"of",
"strings",
"whereby",
"each",
"string",
"is",
"a",
"reference",
"line",
".",
"Before",
"this",
"can",
"be",
"done",
"however",
"the",
"reference",
"section",
"is",
"passed",
"to",
"another",
"function",
"that",
"rebuilds",
"any",
"broken",
"reference",
"lines",
"."
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L91-L142 | train | 236,598 |
inspirehep/refextract | refextract/references/text.py | match_pagination | def match_pagination(ref_line):
"""Remove footer pagination from references lines"""
pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$'
re_footer = re.compile(pattern, re.UNICODE)
match = re_footer.match(ref_line)
if match:
return int(match.group(1))
return None | python | def match_pagination(ref_line):
"""Remove footer pagination from references lines"""
pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$'
re_footer = re.compile(pattern, re.UNICODE)
match = re_footer.match(ref_line)
if match:
return int(match.group(1))
return None | [
"def",
"match_pagination",
"(",
"ref_line",
")",
":",
"pattern",
"=",
"ur'\\(?\\[?(\\d{1,4})\\]?\\)?\\.?\\s*$'",
"re_footer",
"=",
"re",
".",
"compile",
"(",
"pattern",
",",
"re",
".",
"UNICODE",
")",
"match",
"=",
"re_footer",
".",
"match",
"(",
"ref_line",
")",
"if",
"match",
":",
"return",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"return",
"None"
] | Remove footer pagination from references lines | [
"Remove",
"footer",
"pagination",
"from",
"references",
"lines"
] | d70e3787be3c495a3a07d1517b53f81d51c788c7 | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L145-L152 | train | 236,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.