| Instruction (string, lengths 362–7.83k) | output_code (string, lengths 1–945) |
|---|---|
Given snippet: <|code_start|> line = NAMED_LINKS_REGEX.sub(r'\1', line)
line = line.replace('[', '').replace(']', '')
section_name = line.strip('=').strip()
# Stop when we get to "See also" or "References".
if section_name in ('See also', 'References'):
break
current_depth = line.count('=') / 2 - 2
# We may need to pop some sections off the stack first.
if len(sections) > current_depth:
num_to_pop = len(sections) - current_depth
for j in xrange(num_to_pop):
sections.pop()
sections.append(section_name)
elif line:
if line.startswith('{{'):
continue
if line.startswith('[['):
continue
if line.startswith('}}') and line != '}}</ref>':
continue
if line.startswith('<!-'):
continue
if line.startswith("''Those who indicated"):
continue
if line.startswith("''Federal, state, and municipal"):
continue
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import requests
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedEndorsement
from wikipedia.utils import get_ref_definitions, replace_refs, ANY_REF_REGEX, \
split_endorsements
and context:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedEndorsement(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# raw_text = models.TextField()
# imported_endorser = models.ForeignKey(
# ImportedEndorser, blank=True, null=True
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# sections = models.TextField(blank=True, null=True)
# notes = models.TextField(blank=True, null=True)
#
# def parse_text(self):
# return utils.parse_wiki_text(self.raw_text)
#
# def get_likely_endorser(self):
# name = self.parse_text()['endorser_name']
# if not name:
# return
#
# name = name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.latest('pk')
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# # See if the last name and part of the first name match.
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# def __unicode__(self):
# return self.raw_text
#
# Path: wikipedia/utils.py
# def get_ref_definitions(line):
# for ref_match in NAMED_REF_REGEX.finditer(line):
# yield (
# ref_match.group('name1') or ref_match.group('name2'),
# ref_match.group()
# )
#
# def replace_refs(line, definitions):
# for ref_match in REF_BY_NAME_REGEX.finditer(line):
# name = ref_match.group('name1') or ref_match.group('name2')
# definition = definitions.get(name)
# if definition:
# line = line.replace(ref_match.group(0), definition)
#
# return line
#
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
#
# def split_endorsements(line):
# for match in SPLIT_REGEX.finditer(line):
# yield match.group(0).strip()
which might include code, classes, or functions. Output only the next line. | if line.startswith("''Note:"): |
Continue the code snippet: <|code_start|> bulk_import__slug=self.slug,
)
return False
except ImportedEndorsement.DoesNotExist:
if self.create:
ImportedEndorsement.objects.create(
bulk_import=self.bulk_import,
raw_text=raw_text,
sections=sections_string,
)
print raw_text, sections_string
print "------"
return True
def handle(self, *args, **options):
url = URL.format(slug=options['slug'])
response = requests.get(url)
data = response.json()
wikitext = data['parse']['wikitext']['*']
self.slug = options['slug']
self.create = options['create']
if self.create:
self.bulk_import = BulkImport.objects.create(
slug=options['slug'],
text=wikitext
)
# Skip until the first line that starts with a ==.
<|code_end|>
. Use current file imports:
import re
import requests
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedEndorsement
from wikipedia.utils import get_ref_definitions, replace_refs, ANY_REF_REGEX, \
split_endorsements
and context (classes, functions, or code) from other files:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedEndorsement(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# raw_text = models.TextField()
# imported_endorser = models.ForeignKey(
# ImportedEndorser, blank=True, null=True
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# sections = models.TextField(blank=True, null=True)
# notes = models.TextField(blank=True, null=True)
#
# def parse_text(self):
# return utils.parse_wiki_text(self.raw_text)
#
# def get_likely_endorser(self):
# name = self.parse_text()['endorser_name']
# if not name:
# return
#
# name = name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.latest('pk')
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# # See if the last name and part of the first name match.
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# def __unicode__(self):
# return self.raw_text
#
# Path: wikipedia/utils.py
# def get_ref_definitions(line):
# for ref_match in NAMED_REF_REGEX.finditer(line):
# yield (
# ref_match.group('name1') or ref_match.group('name2'),
# ref_match.group()
# )
#
# def replace_refs(line, definitions):
# for ref_match in REF_BY_NAME_REGEX.finditer(line):
# name = ref_match.group('name1') or ref_match.group('name2')
# definition = definitions.get(name)
# if definition:
# line = line.replace(ref_match.group(0), definition)
#
# return line
#
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
#
# def split_endorsements(line):
# for match in SPLIT_REGEX.finditer(line):
# yield match.group(0).strip()
. Output only the next line. | lines = wikitext.splitlines() |
Using the snippet: <|code_start|>
COMMENTS_REGEX = re.compile(r'<!--.*?-->')
COLON_SECTION_REGEX = re.compile('(\[?\[?[a-zA-Z ]+)\]?\]?: ')
URL = 'https://en.wikipedia.org/w/api.php?action=parse&page={slug}&prop=wikitext&format=json'
NAMED_LINKS_REGEX = re.compile(r'\[\[[^|\]]+\|([^\]]+)\]\]')
class Command(BaseCommand):
help = 'Bulk import all the endorsements from a Wikipedia page'
def add_arguments(self, parser):
parser.add_argument('slug')
parser.add_argument(
'--create',
action='store_true',
dest='create',
default=False,
help="Creates everything (otherwise, it's a dry run)",
)
def import_endorsement(self, raw_text, sections):
# Make sure that named references are replaced.
raw_text = replace_refs(raw_text, self.reference_definitions)
sections_string = ' > '.join(sections)
if raw_text:
<|code_end|>
, determine the next line of code. You have imports:
import re
import requests
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedEndorsement
from wikipedia.utils import get_ref_definitions, replace_refs, ANY_REF_REGEX, \
split_endorsements
and context (class names, function names, or code) available:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedEndorsement(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# raw_text = models.TextField()
# imported_endorser = models.ForeignKey(
# ImportedEndorser, blank=True, null=True
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# sections = models.TextField(blank=True, null=True)
# notes = models.TextField(blank=True, null=True)
#
# def parse_text(self):
# return utils.parse_wiki_text(self.raw_text)
#
# def get_likely_endorser(self):
# name = self.parse_text()['endorser_name']
# if not name:
# return
#
# name = name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.latest('pk')
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# # See if the last name and part of the first name match.
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# def __unicode__(self):
# return self.raw_text
#
# Path: wikipedia/utils.py
# def get_ref_definitions(line):
# for ref_match in NAMED_REF_REGEX.finditer(line):
# yield (
# ref_match.group('name1') or ref_match.group('name2'),
# ref_match.group()
# )
#
# def replace_refs(line, definitions):
# for ref_match in REF_BY_NAME_REGEX.finditer(line):
# name = ref_match.group('name1') or ref_match.group('name2')
# definition = definitions.get(name)
# if definition:
# line = line.replace(ref_match.group(0), definition)
#
# return line
#
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
#
# def split_endorsements(line):
# for match in SPLIT_REGEX.finditer(line):
# yield match.group(0).strip()
. Output only the next line. | try: |
Based on the snippet: <|code_start|> current_line_sections,
)
current_line = []
if line.startswith('* '):
line = line[2:]
if line == '[[List of Donald Trump presidential campaign primary endorsements, 2016]]':
continue
# If it's like "Member of the Nevada Assembly: " etc
colon_section_match = COLON_SECTION_REGEX.match(line)
if colon_section_match:
colon_section = colon_section_match.group(1)
colon_section = colon_section.replace('[', '')
colon_section = colon_section.replace(']', '')
line_remainder = line.partition(':')[2]
for sub_line in split_endorsements(line_remainder):
num_imported += self.import_endorsement(
sub_line,
sections + [colon_section],
)
continue
current_line = [line]
current_line_sections = sections
else:
current_line_sections = list(sections)
# In case there's a <ref> tag, get rid of it.
if '<ref' in line:
line = ANY_REF_REGEX.sub('', line)
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import requests
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedEndorsement
from wikipedia.utils import get_ref_definitions, replace_refs, ANY_REF_REGEX, \
split_endorsements
and context (classes, functions, sometimes code) from other files:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedEndorsement(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# raw_text = models.TextField()
# imported_endorser = models.ForeignKey(
# ImportedEndorser, blank=True, null=True
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# sections = models.TextField(blank=True, null=True)
# notes = models.TextField(blank=True, null=True)
#
# def parse_text(self):
# return utils.parse_wiki_text(self.raw_text)
#
# def get_likely_endorser(self):
# name = self.parse_text()['endorser_name']
# if not name:
# return
#
# name = name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.latest('pk')
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# # See if the last name and part of the first name match.
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# def __unicode__(self):
# return self.raw_text
#
# Path: wikipedia/utils.py
# def get_ref_definitions(line):
# for ref_match in NAMED_REF_REGEX.finditer(line):
# yield (
# ref_match.group('name1') or ref_match.group('name2'),
# ref_match.group()
# )
#
# def replace_refs(line, definitions):
# for ref_match in REF_BY_NAME_REGEX.finditer(line):
# name = ref_match.group('name1') or ref_match.group('name2')
# definition = definitions.get(name)
# if definition:
# line = line.replace(ref_match.group(0), definition)
#
# return line
#
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
#
# def split_endorsements(line):
# for match in SPLIT_REGEX.finditer(line):
# yield match.group(0).strip()
. Output only the next line. | if '<!--' in line: |
Given the following code snippet before the placeholder: <|code_start|>
COMMENTS_REGEX = re.compile(r'<!--.*?-->')
COLON_SECTION_REGEX = re.compile('(\[?\[?[a-zA-Z ]+)\]?\]?: ')
URL = 'https://en.wikipedia.org/w/api.php?action=parse&page={slug}&prop=wikitext&format=json'
NAMED_LINKS_REGEX = re.compile(r'\[\[[^|\]]+\|([^\]]+)\]\]')
<|code_end|>
, predict the next line using imports from the current file:
import re
import requests
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedEndorsement
from wikipedia.utils import get_ref_definitions, replace_refs, ANY_REF_REGEX, \
split_endorsements
and context including class names, function names, and sometimes code from other files:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedEndorsement(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# raw_text = models.TextField()
# imported_endorser = models.ForeignKey(
# ImportedEndorser, blank=True, null=True
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# sections = models.TextField(blank=True, null=True)
# notes = models.TextField(blank=True, null=True)
#
# def parse_text(self):
# return utils.parse_wiki_text(self.raw_text)
#
# def get_likely_endorser(self):
# name = self.parse_text()['endorser_name']
# if not name:
# return
#
# name = name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.latest('pk')
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# # See if the last name and part of the first name match.
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# def __unicode__(self):
# return self.raw_text
#
# Path: wikipedia/utils.py
# def get_ref_definitions(line):
# for ref_match in NAMED_REF_REGEX.finditer(line):
# yield (
# ref_match.group('name1') or ref_match.group('name2'),
# ref_match.group()
# )
#
# def replace_refs(line, definitions):
# for ref_match in REF_BY_NAME_REGEX.finditer(line):
# name = ref_match.group('name1') or ref_match.group('name2')
# definition = definitions.get(name)
# if definition:
# line = line.replace(ref_match.group(0), definition)
#
# return line
#
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
#
# def split_endorsements(line):
# for match in SPLIT_REGEX.finditer(line):
# yield match.group(0).strip()
. Output only the next line. | class Command(BaseCommand): |
Continue the code snippet: <|code_start|> colon_section = colon_section.replace(']', '')
line_remainder = line.partition(':')[2]
for sub_line in split_endorsements(line_remainder):
num_imported += self.import_endorsement(
sub_line,
sections + [colon_section],
)
continue
current_line = [line]
current_line_sections = sections
else:
current_line_sections = list(sections)
# In case there's a <ref> tag, get rid of it.
if '<ref' in line:
line = ANY_REF_REGEX.sub('', line)
if '<!--' in line:
line = COMMENTS_REGEX.sub('', line)
if '[[' in line:
if NAMED_LINKS_REGEX.search(line):
line = NAMED_LINKS_REGEX.sub(r'\1', line)
line = line.replace('[', '').replace(']', '')
section_name = line.strip('=').strip()
# Stop when we get to "See also" or "References".
if section_name in ('See also', 'References'):
break
<|code_end|>
. Use current file imports:
import re
import requests
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedEndorsement
from wikipedia.utils import get_ref_definitions, replace_refs, ANY_REF_REGEX, \
split_endorsements
and context (classes, functions, or code) from other files:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedEndorsement(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# raw_text = models.TextField()
# imported_endorser = models.ForeignKey(
# ImportedEndorser, blank=True, null=True
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# sections = models.TextField(blank=True, null=True)
# notes = models.TextField(blank=True, null=True)
#
# def parse_text(self):
# return utils.parse_wiki_text(self.raw_text)
#
# def get_likely_endorser(self):
# name = self.parse_text()['endorser_name']
# if not name:
# return
#
# name = name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.latest('pk')
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# # See if the last name and part of the first name match.
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# def __unicode__(self):
# return self.raw_text
#
# Path: wikipedia/utils.py
# def get_ref_definitions(line):
# for ref_match in NAMED_REF_REGEX.finditer(line):
# yield (
# ref_match.group('name1') or ref_match.group('name2'),
# ref_match.group()
# )
#
# def replace_refs(line, definitions):
# for ref_match in REF_BY_NAME_REGEX.finditer(line):
# name = ref_match.group('name1') or ref_match.group('name2')
# definition = definitions.get(name)
# if definition:
# line = line.replace(ref_match.group(0), definition)
#
# return line
#
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
#
# def split_endorsements(line):
# for match in SPLIT_REGEX.finditer(line):
# yield match.group(0).strip()
. Output only the next line. | current_depth = line.count('=') / 2 - 2 |
Given snippet: <|code_start|>
URL = 'https://en.wikipedia.org/w/api.php?action=parse&page={slug}&prop=text&format=json§ion={section}'
SLUG = 'Current_members_of_the_United_States_House_of_Representatives'
SECTION = 7
class Command(BaseCommand):
help = 'Bulk import all the election results by state'
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from datetime import datetime
from BeautifulSoup import BeautifulSoup as BS
from django.core.management.base import BaseCommand, CommandError
from endorsements.models import Tag, Candidate
from wikipedia.models import BulkImport, ImportedRepresentative
import requests
and context:
# Path: endorsements/models.py
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedRepresentative(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# state = models.ForeignKey(
# Tag,
# limit_choices_to={
# 'category': 8,
# },
# related_name='+',
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# party = models.ForeignKey(
# Tag,
# limit_choices_to={
# 'category': 2
# },
# related_name='+',
# )
# name = models.CharField(max_length=50)
#
# def get_likely_endorser(self):
# name = self.name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.first()
#
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# query = Endorser.objects.filter(
# name__icontains=name,
# )
# if query.exists():
# return query.first()
which might include code, classes, or functions. Output only the next line. | def add_arguments(self, parser): |
Next line prediction: <|code_start|>
URL = 'https://en.wikipedia.org/w/api.php?action=parse&page={slug}&prop=text&format=json§ion={section}'
SLUG = 'Current_members_of_the_United_States_House_of_Representatives'
SECTION = 7
class Command(BaseCommand):
help = 'Bulk import all the election results by state'
def add_arguments(self, parser):
parser.add_argument(
'--create',
action='store_true',
dest='create',
default=False,
help="Creates everything (otherwise, it's a dry run)",
)
def handle(self, *args, **options):
url = URL.format(slug=SLUG, section=SECTION)
response = requests.get(url)
data = response.json()
text = data['parse']['text']['*']
soup = BS(text)
<|code_end|>
. Use current file imports:
(from datetime import datetime
from BeautifulSoup import BeautifulSoup as BS
from django.core.management.base import BaseCommand, CommandError
from endorsements.models import Tag, Candidate
from wikipedia.models import BulkImport, ImportedRepresentative
import requests)
and context including class names, function names, or small code snippets from other files:
# Path: endorsements/models.py
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedRepresentative(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# state = models.ForeignKey(
# Tag,
# limit_choices_to={
# 'category': 8,
# },
# related_name='+',
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# party = models.ForeignKey(
# Tag,
# limit_choices_to={
# 'category': 2
# },
# related_name='+',
# )
# name = models.CharField(max_length=50)
#
# def get_likely_endorser(self):
# name = self.name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.first()
#
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# query = Endorser.objects.filter(
# name__icontains=name,
# )
# if query.exists():
# return query.first()
. Output only the next line. | reps = [] |
Given the following code snippet before the placeholder: <|code_start|>
def add_arguments(self, parser):
parser.add_argument(
'--create',
action='store_true',
dest='create',
default=False,
help="Creates everything (otherwise, it's a dry run)",
)
def handle(self, *args, **options):
url = URL.format(slug=SLUG, section=SECTION)
response = requests.get(url)
data = response.json()
text = data['parse']['text']['*']
soup = BS(text)
reps = []
for i, table_row in enumerate(soup.findAll('tr')):
if i == 0:
continue
table_cells = table_row.findAll('td')
assert len(table_cells) == 8
state_string = table_cells[0].find('a').string
if state_string.endswith(' At Large'):
state = state_string[:-9]
else:
<|code_end|>
, predict the next line using imports from the current file:
from datetime import datetime
from BeautifulSoup import BeautifulSoup as BS
from django.core.management.base import BaseCommand, CommandError
from endorsements.models import Tag, Candidate
from wikipedia.models import BulkImport, ImportedRepresentative
import requests
and context including class names, function names, and sometimes code from other files:
# Path: endorsements/models.py
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedRepresentative(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# state = models.ForeignKey(
# Tag,
# limit_choices_to={
# 'category': 8,
# },
# related_name='+',
# )
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# party = models.ForeignKey(
# Tag,
# limit_choices_to={
# 'category': 2
# },
# related_name='+',
# )
# name = models.CharField(max_length=50)
#
# def get_likely_endorser(self):
# name = self.name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.first()
#
# if ' ' in name:
# split_name = name.split(' ')
# first_name_start = split_name[0][:3]
# last_name = split_name[-1]
# if len(last_name) > 3:
# query = Endorser.objects.filter(
# name__iendswith=last_name,
# name__istartswith=first_name_start,
# )
# if query.exists():
# return query.first()
#
# query = Endorser.objects.filter(
# name__icontains=name,
# )
# if query.exists():
# return query.first()
. Output only the next line. | state = state_string.rpartition(' ')[0] |
Based on the snippet: <|code_start|>from __future__ import unicode_literals
class BulkImport(models.Model):
slug = models.SlugField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
text = models.TextField()
def __unicode__(self):
return str(self.created_at)
class ImportedResult(models.Model):
bulk_import = models.ForeignKey(BulkImport)
tag = models.ForeignKey(Tag)
candidate = models.ForeignKey(Candidate)
count = models.PositiveIntegerField(default=0)
percent = models.DecimalField(max_digits=4, decimal_places=2)
class Meta:
unique_together = ('tag', 'candidate')
class ImportedRepresentative(models.Model):
<|code_end|>
, predict the immediate next line with the help of imports:
from django.db import models
from endorsements.models import Endorser, Tag, Candidate
from wikipedia import utils
and context (classes, functions, sometimes code) from other files:
# Path: endorsements/models.py
# class Endorser(models.Model):
# name = models.CharField(max_length=100)
# description = models.TextField(null=True, blank=True)
# url = models.URLField(null=True, blank=True)
# is_personal = models.BooleanField(default=True)
# max_followers = models.PositiveIntegerField(default=0)
# tags = models.ManyToManyField(Tag, blank=True)
# missing_image = models.BooleanField(default=True)
# current_position = models.ForeignKey('Position', blank=True, null=True)
#
# class Meta:
# ordering = ['-max_followers']
#
# def __unicode__(self):
# return self.name
#
# def has_url(self):
# return bool(self.url)
# has_url.boolean = True
#
# def get_tags(self):
# return ' / '.join(tag.name for tag in self.tags.all())
#
# def get_current_endorsement(self):
# try:
# return self.endorsement_set.latest('quote')
# except Endorsement.DoesNotExist:
# pass
#
# def get_image(self):
# return '<img src="{url}" width="100" />'.format(
# url=self.get_image_url()
# )
# get_image.allow_tags = True
#
# def get_image_url(self):
# image_name = 'missing' if self.missing_image else self.pk
# return 'https://s3.amazonaws.com/endorsementdb.com/images/endorsers/%s.png' % image_name
#
# def needs_quotes(self):
# current_endorsement = self.get_current_endorsement()
# return not current_endorsement.quote.text
# needs_quotes.boolean = True
#
# def get_absolute_url(self):
# return reverse('view-endorser', args=[self.pk])
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# Path: wikipedia/utils.py
# NAMED_REF_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+?)"|(?P<name2>[^ /"]+?))>[^<]+</ref>'
# )
# REF_BY_NAME_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+)?"|(?P<name2>[^ ]+?)) ?/>'
# )
# SPLIT_REGEX = re.compile(
# '('
# r'(\[\[[^\]]{5,}\]\]( \([^\)]+\))?|[A-Za-z .\-\(\)]+)'
# r'(, ?| and |)'
# r'(<ref name="[^"]+" ?/>|<ref[^<]+?</ref>)'
# ')'
# )
# BRACES_REGEX = re.compile(r'{{[^}]+}}')
# USEFUL_REF_REGEX = re.compile(r'<ref( [^>/]*|)(?!/)>(?P<ref>.*?)</ref>')
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
# NUMBERS_DATE_FORMAT = re.compile(r'\d{4}-\d{2}-\d{2}$')
# SHORT_DATE_FORMAT = re.compile('[A-z][a-z]{2} ')
# NAMED_LINKS_REGEX = re.compile(r'\[\[[^|\]]+\|([^\]]+)\]\]')
# def get_ref_definitions(line):
# def replace_refs(line, definitions):
# def split_endorsements(line):
# def parse_wiki_text(text):
. Output only the next line. | bulk_import = models.ForeignKey(BulkImport) |
Continue the code snippet: <|code_start|>
class ImportedRepresentative(models.Model):
bulk_import = models.ForeignKey(BulkImport)
state = models.ForeignKey(
Tag,
limit_choices_to={
'category': 8,
},
related_name='+',
)
confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
party = models.ForeignKey(
Tag,
limit_choices_to={
'category': 2
},
related_name='+',
)
name = models.CharField(max_length=50)
def get_likely_endorser(self):
name = self.name.lower()
query = Endorser.objects.filter(
name__iexact=name,
)
if query.exists():
return query.first()
<|code_end|>
. Use current file imports:
from django.db import models
from endorsements.models import Endorser, Tag, Candidate
from wikipedia import utils
and context (classes, functions, or code) from other files:
# Path: endorsements/models.py
# class Endorser(models.Model):
# name = models.CharField(max_length=100)
# description = models.TextField(null=True, blank=True)
# url = models.URLField(null=True, blank=True)
# is_personal = models.BooleanField(default=True)
# max_followers = models.PositiveIntegerField(default=0)
# tags = models.ManyToManyField(Tag, blank=True)
# missing_image = models.BooleanField(default=True)
# current_position = models.ForeignKey('Position', blank=True, null=True)
#
# class Meta:
# ordering = ['-max_followers']
#
# def __unicode__(self):
# return self.name
#
# def has_url(self):
# return bool(self.url)
# has_url.boolean = True
#
# def get_tags(self):
# return ' / '.join(tag.name for tag in self.tags.all())
#
# def get_current_endorsement(self):
# try:
# return self.endorsement_set.latest('quote')
# except Endorsement.DoesNotExist:
# pass
#
# def get_image(self):
# return '<img src="{url}" width="100" />'.format(
# url=self.get_image_url()
# )
# get_image.allow_tags = True
#
# def get_image_url(self):
# image_name = 'missing' if self.missing_image else self.pk
# return 'https://s3.amazonaws.com/endorsementdb.com/images/endorsers/%s.png' % image_name
#
# def needs_quotes(self):
# current_endorsement = self.get_current_endorsement()
# return not current_endorsement.quote.text
# needs_quotes.boolean = True
#
# def get_absolute_url(self):
# return reverse('view-endorser', args=[self.pk])
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# Path: wikipedia/utils.py
# NAMED_REF_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+?)"|(?P<name2>[^ /"]+?))>[^<]+</ref>'
# )
# REF_BY_NAME_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+)?"|(?P<name2>[^ ]+?)) ?/>'
# )
# SPLIT_REGEX = re.compile(
# '('
# r'(\[\[[^\]]{5,}\]\]( \([^\)]+\))?|[A-Za-z .\-\(\)]+)'
# r'(, ?| and |)'
# r'(<ref name="[^"]+" ?/>|<ref[^<]+?</ref>)'
# ')'
# )
# BRACES_REGEX = re.compile(r'{{[^}]+}}')
# USEFUL_REF_REGEX = re.compile(r'<ref( [^>/]*|)(?!/)>(?P<ref>.*?)</ref>')
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
# NUMBERS_DATE_FORMAT = re.compile(r'\d{4}-\d{2}-\d{2}$')
# SHORT_DATE_FORMAT = re.compile('[A-z][a-z]{2} ')
# NAMED_LINKS_REGEX = re.compile(r'\[\[[^|\]]+\|([^\]]+)\]\]')
# def get_ref_definitions(line):
# def replace_refs(line, definitions):
# def split_endorsements(line):
# def parse_wiki_text(text):
. Output only the next line. | if ' ' in name: |
Given the following code snippet before the placeholder: <|code_start|> 'category': 8,
},
related_name='+',
)
confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
party = models.ForeignKey(
Tag,
limit_choices_to={
'category': 2
},
related_name='+',
)
name = models.CharField(max_length=50)
def get_likely_endorser(self):
name = self.name.lower()
query = Endorser.objects.filter(
name__iexact=name,
)
if query.exists():
return query.first()
if ' ' in name:
split_name = name.split(' ')
first_name_start = split_name[0][:3]
last_name = split_name[-1]
if len(last_name) > 3:
query = Endorser.objects.filter(
name__iendswith=last_name,
<|code_end|>
, predict the next line using imports from the current file:
from django.db import models
from endorsements.models import Endorser, Tag, Candidate
from wikipedia import utils
and context including class names, function names, and sometimes code from other files:
# Path: endorsements/models.py
# class Endorser(models.Model):
# name = models.CharField(max_length=100)
# description = models.TextField(null=True, blank=True)
# url = models.URLField(null=True, blank=True)
# is_personal = models.BooleanField(default=True)
# max_followers = models.PositiveIntegerField(default=0)
# tags = models.ManyToManyField(Tag, blank=True)
# missing_image = models.BooleanField(default=True)
# current_position = models.ForeignKey('Position', blank=True, null=True)
#
# class Meta:
# ordering = ['-max_followers']
#
# def __unicode__(self):
# return self.name
#
# def has_url(self):
# return bool(self.url)
# has_url.boolean = True
#
# def get_tags(self):
# return ' / '.join(tag.name for tag in self.tags.all())
#
# def get_current_endorsement(self):
# try:
# return self.endorsement_set.latest('quote')
# except Endorsement.DoesNotExist:
# pass
#
# def get_image(self):
# return '<img src="{url}" width="100" />'.format(
# url=self.get_image_url()
# )
# get_image.allow_tags = True
#
# def get_image_url(self):
# image_name = 'missing' if self.missing_image else self.pk
# return 'https://s3.amazonaws.com/endorsementdb.com/images/endorsers/%s.png' % image_name
#
# def needs_quotes(self):
# current_endorsement = self.get_current_endorsement()
# return not current_endorsement.quote.text
# needs_quotes.boolean = True
#
# def get_absolute_url(self):
# return reverse('view-endorser', args=[self.pk])
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# Path: wikipedia/utils.py
# NAMED_REF_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+?)"|(?P<name2>[^ /"]+?))>[^<]+</ref>'
# )
# REF_BY_NAME_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+)?"|(?P<name2>[^ ]+?)) ?/>'
# )
# SPLIT_REGEX = re.compile(
# '('
# r'(\[\[[^\]]{5,}\]\]( \([^\)]+\))?|[A-Za-z .\-\(\)]+)'
# r'(, ?| and |)'
# r'(<ref name="[^"]+" ?/>|<ref[^<]+?</ref>)'
# ')'
# )
# BRACES_REGEX = re.compile(r'{{[^}]+}}')
# USEFUL_REF_REGEX = re.compile(r'<ref( [^>/]*|)(?!/)>(?P<ref>.*?)</ref>')
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
# NUMBERS_DATE_FORMAT = re.compile(r'\d{4}-\d{2}-\d{2}$')
# SHORT_DATE_FORMAT = re.compile('[A-z][a-z]{2} ')
# NAMED_LINKS_REGEX = re.compile(r'\[\[[^|\]]+\|([^\]]+)\]\]')
# def get_ref_definitions(line):
# def replace_refs(line, definitions):
# def split_endorsements(line):
# def parse_wiki_text(text):
. Output only the next line. | name__istartswith=first_name_start, |
Given the code snippet: <|code_start|> if ' ' in name:
split_name = name.split(' ')
first_name_start = split_name[0][:3]
last_name = split_name[-1]
if len(last_name) > 3:
query = Endorser.objects.filter(
name__iendswith=last_name,
name__istartswith=first_name_start,
)
if query.exists():
return query.first()
def __unicode__(self):
return self.raw_text
class ElectoralVotes(models.Model):
"""Technically not imported from Wikipedia (entered manually), but this
model fits better in the Wikipedia app."""
state = models.OneToOneField(
Tag,
limit_choices_to={
'category': 8,
},
)
count = models.PositiveSmallIntegerField()
def __unicode__(self):
return "{state} - {count}".format(
state=self.state,
<|code_end|>
, generate the next line using the imports in this file:
from django.db import models
from endorsements.models import Endorser, Tag, Candidate
from wikipedia import utils
and context (functions, classes, or occasionally code) from other files:
# Path: endorsements/models.py
# class Endorser(models.Model):
# name = models.CharField(max_length=100)
# description = models.TextField(null=True, blank=True)
# url = models.URLField(null=True, blank=True)
# is_personal = models.BooleanField(default=True)
# max_followers = models.PositiveIntegerField(default=0)
# tags = models.ManyToManyField(Tag, blank=True)
# missing_image = models.BooleanField(default=True)
# current_position = models.ForeignKey('Position', blank=True, null=True)
#
# class Meta:
# ordering = ['-max_followers']
#
# def __unicode__(self):
# return self.name
#
# def has_url(self):
# return bool(self.url)
# has_url.boolean = True
#
# def get_tags(self):
# return ' / '.join(tag.name for tag in self.tags.all())
#
# def get_current_endorsement(self):
# try:
# return self.endorsement_set.latest('quote')
# except Endorsement.DoesNotExist:
# pass
#
# def get_image(self):
# return '<img src="{url}" width="100" />'.format(
# url=self.get_image_url()
# )
# get_image.allow_tags = True
#
# def get_image_url(self):
# image_name = 'missing' if self.missing_image else self.pk
# return 'https://s3.amazonaws.com/endorsementdb.com/images/endorsers/%s.png' % image_name
#
# def needs_quotes(self):
# current_endorsement = self.get_current_endorsement()
# return not current_endorsement.quote.text
# needs_quotes.boolean = True
#
# def get_absolute_url(self):
# return reverse('view-endorser', args=[self.pk])
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# Path: wikipedia/utils.py
# NAMED_REF_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+?)"|(?P<name2>[^ /"]+?))>[^<]+</ref>'
# )
# REF_BY_NAME_REGEX = re.compile(
# r'<ref name=("(?P<name1>[^"]+)?"|(?P<name2>[^ ]+?)) ?/>'
# )
# SPLIT_REGEX = re.compile(
# '('
# r'(\[\[[^\]]{5,}\]\]( \([^\)]+\))?|[A-Za-z .\-\(\)]+)'
# r'(, ?| and |)'
# r'(<ref name="[^"]+" ?/>|<ref[^<]+?</ref>)'
# ')'
# )
# BRACES_REGEX = re.compile(r'{{[^}]+}}')
# USEFUL_REF_REGEX = re.compile(r'<ref( [^>/]*|)(?!/)>(?P<ref>.*?)</ref>')
# ANY_REF_REGEX = re.compile(r'(<ref[^>]*(?!/)>.*?</ref>|<ref[^/]*?/>)')
# NUMBERS_DATE_FORMAT = re.compile(r'\d{4}-\d{2}-\d{2}$')
# SHORT_DATE_FORMAT = re.compile('[A-z][a-z]{2} ')
# NAMED_LINKS_REGEX = re.compile(r'\[\[[^|\]]+\|([^\]]+)\]\]')
# def get_ref_definitions(line):
# def replace_refs(line, definitions):
# def split_endorsements(line):
# def parse_wiki_text(text):
. Output only the next line. | count=self.count |
Predict the next line for this snippet: <|code_start|>
class Command(BaseCommand):
help = 'Bulk import all the newspaper endorsements'
def add_arguments(self, parser):
parser.add_argument('endorser_pk', type=int)
parser.add_argument('tags', nargs='+')
parser.add_argument(
<|code_end|>
with the help of current file imports:
from django.core.management.base import BaseCommand, CommandError
from endorsements.models import Tag, Endorser
and context from other files:
# Path: endorsements/models.py
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Endorser(models.Model):
# name = models.CharField(max_length=100)
# description = models.TextField(null=True, blank=True)
# url = models.URLField(null=True, blank=True)
# is_personal = models.BooleanField(default=True)
# max_followers = models.PositiveIntegerField(default=0)
# tags = models.ManyToManyField(Tag, blank=True)
# missing_image = models.BooleanField(default=True)
# current_position = models.ForeignKey('Position', blank=True, null=True)
#
# class Meta:
# ordering = ['-max_followers']
#
# def __unicode__(self):
# return self.name
#
# def has_url(self):
# return bool(self.url)
# has_url.boolean = True
#
# def get_tags(self):
# return ' / '.join(tag.name for tag in self.tags.all())
#
# def get_current_endorsement(self):
# try:
# return self.endorsement_set.latest('quote')
# except Endorsement.DoesNotExist:
# pass
#
# def get_image(self):
# return '<img src="{url}" width="100" />'.format(
# url=self.get_image_url()
# )
# get_image.allow_tags = True
#
# def get_image_url(self):
# image_name = 'missing' if self.missing_image else self.pk
# return 'https://s3.amazonaws.com/endorsementdb.com/images/endorsers/%s.png' % image_name
#
# def needs_quotes(self):
# current_endorsement = self.get_current_endorsement()
# return not current_endorsement.quote.text
# needs_quotes.boolean = True
#
# def get_absolute_url(self):
# return reverse('view-endorser', args=[self.pk])
, which may contain function names, class names, or code. Output only the next line. | '--remove', |
Next line prediction: <|code_start|>
class Command(BaseCommand):
help = 'Bulk import all the newspaper endorsements'
def add_arguments(self, parser):
parser.add_argument('endorser_pk', type=int)
<|code_end|>
. Use current file imports:
(from django.core.management.base import BaseCommand, CommandError
from endorsements.models import Tag, Endorser)
and context including class names, function names, or small code snippets from other files:
# Path: endorsements/models.py
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Endorser(models.Model):
# name = models.CharField(max_length=100)
# description = models.TextField(null=True, blank=True)
# url = models.URLField(null=True, blank=True)
# is_personal = models.BooleanField(default=True)
# max_followers = models.PositiveIntegerField(default=0)
# tags = models.ManyToManyField(Tag, blank=True)
# missing_image = models.BooleanField(default=True)
# current_position = models.ForeignKey('Position', blank=True, null=True)
#
# class Meta:
# ordering = ['-max_followers']
#
# def __unicode__(self):
# return self.name
#
# def has_url(self):
# return bool(self.url)
# has_url.boolean = True
#
# def get_tags(self):
# return ' / '.join(tag.name for tag in self.tags.all())
#
# def get_current_endorsement(self):
# try:
# return self.endorsement_set.latest('quote')
# except Endorsement.DoesNotExist:
# pass
#
# def get_image(self):
# return '<img src="{url}" width="100" />'.format(
# url=self.get_image_url()
# )
# get_image.allow_tags = True
#
# def get_image_url(self):
# image_name = 'missing' if self.missing_image else self.pk
# return 'https://s3.amazonaws.com/endorsementdb.com/images/endorsers/%s.png' % image_name
#
# def needs_quotes(self):
# current_endorsement = self.get_current_endorsement()
# return not current_endorsement.quote.text
# needs_quotes.boolean = True
#
# def get_absolute_url(self):
# return reverse('view-endorser', args=[self.pk])
. Output only the next line. | parser.add_argument('tags', nargs='+') |
Predict the next line for this snippet: <|code_start|>
class Command(BaseCommand):
help = 'Refresh the current_position field for all Endorsers'
def handle(self, *args, **options):
<|code_end|>
with the help of current file imports:
from django.core.management.base import BaseCommand, CommandError
from endorsements.models import Endorser
and context from other files:
# Path: endorsements/models.py
# class Endorser(models.Model):
# name = models.CharField(max_length=100)
# description = models.TextField(null=True, blank=True)
# url = models.URLField(null=True, blank=True)
# is_personal = models.BooleanField(default=True)
# max_followers = models.PositiveIntegerField(default=0)
# tags = models.ManyToManyField(Tag, blank=True)
# missing_image = models.BooleanField(default=True)
# current_position = models.ForeignKey('Position', blank=True, null=True)
#
# class Meta:
# ordering = ['-max_followers']
#
# def __unicode__(self):
# return self.name
#
# def has_url(self):
# return bool(self.url)
# has_url.boolean = True
#
# def get_tags(self):
# return ' / '.join(tag.name for tag in self.tags.all())
#
# def get_current_endorsement(self):
# try:
# return self.endorsement_set.latest('quote')
# except Endorsement.DoesNotExist:
# pass
#
# def get_image(self):
# return '<img src="{url}" width="100" />'.format(
# url=self.get_image_url()
# )
# get_image.allow_tags = True
#
# def get_image_url(self):
# image_name = 'missing' if self.missing_image else self.pk
# return 'https://s3.amazonaws.com/endorsementdb.com/images/endorsers/%s.png' % image_name
#
# def needs_quotes(self):
# current_endorsement = self.get_current_endorsement()
# return not current_endorsement.quote.text
# needs_quotes.boolean = True
#
# def get_absolute_url(self):
# return reverse('view-endorser', args=[self.pk])
, which may contain function names, class names, or code. Output only the next line. | n = 0 |
Based on the snippet: <|code_start|>
print row
rows.append(row)
# Now get all the references.
references = {}
for li in soup.findAll('li'):
ref_id = li['id'].replace('_note-', '_ref-')
cite = li.find('cite')
if cite:
url = cite.find('a')['href']
references[ref_id] = url
if not options['create']:
print "would have created", len(rows)
continue
print "ABOUT TO CREATE =========", len(rows)
bulk_import = BulkImport.objects.create(
slug=SLUG,
text=text,
)
for row in rows:
url = references.get(row['ref_id'])
ImportedNewspaper.objects.create(
bulk_import=bulk_import,
confirmed_endorser=None,
section=section,
name=row['name'],
<|code_end|>
, predict the immediate next line with the help of imports:
from datetime import datetime
from BeautifulSoup import BeautifulSoup as BS
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedNewspaper, NEWSPAPER_SECTIONS
import requests
and context (classes, functions, sometimes code) from other files:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedNewspaper(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# section = models.PositiveSmallIntegerField(choices=NEWSPAPER_SECTIONS)
# name = models.CharField(max_length=100)
# endorsement_2016 = models.CharField(max_length=15)
# endorsement_2012 = models.CharField(max_length=15, blank=True, null=True)
# circulation = models.PositiveIntegerField(blank=True, null=True)
# date = models.DateField(blank=True, null=True)
# city = models.CharField(max_length=20, blank=True, null=True)
# state = models.CharField(max_length=20, blank=True, null=True)
# url = models.URLField(blank=True, null=True, max_length=255)
#
# def __unicode__(self):
# return self.name
#
# def get_likely_endorser(self):
# name = self.name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.first()
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# NEWSPAPER_SECTIONS = (
# (3, 'Daily newspapers'),
# (7, 'Weekly newspapers'),
# #(11, 'Magazines'),
# #(15, 'College and university newspapers'),
# #(19, 'Foreign publications'),
# )
. Output only the next line. | endorsement_2016=row['2016'], |
Using the snippet: <|code_start|>
URL = 'https://en.wikipedia.org/w/api.php?action=parse&page={slug}&prop=text&format=json§ion={section}'
SLUG = 'Newspaper_endorsements_in_the_United_States_presidential_election,_2016'
class Command(BaseCommand):
help = 'Bulk import all the newspaper endorsements'
def add_arguments(self, parser):
parser.add_argument(
'--create',
<|code_end|>
, determine the next line of code. You have imports:
from datetime import datetime
from BeautifulSoup import BeautifulSoup as BS
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedNewspaper, NEWSPAPER_SECTIONS
import requests
and context (class names, function names, or code) available:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedNewspaper(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# section = models.PositiveSmallIntegerField(choices=NEWSPAPER_SECTIONS)
# name = models.CharField(max_length=100)
# endorsement_2016 = models.CharField(max_length=15)
# endorsement_2012 = models.CharField(max_length=15, blank=True, null=True)
# circulation = models.PositiveIntegerField(blank=True, null=True)
# date = models.DateField(blank=True, null=True)
# city = models.CharField(max_length=20, blank=True, null=True)
# state = models.CharField(max_length=20, blank=True, null=True)
# url = models.URLField(blank=True, null=True, max_length=255)
#
# def __unicode__(self):
# return self.name
#
# def get_likely_endorser(self):
# name = self.name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.first()
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# NEWSPAPER_SECTIONS = (
# (3, 'Daily newspapers'),
# (7, 'Weekly newspapers'),
# #(11, 'Magazines'),
# #(15, 'College and university newspapers'),
# #(19, 'Foreign publications'),
# )
. Output only the next line. | action='store_true', |
Given the following code snippet before the placeholder: <|code_start|>
URL = 'https://en.wikipedia.org/w/api.php?action=parse&page={slug}&prop=text&format=json§ion={section}'
SLUG = 'Newspaper_endorsements_in_the_United_States_presidential_election,_2016'
class Command(BaseCommand):
help = 'Bulk import all the newspaper endorsements'
def add_arguments(self, parser):
parser.add_argument(
'--create',
action='store_true',
dest='create',
default=False,
help="Creates everything (otherwise, it's a dry run)",
)
<|code_end|>
, predict the next line using imports from the current file:
from datetime import datetime
from BeautifulSoup import BeautifulSoup as BS
from django.core.management.base import BaseCommand, CommandError
from wikipedia.models import BulkImport, ImportedNewspaper, NEWSPAPER_SECTIONS
import requests
and context including class names, function names, and sometimes code from other files:
# Path: wikipedia/models.py
# class BulkImport(models.Model):
# slug = models.SlugField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# text = models.TextField()
#
# def __unicode__(self):
# return str(self.created_at)
#
# class ImportedNewspaper(models.Model):
# bulk_import = models.ForeignKey(BulkImport)
# confirmed_endorser = models.ForeignKey(Endorser, blank=True, null=True)
# section = models.PositiveSmallIntegerField(choices=NEWSPAPER_SECTIONS)
# name = models.CharField(max_length=100)
# endorsement_2016 = models.CharField(max_length=15)
# endorsement_2012 = models.CharField(max_length=15, blank=True, null=True)
# circulation = models.PositiveIntegerField(blank=True, null=True)
# date = models.DateField(blank=True, null=True)
# city = models.CharField(max_length=20, blank=True, null=True)
# state = models.CharField(max_length=20, blank=True, null=True)
# url = models.URLField(blank=True, null=True, max_length=255)
#
# def __unicode__(self):
# return self.name
#
# def get_likely_endorser(self):
# name = self.name.lower()
#
# query = Endorser.objects.filter(
# name__iexact=name,
# )
# if query.exists():
# return query.first()
#
# # See if putting "The" in front (or removing it) helps.
# if name.startswith('the '):
# query = Endorser.objects.filter(
# name__iexact=name[4:],
# )
# if query.exists():
# return query.first()
# else:
# query = Endorser.objects.filter(
# name__iexact='the ' + name,
# )
# if query.exists():
# return query.first()
#
# NEWSPAPER_SECTIONS = (
# (3, 'Daily newspapers'),
# (7, 'Weekly newspapers'),
# #(11, 'Magazines'),
# #(15, 'College and university newspapers'),
# #(19, 'Foreign publications'),
# )
. Output only the next line. | def handle(self, *args, **options): |
Given the code snippet: <|code_start|>
class Html5DateInput(forms.DateInput):
input_type = 'date'
class SourceForm(forms.ModelForm):
class Meta:
model = Source
exclude = ('name',)
widgets = {
'date': Html5DateInput(),
}
class PersonalTagForm(forms.Form):
tag = forms.ModelChoiceField(
Tag.objects.filter(is_personal=True)
)
class OrganizationTagForm(forms.Form):
<|code_end|>
, generate the next line using the imports in this file:
from django import forms
from endorsements.models import Candidate, Endorsement, Source, Position, \
Tag, Event
and context (functions, classes, or occasionally code) from other files:
# Path: endorsements/models.py
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# class Endorsement(models.Model):
# endorser = models.ForeignKey(Endorser)
# quote = models.ForeignKey(Quote)
# position = models.ForeignKey(Position)
# confirmed = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
#
# class Meta:
# ordering = ['-quote']
#
# def get_date(self):
# return self.quote.date
#
# def __unicode__(self):
# return unicode(self.position)
#
# def get_truncated_quote(self):
# if len(self.quote.text) > 100:
# return self.quote.text[:100] + '...'
# else:
# return self.quote.text
#
# class Source(models.Model):
# date = models.DateField(null=True, blank=True)
# url = models.URLField(unique=True, blank=True, max_length=300)
# name = models.CharField(max_length=100, default='')
#
# class Meta:
# ordering = ['url', '-date']
#
# def get_date_display(self):
# if self.date:
# return self.date.strftime('%b %d, %Y')
#
# def __unicode__(self):
# return "{url} on {date}".format(
# url=self.url,
# date=self.date,
# )
#
# class Position(models.Model):
# colour = models.CharField(max_length=20, blank=True)
# past_tense_prefix = models.CharField(max_length=10, blank=True)
# present_tense_prefix = models.CharField(max_length=10, blank=True)
# suffix = models.CharField(max_length=30)
# slug = models.SlugField()
# show_on_load = models.BooleanField(default=False)
#
# def __unicode__(self):
# return self.get_name_display()
#
# def get_name_display(self):
# if self.present_tense_prefix:
# return "{present} {suffix}".format(
# present=self.present_tense_prefix,
# suffix=self.suffix
# )
# else:
# return self.suffix
#
# def get_present_display(self):
# if self.present_tense_prefix:
# return self.present_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# def get_past_display(self):
# if self.past_tense_prefix:
# return self.past_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Event(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# start_date = models.DateField()
# end_date = models.DateField()
#
# def __unicode__(self):
# return self.name
. Output only the next line. | tag = forms.ModelChoiceField( |
Using the snippet: <|code_start|> )
source_name = forms.CharField(
widget=forms.TextInput(attrs={'placeholder': 'e.g., Politico'})
)
event = forms.ModelChoiceField(Event.objects.all(), required=False)
class EndorsementFormWithoutPosition(EndorsementForm):
position = None
source_name = forms.CharField(required=False)
date = forms.DateField(widget=Html5DateInput, required=False)
source_url = forms.URLField(required=False)
class EndorserForm(forms.Form):
name = forms.CharField(max_length=100)
description = forms.CharField(
widget=forms.Textarea(attrs={'rows': 4}),
required=False,
)
url = forms.URLField(required=False)
twitter_username_1 = forms.CharField(
max_length=15,
widget=forms.TextInput(attrs={'placeholder': '@username1'}),
required=False
)
twitter_username_2 = forms.CharField(
max_length=15,
widget=forms.TextInput(attrs={'placeholder': '@username1'}),
required=False
<|code_end|>
, determine the next line of code. You have imports:
from django import forms
from endorsements.models import Candidate, Endorsement, Source, Position, \
Tag, Event
and context (class names, function names, or code) available:
# Path: endorsements/models.py
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# class Endorsement(models.Model):
# endorser = models.ForeignKey(Endorser)
# quote = models.ForeignKey(Quote)
# position = models.ForeignKey(Position)
# confirmed = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
#
# class Meta:
# ordering = ['-quote']
#
# def get_date(self):
# return self.quote.date
#
# def __unicode__(self):
# return unicode(self.position)
#
# def get_truncated_quote(self):
# if len(self.quote.text) > 100:
# return self.quote.text[:100] + '...'
# else:
# return self.quote.text
#
# class Source(models.Model):
# date = models.DateField(null=True, blank=True)
# url = models.URLField(unique=True, blank=True, max_length=300)
# name = models.CharField(max_length=100, default='')
#
# class Meta:
# ordering = ['url', '-date']
#
# def get_date_display(self):
# if self.date:
# return self.date.strftime('%b %d, %Y')
#
# def __unicode__(self):
# return "{url} on {date}".format(
# url=self.url,
# date=self.date,
# )
#
# class Position(models.Model):
# colour = models.CharField(max_length=20, blank=True)
# past_tense_prefix = models.CharField(max_length=10, blank=True)
# present_tense_prefix = models.CharField(max_length=10, blank=True)
# suffix = models.CharField(max_length=30)
# slug = models.SlugField()
# show_on_load = models.BooleanField(default=False)
#
# def __unicode__(self):
# return self.get_name_display()
#
# def get_name_display(self):
# if self.present_tense_prefix:
# return "{present} {suffix}".format(
# present=self.present_tense_prefix,
# suffix=self.suffix
# )
# else:
# return self.suffix
#
# def get_present_display(self):
# if self.present_tense_prefix:
# return self.present_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# def get_past_display(self):
# if self.past_tense_prefix:
# return self.past_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Event(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# start_date = models.DateField()
# end_date = models.DateField()
#
# def __unicode__(self):
# return self.name
. Output only the next line. | ) |
Predict the next line for this snippet: <|code_start|> Tag.objects.filter(is_personal=False)
)
class TagFilterForm(forms.Form):
filter_tags_show = forms.ModelMultipleChoiceField(
Tag.objects.all(), required=False
)
filter_tags_hide = forms.ModelMultipleChoiceField(
Tag.objects.all(), required=False
)
class EndorsementForm(forms.Form):
position = forms.ModelChoiceField(Position.objects.all())
quote = forms.CharField(
widget=forms.Textarea(attrs={'rows': 1}),
required=False,
)
context = forms.CharField(
widget=forms.Textarea(attrs={
'rows': 1,
'placeholder': 'e.g., "In an editorial endorsement" (leave blank if no context is required)'
}),
required=False,
)
date = forms.DateField(widget=Html5DateInput)
source_url = forms.URLField(
widget=forms.TextInput(attrs={'placeholder': 'http://example.com'})
)
<|code_end|>
with the help of current file imports:
from django import forms
from endorsements.models import Candidate, Endorsement, Source, Position, \
Tag, Event
and context from other files:
# Path: endorsements/models.py
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# class Endorsement(models.Model):
# endorser = models.ForeignKey(Endorser)
# quote = models.ForeignKey(Quote)
# position = models.ForeignKey(Position)
# confirmed = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
#
# class Meta:
# ordering = ['-quote']
#
# def get_date(self):
# return self.quote.date
#
# def __unicode__(self):
# return unicode(self.position)
#
# def get_truncated_quote(self):
# if len(self.quote.text) > 100:
# return self.quote.text[:100] + '...'
# else:
# return self.quote.text
#
# class Source(models.Model):
# date = models.DateField(null=True, blank=True)
# url = models.URLField(unique=True, blank=True, max_length=300)
# name = models.CharField(max_length=100, default='')
#
# class Meta:
# ordering = ['url', '-date']
#
# def get_date_display(self):
# if self.date:
# return self.date.strftime('%b %d, %Y')
#
# def __unicode__(self):
# return "{url} on {date}".format(
# url=self.url,
# date=self.date,
# )
#
# class Position(models.Model):
# colour = models.CharField(max_length=20, blank=True)
# past_tense_prefix = models.CharField(max_length=10, blank=True)
# present_tense_prefix = models.CharField(max_length=10, blank=True)
# suffix = models.CharField(max_length=30)
# slug = models.SlugField()
# show_on_load = models.BooleanField(default=False)
#
# def __unicode__(self):
# return self.get_name_display()
#
# def get_name_display(self):
# if self.present_tense_prefix:
# return "{present} {suffix}".format(
# present=self.present_tense_prefix,
# suffix=self.suffix
# )
# else:
# return self.suffix
#
# def get_present_display(self):
# if self.present_tense_prefix:
# return self.present_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# def get_past_display(self):
# if self.past_tense_prefix:
# return self.past_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Event(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# start_date = models.DateField()
# end_date = models.DateField()
#
# def __unicode__(self):
# return self.name
, which may contain function names, class names, or code. Output only the next line. | source_name = forms.CharField( |
Given the following code snippet before the placeholder: <|code_start|> widget=forms.Textarea(attrs={'rows': 1}),
required=False,
)
context = forms.CharField(
widget=forms.Textarea(attrs={
'rows': 1,
'placeholder': 'e.g., "In an editorial endorsement" (leave blank if no context is required)'
}),
required=False,
)
date = forms.DateField(widget=Html5DateInput)
source_url = forms.URLField(
widget=forms.TextInput(attrs={'placeholder': 'http://example.com'})
)
source_name = forms.CharField(
widget=forms.TextInput(attrs={'placeholder': 'e.g., Politico'})
)
event = forms.ModelChoiceField(Event.objects.all(), required=False)
class EndorsementFormWithoutPosition(EndorsementForm):
position = None
source_name = forms.CharField(required=False)
date = forms.DateField(widget=Html5DateInput, required=False)
source_url = forms.URLField(required=False)
class EndorserForm(forms.Form):
name = forms.CharField(max_length=100)
description = forms.CharField(
<|code_end|>
, predict the next line using imports from the current file:
from django import forms
from endorsements.models import Candidate, Endorsement, Source, Position, \
Tag, Event
and context including class names, function names, and sometimes code from other files:
# Path: endorsements/models.py
# class Candidate(models.Model):
# endorser_link = models.OneToOneField(Endorser)
# name = models.CharField(max_length=50)
# description = models.TextField()
# color = models.CharField(max_length=6)
# rgb = models.CharField(max_length=13)
# still_running = models.BooleanField(default=False)
# position = models.OneToOneField('Position', blank=True, null=True)
#
# def __unicode__(self):
# return self.name
#
# class Endorsement(models.Model):
# endorser = models.ForeignKey(Endorser)
# quote = models.ForeignKey(Quote)
# position = models.ForeignKey(Position)
# confirmed = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
#
# class Meta:
# ordering = ['-quote']
#
# def get_date(self):
# return self.quote.date
#
# def __unicode__(self):
# return unicode(self.position)
#
# def get_truncated_quote(self):
# if len(self.quote.text) > 100:
# return self.quote.text[:100] + '...'
# else:
# return self.quote.text
#
# class Source(models.Model):
# date = models.DateField(null=True, blank=True)
# url = models.URLField(unique=True, blank=True, max_length=300)
# name = models.CharField(max_length=100, default='')
#
# class Meta:
# ordering = ['url', '-date']
#
# def get_date_display(self):
# if self.date:
# return self.date.strftime('%b %d, %Y')
#
# def __unicode__(self):
# return "{url} on {date}".format(
# url=self.url,
# date=self.date,
# )
#
# class Position(models.Model):
# colour = models.CharField(max_length=20, blank=True)
# past_tense_prefix = models.CharField(max_length=10, blank=True)
# present_tense_prefix = models.CharField(max_length=10, blank=True)
# suffix = models.CharField(max_length=30)
# slug = models.SlugField()
# show_on_load = models.BooleanField(default=False)
#
# def __unicode__(self):
# return self.get_name_display()
#
# def get_name_display(self):
# if self.present_tense_prefix:
# return "{present} {suffix}".format(
# present=self.present_tense_prefix,
# suffix=self.suffix
# )
# else:
# return self.suffix
#
# def get_present_display(self):
# if self.present_tense_prefix:
# return self.present_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# def get_past_display(self):
# if self.past_tense_prefix:
# return self.past_tense_prefix + ' ' + self.suffix
# else:
# return self.suffix
#
# class Tag(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# is_personal = models.BooleanField(default=True)
# category = models.ForeignKey(Category, null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# ordering = ['name']
#
# def get_mode(self):
# allow_personal = self.category.allow_personal
# allow_org = self.category.allow_org
# if allow_personal and allow_org:
# return 'none'
# elif allow_personal:
# return 'personal'
# else:
# return 'organization'
#
# class Event(models.Model):
# name = models.CharField(max_length=50)
# description = models.TextField(blank=True)
# start_date = models.DateField()
# end_date = models.DateField()
#
# def __unicode__(self):
# return self.name
. Output only the next line. | widget=forms.Textarea(attrs={'rows': 4}), |
Using the snippet: <|code_start|> seqs = SffExtractor([open(sff_fpath, 'rb')], min_left_clip=5,
trim=True).seqs
seqs = list(seqs)
assert len(seqs) == 10
assert str(seqs[0].seq).startswith('GTCTACATGTTGGTTAACCCGTACTGAT')
# empty file
empty_fhand = NamedTemporaryFile()
try:
seqs = SffExtractor([open(empty_fhand.name, 'rb')]).seqs
list(seqs)
self.fail('ValueError expected.')
except ValueError:
pass
# Wrong file type
fasta_fhand = NamedTemporaryFile()
fasta_fhand.write('>a_seq\n' + 'ACTG' * 30 + '\n')
fasta_fhand.flush()
try:
seqs = SffExtractor([open(fasta_fhand.name, 'rb')]).seqs
list(seqs)
except ValueError:
pass
def test_check_nucl_counts(self):
'It checks that the nucleotide freqs are all below the given threshold'
sff_fpath = os.path.join(TEST_DATA_DIR, '10_454_reads.sff')
extractor = SffExtractor([open(sff_fpath, 'rb')])
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import os.path
from tempfile import NamedTemporaryFile
from subprocess import check_output, CalledProcessError
from crumbs.seq.sff_extract import SffExtractor
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
and context (class names, function names, or code) available:
# Path: crumbs/seq/sff_extract.py
# class SffExtractor(object):
# 'This class extracts the reads from an SFF file'
# def __init__(self, sff_fhands, trim=False, min_left_clip=0,
# nucls_to_check=50, max_nucl_freq_threshold=0.5):
# 'It inits the class'
# self.fhands = sff_fhands
# self.trim = trim
# self.min_left_clip = min_left_clip
#
# # checking
# self.nucls_to_check = nucls_to_check
# self.max_nucl_freq_threshold = max_nucl_freq_threshold
# self.nucl_counts = {}
#
# @property
# def seqs(self):
# 'It yields all sequences'
# for fhand in self.fhands:
# self._prepare_nucl_counts(fhand.name)
# if not self.min_left_clip:
# seqs = SffIterator(fhand, trim=self.trim)
# else:
# seqs = _min_left_clipped_seqs(fhand, self.trim,
# self.min_left_clip)
# for record in seqs:
# self._update_nucl_counts(str(record.seq), fhand.name)
# yield record
#
# def _prepare_nucl_counts(self, fpath):
# 'It prepares the structure to store the nucleotide counts'
# counts = {'A': array('L', [0] * self.nucls_to_check),
# 'T': array('L', [0] * self.nucls_to_check),
# 'C': array('L', [0] * self.nucls_to_check),
# 'G': array('L', [0] * self.nucls_to_check)}
# self.nucl_counts[fpath] = counts
#
# def _update_nucl_counts(self, seq, fpath):
# 'Given a seq (as a string) it updates the nucleotide counts'
# seq = seq[:self.nucls_to_check]
# counts = self.nucl_counts
# for index, nucl in enumerate(seq):
# try:
# counts[fpath][nucl][index] += 1
# except KeyError:
# pass # we do not count the lowercase letters
#
# @property
# def clip_advice(self):
# 'It checks how many positions have a high max nucl freq.'
# advices = {}
# for fhand in self.fhands:
# fpath = fhand.name
# counts = self.nucl_counts[fpath]
# treshold = self.max_nucl_freq_threshold
# pos_above_threshold = 0
# seq_above_threshold = ''
# index = 0
# for index in range(self.nucls_to_check):
# num_nucls = [counts['A'][index], counts['T'][index],
# counts['C'][index], counts['G'][index]]
# tot_nucls = sum(num_nucls)
# if not tot_nucls:
# continue
# freq_nucls = [i / tot_nucls for i in num_nucls]
# above_threshold = [i >= treshold for i in freq_nucls]
# if any(above_threshold):
# pos_above_threshold += 1
# seq_above_threshold += _get_nucl_with_max_freq('ATCG',
# freq_nucls)
# else:
# break
# if pos_above_threshold:
# if self.trim:
# # number of nucleotides to remove next time, the ones
# # that we have detected plus the ones already removed
# advice = index + self.min_left_clip, seq_above_threshold
# else:
# advice = index, seq_above_threshold
# else:
# advice = None
# advices[fpath] = advice
# return advices
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
. Output only the next line. | seqs = extractor.seqs |
Predict the next line after this snippet: <|code_start|> assert extractor.clip_advice[sff_fpath] == (5, 'A')
extractor = SffExtractor([open(sff_fpath, 'rb')], min_left_clip=4,
trim=False)
seqs = extractor.seqs
seqs = list(seqs)
assert len(seqs) == 10
assert extractor.clip_advice[sff_fpath] == (5, 'A')
extractor = SffExtractor([open(sff_fpath, 'rb')], min_left_clip=4,
trim=True)
seqs = extractor.seqs
seqs = list(seqs)
assert len(seqs) == 10
assert extractor.clip_advice[sff_fpath] == (5, 'A')
extractor = SffExtractor([open(sff_fpath, 'rb')], min_left_clip=5,
trim=True)
seqs = extractor.seqs
seqs = list(seqs)
assert len(seqs) == 10
assert not extractor.clip_advice[sff_fpath]
class SffExtractBinTest(unittest.TestCase):
'It tests the sff_extract binary'
def test_extract_sff(self):
'It tests the sff_extract binary'
sff_bin = os.path.join(BIN_DIR, 'sff_extract')
<|code_end|>
using the current file's imports:
import unittest
import os.path
from tempfile import NamedTemporaryFile
from subprocess import check_output, CalledProcessError
from crumbs.seq.sff_extract import SffExtractor
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
and any relevant context from other files:
# Path: crumbs/seq/sff_extract.py
# class SffExtractor(object):
# 'This class extracts the reads from an SFF file'
# def __init__(self, sff_fhands, trim=False, min_left_clip=0,
# nucls_to_check=50, max_nucl_freq_threshold=0.5):
# 'It inits the class'
# self.fhands = sff_fhands
# self.trim = trim
# self.min_left_clip = min_left_clip
#
# # checking
# self.nucls_to_check = nucls_to_check
# self.max_nucl_freq_threshold = max_nucl_freq_threshold
# self.nucl_counts = {}
#
# @property
# def seqs(self):
# 'It yields all sequences'
# for fhand in self.fhands:
# self._prepare_nucl_counts(fhand.name)
# if not self.min_left_clip:
# seqs = SffIterator(fhand, trim=self.trim)
# else:
# seqs = _min_left_clipped_seqs(fhand, self.trim,
# self.min_left_clip)
# for record in seqs:
# self._update_nucl_counts(str(record.seq), fhand.name)
# yield record
#
# def _prepare_nucl_counts(self, fpath):
# 'It prepares the structure to store the nucleotide counts'
# counts = {'A': array('L', [0] * self.nucls_to_check),
# 'T': array('L', [0] * self.nucls_to_check),
# 'C': array('L', [0] * self.nucls_to_check),
# 'G': array('L', [0] * self.nucls_to_check)}
# self.nucl_counts[fpath] = counts
#
# def _update_nucl_counts(self, seq, fpath):
# 'Given a seq (as a string) it updates the nucleotide counts'
# seq = seq[:self.nucls_to_check]
# counts = self.nucl_counts
# for index, nucl in enumerate(seq):
# try:
# counts[fpath][nucl][index] += 1
# except KeyError:
# pass # we do not count the lowercase letters
#
# @property
# def clip_advice(self):
# 'It checks how many positions have a high max nucl freq.'
# advices = {}
# for fhand in self.fhands:
# fpath = fhand.name
# counts = self.nucl_counts[fpath]
# treshold = self.max_nucl_freq_threshold
# pos_above_threshold = 0
# seq_above_threshold = ''
# index = 0
# for index in range(self.nucls_to_check):
# num_nucls = [counts['A'][index], counts['T'][index],
# counts['C'][index], counts['G'][index]]
# tot_nucls = sum(num_nucls)
# if not tot_nucls:
# continue
# freq_nucls = [i / tot_nucls for i in num_nucls]
# above_threshold = [i >= treshold for i in freq_nucls]
# if any(above_threshold):
# pos_above_threshold += 1
# seq_above_threshold += _get_nucl_with_max_freq('ATCG',
# freq_nucls)
# else:
# break
# if pos_above_threshold:
# if self.trim:
# # number of nucleotides to remove next time, the ones
# # that we have detected plus the ones already removed
# advice = index + self.min_left_clip, seq_above_threshold
# else:
# advice = index, seq_above_threshold
# else:
# advice = None
# advices[fpath] = advice
# return advices
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
. Output only the next line. | assert 'usage' in check_output([sff_bin, '-h']) |
Given the code snippet: <|code_start|> seqs = extractor.seqs
seqs = list(seqs)
assert len(seqs) == 10
assert extractor.clip_advice[sff_fpath] == (5, 'A')
extractor = SffExtractor([open(sff_fpath, 'rb')], min_left_clip=4,
trim=True)
seqs = extractor.seqs
seqs = list(seqs)
assert len(seqs) == 10
assert extractor.clip_advice[sff_fpath] == (5, 'A')
extractor = SffExtractor([open(sff_fpath, 'rb')], min_left_clip=5,
trim=True)
seqs = extractor.seqs
seqs = list(seqs)
assert len(seqs) == 10
assert not extractor.clip_advice[sff_fpath]
class SffExtractBinTest(unittest.TestCase):
'It tests the sff_extract binary'
def test_extract_sff(self):
'It tests the sff_extract binary'
sff_bin = os.path.join(BIN_DIR, 'sff_extract')
assert 'usage' in check_output([sff_bin, '-h'])
assert 'usage' in check_output([sff_bin])
# version string
<|code_end|>
, generate the next line using the imports in this file:
import unittest
import os.path
from tempfile import NamedTemporaryFile
from subprocess import check_output, CalledProcessError
from crumbs.seq.sff_extract import SffExtractor
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
and context (functions, classes, or occasionally code) from other files:
# Path: crumbs/seq/sff_extract.py
# class SffExtractor(object):
# 'This class extracts the reads from an SFF file'
# def __init__(self, sff_fhands, trim=False, min_left_clip=0,
# nucls_to_check=50, max_nucl_freq_threshold=0.5):
# 'It inits the class'
# self.fhands = sff_fhands
# self.trim = trim
# self.min_left_clip = min_left_clip
#
# # checking
# self.nucls_to_check = nucls_to_check
# self.max_nucl_freq_threshold = max_nucl_freq_threshold
# self.nucl_counts = {}
#
# @property
# def seqs(self):
# 'It yields all sequences'
# for fhand in self.fhands:
# self._prepare_nucl_counts(fhand.name)
# if not self.min_left_clip:
# seqs = SffIterator(fhand, trim=self.trim)
# else:
# seqs = _min_left_clipped_seqs(fhand, self.trim,
# self.min_left_clip)
# for record in seqs:
# self._update_nucl_counts(str(record.seq), fhand.name)
# yield record
#
# def _prepare_nucl_counts(self, fpath):
# 'It prepares the structure to store the nucleotide counts'
# counts = {'A': array('L', [0] * self.nucls_to_check),
# 'T': array('L', [0] * self.nucls_to_check),
# 'C': array('L', [0] * self.nucls_to_check),
# 'G': array('L', [0] * self.nucls_to_check)}
# self.nucl_counts[fpath] = counts
#
# def _update_nucl_counts(self, seq, fpath):
# 'Given a seq (as a string) it updates the nucleotide counts'
# seq = seq[:self.nucls_to_check]
# counts = self.nucl_counts
# for index, nucl in enumerate(seq):
# try:
# counts[fpath][nucl][index] += 1
# except KeyError:
# pass # we do not count the lowercase letters
#
# @property
# def clip_advice(self):
# 'It checks how many positions have a high max nucl freq.'
# advices = {}
# for fhand in self.fhands:
# fpath = fhand.name
# counts = self.nucl_counts[fpath]
# treshold = self.max_nucl_freq_threshold
# pos_above_threshold = 0
# seq_above_threshold = ''
# index = 0
# for index in range(self.nucls_to_check):
# num_nucls = [counts['A'][index], counts['T'][index],
# counts['C'][index], counts['G'][index]]
# tot_nucls = sum(num_nucls)
# if not tot_nucls:
# continue
# freq_nucls = [i / tot_nucls for i in num_nucls]
# above_threshold = [i >= treshold for i in freq_nucls]
# if any(above_threshold):
# pos_above_threshold += 1
# seq_above_threshold += _get_nucl_with_max_freq('ATCG',
# freq_nucls)
# else:
# break
# if pos_above_threshold:
# if self.trim:
# # number of nucleotides to remove next time, the ones
# # that we have detected plus the ones already removed
# advice = index + self.min_left_clip, seq_above_threshold
# else:
# advice = index, seq_above_threshold
# else:
# advice = None
# advices[fpath] = advice
# return advices
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
. Output only the next line. | stderr = NamedTemporaryFile() |
Next line prediction: <|code_start|> seq = SeqWrapper(SEQITEM, seq, 'fasta')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='s1', lines=['>s1\n', 'ACTG\n'],
annotations={'a': 'b'})
assert seq.object is not seq2.object
assert seq.object.lines is not seq2.object.lines
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+\n', '!???\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='seq',
lines=['@seq\n', 'ACTG\n', '+\n', '!???\n'])
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaaaaaaa\n', '+\n',
'@AAABBBB\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq-illumina')
seq2 = copy_seq(seq, seq='ACTGactg')
assert seq2.object == SeqItem(name='seq',
lines=['@seq\n', 'ACTGactg\n', '+\n',
'@AAABBBB\n'])
def test_change_name(self):
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+seq\n', '!???\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq = copy_seq(seq, name='seq2')
assert seq.object == ('seq2', ['@seq2\n', 'aaaa\n', '+\n', '!???\n'],
<|code_end|>
. Use current file imports:
(import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
. Output only the next line. | {}) |
Based on the snippet: <|code_start|> expected_seq = SeqWrapper(SEQITEM, expected_seq, 'fasta')
assert slice_seq(seq, 1, 5) == expected_seq
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aata\n', '+\n', '!?!?\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq = slice_seq(seq, 1, 3)
assert list(get_int_qualities(seq)) == [30, 0]
assert get_str_seq(seq) == 'at'
assert seq.object.lines == ['@seq\n', 'at\n', '+\n', '?!\n']
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaatcaaa\n', '+\n',
'@AAABBBB\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq-illumina')
seq_ = slice_seq(seq, 1, 5)
assert list(get_int_qualities(seq_)) == [1, 1, 1, 2]
assert get_str_seq(seq_) == get_str_seq(seq)[1: 5]
# It tests the stop is None
seq = SeqItem('seq', ['>seq\n', 'aCTG'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
assert get_str_seq(slice_seq(seq, 1, None)) == 'aCTG'[1:]
assert get_str_seq(slice_seq(seq, None, 1)) == 'aCTG'[:1]
def test_copy(self):
# with fasta
seq = SeqItem(name='s1', lines=['>s1\n', 'ACTG\n', 'GTAC\n'],
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY
and context (classes, functions, sometimes code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
. Output only the next line. | annotations={'a': 'b'}) |
Next line prediction: <|code_start|> seq = SeqItem(name='s1', lines=['>s1\n', 'ACTGGTAC\n'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
assert get_length(seq) == 8
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+\n', '????\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
assert get_length(seq) == 4
def test_str_seq(self):
# with fasta
seq = SeqItem(name='s1', lines=['>s1\n', 'ACTGGTAC\n'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
assert get_str_seq(seq) == 'ACTGGTAC'
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+\n', '????\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
assert get_str_seq(seq) == 'aaaa'
def test_int_qualities(self):
# with fasta
seq = SeqItem(name='s1', lines=['>s1\n', 'ACTG\n', 'GTAC\n'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
try:
assert get_int_qualities(seq)
self.fail('AttributeError expected')
<|code_end|>
. Use current file imports:
(import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
. Output only the next line. | except AttributeError: |
Next line prediction: <|code_start|> assert seq.object.lines == ['@seq\n', 'at\n', '+\n', '?!\n']
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaatcaaa\n', '+\n',
'@AAABBBB\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq-illumina')
seq_ = slice_seq(seq, 1, 5)
assert list(get_int_qualities(seq_)) == [1, 1, 1, 2]
assert get_str_seq(seq_) == get_str_seq(seq)[1: 5]
# It tests the stop is None
seq = SeqItem('seq', ['>seq\n', 'aCTG'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
assert get_str_seq(slice_seq(seq, 1, None)) == 'aCTG'[1:]
assert get_str_seq(slice_seq(seq, None, 1)) == 'aCTG'[:1]
def test_copy(self):
# with fasta
seq = SeqItem(name='s1', lines=['>s1\n', 'ACTG\n', 'GTAC\n'],
annotations={'a': 'b'})
seq = SeqWrapper(SEQITEM, seq, 'fasta')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='s1', lines=['>s1\n', 'ACTG\n'],
annotations={'a': 'b'})
assert seq.object is not seq2.object
assert seq.object.lines is not seq2.object.lines
# with fastq
seq = SeqItem(name='seq',
<|code_end|>
. Use current file imports:
(import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
. Output only the next line. | lines=['@seq\n', 'aaaa\n', '+\n', '!???\n']) |
Continue the code snippet: <|code_start|> assert get_str_seq(seq_) == get_str_seq(seq)[1: 5]
# It tests the stop is None
seq = SeqItem('seq', ['>seq\n', 'aCTG'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
assert get_str_seq(slice_seq(seq, 1, None)) == 'aCTG'[1:]
assert get_str_seq(slice_seq(seq, None, 1)) == 'aCTG'[:1]
def test_copy(self):
# with fasta
seq = SeqItem(name='s1', lines=['>s1\n', 'ACTG\n', 'GTAC\n'],
annotations={'a': 'b'})
seq = SeqWrapper(SEQITEM, seq, 'fasta')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='s1', lines=['>s1\n', 'ACTG\n'],
annotations={'a': 'b'})
assert seq.object is not seq2.object
assert seq.object.lines is not seq2.object.lines
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+\n', '!???\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='seq',
lines=['@seq\n', 'ACTG\n', '+\n', '!???\n'])
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaaaaaaa\n', '+\n',
<|code_end|>
. Use current file imports:
import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY
and context (classes, functions, or code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
. Output only the next line. | '@AAABBBB\n']) |
Predict the next line for this snippet: <|code_start|>
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+\n', '!???\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='seq',
lines=['@seq\n', 'ACTG\n', '+\n', '!???\n'])
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaaaaaaa\n', '+\n',
'@AAABBBB\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq-illumina')
seq2 = copy_seq(seq, seq='ACTGactg')
assert seq2.object == SeqItem(name='seq',
lines=['@seq\n', 'ACTGactg\n', '+\n',
'@AAABBBB\n'])
def test_change_name(self):
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+seq\n', '!???\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq = copy_seq(seq, name='seq2')
assert seq.object == ('seq2', ['@seq2\n', 'aaaa\n', '+\n', '!???\n'],
{})
seq = SeqItem(name='seq', lines=['>seq\n', 'aaaa\n'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
seq = copy_seq(seq, name='seq2')
assert seq.object == ('seq2', ['>seq2\n', 'aaaa\n'],
<|code_end|>
with the help of current file imports:
import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY
and context from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
, which may contain function names, class names, or code. Output only the next line. | {}) |
Here is a snippet: <|code_start|> assert get_str_seq(slice_seq(seq, 1, None)) == 'aCTG'[1:]
assert get_str_seq(slice_seq(seq, None, 1)) == 'aCTG'[:1]
def test_copy(self):
# with fasta
seq = SeqItem(name='s1', lines=['>s1\n', 'ACTG\n', 'GTAC\n'],
annotations={'a': 'b'})
seq = SeqWrapper(SEQITEM, seq, 'fasta')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='s1', lines=['>s1\n', 'ACTG\n'],
annotations={'a': 'b'})
assert seq.object is not seq2.object
assert seq.object.lines is not seq2.object.lines
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aaaa\n', '+\n', '!???\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq2 = copy_seq(seq, seq='ACTG')
assert seq2.object == SeqItem(name='seq',
lines=['@seq\n', 'ACTG\n', '+\n', '!???\n'])
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaaaaaaa\n', '+\n',
'@AAABBBB\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq-illumina')
seq2 = copy_seq(seq, seq='ACTGactg')
assert seq2.object == SeqItem(name='seq',
lines=['@seq\n', 'ACTGactg\n', '+\n',
<|code_end|>
. Write the next line using the current file imports:
import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY
and context from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
, which may include functions, classes, or code. Output only the next line. | '@AAABBBB\n']) |
Given snippet: <|code_start|> seq = SeqItem(name='s1', lines=['>s1\n', 'ACTGGTAC\n'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
expected_seq = SeqItem(name='s1', lines=['>s1\n', 'CTGG\n'])
expected_seq = SeqWrapper(SEQITEM, expected_seq, 'fasta')
assert slice_seq(seq, 1, 5) == expected_seq
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aata\n', '+\n', '!?!?\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq = slice_seq(seq, 1, 3)
assert list(get_int_qualities(seq)) == [30, 0]
assert get_str_seq(seq) == 'at'
assert seq.object.lines == ['@seq\n', 'at\n', '+\n', '?!\n']
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaatcaaa\n', '+\n',
'@AAABBBB\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq-illumina')
seq_ = slice_seq(seq, 1, 5)
assert list(get_int_qualities(seq_)) == [1, 1, 1, 2]
assert get_str_seq(seq_) == get_str_seq(seq)[1: 5]
# It tests the stop is None
seq = SeqItem('seq', ['>seq\n', 'aCTG'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
assert get_str_seq(slice_seq(seq, 1, None)) == 'aCTG'[1:]
assert get_str_seq(slice_seq(seq, None, 1)) == 'aCTG'[:1]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY
and context:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
which might include code, classes, or functions. Output only the next line. | def test_copy(self): |
Continue the code snippet: <|code_start|> seq = SeqItem(name='s1', lines=['>s1\n', 'ACTGGTAC\n'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
expected_seq = SeqItem(name='s1', lines=['>s1\n', 'CTGG\n'])
expected_seq = SeqWrapper(SEQITEM, expected_seq, 'fasta')
assert slice_seq(seq, 1, 5) == expected_seq
# with fastq
seq = SeqItem(name='seq',
lines=['@seq\n', 'aata\n', '+\n', '!?!?\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq')
seq = slice_seq(seq, 1, 3)
assert list(get_int_qualities(seq)) == [30, 0]
assert get_str_seq(seq) == 'at'
assert seq.object.lines == ['@seq\n', 'at\n', '+\n', '?!\n']
# with multiline fastq
seq = SeqItem(name='seq', lines=['@seq\n', 'aaatcaaa\n', '+\n',
'@AAABBBB\n'])
seq = SeqWrapper(SEQITEM, seq, 'fastq-illumina')
seq_ = slice_seq(seq, 1, 5)
assert list(get_int_qualities(seq_)) == [1, 1, 1, 2]
assert get_str_seq(seq_) == get_str_seq(seq)[1: 5]
# It tests the stop is None
seq = SeqItem('seq', ['>seq\n', 'aCTG'])
seq = SeqWrapper(SEQITEM, seq, 'fasta')
assert get_str_seq(slice_seq(seq, 1, None)) == 'aCTG'[1:]
assert get_str_seq(slice_seq(seq, None, 1)) == 'aCTG'[:1]
<|code_end|>
. Use current file imports:
import unittest
from crumbs.seq.seq import (get_length, get_str_seq, get_int_qualities,
get_str_qualities, slice_seq, copy_seq, SeqItem,
SeqWrapper)
from crumbs.utils.tags import SEQITEM, ILLUMINA_QUALITY
and context (classes, functions, or code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
. Output only the next line. | def test_copy(self): |
Using the snippet: <|code_start|> type=argparse.FileType('wt'))
parser.add_argument('--version', action='version',
version=build_version_msg())
group = parser.add_mutually_exclusive_group()
group.add_argument('-z ', '--gzip', action='store_true',
help='Compress the output in gzip format')
group.add_argument('-Z ', '--bgzf', action='store_true',
help='Compress the output in bgzf format')
group.add_argument('-B ', '--bzip2', action='store_true',
help='Compress the output in bzip2 format')
return parser
def create_basic_parallel_argparse(**kwargs):
'It returns a cmd parser with inputs, output, format, num_processes'
parser = create_basic_argparse(**kwargs)
parser = argparse.ArgumentParser(parents=[parser], add_help=False)
parser.add_argument('-p', '--processes', dest='processes', type=int,
help='Num. of processes to use (default: %(default)s)',
default=1)
return parser
def _to_bool(string):
if string.lower()[0] == 'f':
return False
elif string.lower()[0] == 't':
return True
<|code_end|>
, determine the next line of code. You have imports:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and context (class names, function names, or code) available:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
. Output only the next line. | elif string.isdigit(): |
Given snippet: <|code_start|>
parser.add_argument('input', default=sys.stdin, nargs='*',
help='Sequence input files to process (default STDIN)',
type=argparse.FileType('rt'))
hlp_fmt = 'Format of the input files (default: %(default)s)'
parser.add_argument('-t', '--in_format', default=GUESS_FORMAT,
help=hlp_fmt)
parser.add_argument('-o', '--outfile', default=sys.stdout, dest=OUTFILE,
help='Sequence output file (default: STDOUT)',
type=argparse.FileType('wt'))
parser.add_argument('--version', action='version',
version=build_version_msg())
group = parser.add_mutually_exclusive_group()
group.add_argument('-z ', '--gzip', action='store_true',
help='Compress the output in gzip format')
group.add_argument('-Z ', '--bgzf', action='store_true',
help='Compress the output in bgzf format')
group.add_argument('-B ', '--bzip2', action='store_true',
help='Compress the output in bzip2 format')
return parser
def create_basic_parallel_argparse(**kwargs):
'It returns a cmd parser with inputs, output, format, num_processes'
parser = create_basic_argparse(**kwargs)
parser = argparse.ArgumentParser(parents=[parser], add_help=False)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and context:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
which might include code, classes, or functions. Output only the next line. | parser.add_argument('-p', '--processes', dest='processes', type=int, |
Based on the snippet: <|code_start|> help='Compress the output in gzip format')
group.add_argument('-Z ', '--bgzf', action='store_true',
help='Compress the output in bgzf format')
group.add_argument('-B ', '--bzip2', action='store_true',
help='Compress the output in bzip2 format')
return parser
def create_basic_parallel_argparse(**kwargs):
'It returns a cmd parser with inputs, output, format, num_processes'
parser = create_basic_argparse(**kwargs)
parser = argparse.ArgumentParser(parents=[parser], add_help=False)
parser.add_argument('-p', '--processes', dest='processes', type=int,
help='Num. of processes to use (default: %(default)s)',
default=1)
return parser
def _to_bool(string):
if string.lower()[0] == 'f':
return False
elif string.lower()[0] == 't':
return True
elif string.isdigit():
return bool(int(string))
def create_filter_argparse(add_reverse=True, **kwargs):
'It returns a cmd parser for the filter executables'
parser = create_basic_parallel_argparse(**kwargs)
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and context (classes, functions, sometimes code) from other files:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
. Output only the next line. | parser = argparse.ArgumentParser(parents=[parser], add_help=False) |
Next line prediction: <|code_start|># You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
def create_basic_argparse(**kwargs):
'It returns a parser with several inputs and one output'
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument('input', default=sys.stdin, nargs='*',
help='Sequence input files to process (default STDIN)',
type=argparse.FileType('rt'))
hlp_fmt = 'Format of the input files (default: %(default)s)'
parser.add_argument('-t', '--in_format', default=GUESS_FORMAT,
help=hlp_fmt)
parser.add_argument('-o', '--outfile', default=sys.stdout, dest=OUTFILE,
help='Sequence output file (default: STDOUT)',
type=argparse.FileType('wt'))
parser.add_argument('--version', action='version',
version=build_version_msg())
group = parser.add_mutually_exclusive_group()
group.add_argument('-z ', '--gzip', action='store_true',
help='Compress the output in gzip format')
group.add_argument('-Z ', '--bgzf', action='store_true',
help='Compress the output in bgzf format')
<|code_end|>
. Use current file imports:
(import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
. Output only the next line. | group.add_argument('-B ', '--bzip2', action='store_true', |
Predict the next line for this snippet: <|code_start|> # closed
args = {'out_fhand': out_fhand, 'in_fhands': wrapped_fhands,
'out_format': out_format, 'original_in_fhands': in_fhands}
return args, parsed_args
def parse_basic_parallel_args(parser):
'It parses the command line and it returns a dict with the arguments.'
args, parsed_args = parse_basic_args(parser)
args['processes'] = parsed_args.processes
return args, parsed_args
def parse_filter_args(parser, add_reverse=True):
'It parses the command line and it returns a dict with the arguments.'
args, parsed_args = parse_basic_parallel_args(parser)
if add_reverse:
args['reverse'] = parsed_args.reverse
args['filtered_fhand'] = parsed_args.filtered_file
paired_reads = parsed_args.paired_reads
args['paired_reads'] = paired_reads
if paired_reads:
# in this case fail_drags_pair is required
fail_drags_pair = parsed_args.fail_drags_pair
if fail_drags_pair is None:
msg = 'For pairs fail_drags_pair is required'
parser.error(msg)
# raise argparse.ArgumentError(parsed_args.fail_drags_pair, msg)
else:
fail_drags_pair = None
<|code_end|>
with the help of current file imports:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and context from other files:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
, which may contain function names, class names, or code. Output only the next line. | args['fail_drags_pair'] = fail_drags_pair |
Given snippet: <|code_start|> help='Do not trim, only mask by lowering the case')
group = parser.add_argument_group('Pairing')
group.add_argument('--paired_reads', action='store_true',
help='Trim considering interleaved pairs')
group.add_argument('-e', '--orphan_file',
help='Orphan sequences output file',
type=argparse.FileType('wt'))
return parser
def parse_basic_args(parser):
'It parses the command line and it returns a dict with the arguments.'
parsed_args = parser.parse_args()
# we have to wrap the file in a BufferedReader to allow peeking into stdin
wrapped_fhands = []
# if input is stdin it will be a fhand not a list of fhands.
# we have to convert to a list
in_fhands = parsed_args.input
if not isinstance(in_fhands, list):
in_fhands = [in_fhands]
for fhand in in_fhands:
fhand = wrap_in_buffered_reader(fhand)
fhand = uncompress_if_required(fhand)
wrapped_fhands.append(fhand)
# We have to add the one_line to the fastq files in order to get the
# speed improvements of the seqitems
in_format = parsed_args.in_format
if in_format == GUESS_FORMAT:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and context:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
which might include code, classes, or functions. Output only the next line. | for wrapped_fhand in wrapped_fhands: |
Predict the next line after this snippet: <|code_start|>
def parse_filter_args(parser, add_reverse=True):
'It parses the command line and it returns a dict with the arguments.'
args, parsed_args = parse_basic_parallel_args(parser)
if add_reverse:
args['reverse'] = parsed_args.reverse
args['filtered_fhand'] = parsed_args.filtered_file
paired_reads = parsed_args.paired_reads
args['paired_reads'] = paired_reads
if paired_reads:
# in this case fail_drags_pair is required
fail_drags_pair = parsed_args.fail_drags_pair
if fail_drags_pair is None:
msg = 'For pairs fail_drags_pair is required'
parser.error(msg)
# raise argparse.ArgumentError(parsed_args.fail_drags_pair, msg)
else:
fail_drags_pair = None
args['fail_drags_pair'] = fail_drags_pair
return args, parsed_args
def parse_trimmer_args(parser):
'It parses the command line and it returns a dict with the arguments.'
args, parsed_args = parse_basic_parallel_args(parser)
args['mask'] = parsed_args.mask
args['orphan_fhand'] = parsed_args.orphan_file
paired_reads = parsed_args.paired_reads
args['paired_reads'] = paired_reads
<|code_end|>
using the current file's imports:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and any relevant context from other files:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
. Output only the next line. | return args, parsed_args |
Using the snippet: <|code_start|> elif string.lower()[0] == 't':
return True
elif string.isdigit():
return bool(int(string))
def create_filter_argparse(add_reverse=True, **kwargs):
'It returns a cmd parser for the filter executables'
parser = create_basic_parallel_argparse(**kwargs)
parser = argparse.ArgumentParser(parents=[parser], add_help=False)
if add_reverse:
parser.add_argument('-r', '--reverse', action='store_true',
help='Reverses the filtering')
parser.add_argument('-e', '--filtered_file',
help='Filtered out sequences output file',
type=argparse.FileType('wt'))
group = parser.add_argument_group('Pairing')
group.add_argument('--paired_reads', action='store_true',
help='Filter considering interleaved pairs')
help_msg = 'If one read fails the pair will be filtered out '
help_msg += '(default: %(default)s)'
group.add_argument('--fail_drags_pair', type=_to_bool, default='true',
choices=(True, False), help=help_msg)
return parser
def create_trimmer_argparse(**kwargs):
'It returns a cmd parser for the filter executables'
parser = create_basic_parallel_argparse(**kwargs)
parser = argparse.ArgumentParser(parents=[parser], add_help=False)
<|code_end|>
, determine the next line of code. You have imports:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and context (class names, function names, or code) available:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
. Output only the next line. | parser.add_argument('-m', '--mask', dest='mask', action='store_true', |
Given the following code snippet before the placeholder: <|code_start|># You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
def create_basic_argparse(**kwargs):
'It returns a parser with several inputs and one output'
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument('input', default=sys.stdin, nargs='*',
help='Sequence input files to process (default STDIN)',
type=argparse.FileType('rt'))
hlp_fmt = 'Format of the input files (default: %(default)s)'
parser.add_argument('-t', '--in_format', default=GUESS_FORMAT,
help=hlp_fmt)
parser.add_argument('-o', '--outfile', default=sys.stdout, dest=OUTFILE,
help='Sequence output file (default: STDOUT)',
type=argparse.FileType('wt'))
parser.add_argument('--version', action='version',
version=build_version_msg())
group = parser.add_mutually_exclusive_group()
group.add_argument('-z ', '--gzip', action='store_true',
help='Compress the output in gzip format')
group.add_argument('-Z ', '--bgzf', action='store_true',
help='Compress the output in bgzf format')
<|code_end|>
, predict the next line using imports from the current file:
import sys
import argparse
from crumbs.utils.file_utils import (wrap_in_buffered_reader,
uncompress_if_required, compress_fhand)
from crumbs.utils.tags import OUTFILE, GUESS_FORMAT
from crumbs.seq.utils.file_formats import get_format, set_format
from crumbs.utils.bin_utils import build_version_msg, get_requested_compression
and context including class names, function names, and sometimes code from other files:
# Path: crumbs/utils/file_utils.py
# def wrap_in_buffered_reader(fhand, force_wrap=False,
# buffering=DEF_FILE_BUFFER):
# '''It wraps the given file in a peekable BufferedReader.
#
# If the file is seekable it doesn't do anything.
# '''
# if not force_wrap and fhand_is_seekable(fhand):
# return fhand
# else:
# fhand = io.open(fhand.fileno(), mode='rb',
# buffering=buffering) # with text there's no peek
#
# return fhand
#
# def uncompress_if_required(fhand):
# 'It returns a uncompressed handle if required'
# magic = peek_chunk_from_file(fhand, 2)
# if magic == '\037\213':
# fhand = GzipFile(fileobj=fhand)
# elif magic == 'BZ':
# try:
# fhand = BZ2File(fhand)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# def compress_fhand(fhand, compression_kind=None):
# 'Compresses the file if required'
# if compression_kind == BGZF:
# if fhand_is_seekable(fhand):
# fhand = BgzfWriter(fileobj=fhand)
# else:
# raise RuntimeError('bgzf is only available for seekable files')
# elif compression_kind == GZIP:
# fhand = GzipFile(fileobj=fhand)
# elif compression_kind == BZIP2:
# mode = 'w' if 'w' in fhand.mode else 'r'
# try:
# fhand = BZ2File(fhand, mode=mode)
# except NameError:
# raise OptionalRequirementError(BZIP_ERROR)
# return fhand
#
# Path: crumbs/utils/tags.py
# OUTFILE = 'output'
#
# GUESS_FORMAT = 'guess'
#
# Path: crumbs/seq/utils/file_formats.py
# def get_format(fhand):
# 'It gets the format or it looks in the inventory'
# id_ = _get_fhand_id(fhand)
# try:
# file_format = FILEFORMAT_INVENTORY[id_]
# except KeyError:
# file_format = None
#
# if file_format is None:
# file_format = _guess_format(fhand, force_file_as_non_seek=False)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# return file_format
#
# def set_format(fhand, file_format):
# 'It sets the file format in the global inventory variable'
# id_ = _get_fhand_id(fhand)
# if id_ in FILEFORMAT_INVENTORY:
# msg = 'The given instance already setted its file format'
# raise RuntimeError(msg)
# FILEFORMAT_INVENTORY[id_] = file_format
#
# Path: crumbs/utils/bin_utils.py
# def build_version_msg():
# 'It creates a message with the version.'
# bin_name = os.path.split(sys.argv[0])[-1]
# version_msg = bin_name + ' from seq_crumbs version: ' + version
# return version_msg
#
# def get_requested_compression(parsed_args):
# 'It looks in the selected options and return the selected compression kind'
# comp_kind = None
# bgzf = getattr(parsed_args, 'bgzf', False)
# gzip = getattr(parsed_args, 'gzip', False)
# bzip2 = getattr(parsed_args, 'bzip2', False)
# if bgzf:
# comp_kind = BGZF
# elif gzip:
# comp_kind = GZIP
# elif bzip2:
# comp_kind = BZIP2
# return comp_kind
. Output only the next line. | group.add_argument('-B ', '--bzip2', action='store_true', |
Given the following code snippet before the placeholder: <|code_start|># along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=C0111
class UppercaseLengthTest(unittest.TestCase):
'It tests the uppercase character count'
def test_uppercase_length(self):
'It counts the number of uppercase letters in a string'
assert uppercase_length('aCTaGGt') == 4
assert uppercase_length('acagt') == 0
def _make_fhand(content=''):
'It makes temporary fhands'
fhand = NamedTemporaryFile()
fhand.write(content)
fhand.flush()
return fhand
class MaskedSegmentsTest(unittest.TestCase):
'It tests the lower case segments location functions'
@staticmethod
<|code_end|>
, predict the next line using imports from the current file:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context including class names, function names, and sometimes code from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
. Output only the next line. | def test_masked_locations(): |
Predict the next line for this snippet: <|code_start|> 'It tests the case change'
def test_case_change(self):
'It changes the case of the sequences'
seqs = [SeqRecord(Seq('aCCg'), letter_annotations={'dummy': 'dddd'})]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=UPPERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['ACCG']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=LOWERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['accg']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=SWAPCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['AccG']
def test_bin(self):
'It tests the trim seqs binary'
change_bin = os.path.join(BIN_DIR, 'change_case')
assert 'usage' in check_output([change_bin, '-h'])
fastq = '@seq1\naTCgt\n+\n?????\n@seq2\natcGT\n+\n?????\n'
fastq_fhand = _make_fhand(fastq)
result = check_output([change_bin, '-a', 'upper', fastq_fhand.name])
<|code_end|>
with the help of current file imports:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
, which may contain function names, class names, or code. Output only the next line. | assert '@seq1\nATCGT\n+' in result |
Given the following code snippet before the placeholder: <|code_start|> seqs = [SeqRecord(Seq('aCCg'), letter_annotations={'dummy': 'dddd'})]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=UPPERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['ACCG']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=LOWERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['accg']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=SWAPCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['AccG']
def test_bin(self):
'It tests the trim seqs binary'
change_bin = os.path.join(BIN_DIR, 'change_case')
assert 'usage' in check_output([change_bin, '-h'])
fastq = '@seq1\naTCgt\n+\n?????\n@seq2\natcGT\n+\n?????\n'
fastq_fhand = _make_fhand(fastq)
result = check_output([change_bin, '-a', 'upper', fastq_fhand.name])
assert '@seq1\nATCGT\n+' in result
<|code_end|>
, predict the next line using imports from the current file:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context including class names, function names, and sometimes code from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
. Output only the next line. | if __name__ == "__main__": |
Using the snippet: <|code_start|> fhand = NamedTemporaryFile()
fhand.write(content)
fhand.flush()
return fhand
class MaskedSegmentsTest(unittest.TestCase):
'It tests the lower case segments location functions'
@staticmethod
def test_masked_locations():
'It test the masked locations function'
assert list(get_uppercase_segments('aaATTTTTTaa')) == [(2, 8)]
assert list(get_uppercase_segments('aaATTTaTTaa')) == [(2, 5), (7, 8)]
assert list(get_uppercase_segments('AAATaaa')) == [(0, 3)]
assert list(get_uppercase_segments('aaaaAAAA')) == [(4, 7)]
seq = 'AATTaaTTaaTTT'
assert list(get_uppercase_segments(seq)) == [(0, 3), (6, 7), (10, 12)]
assert list(get_uppercase_segments('AATT')) == [(0, 3)]
assert not list(get_uppercase_segments('aatt'))
class ChangeCaseTest(unittest.TestCase):
'It tests the case change'
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context (class names, function names, or code) available:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
. Output only the next line. | def test_case_change(self): |
Predict the next line for this snippet: <|code_start|>class ChangeCaseTest(unittest.TestCase):
'It tests the case change'
def test_case_change(self):
'It changes the case of the sequences'
seqs = [SeqRecord(Seq('aCCg'), letter_annotations={'dummy': 'dddd'})]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=UPPERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['ACCG']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=LOWERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['accg']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=SWAPCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['AccG']
def test_bin(self):
'It tests the trim seqs binary'
change_bin = os.path.join(BIN_DIR, 'change_case')
assert 'usage' in check_output([change_bin, '-h'])
fastq = '@seq1\naTCgt\n+\n?????\n@seq2\natcGT\n+\n?????\n'
fastq_fhand = _make_fhand(fastq)
<|code_end|>
with the help of current file imports:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
, which may contain function names, class names, or code. Output only the next line. | result = check_output([change_bin, '-a', 'upper', fastq_fhand.name]) |
Continue the code snippet: <|code_start|> change_case = ChangeCase(action=UPPERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['ACCG']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=LOWERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['accg']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=SWAPCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['AccG']
def test_bin(self):
'It tests the trim seqs binary'
change_bin = os.path.join(BIN_DIR, 'change_case')
assert 'usage' in check_output([change_bin, '-h'])
fastq = '@seq1\naTCgt\n+\n?????\n@seq2\natcGT\n+\n?????\n'
fastq_fhand = _make_fhand(fastq)
result = check_output([change_bin, '-a', 'upper', fastq_fhand.name])
assert '@seq1\nATCGT\n+' in result
if __name__ == "__main__":
#import sys;sys.argv = ['', 'ChangeCaseTest.test_bin']
<|code_end|>
. Use current file imports:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context (classes, functions, or code) from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
. Output only the next line. | unittest.main() |
Continue the code snippet: <|code_start|> seqs = [SeqRecord(Seq('aCCg'), letter_annotations={'dummy': 'dddd'})]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=UPPERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['ACCG']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=LOWERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['accg']
seqs = [SeqRecord(Seq('aCCg'))]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=SWAPCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['AccG']
def test_bin(self):
'It tests the trim seqs binary'
change_bin = os.path.join(BIN_DIR, 'change_case')
assert 'usage' in check_output([change_bin, '-h'])
fastq = '@seq1\naTCgt\n+\n?????\n@seq2\natcGT\n+\n?????\n'
fastq_fhand = _make_fhand(fastq)
result = check_output([change_bin, '-a', 'upper', fastq_fhand.name])
assert '@seq1\nATCGT\n+' in result
<|code_end|>
. Use current file imports:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context (classes, functions, or code) from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
. Output only the next line. | if __name__ == "__main__": |
Here is a snippet: <|code_start|>def _make_fhand(content=''):
'It makes temporary fhands'
fhand = NamedTemporaryFile()
fhand.write(content)
fhand.flush()
return fhand
class MaskedSegmentsTest(unittest.TestCase):
'It tests the lower case segments location functions'
@staticmethod
def test_masked_locations():
'It test the masked locations function'
assert list(get_uppercase_segments('aaATTTTTTaa')) == [(2, 8)]
assert list(get_uppercase_segments('aaATTTaTTaa')) == [(2, 5), (7, 8)]
assert list(get_uppercase_segments('AAATaaa')) == [(0, 3)]
assert list(get_uppercase_segments('aaaaAAAA')) == [(4, 7)]
seq = 'AATTaaTTaaTTT'
assert list(get_uppercase_segments(seq)) == [(0, 3), (6, 7), (10, 12)]
assert list(get_uppercase_segments('AATT')) == [(0, 3)]
assert not list(get_uppercase_segments('aatt'))
<|code_end|>
. Write the next line using the current file imports:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
, which may include functions, classes, or code. Output only the next line. | class ChangeCaseTest(unittest.TestCase): |
Given snippet: <|code_start|>
@staticmethod
def test_masked_locations():
'It test the masked locations function'
assert list(get_uppercase_segments('aaATTTTTTaa')) == [(2, 8)]
assert list(get_uppercase_segments('aaATTTaTTaa')) == [(2, 5), (7, 8)]
assert list(get_uppercase_segments('AAATaaa')) == [(0, 3)]
assert list(get_uppercase_segments('aaaaAAAA')) == [(4, 7)]
seq = 'AATTaaTTaaTTT'
assert list(get_uppercase_segments(seq)) == [(0, 3), (6, 7), (10, 12)]
assert list(get_uppercase_segments('AATT')) == [(0, 3)]
assert not list(get_uppercase_segments('aatt'))
class ChangeCaseTest(unittest.TestCase):
'It tests the case change'
def test_case_change(self):
'It changes the case of the sequences'
seqs = [SeqRecord(Seq('aCCg'), letter_annotations={'dummy': 'dddd'})]
seqs = assing_kind_to_seqs(SEQRECORD, seqs, None)
change_case = ChangeCase(action=UPPERCASE)
strs = [get_str_seq(s) for s in change_case(seqs)]
assert strs == ['ACCG']
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
which might include code, classes, or functions. Output only the next line. | seqs = [SeqRecord(Seq('aCCg'))] |
Given the code snippet: <|code_start|># Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=C0111
class UppercaseLengthTest(unittest.TestCase):
'It tests the uppercase character count'
def test_uppercase_length(self):
<|code_end|>
, generate the next line using the imports in this file:
import unittest
import os.path
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import check_output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seq import assing_kind_to_seqs, get_str_seq
from crumbs.seq.utils.seq_utils import (uppercase_length, ChangeCase,
get_uppercase_segments)
from crumbs.utils.tags import SWAPCASE, UPPERCASE, LOWERCASE, SEQRECORD
and context (functions, classes, or occasionally code) from other files:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/seq/seq.py
# def assing_kind_to_seqs(kind, seqs, file_format):
# 'It puts each seq into a NamedTuple named Seq'
# return (SeqWrapper(kind, seq, file_format) for seq in seqs)
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# Path: crumbs/seq/utils/seq_utils.py
# def uppercase_length(string):
# 'It returns the number of uppercase characters found in the string'
# return len(re.findall("[A-Z]", string))
#
# class ChangeCase(object):
# 'It changes the sequence case.'
#
# def __init__(self, action):
# 'The initiator'
# if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
# msg = 'Action should be: uppercase, lowercase or invertcase'
# raise ValueError(msg)
# self.action = action
#
# def __call__(self, seqs):
# 'It changes the case of the seqrecords.'
# action = self.action
# processed_seqs = []
# for seq in seqs:
# str_seq = get_str_seq(seq)
# if action == UPPERCASE:
# str_seq = str_seq.upper()
# elif action == LOWERCASE:
# str_seq = str_seq.lower()
# elif action == SWAPCASE:
# str_seq = str_seq.swapcase()
# else:
# raise NotImplementedError()
# seq = copy_seq(seq, seq=str_seq)
# processed_seqs.append(seq)
# return processed_seqs
#
# def get_uppercase_segments(string):
# '''It detects the unmasked regions of a sequence
#
# It returns a list of (start, end) tuples'''
# start = 0
# for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
# group = list(group)
# end = start + len(group) - 1
# if is_upper:
# yield start, end
# start = end + 1
#
# Path: crumbs/utils/tags.py
# SWAPCASE = 'swap'
#
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SEQRECORD = 'seqrecord'
. Output only the next line. | 'It counts the number of uppercase letters in a string' |
Continue the code snippet: <|code_start|>
cmd.extend([in_fpath])
if regions:
regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
cmd.extend(regions)
pysam.view(*cmd)
def sort_bam(in_bam_fpath, out_bam_fpath=None):
if out_bam_fpath is None:
out_bam_fpath = in_bam_fpath
if out_bam_fpath == in_bam_fpath:
sorted_fhand = NamedTemporaryFile(suffix='.sorted.bam', delete=False)
temp_out_fpath = sorted_fhand.name
else:
temp_out_fpath = out_bam_fpath
picard_jar = get_setting("PICARD_JAR")
cmd = ['java', '-jar', picard_jar, 'SortSam',
'INPUT={0}'.format(in_bam_fpath),
'OUTPUT={0}'.format(temp_out_fpath),
'SORT_ORDER=coordinate', 'VALIDATION_STRINGENCY=LENIENT']
stderr = NamedTemporaryFile(suffix='picard.stderr')
check_call(cmd, stderr=stderr)
if temp_out_fpath != out_bam_fpath:
<|code_end|>
. Use current file imports:
import os.path
import shutil
import sys
import pysam
from subprocess import check_call, CalledProcessError
from tempfile import NamedTemporaryFile
from crumbs.bam.flag import create_flag
from crumbs.settings import get_setting
from crumbs.utils.bin_utils import get_num_threads
and context (classes, functions, or code) from other files:
# Path: crumbs/bam/flag.py
# def create_flag(bit_tags):
# 'It returns the integer corresponding to the bitwise or of the tags'
# return reduce(or_, [SAM_FLAG_BITS[t] for t in bit_tags])
#
# Path: crumbs/settings.py
# def get_setting(key):
# 'It returns the value for one setting'
# return _settings[key]
#
# Path: crumbs/utils/bin_utils.py
# def get_num_threads(threads):
# """It returns num of threads to use in parallel.
#
# You can pass to the funaction the memory you want to use each thread.
# It calculates the number of treads
# In megabytes
# """
# phisical_threads = os.sysconf('SC_NPROCESSORS_ONLN')
# if not threads:
# return 1
# elif isinstance(threads, bool):
# return phisical_threads
# else:
# return threads
. Output only the next line. | shutil.move(temp_out_fpath, out_bam_fpath) |
Next line prediction: <|code_start|>
cmd.extend([in_fpath])
if regions:
regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
cmd.extend(regions)
pysam.view(*cmd)
def sort_bam(in_bam_fpath, out_bam_fpath=None):
if out_bam_fpath is None:
out_bam_fpath = in_bam_fpath
if out_bam_fpath == in_bam_fpath:
sorted_fhand = NamedTemporaryFile(suffix='.sorted.bam', delete=False)
temp_out_fpath = sorted_fhand.name
else:
temp_out_fpath = out_bam_fpath
picard_jar = get_setting("PICARD_JAR")
cmd = ['java', '-jar', picard_jar, 'SortSam',
'INPUT={0}'.format(in_bam_fpath),
'OUTPUT={0}'.format(temp_out_fpath),
'SORT_ORDER=coordinate', 'VALIDATION_STRINGENCY=LENIENT']
stderr = NamedTemporaryFile(suffix='picard.stderr')
check_call(cmd, stderr=stderr)
if temp_out_fpath != out_bam_fpath:
<|code_end|>
. Use current file imports:
(import os.path
import shutil
import sys
import pysam
from subprocess import check_call, CalledProcessError
from tempfile import NamedTemporaryFile
from crumbs.bam.flag import create_flag
from crumbs.settings import get_setting
from crumbs.utils.bin_utils import get_num_threads)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/bam/flag.py
# def create_flag(bit_tags):
# 'It returns the integer corresponding to the bitwise or of the tags'
# return reduce(or_, [SAM_FLAG_BITS[t] for t in bit_tags])
#
# Path: crumbs/settings.py
# def get_setting(key):
# 'It returns the value for one setting'
# return _settings[key]
#
# Path: crumbs/utils/bin_utils.py
# def get_num_threads(threads):
# """It returns num of threads to use in parallel.
#
# You can pass to the funaction the memory you want to use each thread.
# It calculates the number of treads
# In megabytes
# """
# phisical_threads = os.sysconf('SC_NPROCESSORS_ONLN')
# if not threads:
# return 1
# elif isinstance(threads, bool):
# return phisical_threads
# else:
# return threads
. Output only the next line. | shutil.move(temp_out_fpath, out_bam_fpath) |
Next line prediction: <|code_start|> out_bam_fpath = in_bam_fpath
if out_bam_fpath == in_bam_fpath:
realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
delete=False)
temp_out_fpath = realigned_fhand.name
else:
temp_out_fpath = out_bam_fpath
_calmd_bam(in_bam_fpath, reference_fpath, temp_out_fpath)
if temp_out_fpath != out_bam_fpath:
shutil.move(temp_out_fpath, out_bam_fpath)
def _calmd_bam(bam_fpath, reference_fpath, out_bam_fpath):
out_fhand = open(out_bam_fpath, 'wb')
for line in pysam.calmd(*["-bAr", bam_fpath, reference_fpath]):
out_fhand.write(line)
# out_fhand.write(pysam.calmd(*["-bAr", bam_fpath, reference_fpath]))
out_fhand.flush()
out_fhand.close()
def merge_sams(in_fpaths, out_fpath):
picard_jar = get_setting("PICARD_JAR")
cmd = ['java', '-jar', picard_jar, 'MergeSamFiles',
'O={}'.format(out_fpath)]
for in_fpath in in_fpaths:
<|code_end|>
. Use current file imports:
(import os.path
import shutil
import sys
import pysam
from subprocess import check_call, CalledProcessError
from tempfile import NamedTemporaryFile
from crumbs.bam.flag import create_flag
from crumbs.settings import get_setting
from crumbs.utils.bin_utils import get_num_threads)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/bam/flag.py
# def create_flag(bit_tags):
# 'It returns the integer corresponding to the bitwise or of the tags'
# return reduce(or_, [SAM_FLAG_BITS[t] for t in bit_tags])
#
# Path: crumbs/settings.py
# def get_setting(key):
# 'It returns the value for one setting'
# return _settings[key]
#
# Path: crumbs/utils/bin_utils.py
# def get_num_threads(threads):
# """It returns num of threads to use in parallel.
#
# You can pass to the funaction the memory you want to use each thread.
# It calculates the number of treads
# In megabytes
# """
# phisical_threads = os.sysconf('SC_NPROCESSORS_ONLN')
# if not threads:
# return 1
# elif isinstance(threads, bool):
# return phisical_threads
# else:
# return threads
. Output only the next line. | cmd.append('I={}'.format(in_fpath)) |
Here is a snippet: <|code_start|>
class CloseToSnv(BaseAnnotator):
'''Filter snps with other close snvs.
Allowed snv_types: [snp, indel, unknown] '''
def __init__(self, distance=60, max_maf_depth=None, snv_type=None):
self.distance = distance
self.random_reader = None
self.max_maf_depth = max_maf_depth
self.snv_type = snv_type
self.conf = {'distance': distance, 'max_maf_depth': max_maf_depth,
'snv_type': snv_type}
def __call__(self, snv):
if self.random_reader is None:
self.random_reader = self._create_reader_from_snv(snv)
self._clean_filter(snv)
chrom = snv.chrom
pos = snv.pos
start = pos - self.distance if pos - self.distance > 0 else 0
end = pos + self.distance
snv_type = self.snv_type
max_maf_depth = self.max_maf_depth
passed_snvs = 0
for snv_in_window in self.random_reader.fetch_snvs(chrom, start, end):
if snv_in_window.pos == pos:
continue
<|code_end|>
. Write the next line using the current file imports:
import json
from os.path import join, abspath
from collections import Counter
from itertools import count
from Bio import SeqIO
from Bio.Restriction.Restriction import CommOnly, RestrictionBatch, Analysis
from crumbs.vcf.prot_change import (get_amino_change, IsIndelError,
BetweenSegments, OutsideAlignment)
from crumbs.vcf.snv import VCFReader
and context from other files:
# Path: crumbs/vcf/snv.py
# class VCFReader(object):
# def __init__(self, fhand, compressed=None, filename=None,
# min_calls_for_pop_stats=DEF_MIN_CALLS_FOR_POP_STATS):
# self.fhand = fhand
# self.pyvcf_reader = pyvcfReader(fsock=fhand, compressed=compressed,
# filename=filename)
# self.min_calls_for_pop_stats = min_calls_for_pop_stats
# self._snpcaller = None
#
# def parse_snvs(self):
# min_calls_for_pop_stats = self.min_calls_for_pop_stats
# last_snp = None
# try:
# counter =0
# for snp in self.pyvcf_reader:
# counter +=1
# snp = SNV(snp, reader=self,
# min_calls_for_pop_stats=min_calls_for_pop_stats)
# last_snp = snp
# yield snp
# except Exception:
# from traceback import print_exception
# exc_type, exc_value, exc_traceback = sys.exc_info()
#
# print_exception(exc_type, exc_value, exc_traceback,
# limit=20, file=sys.stderr)
#
# if last_snp is not None:
# chrom = str(last_snp.chrom)
# pos = last_snp.pos
# msg = 'Last parsed SNP was: {} {}\n'.format(chrom, pos + 1)
# sys.stderr.write(msg)
# raise
#
# def fetch_snvs(self, chrom, start, end=None):
# min_calls_for_pop_stats = self.min_calls_for_pop_stats
# try:
# snvs = self.pyvcf_reader.fetch(chrom, start + 1, end=end)
# except KeyError:
# snvs = []
# if snvs is None:
# snvs = []
#
# for snp in snvs:
# snp = SNV(snp, reader=self,
# min_calls_for_pop_stats=min_calls_for_pop_stats)
# yield snp
#
# def sliding_windows(self, size, step=None, ref_fhand=None,
# min_num_snps=DEF_MIN_NUM_SNPS_IN_WIN):
# random_snp_reader = VCFReader(open(self.fhand.name))
# sliding_window = _SNPSlidingWindow(snp_reader=random_snp_reader,
# win_size=size, win_step=step,
# min_num_snps=min_num_snps,
# ref_fhand=ref_fhand)
# for window in sliding_window.windows():
# yield window
#
# @property
# def snpcaller(self):
# if self._snpcaller is not None:
# return self._snpcaller
#
# metadata = self.pyvcf_reader.metadata
# if 'source' in metadata:
# if 'VarScan2' in metadata['source']:
# snpcaller = VARSCAN
# elif 'freebayes' in metadata['source'][0].lower():
# snpcaller = FREEBAYES
# else:
# snpcaller = GENERIC
# elif 'UnifiedGenotyper' in metadata:
# snpcaller = GATK
# else:
# snpcaller = GENERIC
# self._snpcaller = snpcaller
# return snpcaller
#
# @property
# def samples(self):
# return self.pyvcf_reader.samples
#
# @property
# def filters(self):
# return self.pyvcf_reader.filters
#
# @property
# def infos(self):
# return self.pyvcf_reader.infos
#
# @property
# def header(self):
# header = '\n'.join(self.pyvcf_reader._header_lines)
# header += '\n#' + '\t'.join(self.pyvcf_reader._column_headers)
# header += '\t' + '\t'.join(self.pyvcf_reader.samples)
# return header
, which may include functions, classes, or code. Output only the next line. | if max_maf_depth is None and snv_type is None: |
Predict the next line after this snippet: <|code_start|> object=SeqItem(name, lines, annotations),
file_format=fmt)
return seq
def copy_seq(seqwrapper, seq=None, name=None):
seq_class = seqwrapper.kind
seq_obj = seqwrapper.object
if seq_class == SEQITEM:
seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
elif seq_class == SEQRECORD:
seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
file_format=seqwrapper.file_format)
return seq
def _slice_seqitem(seqwrap, start, stop):
fmt = seqwrap.file_format
seq_obj = seqwrap.object
lines = seq_obj.lines
seq_str = get_str_seq(seqwrap)
seq_str = seq_str[start: stop] + '\n'
if 'fasta' in fmt:
lines = [lines[0], seq_str]
elif 'fastq' in fmt:
qual_str = get_str_qualities(seqwrap)
qual_str = qual_str[start: stop]
qual_str += '\n'
lines = [lines[0], seq_str, '+\n', qual_str]
<|code_end|>
using the current file's imports:
from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS)
and any relevant context from other files:
# Path: crumbs/utils/optional_modules.py
# MSG = 'A python package to run this executable is required,'
# BIO = 'biopython'
# BIO_BGZF = 'biopython with Bgzf support'
# NCBIXML = create_fake_class(MSG + BIO)
# NCBIWWW = create_fake_class(MSG + BIO)
# def create_fake_class(msg):
# def __init__(self, *args, **kwargs):
# def create_fake_funct(msg):
# def FakeRequiredfunct(*args, **kwargs):
# class FakePythonRequiredClass(object):
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# SEQRECORD = 'seqrecord'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
#
# SANGER_QUALITY = 'fastq'
#
# SANGER_FASTQ_FORMATS = ('fastq-sanger', 'fastq')
#
# ILLUMINA_FASTQ_FORMATS = ('fastq-illumina',)
. Output only the next line. | else: |
Predict the next line for this snippet: <|code_start|> msg = 'Unknown or not supported quality format'
raise ValueError(msg)
return ''.join([quals_map[int_quality] for int_quality in int_quals])
def get_str_qualities(seq, out_format=None):
if out_format is None:
out_format = seq.file_format
if out_format in SANGER_FASTQ_FORMATS:
out_format = SANGER_QUALITY
elif out_format in ILLUMINA_FASTQ_FORMATS:
out_format = ILLUMINA_QUALITY
seq_class = seq.kind
if seq_class == SEQITEM:
in_format = seq.file_format
if 'fasta' in in_format:
raise ValueError('A fasta file has no qualities')
if in_format in SANGER_FASTQ_FORMATS:
in_format = SANGER_QUALITY
elif in_format in ILLUMINA_FASTQ_FORMATS:
in_format = ILLUMINA_QUALITY
else:
msg = 'Unknown or not supported quality format: '
msg += in_format
raise ValueError(msg)
if in_format == out_format:
quals = seq.object.lines[3].rstrip()
else:
int_quals = get_int_qualities(seq)
<|code_end|>
with the help of current file imports:
from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS)
and context from other files:
# Path: crumbs/utils/optional_modules.py
# MSG = 'A python package to run this executable is required,'
# BIO = 'biopython'
# BIO_BGZF = 'biopython with Bgzf support'
# NCBIXML = create_fake_class(MSG + BIO)
# NCBIWWW = create_fake_class(MSG + BIO)
# def create_fake_class(msg):
# def __init__(self, *args, **kwargs):
# def create_fake_funct(msg):
# def FakeRequiredfunct(*args, **kwargs):
# class FakePythonRequiredClass(object):
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# SEQRECORD = 'seqrecord'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
#
# SANGER_QUALITY = 'fastq'
#
# SANGER_FASTQ_FORMATS = ('fastq-sanger', 'fastq')
#
# ILLUMINA_FASTQ_FORMATS = ('fastq-illumina',)
, which may contain function names, class names, or code. Output only the next line. | quals = _int_quals_to_str_quals(int_quals, out_format) |
Next line prediction: <|code_start|> if 'illumina' in fmt:
quals_map = ILLUMINA_QUALS
else:
quals_map = SANGER_QUALS
encoded_quals = seqwrap.object.lines[3].rstrip()
quals = [quals_map[qual] for qual in encoded_quals]
else:
raise RuntimeError('Qualities requested for an unknown SeqItem format')
return quals
def get_int_qualities(seq):
seq_class = seq.kind
if seq_class == SEQITEM:
return _get_seqitem_qualities(seq)
elif seq_class == SEQRECORD:
try:
quals = seq.object.letter_annotations['phred_quality']
except KeyError:
msg = 'The given SeqRecord has no phred_quality'
raise AttributeError(msg)
return quals
SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
def _int_quals_to_str_quals(int_quals, out_format):
if out_format == SANGER_QUALITY:
<|code_end|>
. Use current file imports:
(from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS))
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/utils/optional_modules.py
# MSG = 'A python package to run this executable is required,'
# BIO = 'biopython'
# BIO_BGZF = 'biopython with Bgzf support'
# NCBIXML = create_fake_class(MSG + BIO)
# NCBIWWW = create_fake_class(MSG + BIO)
# def create_fake_class(msg):
# def __init__(self, *args, **kwargs):
# def create_fake_funct(msg):
# def FakeRequiredfunct(*args, **kwargs):
# class FakePythonRequiredClass(object):
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# SEQRECORD = 'seqrecord'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
#
# SANGER_QUALITY = 'fastq'
#
# SANGER_FASTQ_FORMATS = ('fastq-sanger', 'fastq')
#
# ILLUMINA_FASTQ_FORMATS = ('fastq-illumina',)
. Output only the next line. | quals_map = SANGER_STRS |
Next line prediction: <|code_start|> fmt = None
return fmt
def _break():
raise StopIteration
def _is_fastq_plus_line(line, seq_name):
if line == '+\n' or line.startswith('+') and seq_name in line:
return True
else:
return False
def _get_seqitem_quals(seq):
fmt = seq.file_format
sitem = seq.object
if 'fastq' in fmt:
quals = sitem.lines[3].rstrip()
else:
quals = None
return quals
def get_str_seq(seq):
seq_class = seq.kind
if seq_class == SEQITEM:
seq = seq.object.lines[1].strip()
elif seq_class == SEQRECORD:
<|code_end|>
. Use current file imports:
(from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS))
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/utils/optional_modules.py
# MSG = 'A python package to run this executable is required,'
# BIO = 'biopython'
# BIO_BGZF = 'biopython with Bgzf support'
# NCBIXML = create_fake_class(MSG + BIO)
# NCBIWWW = create_fake_class(MSG + BIO)
# def create_fake_class(msg):
# def __init__(self, *args, **kwargs):
# def create_fake_funct(msg):
# def FakeRequiredfunct(*args, **kwargs):
# class FakePythonRequiredClass(object):
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# SEQRECORD = 'seqrecord'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
#
# SANGER_QUALITY = 'fastq'
#
# SANGER_FASTQ_FORMATS = ('fastq-sanger', 'fastq')
#
# ILLUMINA_FASTQ_FORMATS = ('fastq-illumina',)
. Output only the next line. | seq = str(seq.object.seq) |
Continue the code snippet: <|code_start|> return quals
def get_str_seq(seq):
seq_class = seq.kind
if seq_class == SEQITEM:
seq = seq.object.lines[1].strip()
elif seq_class == SEQRECORD:
seq = str(seq.object.seq)
return seq.strip()
def get_length(seq):
return len(get_str_seq(seq))
SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
def _get_seqitem_qualities(seqwrap):
fmt = seqwrap.file_format.lower()
if 'fasta' in fmt:
raise AttributeError('A fasta file has no qualities')
elif 'fastq' in fmt:
if 'illumina' in fmt:
quals_map = ILLUMINA_QUALS
else:
quals_map = SANGER_QUALS
encoded_quals = seqwrap.object.lines[3].rstrip()
<|code_end|>
. Use current file imports:
from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS)
and context (classes, functions, or code) from other files:
# Path: crumbs/utils/optional_modules.py
# MSG = 'A python package to run this executable is required,'
# BIO = 'biopython'
# BIO_BGZF = 'biopython with Bgzf support'
# NCBIXML = create_fake_class(MSG + BIO)
# NCBIWWW = create_fake_class(MSG + BIO)
# def create_fake_class(msg):
# def __init__(self, *args, **kwargs):
# def create_fake_funct(msg):
# def FakeRequiredfunct(*args, **kwargs):
# class FakePythonRequiredClass(object):
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# SEQRECORD = 'seqrecord'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
#
# SANGER_QUALITY = 'fastq'
#
# SANGER_FASTQ_FORMATS = ('fastq-sanger', 'fastq')
#
# ILLUMINA_FASTQ_FORMATS = ('fastq-illumina',)
. Output only the next line. | quals = [quals_map[qual] for qual in encoded_quals] |
Given snippet: <|code_start|> out_format = ILLUMINA_QUALITY
seq_class = seq.kind
if seq_class == SEQITEM:
in_format = seq.file_format
if 'fasta' in in_format:
raise ValueError('A fasta file has no qualities')
if in_format in SANGER_FASTQ_FORMATS:
in_format = SANGER_QUALITY
elif in_format in ILLUMINA_FASTQ_FORMATS:
in_format = ILLUMINA_QUALITY
else:
msg = 'Unknown or not supported quality format: '
msg += in_format
raise ValueError(msg)
if in_format == out_format:
quals = seq.object.lines[3].rstrip()
else:
int_quals = get_int_qualities(seq)
quals = _int_quals_to_str_quals(int_quals, out_format)
elif seq_class == SEQRECORD:
int_quals = get_int_qualities(seq)
quals = _int_quals_to_str_quals(int_quals, out_format)
return quals
def get_annotations(seq):
return seq.object.annotations
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS)
and context:
# Path: crumbs/utils/optional_modules.py
# MSG = 'A python package to run this executable is required,'
# BIO = 'biopython'
# BIO_BGZF = 'biopython with Bgzf support'
# NCBIXML = create_fake_class(MSG + BIO)
# NCBIWWW = create_fake_class(MSG + BIO)
# def create_fake_class(msg):
# def __init__(self, *args, **kwargs):
# def create_fake_funct(msg):
# def FakeRequiredfunct(*args, **kwargs):
# class FakePythonRequiredClass(object):
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# SEQRECORD = 'seqrecord'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
#
# SANGER_QUALITY = 'fastq'
#
# SANGER_FASTQ_FORMATS = ('fastq-sanger', 'fastq')
#
# ILLUMINA_FASTQ_FORMATS = ('fastq-illumina',)
which might include code, classes, or functions. Output only the next line. | def _copy_seqrecord(seqrec, seq=None, name=None, id_=None): |
Given the following code snippet before the placeholder: <|code_start|> if seq_class == SEQITEM:
seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
elif seq_class == SEQRECORD:
seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
file_format=seqwrapper.file_format)
return seq
def _slice_seqitem(seqwrap, start, stop):
fmt = seqwrap.file_format
seq_obj = seqwrap.object
lines = seq_obj.lines
seq_str = get_str_seq(seqwrap)
seq_str = seq_str[start: stop] + '\n'
if 'fasta' in fmt:
lines = [lines[0], seq_str]
elif 'fastq' in fmt:
qual_str = get_str_qualities(seqwrap)
qual_str = qual_str[start: stop]
qual_str += '\n'
lines = [lines[0], seq_str, '+\n', qual_str]
else:
raise ValueError('Unknown SeqItem type')
seq_obj = SeqItem(name=seq_obj.name, lines=lines,
annotations=seq_obj.annotations)
return seq_obj
def slice_seq(seq, start=None, stop=None):
<|code_end|>
, predict the next line using imports from the current file:
from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS)
and context including class names, function names, and sometimes code from other files:
# Path: crumbs/utils/optional_modules.py
# MSG = 'A python package to run this executable is required,'
# BIO = 'biopython'
# BIO_BGZF = 'biopython with Bgzf support'
# NCBIXML = create_fake_class(MSG + BIO)
# NCBIWWW = create_fake_class(MSG + BIO)
# def create_fake_class(msg):
# def __init__(self, *args, **kwargs):
# def create_fake_funct(msg):
# def FakeRequiredfunct(*args, **kwargs):
# class FakePythonRequiredClass(object):
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# SEQRECORD = 'seqrecord'
#
# ILLUMINA_QUALITY = 'fastq-illumina'
#
# SANGER_QUALITY = 'fastq'
#
# SANGER_FASTQ_FORMATS = ('fastq-sanger', 'fastq')
#
# ILLUMINA_FASTQ_FORMATS = ('fastq-illumina',)
. Output only the next line. | seq_class = seq.kind |
Given the following code snippet before the placeholder: <|code_start|> type=argparse.FileType('w'))
msg = 'File to print some statistics (default STDERR)'
parser.add_argument('-l', '--log', help=msg, type=argparse.FileType('w'),
default=sys.stderr)
return parser
def setup_filter_argparse(**kwargs):
'It prepares the command line argument parsing.'
parser = setup_basic_argparse(**kwargs)
parser.add_argument('-f', '--filtered',
help='Output for filtered SNVs',
type=argparse.FileType('w'))
parser.add_argument('-s', '--samples', action='append',
help='samples to use')
parser.add_argument('-p', '--samples_file',
help='File with samples to use. One per line',
type=argparse.FileType('r'))
return parser
def parse_basic_args(parser):
parsed_args = parser.parse_args()
in_fhand = get_input_fhand(parsed_args.input)
out_fhand = parsed_args.output
log_fhand = parsed_args.log
<|code_end|>
, predict the next line using imports from the current file:
import argparse
import sys
from crumbs.utils.file_utils import get_input_fhand
and context including class names, function names, and sometimes code from other files:
# Path: crumbs/utils/file_utils.py
# def get_input_fhand(in_fhand):
# in_fhand = wrap_in_buffered_reader(in_fhand, buffering=DEF_FILE_BUFFER)
#
# in_compressed = _vcf_is_gz(in_fhand)
# if in_compressed and not _fhand_is_tellable(in_fhand):
# msg = 'The given input has no tell member and it is compressed. '
# msg += 'You cannot use gzip file through stdin, try to pipe it '
# msg += 'uncompressed with zcat |'
# raise RuntimeError(msg)
#
# if in_compressed:
# mod_in_fhand = GzipFile(fileobj=in_fhand)
# else:
# mod_in_fhand = in_fhand
#
# return mod_in_fhand
. Output only the next line. | args = {'in_fhand': in_fhand, 'log_fhand': log_fhand, |
Continue the code snippet: <|code_start|>
def __init__(self, min_qual):
self._min_qual = min_qual
def __call__(self, snv):
return snv.remove_gt_from_low_qual_calls(min_qual=self._min_qual)
class HetGenotypeFilter(object):
def __call__(self, snv):
return snv.remove_gt_from_het_calls()
def prob_aa_given_n_a_reads_hw(num_a_reads, freq_a_in_pop):
'It assumes HW'
# TODO fix for Backcross
proba = freq_a_in_pop
res = proba
res /= proba + 0.5 ** (num_a_reads - 1) * (1 - proba)
return res
RIL_FREQ_AA_CACHE = {}
def _prob_aa_ril_self(n_generation):
if n_generation in RIL_FREQ_AA_CACHE:
return RIL_FREQ_AA_CACHE[n_generation]
freq_aa = (2**n_generation - 1) / 2 ** (n_generation + 1)
<|code_end|>
. Use current file imports:
from collections import Counter
from StringIO import StringIO
from crumbs.vcf.snv import VCFReader, VCFWriter
and context (classes, functions, or code) from other files:
# Path: crumbs/vcf/snv.py
# class VCFReader(object):
# def __init__(self, fhand, compressed=None, filename=None,
# min_calls_for_pop_stats=DEF_MIN_CALLS_FOR_POP_STATS):
# self.fhand = fhand
# self.pyvcf_reader = pyvcfReader(fsock=fhand, compressed=compressed,
# filename=filename)
# self.min_calls_for_pop_stats = min_calls_for_pop_stats
# self._snpcaller = None
#
# def parse_snvs(self):
# min_calls_for_pop_stats = self.min_calls_for_pop_stats
# last_snp = None
# try:
# counter =0
# for snp in self.pyvcf_reader:
# counter +=1
# snp = SNV(snp, reader=self,
# min_calls_for_pop_stats=min_calls_for_pop_stats)
# last_snp = snp
# yield snp
# except Exception:
# from traceback import print_exception
# exc_type, exc_value, exc_traceback = sys.exc_info()
#
# print_exception(exc_type, exc_value, exc_traceback,
# limit=20, file=sys.stderr)
#
# if last_snp is not None:
# chrom = str(last_snp.chrom)
# pos = last_snp.pos
# msg = 'Last parsed SNP was: {} {}\n'.format(chrom, pos + 1)
# sys.stderr.write(msg)
# raise
#
# def fetch_snvs(self, chrom, start, end=None):
# min_calls_for_pop_stats = self.min_calls_for_pop_stats
# try:
# snvs = self.pyvcf_reader.fetch(chrom, start + 1, end=end)
# except KeyError:
# snvs = []
# if snvs is None:
# snvs = []
#
# for snp in snvs:
# snp = SNV(snp, reader=self,
# min_calls_for_pop_stats=min_calls_for_pop_stats)
# yield snp
#
# def sliding_windows(self, size, step=None, ref_fhand=None,
# min_num_snps=DEF_MIN_NUM_SNPS_IN_WIN):
# random_snp_reader = VCFReader(open(self.fhand.name))
# sliding_window = _SNPSlidingWindow(snp_reader=random_snp_reader,
# win_size=size, win_step=step,
# min_num_snps=min_num_snps,
# ref_fhand=ref_fhand)
# for window in sliding_window.windows():
# yield window
#
# @property
# def snpcaller(self):
# if self._snpcaller is not None:
# return self._snpcaller
#
# metadata = self.pyvcf_reader.metadata
# if 'source' in metadata:
# if 'VarScan2' in metadata['source']:
# snpcaller = VARSCAN
# elif 'freebayes' in metadata['source'][0].lower():
# snpcaller = FREEBAYES
# else:
# snpcaller = GENERIC
# elif 'UnifiedGenotyper' in metadata:
# snpcaller = GATK
# else:
# snpcaller = GENERIC
# self._snpcaller = snpcaller
# return snpcaller
#
# @property
# def samples(self):
# return self.pyvcf_reader.samples
#
# @property
# def filters(self):
# return self.pyvcf_reader.filters
#
# @property
# def infos(self):
# return self.pyvcf_reader.infos
#
# @property
# def header(self):
# header = '\n'.join(self.pyvcf_reader._header_lines)
# header += '\n#' + '\t'.join(self.pyvcf_reader._column_headers)
# header += '\t' + '\t'.join(self.pyvcf_reader.samples)
# return header
#
# class VCFWriter(pyvcfWriter):
#
# def __init__(self, stream, template_reader, lineterminator="\n"):
# template = template_reader.pyvcf_reader
# super(VCFWriter, self).__init__(stream, template,
# lineterminator=lineterminator)
#
# def write_snv(self, snv):
# super(VCFWriter, self).write_record(snv.record)
#
# def write_snvs(self, snvs):
# for snv in snvs:
# try:
# self.write_snv(snv)
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' in str(error):
# break
# else:
# raise
#
# def flush(self):
# flush_fhand(self.stream)
. Output only the next line. | RIL_FREQ_AA_CACHE[n_generation] = freq_aa |
Given snippet: <|code_start|> if genotypic_freqs_method == HW:
allele_freqs = snv.allele_freqs
if not allele_freqs:
num_samples = len(snv.record.samples)
self.log['not_enough_individuals'] += num_samples
self.log['tot'] += num_samples
def set_all_gt_to_none(call):
return call.copy_setting_gt(gt=None,
return_pyvcf_call=True)
return snv.copy_mapping_calls(set_all_gt_to_none)
calls = []
kwargs = self.genotypic_freqs_kwargs
log = self.log
for call in snv.calls:
if not call.called:
filtered_call = call.call
log['was_not_called'] += 1
else:
alleles = call.int_alleles
if len(set(alleles)) > 1:
filtered_call = call.call
log['was_het'] += 1
else:
allele = alleles[0]
allele_depths = call.allele_depths
if not allele_depths:
msg = 'Allele depths are required for the lowEvidence'
msg += 'Allele filter'
raise RuntimeError(msg)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import Counter
from StringIO import StringIO
from crumbs.vcf.snv import VCFReader, VCFWriter
and context:
# Path: crumbs/vcf/snv.py
# class VCFReader(object):
# def __init__(self, fhand, compressed=None, filename=None,
# min_calls_for_pop_stats=DEF_MIN_CALLS_FOR_POP_STATS):
# self.fhand = fhand
# self.pyvcf_reader = pyvcfReader(fsock=fhand, compressed=compressed,
# filename=filename)
# self.min_calls_for_pop_stats = min_calls_for_pop_stats
# self._snpcaller = None
#
# def parse_snvs(self):
# min_calls_for_pop_stats = self.min_calls_for_pop_stats
# last_snp = None
# try:
# counter =0
# for snp in self.pyvcf_reader:
# counter +=1
# snp = SNV(snp, reader=self,
# min_calls_for_pop_stats=min_calls_for_pop_stats)
# last_snp = snp
# yield snp
# except Exception:
# from traceback import print_exception
# exc_type, exc_value, exc_traceback = sys.exc_info()
#
# print_exception(exc_type, exc_value, exc_traceback,
# limit=20, file=sys.stderr)
#
# if last_snp is not None:
# chrom = str(last_snp.chrom)
# pos = last_snp.pos
# msg = 'Last parsed SNP was: {} {}\n'.format(chrom, pos + 1)
# sys.stderr.write(msg)
# raise
#
# def fetch_snvs(self, chrom, start, end=None):
# min_calls_for_pop_stats = self.min_calls_for_pop_stats
# try:
# snvs = self.pyvcf_reader.fetch(chrom, start + 1, end=end)
# except KeyError:
# snvs = []
# if snvs is None:
# snvs = []
#
# for snp in snvs:
# snp = SNV(snp, reader=self,
# min_calls_for_pop_stats=min_calls_for_pop_stats)
# yield snp
#
# def sliding_windows(self, size, step=None, ref_fhand=None,
# min_num_snps=DEF_MIN_NUM_SNPS_IN_WIN):
# random_snp_reader = VCFReader(open(self.fhand.name))
# sliding_window = _SNPSlidingWindow(snp_reader=random_snp_reader,
# win_size=size, win_step=step,
# min_num_snps=min_num_snps,
# ref_fhand=ref_fhand)
# for window in sliding_window.windows():
# yield window
#
# @property
# def snpcaller(self):
# if self._snpcaller is not None:
# return self._snpcaller
#
# metadata = self.pyvcf_reader.metadata
# if 'source' in metadata:
# if 'VarScan2' in metadata['source']:
# snpcaller = VARSCAN
# elif 'freebayes' in metadata['source'][0].lower():
# snpcaller = FREEBAYES
# else:
# snpcaller = GENERIC
# elif 'UnifiedGenotyper' in metadata:
# snpcaller = GATK
# else:
# snpcaller = GENERIC
# self._snpcaller = snpcaller
# return snpcaller
#
# @property
# def samples(self):
# return self.pyvcf_reader.samples
#
# @property
# def filters(self):
# return self.pyvcf_reader.filters
#
# @property
# def infos(self):
# return self.pyvcf_reader.infos
#
# @property
# def header(self):
# header = '\n'.join(self.pyvcf_reader._header_lines)
# header += '\n#' + '\t'.join(self.pyvcf_reader._column_headers)
# header += '\t' + '\t'.join(self.pyvcf_reader.samples)
# return header
#
# class VCFWriter(pyvcfWriter):
#
# def __init__(self, stream, template_reader, lineterminator="\n"):
# template = template_reader.pyvcf_reader
# super(VCFWriter, self).__init__(stream, template,
# lineterminator=lineterminator)
#
# def write_snv(self, snv):
# super(VCFWriter, self).write_record(snv.record)
#
# def write_snvs(self, snvs):
# for snv in snvs:
# try:
# self.write_snv(snv)
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' in str(error):
# break
# else:
# raise
#
# def flush(self):
# flush_fhand(self.stream)
which might include code, classes, or functions. Output only the next line. | depth = call.allele_depths[allele] |
Given the code snippet: <|code_start|>
# pylint: disable=C0111
class SortTest(unittest.TestCase):
def test_sort_bam_bin(self):
bin_ = os.path.join(BIN_DIR, 'sort_bam')
assert 'usage' in check_output([bin_, '-h'])
bam_fpath = os.path.join(TEST_DATA_DIR, 'seqs.bam')
sorted_fhand = NamedTemporaryFile(suffix='.sorted.bam')
check_call([bin_, bam_fpath, '-o', sorted_fhand.name])
assert "@HD\tVN:1.4" in check_output(['samtools', 'view', '-h',
<|code_end|>
, generate the next line using the imports in this file:
import os.path
import unittest
import shutil
import pysam
from subprocess import check_output, check_call
from tempfile import NamedTemporaryFile
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.bam.bam_tools import (filter_bam, calmd_bam, realign_bam,
index_bam, merge_sams)
and context (functions, classes, or occasionally code) from other files:
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/bam/bam_tools.py
# def filter_bam(in_fpath, out_fpath, min_mapq=0, required_flag_tags=None,
# filtering_flag_tags=None, regions=None):
# cmd = ['-bh']
#
# # The following line:
# cmd.append('-o' + out_fpath)
# # should be
# # cmd.extend(['-o', out_fpath])
# # but it is a workaround, take a look at:
# # https://groups.google.com/forum/#!msg/pysam-user-group/ooHgIiNVe4c/CcY06d45rzQJ
#
# if min_mapq:
# cmd.extend(['-q', str(min_mapq)])
#
# if required_flag_tags:
# flag = create_flag(required_flag_tags)
# cmd.extend(['-f', str(flag)])
#
# if filtering_flag_tags:
# flag = create_flag(filtering_flag_tags)
# cmd.extend(['-F', str(flag)])
#
# cmd.extend([in_fpath])
#
# if regions:
# regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
# cmd.extend(regions)
#
# pysam.view(*cmd)
#
# def calmd_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _calmd_bam(in_bam_fpath, reference_fpath, temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def realign_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _realign_bam(in_bam_fpath, reference_fpath, temp_out_fpath, threads=False)
# sort_bam(temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def index_bam(bam_fpath):
# 'It indexes a bam file'
# pysam.index(bam_fpath)
#
# def merge_sams(in_fpaths, out_fpath):
# picard_jar = get_setting("PICARD_JAR")
#
# cmd = ['java', '-jar', picard_jar, 'MergeSamFiles',
# 'O={}'.format(out_fpath)]
# for in_fpath in in_fpaths:
# cmd.append('I={}'.format(in_fpath))
# stderr = NamedTemporaryFile(suffix='picard.stderr')
# stdout = NamedTemporaryFile(suffix='picard.stdout')
# try:
# check_call(cmd, stderr=stderr, stdout=stdout)
# except CalledProcessError:
# sys.stderr.write(open(stderr.name).read())
# sys.stdout.write(open(stdout.name).read())
. Output only the next line. | sorted_fhand.name]) |
Predict the next line after this snippet: <|code_start|>
# pylint: disable=C0111
class SortTest(unittest.TestCase):
def test_sort_bam_bin(self):
bin_ = os.path.join(BIN_DIR, 'sort_bam')
assert 'usage' in check_output([bin_, '-h'])
bam_fpath = os.path.join(TEST_DATA_DIR, 'seqs.bam')
<|code_end|>
using the current file's imports:
import os.path
import unittest
import shutil
import pysam
from subprocess import check_output, check_call
from tempfile import NamedTemporaryFile
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.bam.bam_tools import (filter_bam, calmd_bam, realign_bam,
index_bam, merge_sams)
and any relevant context from other files:
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/bam/bam_tools.py
# def filter_bam(in_fpath, out_fpath, min_mapq=0, required_flag_tags=None,
# filtering_flag_tags=None, regions=None):
# cmd = ['-bh']
#
# # The following line:
# cmd.append('-o' + out_fpath)
# # should be
# # cmd.extend(['-o', out_fpath])
# # but it is a workaround, take a look at:
# # https://groups.google.com/forum/#!msg/pysam-user-group/ooHgIiNVe4c/CcY06d45rzQJ
#
# if min_mapq:
# cmd.extend(['-q', str(min_mapq)])
#
# if required_flag_tags:
# flag = create_flag(required_flag_tags)
# cmd.extend(['-f', str(flag)])
#
# if filtering_flag_tags:
# flag = create_flag(filtering_flag_tags)
# cmd.extend(['-F', str(flag)])
#
# cmd.extend([in_fpath])
#
# if regions:
# regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
# cmd.extend(regions)
#
# pysam.view(*cmd)
#
# def calmd_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _calmd_bam(in_bam_fpath, reference_fpath, temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def realign_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _realign_bam(in_bam_fpath, reference_fpath, temp_out_fpath, threads=False)
# sort_bam(temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def index_bam(bam_fpath):
# 'It indexes a bam file'
# pysam.index(bam_fpath)
#
# def merge_sams(in_fpaths, out_fpath):
# picard_jar = get_setting("PICARD_JAR")
#
# cmd = ['java', '-jar', picard_jar, 'MergeSamFiles',
# 'O={}'.format(out_fpath)]
# for in_fpath in in_fpaths:
# cmd.append('I={}'.format(in_fpath))
# stderr = NamedTemporaryFile(suffix='picard.stderr')
# stdout = NamedTemporaryFile(suffix='picard.stdout')
# try:
# check_call(cmd, stderr=stderr, stdout=stdout)
# except CalledProcessError:
# sys.stderr.write(open(stderr.name).read())
# sys.stdout.write(open(stdout.name).read())
. Output only the next line. | sorted_fhand = NamedTemporaryFile(suffix='.sorted.bam') |
Given snippet: <|code_start|>
# no index
sorted_fhand = NamedTemporaryFile()
check_call([bin_, bam_fpath, '-o', sorted_fhand.name, '--no-index'])
assert not os.path.exists(sorted_fhand.name + '.bai')
# sort the sam file
fhand = NamedTemporaryFile()
fhand.write(open(bam_fpath).read())
fhand.flush()
check_call([bin_, fhand.name])
assert "@HD\tVN:1.4" in check_output(['samtools', 'view', '-h',
bam_fpath])
assert os.path.exists(fhand.name + '.bai')
os.remove(fhand.name + '.bai')
class ToolsTest(unittest.TestCase):
def test_index_bam(self):
bam_fpath = os.path.join(TEST_DATA_DIR, 'seqs.bam')
index_bam(bam_fpath)
index_bam(bam_fpath)
def test_merge_sam(self):
bam_fpath = os.path.join(TEST_DATA_DIR, 'sample.bam')
fhand = NamedTemporaryFile(suffix='.bam')
out_fpath = fhand.name
fhand.close()
try:
merge_sams([bam_fpath, bam_fpath], out_fpath=out_fpath)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os.path
import unittest
import shutil
import pysam
from subprocess import check_output, check_call
from tempfile import NamedTemporaryFile
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.bam.bam_tools import (filter_bam, calmd_bam, realign_bam,
index_bam, merge_sams)
and context:
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/bam/bam_tools.py
# def filter_bam(in_fpath, out_fpath, min_mapq=0, required_flag_tags=None,
# filtering_flag_tags=None, regions=None):
# cmd = ['-bh']
#
# # The following line:
# cmd.append('-o' + out_fpath)
# # should be
# # cmd.extend(['-o', out_fpath])
# # but it is a workaround, take a look at:
# # https://groups.google.com/forum/#!msg/pysam-user-group/ooHgIiNVe4c/CcY06d45rzQJ
#
# if min_mapq:
# cmd.extend(['-q', str(min_mapq)])
#
# if required_flag_tags:
# flag = create_flag(required_flag_tags)
# cmd.extend(['-f', str(flag)])
#
# if filtering_flag_tags:
# flag = create_flag(filtering_flag_tags)
# cmd.extend(['-F', str(flag)])
#
# cmd.extend([in_fpath])
#
# if regions:
# regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
# cmd.extend(regions)
#
# pysam.view(*cmd)
#
# def calmd_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _calmd_bam(in_bam_fpath, reference_fpath, temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def realign_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _realign_bam(in_bam_fpath, reference_fpath, temp_out_fpath, threads=False)
# sort_bam(temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def index_bam(bam_fpath):
# 'It indexes a bam file'
# pysam.index(bam_fpath)
#
# def merge_sams(in_fpaths, out_fpath):
# picard_jar = get_setting("PICARD_JAR")
#
# cmd = ['java', '-jar', picard_jar, 'MergeSamFiles',
# 'O={}'.format(out_fpath)]
# for in_fpath in in_fpaths:
# cmd.append('I={}'.format(in_fpath))
# stderr = NamedTemporaryFile(suffix='picard.stderr')
# stdout = NamedTemporaryFile(suffix='picard.stdout')
# try:
# check_call(cmd, stderr=stderr, stdout=stdout)
# except CalledProcessError:
# sys.stderr.write(open(stderr.name).read())
# sys.stdout.write(open(stdout.name).read())
which might include code, classes, or functions. Output only the next line. | samfile = pysam.Samfile(out_fpath) |
Given the code snippet: <|code_start|> merge_sams([bam_fpath, bam_fpath], out_fpath=out_fpath)
samfile = pysam.Samfile(out_fpath)
assert len(list(samfile)) == 2
assert os.stat(bam_fpath) != os.stat(out_fpath)
finally:
if os.path.exists(out_fpath):
os.remove(out_fpath)
class FilterTest(unittest.TestCase):
def test_filter_mapq(self):
bam_fpath = os.path.join(TEST_DATA_DIR, 'seqs.bam')
out_fhand = NamedTemporaryFile()
filter_bam(bam_fpath, out_fhand.name, min_mapq=100)
assert len(open(out_fhand.name).read(20)) == 20
class RealignTest(unittest.TestCase):
def test_realign_bamself(self):
ref_fpath = os.path.join(TEST_DATA_DIR, 'CUUC00007_TC01.fasta')
bam_fpath = os.path.join(TEST_DATA_DIR, 'sample.bam')
out_bam = NamedTemporaryFile()
realign_bam(bam_fpath, ref_fpath, out_bam.name)
def test_realign_bin(self):
bin_ = os.path.join(BIN_DIR, 'realign_bam')
assert 'usage' in check_output([bin_, '-h'])
bam_fpath = os.path.join(TEST_DATA_DIR, 'sample.bam')
ref_fpath = os.path.join(TEST_DATA_DIR, 'CUUC00007_TC01.fasta')
<|code_end|>
, generate the next line using the imports in this file:
import os.path
import unittest
import shutil
import pysam
from subprocess import check_output, check_call
from tempfile import NamedTemporaryFile
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.bam.bam_tools import (filter_bam, calmd_bam, realign_bam,
index_bam, merge_sams)
and context (functions, classes, or occasionally code) from other files:
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/bam/bam_tools.py
# def filter_bam(in_fpath, out_fpath, min_mapq=0, required_flag_tags=None,
# filtering_flag_tags=None, regions=None):
# cmd = ['-bh']
#
# # The following line:
# cmd.append('-o' + out_fpath)
# # should be
# # cmd.extend(['-o', out_fpath])
# # but it is a workaround, take a look at:
# # https://groups.google.com/forum/#!msg/pysam-user-group/ooHgIiNVe4c/CcY06d45rzQJ
#
# if min_mapq:
# cmd.extend(['-q', str(min_mapq)])
#
# if required_flag_tags:
# flag = create_flag(required_flag_tags)
# cmd.extend(['-f', str(flag)])
#
# if filtering_flag_tags:
# flag = create_flag(filtering_flag_tags)
# cmd.extend(['-F', str(flag)])
#
# cmd.extend([in_fpath])
#
# if regions:
# regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
# cmd.extend(regions)
#
# pysam.view(*cmd)
#
# def calmd_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _calmd_bam(in_bam_fpath, reference_fpath, temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def realign_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _realign_bam(in_bam_fpath, reference_fpath, temp_out_fpath, threads=False)
# sort_bam(temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def index_bam(bam_fpath):
# 'It indexes a bam file'
# pysam.index(bam_fpath)
#
# def merge_sams(in_fpaths, out_fpath):
# picard_jar = get_setting("PICARD_JAR")
#
# cmd = ['java', '-jar', picard_jar, 'MergeSamFiles',
# 'O={}'.format(out_fpath)]
# for in_fpath in in_fpaths:
# cmd.append('I={}'.format(in_fpath))
# stderr = NamedTemporaryFile(suffix='picard.stderr')
# stdout = NamedTemporaryFile(suffix='picard.stdout')
# try:
# check_call(cmd, stderr=stderr, stdout=stdout)
# except CalledProcessError:
# sys.stderr.write(open(stderr.name).read())
# sys.stdout.write(open(stdout.name).read())
. Output only the next line. | realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam') |
Given snippet: <|code_start|>
# pylint: disable=C0111
class SortTest(unittest.TestCase):
def test_sort_bam_bin(self):
bin_ = os.path.join(BIN_DIR, 'sort_bam')
assert 'usage' in check_output([bin_, '-h'])
bam_fpath = os.path.join(TEST_DATA_DIR, 'seqs.bam')
sorted_fhand = NamedTemporaryFile(suffix='.sorted.bam')
check_call([bin_, bam_fpath, '-o', sorted_fhand.name])
assert "@HD\tVN:1.4" in check_output(['samtools', 'view', '-h',
sorted_fhand.name])
assert os.path.exists(sorted_fhand.name + '.bai')
os.remove(sorted_fhand.name + '.bai')
sorted_fhand.close()
# no index
sorted_fhand = NamedTemporaryFile()
check_call([bin_, bam_fpath, '-o', sorted_fhand.name, '--no-index'])
assert not os.path.exists(sorted_fhand.name + '.bai')
# sort the sam file
fhand = NamedTemporaryFile()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os.path
import unittest
import shutil
import pysam
from subprocess import check_output, check_call
from tempfile import NamedTemporaryFile
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.bam.bam_tools import (filter_bam, calmd_bam, realign_bam,
index_bam, merge_sams)
and context:
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/bam/bam_tools.py
# def filter_bam(in_fpath, out_fpath, min_mapq=0, required_flag_tags=None,
# filtering_flag_tags=None, regions=None):
# cmd = ['-bh']
#
# # The following line:
# cmd.append('-o' + out_fpath)
# # should be
# # cmd.extend(['-o', out_fpath])
# # but it is a workaround, take a look at:
# # https://groups.google.com/forum/#!msg/pysam-user-group/ooHgIiNVe4c/CcY06d45rzQJ
#
# if min_mapq:
# cmd.extend(['-q', str(min_mapq)])
#
# if required_flag_tags:
# flag = create_flag(required_flag_tags)
# cmd.extend(['-f', str(flag)])
#
# if filtering_flag_tags:
# flag = create_flag(filtering_flag_tags)
# cmd.extend(['-F', str(flag)])
#
# cmd.extend([in_fpath])
#
# if regions:
# regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
# cmd.extend(regions)
#
# pysam.view(*cmd)
#
# def calmd_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _calmd_bam(in_bam_fpath, reference_fpath, temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def realign_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
#
# if out_bam_fpath is None:
# out_bam_fpath = in_bam_fpath
#
# if out_bam_fpath == in_bam_fpath:
# realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
# delete=False)
# temp_out_fpath = realigned_fhand.name
# else:
# temp_out_fpath = out_bam_fpath
#
# _realign_bam(in_bam_fpath, reference_fpath, temp_out_fpath, threads=False)
# sort_bam(temp_out_fpath)
#
# if temp_out_fpath != out_bam_fpath:
# shutil.move(temp_out_fpath, out_bam_fpath)
#
# def index_bam(bam_fpath):
# 'It indexes a bam file'
# pysam.index(bam_fpath)
#
# def merge_sams(in_fpaths, out_fpath):
# picard_jar = get_setting("PICARD_JAR")
#
# cmd = ['java', '-jar', picard_jar, 'MergeSamFiles',
# 'O={}'.format(out_fpath)]
# for in_fpath in in_fpaths:
# cmd.append('I={}'.format(in_fpath))
# stderr = NamedTemporaryFile(suffix='picard.stderr')
# stdout = NamedTemporaryFile(suffix='picard.stdout')
# try:
# check_call(cmd, stderr=stderr, stdout=stdout)
# except CalledProcessError:
# sys.stderr.write(open(stderr.name).read())
# sys.stdout.write(open(stdout.name).read())
which might include code, classes, or functions. Output only the next line. | fhand.write(open(bam_fpath).read()) |
Predict the next line after this snippet: <|code_start|> max_ = intcounter.max
axes.bar(index + 0.5, quart[2] - quart[0], bottom=quart[0],
width=bar_width, facecolor='none', align='center')
# median
axes.plot([index + 0.5 - bar_width / 2, index + 0.5 + bar_width / 2],
[quart[1], quart[1]], color='red')
# max
axes.plot([index + 0.5 - bar_width / 4, index + 0.5 + bar_width / 4],
[max_, max_], color='black')
axes.plot([index + 0.5, index + 0.5], [quart[0], min_], '--',
color='black')
axes.plot([index + 0.5 - bar_width / 4, index + 0.5 + bar_width / 4],
[min_, min_], color='black')
axes.plot([index + 0.5, index + 0.5], [quart[2], max_], '--',
color='black')
max_x = len(x_vals) - 1
axes.set_xlim(0, max_x + 1)
axes.set_xticks(xticks)
axes.set_xticklabels(xticks_lables)
if title:
axes.set_title(title)
if xlabel:
axes.set_xlabel(xlabel)
if ylabel:
axes.set_ylabel(ylabel)
if not using_given_axes:
<|code_end|>
using the current file's imports:
from os.path import splitext
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib import colors, cm
from crumbs.exceptions import OptionalRequirementError
and any relevant context from other files:
# Path: crumbs/exceptions.py
# class OptionalRequirementError(Exception):
# 'An optional module is not present'
# pass
. Output only the next line. | canvas.print_figure(fhand) |
Here is a snippet: <|code_start|> return True
else:
return False
log1 = math.log(float(num1))
log2 = math.log(float(num2))
return abs(log1 - log2) < ratio
def _check_sequence(sequence, expected):
'It matches a sequence against an expected result'
if 'name' in expected:
assert sequence['name'] == expected['name']
if 'description' in expected:
assert sequence['description'] == expected['description']
if 'length' in expected:
assert sequence['length'] == expected['length']
def _check_match_part(match_part, expected):
'It matches a match_part against an expected result'
if 'query_start' in expected:
assert match_part['query_start'] == expected['query_start']
if 'query_end' in expected:
assert match_part['query_end'] == expected['query_end']
if 'query_strand' in expected:
assert match_part['query_strand'] == expected['query_strand']
if 'subject_start' in expected:
assert match_part['subject_start'] == expected['subject_start']
if 'subject_end' in expected:
assert match_part['subject_end'] == expected['subject_end']
<|code_end|>
. Write the next line using the current file imports:
import unittest
import os
import math
from StringIO import StringIO
from tempfile import NamedTemporaryFile
from crumbs.seq.alignment_result import (BlastParser, TabularBlastParser,
alignment_results_scores,
ExonerateParser,
filter_alignments,
covered_segments_from_match_parts,
elongate_match_parts_till_global,
TextBlastParser, QUERY, SUBJECT)
from crumbs.utils.test_utils import TEST_DATA_DIR
and context from other files:
# Path: crumbs/seq/alignment_result.py
# def _text_blasts_in_file(fhand):
# def _split_description(string):
# def _text_blast_parser(fhand):
# def __init__(self, fhand):
# def __iter__(self):
# def next(self):
# def _lines_for_every_tab_blast(fhand, line_format):
# def _group_match_parts_by_subject(match_parts):
# def _tabular_blast_parser(fhand, line_format):
# def __init__(self, fhand, line_format=DEFAULT_TABBLAST_FORMAT):
# def __iter__(self):
# def next(self):
# def __init__(self, fhand, subj_def_as_accesion=None):
# def __iter__(self):
# def _create_result_structure(self, bio_result):
# def _get_blast_metadata(self):
# def next(self):
# def __init__(self, fhand):
# def __iter__(self):
# def _results_query_from_exonerate(self):
# def _create_structure_result(query_result):
# def next(self):
# def _strand_transform(strand):
# def _match_num_if_exists_in_struc(subject_name, struct_dict):
# def get_alignment_parser(kind):
# def get_match_score(match, score_key, query=None, subject=None):
# def get_match_scores(match, score_keys, query, subject):
# def alignment_results_scores(results, scores, filter_same_query_subject=True):
# def build_relations_from_aligment(fhand, query_name, subject_name):
# def _get_match_score(match, score_key, query=None, subject=None):
# def _score_above_threshold(score, min_score, max_score, log_tolerance,
# log_best_score):
# def _create_scores_mapper_(score_key, score_tolerance=None,
# max_score=None, min_score=None):
# def map_(alignment):
# def _create_best_scores_mapper(score_key, score_tolerance=None,
# max_score=None, min_score=None):
# def _create_scores_mapper(score_key, max_score=None, min_score=None):
# def _create_deepcopy_mapper():
# def map_(alignment):
# def _create_empty_filter():
# def filter_(alignment):
# def _fix_match_scores(match, score_keys):
# def _fix_match_start_end(match):
# def _fix_matches(alignment, score_keys=None):
# def _create_fix_matches_mapper():
# def covered_segments_from_match_parts(match_parts, in_query=True,
# merge_segments_closer=1):
# def elongate_match_part_till_global(match_part, query_length, subject_length,
# align_completely):
# def elongate_match_parts_till_global(match_parts, query_length,
# subject_length, align_completely):
# def _match_length(match, length_from_query):
# def _match_part_length(match_part, length_in_query):
# def _match_long_enough(match_length, total_length, min_num_residues,
# min_percentage, length_in_query):
# def _create_min_length_mapper(length_in_query, min_num_residues=None,
# min_percentage=None, filter_match_parts=False):
# def map_(alignment):
# def filter_alignments(alignments, config):
# class TextBlastParser(object):
# class TabularBlastParser(object):
# class BlastParser(object):
# class ExonerateParser(object):
# DEFAULT_TABBLAST_FORMAT = ('query', 'subject', 'identity', 'alignment_length',
# 'mismatches', 'gap_open', 'query_start',
# 'query_end', 'subject_start', 'subject_end',
# 'expect', 'score')
# MAPPER = 1
# FILTER = 2
# FILTER_COLLECTION = {'best_scores':
# {'funct_factory': _create_best_scores_mapper,
# 'kind': MAPPER},
# 'score_threshold':
# {'funct_factory': _create_scores_mapper,
# 'kind': MAPPER},
# 'min_length': {'funct_factory': _create_min_length_mapper,
# 'kind': MAPPER},
# 'deepcopy': {'funct_factory': _create_deepcopy_mapper,
# 'kind': MAPPER},
# 'fix_matches':
# {'funct_factory': _create_fix_matches_mapper,
# 'kind': MAPPER},
# 'filter_empty':
# {'funct_factory': _create_empty_filter,
# 'kind': FILTER},
# }
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
, which may include functions, classes, or code. Output only the next line. | if 'subject_strand' in expected: |
Continue the code snippet: <|code_start|> 'It test the blast parser'
def test_blast_parser(self):
'It test the blast parser'
blast_file = open(os.path.join(TEST_DATA_DIR, 'blast.xml'))
parser = BlastParser(fhand=blast_file)
expected_results = [
{'query':{'name':'cCL1Contig2',
'description':"<unknown description>",
'length':1924},
'matches':[
{'subject':{'name':'chr18',
'description':'No definition line found',
'length':19691255},
'scores':{'expect':4.60533e-35},
'match_parts':[{'query_start':276, 'query_end':484,
'query_strand':-1,
'subject_start':477142,
'subject_end':477350,
'subject_strand':1,
'scores':{'expect': 4.60533e-35,
'similarity':84.2,
'identity': 84.2}
}],
}
]
},
{'query':{'name':'cCL1Contig3',
'description':"<unknown description>",
<|code_end|>
. Use current file imports:
import unittest
import os
import math
from StringIO import StringIO
from tempfile import NamedTemporaryFile
from crumbs.seq.alignment_result import (BlastParser, TabularBlastParser,
alignment_results_scores,
ExonerateParser,
filter_alignments,
covered_segments_from_match_parts,
elongate_match_parts_till_global,
TextBlastParser, QUERY, SUBJECT)
from crumbs.utils.test_utils import TEST_DATA_DIR
and context (classes, functions, or code) from other files:
# Path: crumbs/seq/alignment_result.py
# def _text_blasts_in_file(fhand):
# def _split_description(string):
# def _text_blast_parser(fhand):
# def __init__(self, fhand):
# def __iter__(self):
# def next(self):
# def _lines_for_every_tab_blast(fhand, line_format):
# def _group_match_parts_by_subject(match_parts):
# def _tabular_blast_parser(fhand, line_format):
# def __init__(self, fhand, line_format=DEFAULT_TABBLAST_FORMAT):
# def __iter__(self):
# def next(self):
# def __init__(self, fhand, subj_def_as_accesion=None):
# def __iter__(self):
# def _create_result_structure(self, bio_result):
# def _get_blast_metadata(self):
# def next(self):
# def __init__(self, fhand):
# def __iter__(self):
# def _results_query_from_exonerate(self):
# def _create_structure_result(query_result):
# def next(self):
# def _strand_transform(strand):
# def _match_num_if_exists_in_struc(subject_name, struct_dict):
# def get_alignment_parser(kind):
# def get_match_score(match, score_key, query=None, subject=None):
# def get_match_scores(match, score_keys, query, subject):
# def alignment_results_scores(results, scores, filter_same_query_subject=True):
# def build_relations_from_aligment(fhand, query_name, subject_name):
# def _get_match_score(match, score_key, query=None, subject=None):
# def _score_above_threshold(score, min_score, max_score, log_tolerance,
# log_best_score):
# def _create_scores_mapper_(score_key, score_tolerance=None,
# max_score=None, min_score=None):
# def map_(alignment):
# def _create_best_scores_mapper(score_key, score_tolerance=None,
# max_score=None, min_score=None):
# def _create_scores_mapper(score_key, max_score=None, min_score=None):
# def _create_deepcopy_mapper():
# def map_(alignment):
# def _create_empty_filter():
# def filter_(alignment):
# def _fix_match_scores(match, score_keys):
# def _fix_match_start_end(match):
# def _fix_matches(alignment, score_keys=None):
# def _create_fix_matches_mapper():
# def covered_segments_from_match_parts(match_parts, in_query=True,
# merge_segments_closer=1):
# def elongate_match_part_till_global(match_part, query_length, subject_length,
# align_completely):
# def elongate_match_parts_till_global(match_parts, query_length,
# subject_length, align_completely):
# def _match_length(match, length_from_query):
# def _match_part_length(match_part, length_in_query):
# def _match_long_enough(match_length, total_length, min_num_residues,
# min_percentage, length_in_query):
# def _create_min_length_mapper(length_in_query, min_num_residues=None,
# min_percentage=None, filter_match_parts=False):
# def map_(alignment):
# def filter_alignments(alignments, config):
# class TextBlastParser(object):
# class TabularBlastParser(object):
# class BlastParser(object):
# class ExonerateParser(object):
# DEFAULT_TABBLAST_FORMAT = ('query', 'subject', 'identity', 'alignment_length',
# 'mismatches', 'gap_open', 'query_start',
# 'query_end', 'subject_start', 'subject_end',
# 'expect', 'score')
# MAPPER = 1
# FILTER = 2
# FILTER_COLLECTION = {'best_scores':
# {'funct_factory': _create_best_scores_mapper,
# 'kind': MAPPER},
# 'score_threshold':
# {'funct_factory': _create_scores_mapper,
# 'kind': MAPPER},
# 'min_length': {'funct_factory': _create_min_length_mapper,
# 'kind': MAPPER},
# 'deepcopy': {'funct_factory': _create_deepcopy_mapper,
# 'kind': MAPPER},
# 'fix_matches':
# {'funct_factory': _create_fix_matches_mapper,
# 'kind': MAPPER},
# 'filter_empty':
# {'funct_factory': _create_empty_filter,
# 'kind': FILTER},
# }
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
. Output only the next line. | 'length':629}, |
Using the snippet: <|code_start|> 'scores':{'expect': 2e-138,
'identity': 100.0}
},
],
}
],
}
]
n_blasts = 0
for index, blast in enumerate(alignments):
_check_blast(blast, expected_results[index])
n_blasts += 1
assert n_blasts == 2
blast_file = open(os.path.join(TEST_DATA_DIR, 'blast2.blast'))
parser = TextBlastParser(fhand=blast_file)
alignments = list(parser)
expected_results = [
{'query':{'name':'arabi', 'length': 456},
'matches':[
{'subject':{'name':'AT1G55265.1',
'description': 'a gene',
'length':693},
'scores':{'expect': 0.0},
'match_parts':[{'query_start':0, 'query_end':455,
'subject_start':237,
'subject_end':692,
'scores':{'expect': 0.0,
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import os
import math
from StringIO import StringIO
from tempfile import NamedTemporaryFile
from crumbs.seq.alignment_result import (BlastParser, TabularBlastParser,
alignment_results_scores,
ExonerateParser,
filter_alignments,
covered_segments_from_match_parts,
elongate_match_parts_till_global,
TextBlastParser, QUERY, SUBJECT)
from crumbs.utils.test_utils import TEST_DATA_DIR
and context (class names, function names, or code) available:
# Path: crumbs/seq/alignment_result.py
# def _text_blasts_in_file(fhand):
# def _split_description(string):
# def _text_blast_parser(fhand):
# def __init__(self, fhand):
# def __iter__(self):
# def next(self):
# def _lines_for_every_tab_blast(fhand, line_format):
# def _group_match_parts_by_subject(match_parts):
# def _tabular_blast_parser(fhand, line_format):
# def __init__(self, fhand, line_format=DEFAULT_TABBLAST_FORMAT):
# def __iter__(self):
# def next(self):
# def __init__(self, fhand, subj_def_as_accesion=None):
# def __iter__(self):
# def _create_result_structure(self, bio_result):
# def _get_blast_metadata(self):
# def next(self):
# def __init__(self, fhand):
# def __iter__(self):
# def _results_query_from_exonerate(self):
# def _create_structure_result(query_result):
# def next(self):
# def _strand_transform(strand):
# def _match_num_if_exists_in_struc(subject_name, struct_dict):
# def get_alignment_parser(kind):
# def get_match_score(match, score_key, query=None, subject=None):
# def get_match_scores(match, score_keys, query, subject):
# def alignment_results_scores(results, scores, filter_same_query_subject=True):
# def build_relations_from_aligment(fhand, query_name, subject_name):
# def _get_match_score(match, score_key, query=None, subject=None):
# def _score_above_threshold(score, min_score, max_score, log_tolerance,
# log_best_score):
# def _create_scores_mapper_(score_key, score_tolerance=None,
# max_score=None, min_score=None):
# def map_(alignment):
# def _create_best_scores_mapper(score_key, score_tolerance=None,
# max_score=None, min_score=None):
# def _create_scores_mapper(score_key, max_score=None, min_score=None):
# def _create_deepcopy_mapper():
# def map_(alignment):
# def _create_empty_filter():
# def filter_(alignment):
# def _fix_match_scores(match, score_keys):
# def _fix_match_start_end(match):
# def _fix_matches(alignment, score_keys=None):
# def _create_fix_matches_mapper():
# def covered_segments_from_match_parts(match_parts, in_query=True,
# merge_segments_closer=1):
# def elongate_match_part_till_global(match_part, query_length, subject_length,
# align_completely):
# def elongate_match_parts_till_global(match_parts, query_length,
# subject_length, align_completely):
# def _match_length(match, length_from_query):
# def _match_part_length(match_part, length_in_query):
# def _match_long_enough(match_length, total_length, min_num_residues,
# min_percentage, length_in_query):
# def _create_min_length_mapper(length_in_query, min_num_residues=None,
# min_percentage=None, filter_match_parts=False):
# def map_(alignment):
# def filter_alignments(alignments, config):
# class TextBlastParser(object):
# class TabularBlastParser(object):
# class BlastParser(object):
# class ExonerateParser(object):
# DEFAULT_TABBLAST_FORMAT = ('query', 'subject', 'identity', 'alignment_length',
# 'mismatches', 'gap_open', 'query_start',
# 'query_end', 'subject_start', 'subject_end',
# 'expect', 'score')
# MAPPER = 1
# FILTER = 2
# FILTER_COLLECTION = {'best_scores':
# {'funct_factory': _create_best_scores_mapper,
# 'kind': MAPPER},
# 'score_threshold':
# {'funct_factory': _create_scores_mapper,
# 'kind': MAPPER},
# 'min_length': {'funct_factory': _create_min_length_mapper,
# 'kind': MAPPER},
# 'deepcopy': {'funct_factory': _create_deepcopy_mapper,
# 'kind': MAPPER},
# 'fix_matches':
# {'funct_factory': _create_fix_matches_mapper,
# 'kind': MAPPER},
# 'filter_empty':
# {'funct_factory': _create_empty_filter,
# 'kind': FILTER},
# }
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
. Output only the next line. | 'identity': 100.0} |
Using the snippet: <|code_start|> self.fail('Error expected')
except CalledProcessError:
assert 'No qualities available' in open(stderr.name).read()
# bad_format_fastq
bad_fastq_fhand = _make_fhand(FASTQ + 'aklsjhdas')
fasta_out_fhand = NamedTemporaryFile()
qual_out_fhand = NamedTemporaryFile()
stderr = NamedTemporaryFile()
try:
print check_output([seqio_bin, '-o', fasta_out_fhand.name,
'-f', 'fasta', bad_fastq_fhand.name],
stderr=stderr)
self.fail('error expected')
except CalledProcessError:
assert 'Lengths of sequence and qualit' in open(stderr.name).read()
# fastq to fastq
fastq_fhand = _make_fhand(FASTQ)
fastq_fhand2 = _make_fhand(FASTQ)
fastq_out_fhand = NamedTemporaryFile()
stderr = NamedTemporaryFile()
check_output([seqio_bin, '-o', fastq_out_fhand.name, '-f',
'fastq-illumina', fastq_fhand.name, fastq_fhand2.name],
stderr=stderr)
out_fastq = open(fastq_out_fhand.name).read()
assert '+\n^^^^^\n@seq1\natcgt' in out_fastq
# test stdin
fasta_out_fhand = NamedTemporaryFile()
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import os.path
from tempfile import NamedTemporaryFile
from subprocess import check_output, CalledProcessError
from crumbs.utils.bin_utils import BIN_DIR
and context (class names, function names, or code) available:
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
. Output only the next line. | check_output([seqio_bin, '-o', fasta_out_fhand.name, '-f', 'fasta'], |
Given the code snippet: <|code_start|> for snv in snvs:
try:
self.write_snv(snv)
except IOError, error:
# The pipe could be already closed
if 'Broken pipe' in str(error):
break
else:
raise
def flush(self):
flush_fhand(self.stream)
BiallelicGts = namedtuple('BiallelicGts', ['AA', 'Aa', 'aa'])
def get_ids(snp):
if hasattr(snp, 'calls'):
ids = snp.record.ID
else:
ids = snp.ID
if ids is None:
return []
return ids.split(';')
def get_or_create_id(snp, prefix=''):
ids = get_ids(snp)
if ids:
<|code_end|>
, generate the next line using the imports in this file:
import sys
import gzip
from collections import Counter, OrderedDict, namedtuple
from operator import itemgetter
from vcf import Reader as pyvcfReader
from vcf import Writer as pyvcfWriter
from vcf.model import make_calldata_tuple
from vcf.model import _Call as pyvcfCall
from vcf.model import _Record as pyvcfRecord
from crumbs.iterutils import generate_windows
from crumbs.seq.seqio import read_seqs
from crumbs.seq.seq import get_name, get_length
from crumbs.utils.file_utils import flush_fhand
from traceback import print_exception
and context (functions, classes, or occasionally code) from other files:
# Path: crumbs/iterutils.py
# def generate_windows(size, step=None, start=0, end=None):
# if step is None:
# step = size
#
# win_start = None
# while True:
# if win_start is None:
# win_start = start
# else:
# win_start += step
# win_end = win_start + size
# if end:
# if win_end > end:
# break
# yield Window(win_start, win_end)
#
# Path: crumbs/seq/seqio.py
# def read_seqs(fhands, out_format=None, prefered_seq_classes=None):
# 'It returns a stream of seqs in different codings: seqrecords, seqitems...'
#
# if not prefered_seq_classes:
# prefered_seq_classes = [SEQITEM, SEQRECORD]
# try:
# in_format = get_format(fhands[0])
# except FileIsEmptyError:
# return []
# # seqitems is incompatible with different input and output formats
# # or when in_format != a fasta or fastq
# if ((out_format not in (None, GUESS_FORMAT) and in_format != out_format
# and SEQITEM in prefered_seq_classes) or
# (in_format not in ('fasta',) + SANGER_FASTQ_FORMATS +
# ILLUMINA_FASTQ_FORMATS)):
# prefered_seq_classes.pop(prefered_seq_classes.index(SEQITEM))
#
# if not prefered_seq_classes:
# msg = 'No valid seq class left or prefered'
# raise ValueError(msg)
#
# for seq_class in prefered_seq_classes:
# if seq_class == SEQITEM:
# try:
# return _read_seqitems(fhands)
# except NotImplementedError:
# continue
# elif seq_class == SEQRECORD:
# try:
# seqs = _read_seqrecords(fhands)
# return assing_kind_to_seqs(SEQRECORD, seqs, None)
# except NotImplementedError:
# continue
# else:
# raise ValueError('Unknown class for seq: ' + seq_class)
# raise RuntimeError('We should not be here, fixme')
#
# Path: crumbs/seq/seq.py
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_length(seq):
# return len(get_str_seq(seq))
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | return ids[0] |
Given snippet: <|code_start|>
def windows(self):
chrom_lengths = self._get_chrom_lengths()
snp_queue = self._snp_queue
for chrom, chrom_length in chrom_lengths.items():
wins = generate_windows(start=0,
size=self.win_size, step=self.win_step,
end=chrom_length + 1)
snp_queue.empty()
for win in wins:
snp_queue.pop(win.start)
if snp_queue.queue:
new_strech_start = snp_queue.queue[-1].pos + 1
else:
new_strech_start = win.start
new_snps = self._reader.fetch_snvs(chrom, new_strech_start,
win.end)
snp_queue.extend(new_snps)
if len(snp_queue.queue) >= self.min_num_snps:
yield {'chrom': chrom, 'start': win.start, 'end': win.end,
'snps': snp_queue.queue[:]}
class VCFReader(object):
def __init__(self, fhand, compressed=None, filename=None,
min_calls_for_pop_stats=DEF_MIN_CALLS_FOR_POP_STATS):
self.fhand = fhand
self.pyvcf_reader = pyvcfReader(fsock=fhand, compressed=compressed,
filename=filename)
self.min_calls_for_pop_stats = min_calls_for_pop_stats
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import gzip
from collections import Counter, OrderedDict, namedtuple
from operator import itemgetter
from vcf import Reader as pyvcfReader
from vcf import Writer as pyvcfWriter
from vcf.model import make_calldata_tuple
from vcf.model import _Call as pyvcfCall
from vcf.model import _Record as pyvcfRecord
from crumbs.iterutils import generate_windows
from crumbs.seq.seqio import read_seqs
from crumbs.seq.seq import get_name, get_length
from crumbs.utils.file_utils import flush_fhand
from traceback import print_exception
and context:
# Path: crumbs/iterutils.py
# def generate_windows(size, step=None, start=0, end=None):
# if step is None:
# step = size
#
# win_start = None
# while True:
# if win_start is None:
# win_start = start
# else:
# win_start += step
# win_end = win_start + size
# if end:
# if win_end > end:
# break
# yield Window(win_start, win_end)
#
# Path: crumbs/seq/seqio.py
# def read_seqs(fhands, out_format=None, prefered_seq_classes=None):
# 'It returns a stream of seqs in different codings: seqrecords, seqitems...'
#
# if not prefered_seq_classes:
# prefered_seq_classes = [SEQITEM, SEQRECORD]
# try:
# in_format = get_format(fhands[0])
# except FileIsEmptyError:
# return []
# # seqitems is incompatible with different input and output formats
# # or when in_format != a fasta or fastq
# if ((out_format not in (None, GUESS_FORMAT) and in_format != out_format
# and SEQITEM in prefered_seq_classes) or
# (in_format not in ('fasta',) + SANGER_FASTQ_FORMATS +
# ILLUMINA_FASTQ_FORMATS)):
# prefered_seq_classes.pop(prefered_seq_classes.index(SEQITEM))
#
# if not prefered_seq_classes:
# msg = 'No valid seq class left or prefered'
# raise ValueError(msg)
#
# for seq_class in prefered_seq_classes:
# if seq_class == SEQITEM:
# try:
# return _read_seqitems(fhands)
# except NotImplementedError:
# continue
# elif seq_class == SEQRECORD:
# try:
# seqs = _read_seqrecords(fhands)
# return assing_kind_to_seqs(SEQRECORD, seqs, None)
# except NotImplementedError:
# continue
# else:
# raise ValueError('Unknown class for seq: ' + seq_class)
# raise RuntimeError('We should not be here, fixme')
#
# Path: crumbs/seq/seq.py
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_length(seq):
# return len(get_str_seq(seq))
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
which might include code, classes, or functions. Output only the next line. | self._snpcaller = None |
Predict the next line for this snippet: <|code_start|> snvs = self.pyvcf_reader.fetch(chrom, start + 1, end=end)
except KeyError:
snvs = []
if snvs is None:
snvs = []
for snp in snvs:
snp = SNV(snp, reader=self,
min_calls_for_pop_stats=min_calls_for_pop_stats)
yield snp
def sliding_windows(self, size, step=None, ref_fhand=None,
min_num_snps=DEF_MIN_NUM_SNPS_IN_WIN):
random_snp_reader = VCFReader(open(self.fhand.name))
sliding_window = _SNPSlidingWindow(snp_reader=random_snp_reader,
win_size=size, win_step=step,
min_num_snps=min_num_snps,
ref_fhand=ref_fhand)
for window in sliding_window.windows():
yield window
@property
def snpcaller(self):
if self._snpcaller is not None:
return self._snpcaller
metadata = self.pyvcf_reader.metadata
if 'source' in metadata:
if 'VarScan2' in metadata['source']:
snpcaller = VARSCAN
<|code_end|>
with the help of current file imports:
import sys
import gzip
from collections import Counter, OrderedDict, namedtuple
from operator import itemgetter
from vcf import Reader as pyvcfReader
from vcf import Writer as pyvcfWriter
from vcf.model import make_calldata_tuple
from vcf.model import _Call as pyvcfCall
from vcf.model import _Record as pyvcfRecord
from crumbs.iterutils import generate_windows
from crumbs.seq.seqio import read_seqs
from crumbs.seq.seq import get_name, get_length
from crumbs.utils.file_utils import flush_fhand
from traceback import print_exception
and context from other files:
# Path: crumbs/iterutils.py
# def generate_windows(size, step=None, start=0, end=None):
# if step is None:
# step = size
#
# win_start = None
# while True:
# if win_start is None:
# win_start = start
# else:
# win_start += step
# win_end = win_start + size
# if end:
# if win_end > end:
# break
# yield Window(win_start, win_end)
#
# Path: crumbs/seq/seqio.py
# def read_seqs(fhands, out_format=None, prefered_seq_classes=None):
# 'It returns a stream of seqs in different codings: seqrecords, seqitems...'
#
# if not prefered_seq_classes:
# prefered_seq_classes = [SEQITEM, SEQRECORD]
# try:
# in_format = get_format(fhands[0])
# except FileIsEmptyError:
# return []
# # seqitems is incompatible with different input and output formats
# # or when in_format != a fasta or fastq
# if ((out_format not in (None, GUESS_FORMAT) and in_format != out_format
# and SEQITEM in prefered_seq_classes) or
# (in_format not in ('fasta',) + SANGER_FASTQ_FORMATS +
# ILLUMINA_FASTQ_FORMATS)):
# prefered_seq_classes.pop(prefered_seq_classes.index(SEQITEM))
#
# if not prefered_seq_classes:
# msg = 'No valid seq class left or prefered'
# raise ValueError(msg)
#
# for seq_class in prefered_seq_classes:
# if seq_class == SEQITEM:
# try:
# return _read_seqitems(fhands)
# except NotImplementedError:
# continue
# elif seq_class == SEQRECORD:
# try:
# seqs = _read_seqrecords(fhands)
# return assing_kind_to_seqs(SEQRECORD, seqs, None)
# except NotImplementedError:
# continue
# else:
# raise ValueError('Unknown class for seq: ' + seq_class)
# raise RuntimeError('We should not be here, fixme')
#
# Path: crumbs/seq/seq.py
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_length(seq):
# return len(get_str_seq(seq))
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
, which may contain function names, class names, or code. Output only the next line. | elif 'freebayes' in metadata['source'][0].lower(): |
Continue the code snippet: <|code_start|> vcf_fhand = gzip.open(self._reader.fhand.name)
for line in vcf_fhand:
line = line.strip()
if line.startswith('#'):
continue
items = line.split()
chrom = items[0]
loc = int(items[1])
if chrom not in chrom_lens:
chrom_lens[chrom] = loc
else:
if loc > chrom_lens[chrom]:
chrom_lens[chrom] = loc
else:
for read in read_seqs([self._ref_fhand]):
chrom_lens[get_name(read)] = get_length(read)
return chrom_lens
def windows(self):
chrom_lengths = self._get_chrom_lengths()
snp_queue = self._snp_queue
for chrom, chrom_length in chrom_lengths.items():
wins = generate_windows(start=0,
size=self.win_size, step=self.win_step,
end=chrom_length + 1)
snp_queue.empty()
for win in wins:
snp_queue.pop(win.start)
if snp_queue.queue:
<|code_end|>
. Use current file imports:
import sys
import gzip
from collections import Counter, OrderedDict, namedtuple
from operator import itemgetter
from vcf import Reader as pyvcfReader
from vcf import Writer as pyvcfWriter
from vcf.model import make_calldata_tuple
from vcf.model import _Call as pyvcfCall
from vcf.model import _Record as pyvcfRecord
from crumbs.iterutils import generate_windows
from crumbs.seq.seqio import read_seqs
from crumbs.seq.seq import get_name, get_length
from crumbs.utils.file_utils import flush_fhand
from traceback import print_exception
and context (classes, functions, or code) from other files:
# Path: crumbs/iterutils.py
# def generate_windows(size, step=None, start=0, end=None):
# if step is None:
# step = size
#
# win_start = None
# while True:
# if win_start is None:
# win_start = start
# else:
# win_start += step
# win_end = win_start + size
# if end:
# if win_end > end:
# break
# yield Window(win_start, win_end)
#
# Path: crumbs/seq/seqio.py
# def read_seqs(fhands, out_format=None, prefered_seq_classes=None):
# 'It returns a stream of seqs in different codings: seqrecords, seqitems...'
#
# if not prefered_seq_classes:
# prefered_seq_classes = [SEQITEM, SEQRECORD]
# try:
# in_format = get_format(fhands[0])
# except FileIsEmptyError:
# return []
# # seqitems is incompatible with different input and output formats
# # or when in_format != a fasta or fastq
# if ((out_format not in (None, GUESS_FORMAT) and in_format != out_format
# and SEQITEM in prefered_seq_classes) or
# (in_format not in ('fasta',) + SANGER_FASTQ_FORMATS +
# ILLUMINA_FASTQ_FORMATS)):
# prefered_seq_classes.pop(prefered_seq_classes.index(SEQITEM))
#
# if not prefered_seq_classes:
# msg = 'No valid seq class left or prefered'
# raise ValueError(msg)
#
# for seq_class in prefered_seq_classes:
# if seq_class == SEQITEM:
# try:
# return _read_seqitems(fhands)
# except NotImplementedError:
# continue
# elif seq_class == SEQRECORD:
# try:
# seqs = _read_seqrecords(fhands)
# return assing_kind_to_seqs(SEQRECORD, seqs, None)
# except NotImplementedError:
# continue
# else:
# raise ValueError('Unknown class for seq: ' + seq_class)
# raise RuntimeError('We should not be here, fixme')
#
# Path: crumbs/seq/seq.py
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_length(seq):
# return len(get_str_seq(seq))
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | new_strech_start = snp_queue.queue[-1].pos + 1 |
Given the following code snippet before the placeholder: <|code_start|>
def _get_chrom_lengths(self):
chrom_lens = OrderedDict()
if self._ref_fhand is None:
vcf_fhand = gzip.open(self._reader.fhand.name)
for line in vcf_fhand:
line = line.strip()
if line.startswith('#'):
continue
items = line.split()
chrom = items[0]
loc = int(items[1])
if chrom not in chrom_lens:
chrom_lens[chrom] = loc
else:
if loc > chrom_lens[chrom]:
chrom_lens[chrom] = loc
else:
for read in read_seqs([self._ref_fhand]):
chrom_lens[get_name(read)] = get_length(read)
return chrom_lens
def windows(self):
chrom_lengths = self._get_chrom_lengths()
snp_queue = self._snp_queue
for chrom, chrom_length in chrom_lengths.items():
wins = generate_windows(start=0,
size=self.win_size, step=self.win_step,
end=chrom_length + 1)
<|code_end|>
, predict the next line using imports from the current file:
import sys
import gzip
from collections import Counter, OrderedDict, namedtuple
from operator import itemgetter
from vcf import Reader as pyvcfReader
from vcf import Writer as pyvcfWriter
from vcf.model import make_calldata_tuple
from vcf.model import _Call as pyvcfCall
from vcf.model import _Record as pyvcfRecord
from crumbs.iterutils import generate_windows
from crumbs.seq.seqio import read_seqs
from crumbs.seq.seq import get_name, get_length
from crumbs.utils.file_utils import flush_fhand
from traceback import print_exception
and context including class names, function names, and sometimes code from other files:
# Path: crumbs/iterutils.py
# def generate_windows(size, step=None, start=0, end=None):
# if step is None:
# step = size
#
# win_start = None
# while True:
# if win_start is None:
# win_start = start
# else:
# win_start += step
# win_end = win_start + size
# if end:
# if win_end > end:
# break
# yield Window(win_start, win_end)
#
# Path: crumbs/seq/seqio.py
# def read_seqs(fhands, out_format=None, prefered_seq_classes=None):
# 'It returns a stream of seqs in different codings: seqrecords, seqitems...'
#
# if not prefered_seq_classes:
# prefered_seq_classes = [SEQITEM, SEQRECORD]
# try:
# in_format = get_format(fhands[0])
# except FileIsEmptyError:
# return []
# # seqitems is incompatible with different input and output formats
# # or when in_format != a fasta or fastq
# if ((out_format not in (None, GUESS_FORMAT) and in_format != out_format
# and SEQITEM in prefered_seq_classes) or
# (in_format not in ('fasta',) + SANGER_FASTQ_FORMATS +
# ILLUMINA_FASTQ_FORMATS)):
# prefered_seq_classes.pop(prefered_seq_classes.index(SEQITEM))
#
# if not prefered_seq_classes:
# msg = 'No valid seq class left or prefered'
# raise ValueError(msg)
#
# for seq_class in prefered_seq_classes:
# if seq_class == SEQITEM:
# try:
# return _read_seqitems(fhands)
# except NotImplementedError:
# continue
# elif seq_class == SEQRECORD:
# try:
# seqs = _read_seqrecords(fhands)
# return assing_kind_to_seqs(SEQRECORD, seqs, None)
# except NotImplementedError:
# continue
# else:
# raise ValueError('Unknown class for seq: ' + seq_class)
# raise RuntimeError('We should not be here, fixme')
#
# Path: crumbs/seq/seq.py
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_length(seq):
# return len(get_str_seq(seq))
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | snp_queue.empty() |
Given the following code snippet before the placeholder: <|code_start|># This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
IS_PAIRED = SAM_FLAG_BITS['is_paired']
IS_IN_PROPER_PAIR = SAM_FLAG_BITS['is_in_proper_pair']
class FlagTests(unittest.TestCase):
def test_create_flag(self):
assert bit_tags_to_int_flag([IS_PAIRED]) == 1
assert bit_tags_to_int_flag([IS_IN_PROPER_PAIR]) == 2
assert bit_tags_to_int_flag([IS_PAIRED, IS_IN_PROPER_PAIR]) == 3
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from crumbs.bam.flag import (bit_tags_to_int_flag, int_flag_to_bit_tags,
SAM_FLAG_BITS)
and context including class names, function names, and sometimes code from other files:
# Path: crumbs/bam/flag.py
# def bit_tags_to_int_flag(bit_tags):
# 'It returns the integer corresponding to the given list of tags'
# return reduce(or_, bit_tags)
#
# def int_flag_to_bit_tags(flag):
# 'It returns a list with the indexes of the bits set to 1 in the given flag'
# return [num for num in SAM_FLAG_BINARIES if num & flag]
#
# SAM_FLAG_BITS = {
# 'is_paired': 0x0001, # the read is paired in sequencing
# 'is_in_proper_pair': 0x0002, # the read is mapped in a proper pair
# 'is_unmapped': 0x0004, # the query sequence itself is unmapped
# 'mate_is_unmapped': 0x0008, # the mate is unmapped
# 'strand': 0x0010, # strand of the query (1 for reverse)
# 'mate_strand': 0x0020, # strand of the mate
# 'is_first_in_pair': 0x0040, # the read is the first read in a pair
# 'is_second_in_pair': 0x0080, # the read is the second read in a pair
# 'is_not_primary': 0x0100, # the alignment is not primary
# 'failed_quality': 0x0200, # the read fails platform/vendor quality checks
# 'is_duplicate': 0x0400, # the read is either a PCR or an optical duplicate
# }
. Output only the next line. | def test_flag_to_binary(self): |
Given the code snippet: <|code_start|># This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
IS_PAIRED = SAM_FLAG_BITS['is_paired']
IS_IN_PROPER_PAIR = SAM_FLAG_BITS['is_in_proper_pair']
class FlagTests(unittest.TestCase):
def test_create_flag(self):
assert bit_tags_to_int_flag([IS_PAIRED]) == 1
assert bit_tags_to_int_flag([IS_IN_PROPER_PAIR]) == 2
assert bit_tags_to_int_flag([IS_PAIRED, IS_IN_PROPER_PAIR]) == 3
<|code_end|>
, generate the next line using the imports in this file:
import unittest
from crumbs.bam.flag import (bit_tags_to_int_flag, int_flag_to_bit_tags,
SAM_FLAG_BITS)
and context (functions, classes, or occasionally code) from other files:
# Path: crumbs/bam/flag.py
# def bit_tags_to_int_flag(bit_tags):
# 'It returns the integer corresponding to the given list of tags'
# return reduce(or_, bit_tags)
#
# def int_flag_to_bit_tags(flag):
# 'It returns a list with the indexes of the bits set to 1 in the given flag'
# return [num for num in SAM_FLAG_BINARIES if num & flag]
#
# SAM_FLAG_BITS = {
# 'is_paired': 0x0001, # the read is paired in sequencing
# 'is_in_proper_pair': 0x0002, # the read is mapped in a proper pair
# 'is_unmapped': 0x0004, # the query sequence itself is unmapped
# 'mate_is_unmapped': 0x0008, # the mate is unmapped
# 'strand': 0x0010, # strand of the query (1 for reverse)
# 'mate_strand': 0x0020, # strand of the mate
# 'is_first_in_pair': 0x0040, # the read is the first read in a pair
# 'is_second_in_pair': 0x0080, # the read is the second read in a pair
# 'is_not_primary': 0x0100, # the alignment is not primary
# 'failed_quality': 0x0200, # the read fails platform/vendor quality checks
# 'is_duplicate': 0x0400, # the read is either a PCR or an optical duplicate
# }
. Output only the next line. | def test_flag_to_binary(self): |
Next line prediction: <|code_start|>
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
IS_PAIRED = SAM_FLAG_BITS['is_paired']
IS_IN_PROPER_PAIR = SAM_FLAG_BITS['is_in_proper_pair']
class FlagTests(unittest.TestCase):
def test_create_flag(self):
assert bit_tags_to_int_flag([IS_PAIRED]) == 1
assert bit_tags_to_int_flag([IS_IN_PROPER_PAIR]) == 2
assert bit_tags_to_int_flag([IS_PAIRED, IS_IN_PROPER_PAIR]) == 3
def test_flag_to_binary(self):
assert not int_flag_to_bit_tags(0)
assert IS_PAIRED in int_flag_to_bit_tags(1)
assert IS_IN_PROPER_PAIR in int_flag_to_bit_tags(2)
assert IS_PAIRED in int_flag_to_bit_tags(1 | 2)
assert IS_IN_PROPER_PAIR in int_flag_to_bit_tags(1 | 2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'ComplexityFilterTest']
<|code_end|>
. Use current file imports:
(import unittest
from crumbs.bam.flag import (bit_tags_to_int_flag, int_flag_to_bit_tags,
SAM_FLAG_BITS))
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/bam/flag.py
# def bit_tags_to_int_flag(bit_tags):
# 'It returns the integer corresponding to the given list of tags'
# return reduce(or_, bit_tags)
#
# def int_flag_to_bit_tags(flag):
# 'It returns a list with the indexes of the bits set to 1 in the given flag'
# return [num for num in SAM_FLAG_BINARIES if num & flag]
#
# SAM_FLAG_BITS = {
# 'is_paired': 0x0001, # the read is paired in sequencing
# 'is_in_proper_pair': 0x0002, # the read is mapped in a proper pair
# 'is_unmapped': 0x0004, # the query sequence itself is unmapped
# 'mate_is_unmapped': 0x0008, # the mate is unmapped
# 'strand': 0x0010, # strand of the query (1 for reverse)
# 'mate_strand': 0x0020, # strand of the mate
# 'is_first_in_pair': 0x0040, # the read is the first read in a pair
# 'is_second_in_pair': 0x0080, # the read is the second read in a pair
# 'is_not_primary': 0x0100, # the alignment is not primary
# 'failed_quality': 0x0200, # the read fails platform/vendor quality checks
# 'is_duplicate': 0x0400, # the read is either a PCR or an optical duplicate
# }
. Output only the next line. | unittest.main() |
Continue the code snippet: <|code_start|>
def uppercase_length(string):
'It returns the number of uppercase characters found in the string'
return len(re.findall("[A-Z]", string))
def get_uppercase_segments(string):
'''It detects the unmasked regions of a sequence
It returns a list of (start, end) tuples'''
start = 0
for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
group = list(group)
end = start + len(group) - 1
if is_upper:
yield start, end
start = end + 1
class ChangeCase(object):
'It changes the sequence case.'
def __init__(self, action):
'The initiator'
if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
msg = 'Action should be: uppercase, lowercase or invertcase'
raise ValueError(msg)
self.action = action
def __call__(self, seqs):
<|code_end|>
. Use current file imports:
import re
import itertools
from multiprocessing import Pool
from crumbs.utils.tags import UPPERCASE, LOWERCASE, SWAPCASE
from crumbs.seq.seq import get_description, get_name, get_str_seq, copy_seq
and context (classes, functions, or code) from other files:
# Path: crumbs/utils/tags.py
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SWAPCASE = 'swap'
#
# Path: crumbs/seq/seq.py
# def get_description(seq):
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# title_items = seq.lines[0].split(' ', 1)
# desc = title_items[1] if len(title_items) == 2 else None
# elif seq_class == SEQRECORD:
# desc = seq.description
# if desc == '<unknown description>': # BioPython default
# return None
# return desc
#
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# def copy_seq(seqwrapper, seq=None, name=None):
# seq_class = seqwrapper.kind
# seq_obj = seqwrapper.object
# if seq_class == SEQITEM:
# seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
# elif seq_class == SEQRECORD:
# seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
# seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
# file_format=seqwrapper.file_format)
# return seq
. Output only the next line. | 'It changes the case of the seqrecords.' |
Predict the next line for this snippet: <|code_start|> desc = ''
desc += text
seqrecord.object.description = desc
class _FunctionRunner(object):
'a class to join all the mapper functions in a single function'
def __init__(self, map_functions):
'Class initiator'
self.map_functions = map_functions
def __call__(self, seq_packet):
'It runs all the map_functions for each seq_packet '
processed_packet = seq_packet
for map_function in self.map_functions:
processed_packet = map_function(processed_packet)
return processed_packet
def process_seq_packets(seq_packets, map_functions, processes=1,
keep_order=True):
'It processes the SeqRecord packets'
if processes > 1:
workers = Pool(processes=processes)
mapper = workers.imap if keep_order else workers.imap_unordered
else:
workers = None
mapper = itertools.imap
run_functions = _FunctionRunner(map_functions)
<|code_end|>
with the help of current file imports:
import re
import itertools
from multiprocessing import Pool
from crumbs.utils.tags import UPPERCASE, LOWERCASE, SWAPCASE
from crumbs.seq.seq import get_description, get_name, get_str_seq, copy_seq
and context from other files:
# Path: crumbs/utils/tags.py
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SWAPCASE = 'swap'
#
# Path: crumbs/seq/seq.py
# def get_description(seq):
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# title_items = seq.lines[0].split(' ', 1)
# desc = title_items[1] if len(title_items) == 2 else None
# elif seq_class == SEQRECORD:
# desc = seq.description
# if desc == '<unknown description>': # BioPython default
# return None
# return desc
#
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# def copy_seq(seqwrapper, seq=None, name=None):
# seq_class = seqwrapper.kind
# seq_obj = seqwrapper.object
# if seq_class == SEQITEM:
# seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
# elif seq_class == SEQRECORD:
# seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
# seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
# file_format=seqwrapper.file_format)
# return seq
, which may contain function names, class names, or code. Output only the next line. | seq_packets = mapper(run_functions, seq_packets) |
Next line prediction: <|code_start|>
def uppercase_length(string):
'It returns the number of uppercase characters found in the string'
return len(re.findall("[A-Z]", string))
def get_uppercase_segments(string):
'''It detects the unmasked regions of a sequence
It returns a list of (start, end) tuples'''
start = 0
for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
group = list(group)
end = start + len(group) - 1
if is_upper:
yield start, end
start = end + 1
class ChangeCase(object):
'It changes the sequence case.'
def __init__(self, action):
'The initiator'
if action not in (UPPERCASE, LOWERCASE, SWAPCASE):
msg = 'Action should be: uppercase, lowercase or invertcase'
raise ValueError(msg)
self.action = action
<|code_end|>
. Use current file imports:
(import re
import itertools
from multiprocessing import Pool
from crumbs.utils.tags import UPPERCASE, LOWERCASE, SWAPCASE
from crumbs.seq.seq import get_description, get_name, get_str_seq, copy_seq)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/utils/tags.py
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SWAPCASE = 'swap'
#
# Path: crumbs/seq/seq.py
# def get_description(seq):
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# title_items = seq.lines[0].split(' ', 1)
# desc = title_items[1] if len(title_items) == 2 else None
# elif seq_class == SEQRECORD:
# desc = seq.description
# if desc == '<unknown description>': # BioPython default
# return None
# return desc
#
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# def copy_seq(seqwrapper, seq=None, name=None):
# seq_class = seqwrapper.kind
# seq_obj = seqwrapper.object
# if seq_class == SEQITEM:
# seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
# elif seq_class == SEQRECORD:
# seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
# seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
# file_format=seqwrapper.file_format)
# return seq
. Output only the next line. | def __call__(self, seqs): |
Next line prediction: <|code_start|> str_seq = str_seq.lower()
elif action == SWAPCASE:
str_seq = str_seq.swapcase()
else:
raise NotImplementedError()
seq = copy_seq(seq, seq=str_seq)
processed_seqs.append(seq)
return processed_seqs
def append_to_description(seqrecord, text):
'it appends the text to the seqrecord description'
desc = get_description(seqrecord)
if desc in (None, get_name(seqrecord), '<unknown description>'):
desc = ''
desc += text
seqrecord.object.description = desc
class _FunctionRunner(object):
'a class to join all the mapper functions in a single function'
def __init__(self, map_functions):
'Class initiator'
self.map_functions = map_functions
def __call__(self, seq_packet):
'It runs all the map_functions for each seq_packet '
processed_packet = seq_packet
for map_function in self.map_functions:
processed_packet = map_function(processed_packet)
<|code_end|>
. Use current file imports:
(import re
import itertools
from multiprocessing import Pool
from crumbs.utils.tags import UPPERCASE, LOWERCASE, SWAPCASE
from crumbs.seq.seq import get_description, get_name, get_str_seq, copy_seq)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/utils/tags.py
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SWAPCASE = 'swap'
#
# Path: crumbs/seq/seq.py
# def get_description(seq):
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# title_items = seq.lines[0].split(' ', 1)
# desc = title_items[1] if len(title_items) == 2 else None
# elif seq_class == SEQRECORD:
# desc = seq.description
# if desc == '<unknown description>': # BioPython default
# return None
# return desc
#
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# def copy_seq(seqwrapper, seq=None, name=None):
# seq_class = seqwrapper.kind
# seq_obj = seqwrapper.object
# if seq_class == SEQITEM:
# seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
# elif seq_class == SEQRECORD:
# seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
# seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
# file_format=seqwrapper.file_format)
# return seq
. Output only the next line. | return processed_packet |
Continue the code snippet: <|code_start|> desc = ''
desc += text
seqrecord.object.description = desc
class _FunctionRunner(object):
'a class to join all the mapper functions in a single function'
def __init__(self, map_functions):
'Class initiator'
self.map_functions = map_functions
def __call__(self, seq_packet):
'It runs all the map_functions for each seq_packet '
processed_packet = seq_packet
for map_function in self.map_functions:
processed_packet = map_function(processed_packet)
return processed_packet
def process_seq_packets(seq_packets, map_functions, processes=1,
keep_order=True):
'It processes the SeqRecord packets'
if processes > 1:
workers = Pool(processes=processes)
mapper = workers.imap if keep_order else workers.imap_unordered
else:
workers = None
mapper = itertools.imap
run_functions = _FunctionRunner(map_functions)
<|code_end|>
. Use current file imports:
import re
import itertools
from multiprocessing import Pool
from crumbs.utils.tags import UPPERCASE, LOWERCASE, SWAPCASE
from crumbs.seq.seq import get_description, get_name, get_str_seq, copy_seq
and context (classes, functions, or code) from other files:
# Path: crumbs/utils/tags.py
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SWAPCASE = 'swap'
#
# Path: crumbs/seq/seq.py
# def get_description(seq):
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# title_items = seq.lines[0].split(' ', 1)
# desc = title_items[1] if len(title_items) == 2 else None
# elif seq_class == SEQRECORD:
# desc = seq.description
# if desc == '<unknown description>': # BioPython default
# return None
# return desc
#
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# def copy_seq(seqwrapper, seq=None, name=None):
# seq_class = seqwrapper.kind
# seq_obj = seqwrapper.object
# if seq_class == SEQITEM:
# seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
# elif seq_class == SEQRECORD:
# seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
# seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
# file_format=seqwrapper.file_format)
# return seq
. Output only the next line. | seq_packets = mapper(run_functions, seq_packets) |
Given snippet: <|code_start|>
# pylint: disable=R0903
# pylint: disable=C0111
def uppercase_length(string):
'It returns the number of uppercase characters found in the string'
return len(re.findall("[A-Z]", string))
def get_uppercase_segments(string):
'''It detects the unmasked regions of a sequence
It returns a list of (start, end) tuples'''
start = 0
for is_upper, group in itertools.groupby(string, lambda x: x.isupper()):
group = list(group)
end = start + len(group) - 1
if is_upper:
yield start, end
start = end + 1
class ChangeCase(object):
'It changes the sequence case.'
def __init__(self, action):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import itertools
from multiprocessing import Pool
from crumbs.utils.tags import UPPERCASE, LOWERCASE, SWAPCASE
from crumbs.seq.seq import get_description, get_name, get_str_seq, copy_seq
and context:
# Path: crumbs/utils/tags.py
# UPPERCASE = 'upper'
#
# LOWERCASE = 'lower'
#
# SWAPCASE = 'swap'
#
# Path: crumbs/seq/seq.py
# def get_description(seq):
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# title_items = seq.lines[0].split(' ', 1)
# desc = title_items[1] if len(title_items) == 2 else None
# elif seq_class == SEQRECORD:
# desc = seq.description
# if desc == '<unknown description>': # BioPython default
# return None
# return desc
#
# def get_name(seq):
# if 'SeqRecord' in seq.__class__.__name__:
# seq_class = SEQRECORD
# else:
# seq_class = seq.kind
# seq = seq.object
# if seq_class == SEQITEM:
# name = seq.name
# elif seq_class == SEQRECORD:
# name = seq.id
# return name
#
# def get_str_seq(seq):
# seq_class = seq.kind
# if seq_class == SEQITEM:
# seq = seq.object.lines[1].strip()
# elif seq_class == SEQRECORD:
# seq = str(seq.object.seq)
# return seq.strip()
#
# def copy_seq(seqwrapper, seq=None, name=None):
# seq_class = seqwrapper.kind
# seq_obj = seqwrapper.object
# if seq_class == SEQITEM:
# seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
# elif seq_class == SEQRECORD:
# seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
# seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
# file_format=seqwrapper.file_format)
# return seq
which might include code, classes, or functions. Output only the next line. | 'The initiator' |
Based on the snippet: <|code_start|># Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand
and context (classes, functions, sometimes code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@ |
Based on the snippet: <|code_start|># it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@
@CUESXEL822 2:Y:18:ATCACG
TCATTACGTAGCTCCGGCTCCGCCATGTCTGTTCCTTC
+
CG?BEGGGGFGGGGGGGGGGGGGGGGBGGGA<EE=515
@CUESXEL824 1:Y:18:ATCACG
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand
and context (classes, functions, sometimes code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | GATTGAAGCTCCAAACCGCCATGTTCACCACCGCAAGC |
Given snippet: <|code_start|># Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@
@CUESXEL822 2:Y:18:ATCACG
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand
and context:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
which might include code, classes, or functions. Output only the next line. | TCATTACGTAGCTCCGGCTCCGCCATGTCTGTTCCTTC |
Based on the snippet: <|code_start|># Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@
@CUESXEL822 2:Y:18:ATCACG
TCATTACGTAGCTCCGGCTCCGCCATGTCTGTTCCTTC
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand
and context (classes, functions, sometimes code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | + |
Next line prediction: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@
@CUESXEL822 2:Y:18:ATCACG
TCATTACGTAGCTCCGGCTCCGCCATGTCTGTTCCTTC
+
CG?BEGGGGFGGGGGGGGGGGGGGGGBGGGA<EE=515
@CUESXEL824 1:Y:18:ATCACG
GATTGAAGCTCCAAACCGCCATGTTCACCACCGCAAGC
+
HHGEHD8EEHHHDGHHHHHHHHHCEHHHHDHHHHEHHH
@CUESXEL824 2:Y:18:ATCACG
TGCTTGCTGCACTTTGATGTTATTATCTGTGTTGTGTT
+
<|code_end|>
. Use current file imports:
(import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand)
and context including class names, function names, or small code snippets from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | AA=AF7CDEDAFFDF@5D>D;FCF;GGGDGGEGGGFGE |
Given snippet: <|code_start|># Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand
and context:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
which might include code, classes, or functions. Output only the next line. | AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@ |
Given the code snippet: <|code_start|># Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@
@CUESXEL822 2:Y:18:ATCACG
TCATTACGTAGCTCCGGCTCCGCCATGTCTGTTCCTTC
<|code_end|>
, generate the next line using the imports in this file:
import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand
and context (functions, classes, or occasionally code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | + |
Continue the code snippet: <|code_start|># but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=W0402
# pylint: disable=C0111
FASTQ_NO_DUPS1 = '''@CUESXEL822 1:Y:18:ATCACG
TAATACACCCAGTCTCAATTCCATCCTGGGAACTAAGT
+
AEGDFG5GGEGGF;EGD=D@>GCCGFFGGGCECFE:D@
@CUESXEL822 2:Y:18:ATCACG
TCATTACGTAGCTCCGGCTCCGCCATGTCTGTTCCTTC
+
CG?BEGGGGFGGGGGGGGGGGGGGGGBGGGA<EE=515
@CUESXEL824 1:Y:18:ATCACG
GATTGAAGCTCCAAACCGCCATGTTCACCACCGCAAGC
+
HHGEHD8EEHHHDGHHHHHHHHHCEHHHHDHHHHEHHH
@CUESXEL824 2:Y:18:ATCACG
TGCTTGCTGCACTTTGATGTTATTATCTGTGTTGTGTT
<|code_end|>
. Use current file imports:
import unittest
import os
from subprocess import check_output
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from crumbs.seq.seq import SeqWrapper, SeqItem
from crumbs.utils.tags import SEQITEM
from crumbs.seq.bulk_filters import (filter_duplicates, _read_pairs,
_seqitem_pairs_equal)
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.exceptions import UndecidedFastqVersionError
from crumbs.utils.file_utils import flush_fhand
and context (classes, functions, or code) from other files:
# Path: crumbs/seq/seq.py
# class SeqItem(_SeqItem):
# def __new__(cls, name, lines, annotations=None):
# def get_title(seq):
# def get_description(seq):
# def get_name(seq):
# def get_file_format(seq):
# def _break():
# def _is_fastq_plus_line(line, seq_name):
# def _get_seqitem_quals(seq):
# def get_str_seq(seq):
# def get_length(seq):
# def _get_seqitem_qualities(seqwrap):
# def get_int_qualities(seq):
# def _int_quals_to_str_quals(int_quals, out_format):
# def get_str_qualities(seq, out_format=None):
# def get_annotations(seq):
# def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
# def _copy_seqitem(seqwrapper, seq=None, name=None):
# def copy_seq(seqwrapper, seq=None, name=None):
# def _slice_seqitem(seqwrap, start, stop):
# def slice_seq(seq, start=None, stop=None):
# def assing_kind_to_seqs(kind, seqs, file_format):
# SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
# ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
# SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
# ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
#
# Path: crumbs/utils/tags.py
# SEQITEM = 'seqitem'
#
# Path: crumbs/seq/bulk_filters.py
# def filter_duplicates(in_fhands, out_fhand, paired_reads, use_length=None,
# n_seqs_packet=None, tempdir=None):
# if not in_fhands:
# raise ValueError('At least one input fhand is required')
# pairs = _read_pairs(in_fhands, paired_reads)
# get_pair_key = _PairKeyGetter(use_length=use_length)
# if n_seqs_packet is None:
# unique_pairs = unique_unordered(pairs, key=get_pair_key)
# else:
# sorted_pairs = sorted_items(pairs, key=get_pair_key, tempdir=tempdir,
# max_items_in_memory=n_seqs_packet)
# unique_pairs = unique(sorted_pairs, key=get_pair_key)
# for pair in unique_pairs:
# write_seqs(pair, out_fhand)
#
# def _read_pairs(in_fhands, paired_reads):
# seqs = read_seqs(in_fhands, prefered_seq_classes=[SEQITEM])
# if paired_reads:
# pairs = group_pairs_by_name(seqs)
# else:
# pairs = group_pairs(seqs, n_seqs_in_pair=1)
# return pairs
#
# def _seqitem_pairs_equal(pair1, pair2):
# if len(pair1) != len(pair2):
# return False
# else:
# for read1, read2 in zip(pair1, pair2):
# if not get_str_seq(read1) == get_str_seq(read2):
# return False
# return True
#
# Path: crumbs/utils/bin_utils.py
# BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
# 'bin'))
#
# Path: crumbs/utils/test_utils.py
# TEST_DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
# '..', 'test', 'test_data'))
#
# Path: crumbs/exceptions.py
# class UndecidedFastqVersionError(Exception):
# 'The file is Fastq, but the version is difficult to guess'
# pass
#
# Path: crumbs/utils/file_utils.py
# def flush_fhand(fhand):
# try:
# fhand.flush()
# except IOError, error:
# # The pipe could be already closed
# if 'Broken pipe' not in str(error):
# raise
. Output only the next line. | + |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.