id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
13,516
from minichain import show, prompt, OpenAI, Python, GradioConf import gradio as gr def math_prompt(model, question ): "Prompt to call GPT with a Jinja template" return model(dict(question=question)) def python(model, code): "Prompt to call Python interpreter" code = "\n".join(code.strip().split("\n")[1:-1]) return model(dict(code=code)) The provided code snippet includes necessary dependencies for implementing the `math_demo` function. Write a Python function `def math_demo(question)` to solve the following problem: Chain them together Here is the function: def math_demo(question): "Chain them together" return python(math_prompt(question))
Chain them together
13,517
import trio from minichain import TemplatePrompt, show_log, start_chain The provided code snippet includes necessary dependencies for implementing the `chunk` function. Write a Python function `def chunk(f, width=4000, overlap=800)` to solve the following problem: Split a documents into 4800 character overlapping chunks Here is the function: def chunk(f, width=4000, overlap=800): "Split a documents into 4800 character overlapping chunks" text = open(f).read().replace("\n\n", "\n") chunks = [] for i in range(4): if i * width > len(text): break chunks.append({"text": text[i * width : (i + 1) * width + overlap]}) return chunks
Split a documents into 4800 character overlapping chunks
13,518
from dataclasses import dataclass, replace from typing import Optional from minichain import prompt, show, OpenAI, Google, transform class State: question: str history: str = "" next_query: Optional[str] = None final_answer: Optional[str] = None template_file = "selfask.pmpt.tpl") def self_ask(model, state): return model(state) def next_step(ask): res = ask.split(":", 1)[1] if out.startswith("Follow up:"): return replace(state, next_query=res) elif out.startswith("So the final answer is:"): return replace(state, final_answer=res) def google(model, state): if state.next_query is None: return "" return model(state.next_query) def update(state, result): if not result: return state return State(state.question, state.history + "\nIntermediate answer: " + result + "\n") def selfask(question): state = State(question) for i in range(3): state = next_step(self_ask(state)) state = update(google(state)) return state
null
13,519
from minichain import prompt, show, OpenAI, transform from dataclasses import dataclass, is_dataclass, fields from typing import List, Type, Dict, Any, get_origin, get_args from enum import Enum from jinja2 import select_autoescape, FileSystemLoader, Environment import json def type_to_prompt(out: type) -> str: tmp = env.get_template("type_prompt.pmpt.tpl") d = walk(out) return tmp.render({"typ": d}) class Player: player: str stats: List[Stat] def stats(model, passage): return model.stream(dict(passage=passage, typ=type_to_prompt(Player)))
null
13,520
from minichain import prompt, show, OpenAI, transform from dataclasses import dataclass, is_dataclass, fields from typing import List, Type, Dict, Any, get_origin, get_args from enum import Enum from jinja2 import select_autoescape, FileSystemLoader, Environment import json class Player: player: str stats: List[Stat] def to_data(s:str): return [Player(**j) for j in json.loads(s)]
null
13,521
import datasets import numpy as np from minichain import prompt, show, HuggingFaceEmbed, OpenAI, transform def embed(model, inp): return model(inp) def get_neighbors(embedding, k=1): res = gatsby.get_nearest_examples("embeddings", np.array(embedding), k) return res.examples["passages"] def ask(model, query, neighbors): return model(dict(question=query, docs=neighbors)) def gatsby_q(query): n = get_neighbors(embed(query)) return ask(query, n)
null
13,522
import pandas as pd from minichain import prompt, Mock, show, OpenAI, GradioConf import minichain import json import gradio as gr import requests names = { '3-pointer percentage': 'FG3_PCT', '3-pointers attempted': 'FG3A', '3-pointers made': 'FG3M', 'Assists': 'AST', 'Blocks': 'BLK', 'Field goal percentage': 'FG_PCT', 'Field goals attempted': 'FGA', 'Field goals made': 'FGM', 'Free throw percentage': 'FT_PCT', 'Free throws attempted': 'FTA', 'Free throws made': 'FTM', 'Minutes played': 'MIN', 'Personal fouls': 'PF', 'Points': 'PTS', 'Rebounds': 'REB', 'Rebounds (Defensive)': 'DREB', 'Rebounds (Offensive)': 'OREB', 'Steals': 'STL', 'Turnovers': 'TO' } import os def to_df(d): players = {player for v in d.values() if v is not None for player, _ in v.items()} lookup = {k: {a: b for a, b in v.items()} for k,v in d.items()} rows = [dict(**{"player": p}, **{k: "_" if p not in lookup.get(k, []) else lookup[k][p] for k in names.keys()}) for p in players] return pd.DataFrame.from_dict(rows).astype("str").sort_values(axis=0, by="player", ignore_index=True).transpose()
null
13,523
import pandas as pd from minichain import prompt, Mock, show, OpenAI, GradioConf import minichain import json import gradio as gr import requests import os def make_html(out): return "<table><tr><td>" + out.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>") + "</td></td></table>"
null
13,524
import pandas as pd from minichain import prompt, Mock, show, OpenAI, GradioConf import minichain import json import gradio as gr import requests def extract(model, passage, typ): return model(dict(player_keys=names.items(), examples=examples, passage=passage, type=typ)) import os def run(query): return extract(query, "Player")
null
13,525
from minichain import Id, prompt, OpenAI, show, transform, Mock, Break from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool, ImageToMusicTool def agent(model, query, history): return model(dict(tools=[(str(tool.__class__.__name__), tool.description) for tool in tools], input=query, agent_scratchpad=history )) def tool_parse(out): lines = out.split("\n") if lines[0].split("?")[-1].strip() == "Yes": tool = lines[1].split(":", 1)[-1].strip() command = lines[2].split(":", 1)[-1].strip() return tool, command else: return Break() def tool_use(model, usage): selector, command = usage for i, tool in enumerate(tools): if selector == tool.__class__.__name__: return model(command, tool_num=i) return ("",) def append(history, new, observation): return history + "\n" + new + "Observation: " + observation def run(query): history = "" observations = [] for i in range(3): select_input = agent(query, history) observations.append(tool_use(tool_parse(select_input))) history = append(history, select_input, observations[i]) return observations[-1]
null
13,526
import datasets import numpy as np from minichain import prompt, transform, show, OpenAIEmbed, OpenAI from manifest import Manifest def embed(model, inp): return model(inp) def get_neighbors(inp, k): res = olympics.get_nearest_examples("embeddings", np.array(inp), k) return res.examples["content"] def get_result(model, query, neighbors): return model(dict(question=query, docs=neighbors)) def qa(query): n = get_neighbors(embed(query), 3) return get_result(query, n)
null
13,527
from minichain import prompt, show, GradioConf, OpenAI, Python import gradio as gr def pal_prompt(model, question): def python(model, inp): def pal(question): return python(pal_prompt(question))
null
13,528
from minichain import show, prompt, OpenAI, GradioConf import gradio as gr from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool def picture(model, query): return model(query) gradio_conf=GradioConf( block_output= lambda: gr.Image(), block_input= lambda: gr.Textbox(show_label=False))) def gen(model, query): return model(query) gradio_conf=GradioConf( block_input= lambda: gr.Image(), block_output=lambda: gr.Textbox(show_label=False))) def caption(model, img_src): return model(img_src) def gradio_example(query): return caption(gen(picture(query)))
null
13,529
from minichain import show, prompt, OpenAI, Bash def cli_prompt(model, query): def bash_run(model, x): def bash(query): return bash_run(cli_prompt(query))
null
13,530
from minichain import prompt, transform, show, OpenAI import json def ner_extract(model, kwargs): return model(kwargs) def to_json(chat_output): return json.loads(chat_output) def team_describe(model, inp): query = "Can you describe these basketball teams? " + \ " ".join([i["E"] for i in inp if i["T"] =="Team"]) return model(query) def ner(text_input, labels, domain): extract = to_json(ner_extract(dict(text_input=text_input, labels=labels, domain=domain))) return team_describe(extract)
null
13,543
from minichain import prompt, show, GradioConf, OpenAI, Python import gradio as gr def pal_prompt(model, question): return model(dict(question=question)) gradio_conf=GradioConf(block_input = lambda: gr.Code(language="python"))) def python(model, inp): return model(inp + "\nprint(solution())") def pal(question): return python(pal_prompt(question))
null
13,545
from minichain import show, prompt, OpenAI, Bash def cli_prompt(model, query): return model(dict(question=query)) def bash_run(model, x): x = "\n".join(x.strip().split("\n")[1:-1]) return model(x) def bash(query): return bash_run(cli_prompt(query))
null
13,547
import os import subprocess import time from dataclasses import dataclass from types import TracebackType from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple from eliot import start_action, to_file def set_minichain_log(name: str) -> None: to_file(open(f"{name}.log", "w"))
null
13,548
from dataclasses import asdict, dataclass from itertools import count from typing import ( Any, Callable, Generic, Iterable, Iterator, List, Optional, TypeVar, Union, ) from jinja2 import Environment, FileSystemLoader, Template from .backend import Backend, MinichainContext, PromptSnap, RunLog class History: expand: Callable[[List[Any]], Iterator[Any]] inputs: List[Any] class Chain: history: History name: str cache: Any = None def run_gen(self) -> Any: # Lazily instantiate all the inputs args = [] for i, base_input in enumerate(self.history.inputs): function_input = base_input if isinstance(base_input, Chain): if base_input.cache is not None: function_input = base_input.cache if isinstance(function_input, Break): yield Break() return else: for function_input in base_input.run_gen(): if isinstance(function_input, Break): base_input.cache = Break() yield Break() return yield None base_input.cache = function_input args.append(function_input) # Run the current prompt for out in self.history.expand(*args): if isinstance(out, Break): yield Break() return yield None yield out def run(self) -> Any: for x in self.run_gen(): pass return x def transform(): # type: ignore return lambda fn: lambda *args: Chain( History(lambda *x: iter((fn(*x),)), list(args)), fn.__name__ )
null
13,549
import inspect import os from dataclasses import dataclass, field from typing import Any, Callable, Dict, List, Set, Tuple, Union import gradio as gr from gradio.blocks import Block from minichain import start_chain from .backend import MinichainContext from .base import Prompt CSS = """ #clean div.form {border: 0px} #response {border: 0px; background: #ffeec6} #prompt {border: 0px;background: aliceblue} #json {border: 0px} span.head {font-size: 60pt; font-family: cursive;} div.gradio-container {color: black} div.form {background: inherit} div.form div.block {padding: 0px; background: #fcfcfc} """ all_data = gr.State({}) final_output = gr.State({}) class Constructor: fns: List[Callable[[Dict[Block, Any]], Dict[Block, Any]]] = field( default_factory=list ) inputs: Set[Block] = field(default_factory=set) outputs: Set[Block] = field(default_factory=set) def merge(self, other: "Constructor") -> "Constructor": return Constructor( self.fns + other.fns, self.inputs | other.inputs, self.outputs | other.outputs, ) def add_inputs(self, inputs: List[Block]) -> "Constructor": return Constructor(self.fns, self.inputs | set(inputs), self.outputs) def fn(self, data: Dict[Block, Any]) -> Dict[Block, Any]: out: Dict[Block, Any] = {} for fn in self.fns: out = {**out, **fn(data)} return out def chain_blocks( prompts: List[Prompt[Any, Any, Any]], show_advanced: bool = True ) -> Constructor: cons = Constructor() count: Dict[int, int] = {} for p in prompts: count.setdefault(p._id, 0) i = count[p._id] cons = cons.merge(to_gradio_block(p, i, show_advanced=show_advanced)) count[p._id] += 1 return cons def api_keys(keys: Set[str] = {"OPENAI_API_KEY"}) -> None: if all([k in os.environ for k in keys]): return key_names = {} with gr.Accordion(label="API Keys", elem_id="json", open=False): if "OPENAI_API_KEY" in keys and "OPENAI_API_KEY" not in os.environ: key_names["OPENAI_API_KEY"] = gr.Textbox( os.environ.get("OPENAI_API_KEY"), label="OpenAI Key", elem_id="json", type="password", ) 
gr.Markdown( """ * [OpenAI Key](https://platform.openai.com/account/api-keys) """ ) if "HF_KEY" in keys: gr.Markdown( """ * [Hugging Face Key](https://huggingface.co/settings/tokens) """ ) key_names["HF_KEY"] = gr.Textbox( os.environ.get("HF_KEY"), label="Hugging Face Key", elem_id="inner", type="password", ) if "SERP_KEY" in keys: gr.Markdown( """ * [Search Key](https://serpapi.com/users/sign_in) """ ) key_names["SERP_KEY"] = gr.Textbox( os.environ.get("SERP_KEY"), label="Search Key", elem_id="inner", type="password", ) api_btn = gr.Button("Save") def api_run(data): # type: ignore for k, v in key_names.items(): if data[v] is not None and data[v] != "": os.environ[k] = data[v] return {} api_btn.click(api_run, inputs=set(key_names.values())) import gradio as gr from gradio.blocks import Block class MinichainContext: id_: int = 0 prompt_store: Dict[Tuple[int, int], List[PromptSnap]] = {} prompt_count: Dict[int, int] = {} name: str = "" class Prompt(Generic[Input, Output, FnOutput]): counter = count() def __init__( self, fn: Callable[[Callable[[Input], Output]], Iterable[FnOutput]], backend: Union[List[Backend], Backend], template_file: Optional[str], template: Optional[str], gradio_conf: Any = None, ): self.fn = fn if not isinstance(backend, List): self.backend = [backend] else: self.backend = backend self.template_file: Optional[str] = template_file self.template: Optional[str] = template self.gradio_conf = gradio_conf self._fn: str = fn.__name__ self._id: int = Prompt.counter.__next__() def run(self, request: str, tool_num: int = 0) -> Iterator[RunLog]: if not hasattr(self.backend[tool_num], "run_stream"): yield RunLog(request, None) response: Union[str, Any] = self.backend[tool_num].run(request) yield RunLog(request, response) else: yield RunLog(request, None) for r in self.backend[tool_num].run_stream(request): yield RunLog(request, r) def template_fill(self, inp: Any) -> str: kwargs = inp if self.template_file: tmp = 
Environment(loader=FileSystemLoader(".")).get_template( name=self.template_file ) elif self.template: tmp = Template(self.template) return str(tmp.render(**kwargs)) def __call__(self, *args: Any) -> Chain: return Chain(History(self.expand, list(args)), self.fn.__name__) class Model: def __init__(self, prompt: "Prompt[Input, Output, FnOutput]", data: Any): self.prompt = prompt self.data = data self.run_log = RunLog() def __call__(self, model_input: Any, tool_num: int = 0) -> Any: for r in self.stream(model_input, tool_num): yield r # print("hello tool") # for out in self.prompt.dynamic[tool_num].expand(*model_input): # self.run_log = self.prompt.dynamic[tool_num].model.run_log # self.run_log.dynamic = tool_num # yield out def stream( self, model_input: Any, tool_num: int = 0 ) -> Iterator[Optional[str]]: if ( self.prompt.template is not None or self.prompt.template_file is not None ): if not isinstance(model_input, dict): model_input = asdict(model_input) result = self.prompt.template_fill(model_input) else: result = model_input for run_log in self.prompt.run(result, tool_num): r = self.run_log.response if run_log.response is None: out = r elif not r: out = run_log.response else: out = r + run_log.response self.run_log = RunLog(run_log.request, out, dynamic=tool_num) yield self.run_log.response def expand( self, *args: List[Any], data: Any = None ) -> Iterator[Optional[FnOutput]]: # Times prompt has been used. 
MinichainContext.prompt_count.setdefault(self._id, -1) MinichainContext.prompt_count[self._id] += 1 count = MinichainContext.prompt_count[self._id] # Snap of the prompt MinichainContext.prompt_store.setdefault((self._id, count), []) MinichainContext.prompt_store[self._id, count].append(PromptSnap()) # Model to be passed to function model = self.Model(self, data) for output in self.fn(model, *args): t = model.run_log assert model.run_log, str(model) snap = PromptSnap(args, t, output) count = MinichainContext.prompt_count[self._id] MinichainContext.prompt_store.setdefault((self._id, count), []) MinichainContext.prompt_store[self._id, count][-1] = snap yield None assert model.run_log, str(model) t = model.run_log snap = PromptSnap(args, t, output) MinichainContext.prompt_store[self._id, count][-1] = snap yield output The provided code snippet includes necessary dependencies for implementing the `show` function. Write a Python function `def show( prompt: Prompt[Any, Any, Any], examples: Union[List[str], List[Tuple[str]]] = [""], subprompts: List[Prompt[Any, Any, Any]] = [], fields: List[str] = [], initial_state: Any = None, out_type: str = "markdown", keys: Set[str] = {"OPENAI_API_KEY"}, description: str = "", code: str = "", css: str = "", show_advanced: bool = True, ) -> gr.Blocks` to solve the following problem: Constructs a gradio component to show a prompt chain. Args: prompt: A prompt or prompt chain to display. examples: A list of example inputs, either string or tuples of fields subprompts: The `Prompt` objects to display. fields: The names of the field input to the prompt. initial_state: For stateful prompts, the initial value. out_type: type of final output keys: user keys required description: description of the model code: code to display css : additional css show_advanced : show the "..." 
advanced elements Returns: Gradio block Here is the function: def show( prompt: Prompt[Any, Any, Any], examples: Union[List[str], List[Tuple[str]]] = [""], subprompts: List[Prompt[Any, Any, Any]] = [], fields: List[str] = [], initial_state: Any = None, out_type: str = "markdown", keys: Set[str] = {"OPENAI_API_KEY"}, description: str = "", code: str = "", css: str = "", show_advanced: bool = True, ) -> gr.Blocks: """ Constructs a gradio component to show a prompt chain. Args: prompt: A prompt or prompt chain to display. examples: A list of example inputs, either string or tuples of fields subprompts: The `Prompt` objects to display. fields: The names of the field input to the prompt. initial_state: For stateful prompts, the initial value. out_type: type of final output keys: user keys required description: description of the model code: code to display css : additional css show_advanced : show the "..." advanced elements Returns: Gradio block """ fields = [arg for arg in inspect.getfullargspec(prompt).args if arg != "state"] with gr.Blocks(css=CSS + "\n" + css, theme=gr.themes.Monochrome()) as demo: # API Keys api_keys() constructor = Constructor() # Collect all the inputs state = gr.State(initial_state) constructor = constructor.merge(Constructor(inputs={state}, outputs={state})) # Show the description gr.Markdown(description) # Build the top query box with one input for each field. 
inputs = list([gr.Textbox(label=f) for f in fields]) examples = gr.Examples(examples=examples, inputs=inputs) query_btn = gr.Button(value="Run") constructor = constructor.add_inputs(inputs) with gr.Group(): # Intermediate prompt displays constructor = constructor.merge( chain_blocks(subprompts, show_advanced=show_advanced) ) # Final Output result # with gr.Accordion(label="✔️", elem_id="result"): # typ = gr.JSON if out_type == "json" else gr.Markdown # output = typ(elem_id="inner") def output_fn(data: Dict[Block, Any]) -> Dict[Block, Any]: final = data[final_output] return {state: final} # output: final} constructor = constructor.merge(Constructor([output_fn], set(), set())) def run(data): # type: ignore prompt_inputs = {k: data[v] for k, v in zip(fields, inputs)} if initial_state is not None: prompt_inputs["state"] = data[state] with start_chain("temp"): for output in prompt(**prompt_inputs).run_gen(): data[all_data] = dict(MinichainContext.prompt_store) data[final_output] = output yield constructor.fn(data) if output is not None: break yield constructor.fn(data) query_btn.click(run, inputs=constructor.inputs, outputs=constructor.outputs) if code: gr.Code(code, language="python", elem_id="inner") return demo
Constructs a gradio component to show a prompt chain. Args: prompt: A prompt or prompt chain to display. examples: A list of example inputs, either string or tuples of fields subprompts: The `Prompt` objects to display. fields: The names of the field input to the prompt. initial_state: For stateful prompts, the initial value. out_type: type of final output keys: user keys required description: description of the model code: code to display css : additional css show_advanced : show the "..." advanced elements Returns: Gradio block
13,550
import os import sys from datetime import date import subprocess def git_authors(): result = subprocess.run( ["git", "shortlog", "--summary", "HEAD"], stdout = subprocess.PIPE, check = True) names = [ line.strip().split("\t")[1] for line in result.stdout.decode("utf-8").splitlines() ] return names
null
13,551
import os import sys from datetime import date import subprocess def prose_list(items): if not items: return "" if len(items) == 1: return items[0] elif len(items) == 2: return " and ".join(items) else: return ", ".join([*items[0:-1], "and " + items[-1]])
null
13,552
import logging import hashlib import collections.abc as abc import os import shutil import sys import errno def _make_dir_recursively(dir_): try: os.makedirs(dir_) except OSError as ex: from errno import EEXIST if ex.errno != EEXIST: raise
null
13,553
import logging import hashlib import collections.abc as abc import os import shutil import sys import errno def update_checksum(checksum, obj): if isinstance(obj, str): checksum.update(obj.encode("utf8")) else: checksum.update(obj)
null
13,554
import argparse from augur.io import open_file, read_metadata from Bio import SeqIO import csv import sys def parse_args(): parser = argparse.ArgumentParser( description=""" Custom script to combine metadata files from different origins. In the case where metadata files specify different values, the latter provided file will take priority. Columns will be added for each origin with values "yes" or "no" to identify the input source (origin) of each sample. """, formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--metadata', required=True, nargs='+', metavar="TSV", help="Metadata files") parser.add_argument('--origins', required=True, nargs='+', metavar="STR", help="Names of origins (order should match provided metadata)") parser.add_argument('--output', required=True, metavar="TSV", help="Output (merged) metadata") args = parser.parse_args() return args
null
13,555
import argparse import json import re from numpy import linspace from math import floor The provided code snippet includes necessary dependencies for implementing the `adjust_coloring_for_epiweeks` function. Write a Python function `def adjust_coloring_for_epiweeks(dataset)` to solve the following problem: If an auspice JSON specifies a colouring with the key "epiweek" (case sensitive) then we create a categorical colorscale which evenly spaces the canonical nextstrain rainbow across the observed time window. NOTE: epiweek must be in CDC format ("YYYYMM") but this may be relaxed to include ISO format in the future. Here is the function: def adjust_coloring_for_epiweeks(dataset): """ If an auspice JSON specifies a colouring with the key "epiweek" (case sensitive) then we create a categorical colorscale which evenly spaces the canonical nextstrain rainbow across the observed time window. NOTE: epiweek must be in CDC format ("YYYYMM") but this may be relaxed to include ISO format in the future. 
""" EPIKEY="epiweek" try: (cidx, coloring) = [(i, c) for i, c in enumerate(dataset['meta'].get("colorings", [])) if c['key']==EPIKEY][0] except IndexError: # coloring doesn't define an epiweek return # remove any duplicate coloring entries in the JSON to ensure the entry we edit is the one used by Auspice # (NOTE: this is augur bug https://github.com/nextstrain/augur/issues/719) dataset['meta']['colorings'] = [c for i,c in enumerate(dataset['meta']['colorings']) if not (c['key']==EPIKEY and i!=cidx)] # delay import to support older setups not using epiweeks package from epiweeks import Year, Week observed_values = set() def recurse(node): value = node.get("node_attrs", {}).get(EPIKEY, {}).get("value", False) if value: # we validate using both the epiweeks package and a regex (epiweeks will perform coercion of non-valid data into valid data) if not re.match(r'^(\d{4})(\d{2})$', value): raise(ValueError(f"Epiweek value {value} was not in format YYYYMM.")) week = Week.fromstring(value, system="cdc") # raises ValueError if not valid observed_values.add(week) for child in node.get("children", []): recurse(child) try: recurse(dataset["tree"]) except ValueError as e: print(str(e)) print("Skipping color scale creation for epiweek.") return observed_values = sorted(list(observed_values)) ## generate epiweeks across the entire observed range for color generation epiweeks = [ observed_values[0] ] while epiweeks[-1] < observed_values[-1]: epiweeks.append(epiweeks[-1]+1) ## generate rainbow colour scale across epiweeks. ## Since a "default" augur install does not include matplotlib, rather than interpolating between values in the scale ## we reuse them. This only applies when n(epiweeks)>30, where distinguising between colors is problematic anyway. 
rainbow = ["#511EA8", "#482BB6", "#4039C3", "#3F4ACA", "#3E5CD0", "#416CCE", "#447CCD", "#4989C4", "#4E96BC", "#559FB0", "#5DA8A4", "#66AE96", "#6FB388", "#7AB77C", "#85BA6F", "#91BC64", "#9DBE5A", "#AABD53", "#B6BD4B", "#C2BA46", "#CDB642", "#D6B03F", "#DDA83C", "#E29D39", "#E69036", "#E67F33", "#E56D30", "#E2592C", "#DF4428", "#DC2F24"] color_indicies = [floor(x) for x in linspace(0, len(rainbow), endpoint=False, num=len(epiweeks))] coloring['scale'] = [ [epiweek.cdcformat(), rainbow[color_indicies[i]]] for i,epiweek in enumerate(epiweeks) if epiweek in observed_values ] ## auspice will order the legend according to the provided color scale, so there is no need to set ## `coloring['legend']` unless we want to restrict this for some reason. coloring['type'] = 'categorical' # force the scale type to be categorical
If an auspice JSON specifies a colouring with the key "epiweek" (case sensitive) then we create a categorical colorscale which evenly spaces the canonical nextstrain rainbow across the observed time window. NOTE: epiweek must be in CDC format ("YYYYMM") but this may be relaxed to include ISO format in the future.
13,556
import argparse import sys from datetime import datetime import pandas as pd import numpy as np INSERT_BEFORE_THIS_COLUMN = "pango_lineage" column_map = { "clade": "Nextstrain_clade", "Nextclade_pango": "Nextclade_pango", "totalMissing": "missing_data", "totalSubstitutions": "divergence", "totalNonACGTNs": "nonACGTN", "privateNucMutations.totalUnlabeledSubstitutions": "rare_mutations", "privateNucMutations.totalReversionSubstitutions": "reversion_mutations", "privateNucMutations.totalLabeledSubstitutions": "potential_contaminants", "qc.overallScore": "QC_overall_score", "qc.overallStatus": "QC_overall_status", "qc.missingData.status": "QC_missing_data", "qc.mixedSites.status": "QC_mixed_sites", "qc.privateMutations.status": "QC_rare_mutations", "qc.snpClusters.status": "QC_snp_clusters", "qc.frameShifts.status": "QC_frame_shifts", "qc.stopCodons.status": "QC_stop_codons", "frameShifts": "frame_shifts", "deletions": "deletions", "insertions": "insertions", "substitutions": "substitutions", "aaSubstitutions": "aaSubstitutions", "immune_escape": "immune_escape", "ace2_binding": "ace2_binding", } The provided code snippet includes necessary dependencies for implementing the `reorder_columns` function. Write a Python function `def reorder_columns(result: pd.DataFrame)` to solve the following problem: Moves the new clade column after a specified column Here is the function: def reorder_columns(result: pd.DataFrame): """ Moves the new clade column after a specified column """ columns = list(result.columns) columns.remove(column_map['clade']) insert_at = columns.index(INSERT_BEFORE_THIS_COLUMN) columns.insert(insert_at, column_map['clade']) return result[columns]
Moves the new clade column after a specified column
13,557
import argparse import sys from datetime import datetime import pandas as pd import numpy as np def parse_args(): parser = argparse.ArgumentParser( description="Joins metadata file with Nextclade clade output", ) parser.add_argument("first_file") parser.add_argument("second_file") parser.add_argument("-o", default=sys.stdout) return parser.parse_args()
null
13,558
import argparse import sys from datetime import datetime import pandas as pd import numpy as np def datestr_to_ordinal(x): try: return datetime.strptime(x,"%Y-%m-%d").toordinal() except: return np.nan
null
13,559
import argparse import sys from datetime import datetime import pandas as pd import numpy as np def isfloat(value): try: float(value) return True except ValueError: return False
null
13,560
import argparse from augur.io import open_file, read_sequences, write_sequences import hashlib from pathlib import Path import re import sys from utils import stream_tar_file_contents The provided code snippet includes necessary dependencies for implementing the `rename_sequences` function. Write a Python function `def rename_sequences(sequences, pattern)` to solve the following problem: Rename the given sequences' ids by replacing the given patterns with the empty string. Here is the function: def rename_sequences(sequences, pattern): """Rename the given sequences' ids by replacing the given patterns with the empty string. """ for sequence in sequences: # Replace the given patterns in the sequence description with the empty # string. For a simple FASTA record with only an identifier in the # defline, the description is identical to the `id` and `name` fields. # For a complex FASTA record that has spaces in the identifier or other # additional information, we need to parse the description to get any # trailing components of the strain name. sequence.id = re.sub(pattern, "", sequence.description) # Replace standard characters that are not accepted by all downstream # tools as valid FASTA names. sequence.id = sequence.id.replace("'", "-") # The name field stores the same information for a simple FASTA input, so we need to override its value, too. sequence.name = sequence.id # Do not keep additional information that follows the sequence identifier. sequence.description = "" yield sequence
Rename the given sequences' ids by replacing the given patterns with the empty string.
13,561
import argparse
from augur.io import open_file, read_sequences, write_sequences
import hashlib
from pathlib import Path
import re
import sys

from utils import stream_tar_file_contents


class DuplicateSequenceError(ValueError):
    pass


# Task: implement drop_duplicate_sequences (see docstring below).
def drop_duplicate_sequences(sequences, error_on_duplicates=False):
    """Identify and drop duplicate sequences from the given iterator.

    Parameters
    ----------
    sequences : Iterator

    Yields
    ------
    Bio.SeqIO.Seq :
        Unique sequence records

    Raises
    ------
    DuplicateSequenceError :
        If `error_on_duplicates` is True and any duplicate records are found,
        raises an exception with a comma-delimited list of duplicates as the
        message.
    """
    digest_by_name = {}
    conflicting_names = set()
    for record in sequences:
        # Hash the sequence content so repeated names with *different*
        # sequences can be distinguished from exact re-submissions.
        digest = hashlib.sha256(str(record.seq).encode("utf-8")).hexdigest()
        previous_digest = digest_by_name.get(record.name)
        if previous_digest is not None:
            # Keep only the first record for a given name; remember names
            # whose later copies carried a different sequence.
            if previous_digest != digest:
                conflicting_names.add(record.name)
            continue
        digest_by_name[record.name] = digest
        yield record

    # Report names of duplicate strains with different sequences when requested.
    if len(conflicting_names) > 0 and error_on_duplicates:
        raise DuplicateSequenceError(", ".join(conflicting_names))
Identify and drop duplicate sequences from the given iterator. Parameters ---------- sequences : Iterator Yields ------ Bio.SeqIO.Seq : Unique sequence records Raises ------ DuplicateSequenceError : If `error_on_duplicates` is True and any duplicate records are found, raises an exception with a comma-delimited list of duplicates as the message.
13,562
from io import TextIOWrapper
import lzma
from pathlib import Path
import sys
import tarfile
import tempfile

EXTENSION_BY_FILETYPE = {
    "metadata": ".tsv",
    "sequences": ".fasta",
}


# Task: implement extract_tar_file_contents (see docstring below).
def extract_tar_file_contents(filename, filetype):
    """Try to extract the contents of a given file type (e.g., metadata or
    sequences) from the given tar filename.

    Parameters
    ----------
    filename : str or Path-like
        Path to the tar archive to search for the given file type.
    filetype : str
        Type of file to search for in the given tar archive based on the
        associated file extension.

    Returns
    -------
    tempfile.TemporaryDirectory :
        Temporary directory containing the file extracted from the tar archive.
    pathlib.Path :
        Path to the file extracted from the archive with the same name as the
        file in the original archive.

    Raises
    ------
    FileNotFoundError :
        When a file with the the requested file type's extension could not be
        found in the given tar archive.
    """
    wanted_extension = EXTENSION_BY_FILETYPE[filetype]
    with tarfile.open(filename) as archive:
        # Find the first member whose suffixes include the wanted extension.
        match = None
        for candidate in archive.getmembers():
            if wanted_extension in Path(candidate.name).suffixes:
                # Keep only the base file name, dropping any leading
                # directories (including a root file system prefix), so the
                # member extracts directly into the temporary directory.
                match = candidate
                match.name = Path(candidate.name).name
                break

        if match is None:
            raise FileNotFoundError(f"Could not find a {filetype} file in '(unknown)'")

        # Extract to a temporary directory so downstream consumers can re-read
        # the file in multiple passes instead of a single streaming pass. The
        # directory lives exactly as long as the returned TemporaryDirectory
        # object, so callers keep a reference until they are done with it.
        temporary_dir = tempfile.TemporaryDirectory()
        archive.extractall(
            temporary_dir.name,
            members=(match,)
        )

    extracted_file_path = Path(temporary_dir.name) / Path(match.name)
    print(f"Extracted {filetype} file from (unknown) to {extracted_file_path}", file=sys.stderr)
    return temporary_dir, extracted_file_path
Try to extract the contents of a given file type (e.g., metadata or sequences) from the given tar filename. Parameters ---------- filename : str or Path-like Path to the tar archive to search for the given file type. filetype : str Type of file to search for in the given tar archive based on the associated file extension. Returns ------- tempfile.TemporaryDir : Temporary directory containing the file extracted from the tar archive. pathlib.Path : Path to the file extracted from the archive with the same name as the file in the original archive. Raises ------ FileNotFoundError : When a file with the the requested file type's extension could not be found in the given tar archive.
13,563
from io import TextIOWrapper
import lzma
from pathlib import Path
import sys
import tarfile
import tempfile

EXTENSION_BY_FILETYPE = {
    "metadata": ".tsv",
    "sequences": ".fasta",
}


# Task: implement stream_tar_file_contents (see docstring below).
def stream_tar_file_contents(filename, filetype):
    """Try to extract the contents of a given file type (e.g., metadata or
    sequences) from the given tar filename.

    Parameters
    ----------
    filename : str or Path-like
        Path to the tar archive to search for the given file type.
    filetype : str
        Type of file to search for in the given tar archive based on the
        associated file extension.

    Returns
    -------
    io.BufferedReader :
        A stream of the requested file from the tar archive.
    TarFile :
        A handle to the original tar archive to be closed when the stream has
        been read.

    Raises
    ------
    FileNotFoundError :
        When a file with the the requested file type's extension could not be
        found in the given tar archive.
    """
    extension = EXTENSION_BY_FILETYPE[filetype]
    # The archive stays open so the caller can consume the returned member
    # stream; the caller is responsible for closing the returned TarFile.
    tar = tarfile.open(filename)
    internal_file = None
    for member in tar.getmembers():
        suffixes = Path(member.name).suffixes
        if extension in suffixes:
            # By default, return the binary stream for the member file.
            internal_file = tar.extractfile(member.name)
            if ".xz" in suffixes:
                # Check for LZMA-compressed data and open these with the
                # corresponding library (text mode, so callers read str).
                internal_file = lzma.open(internal_file, "rt")
            elif extension == ".fasta":
                # For sequence data, handle decoding of the binary stream
                # prior to passing the data back to the caller.
                internal_file = TextIOWrapper(internal_file)
            # Only the first matching member is used.
            break

    if internal_file is None:
        # Close the archive before raising so the handle is not leaked.
        tar.close()
        raise FileNotFoundError(f"Could not find a {filetype} file in '(unknown)'")

    return internal_file, tar
Try to extract the contents of a given file type (e.g., metadata or sequences) from the given tar filename. Parameters ---------- filename : str or Path-like Path to the tar archive to search for the given file type. filetype : str Type of file to search for in the given tar archive based on the associated file extension. Returns ------- io.BufferedReader : A stream of the requested file from the tar archive. TarFile : A handle to the original tar archive to be closed when the stream has been read. Raises ------ FileNotFoundError : When a file with the the requested file type's extension could not be found in the given tar archive.
13,564
import argparse
import json
from Bio import Phylo
from collections import defaultdict


def attach_labels(d, labeled_nodes):
    """Recursively attach an ``mlabel`` branch label to Auspice tree-JSON nodes.

    Parameters
    ----------
    d : dict
        An Auspice tree node; may contain a "children" list of child nodes.
    labeled_nodes : dict
        Maps node names to a sequence whose first element is the label to
        attach to that node's branch.
    """
    for child in d.get("children", []):
        if child["name"] in labeled_nodes:
            # Create the labels container on demand; other labels may already
            # exist on this branch and must be preserved.
            labels = child["branch_attrs"].setdefault("labels", {})
            labels["mlabel"] = labeled_nodes[child["name"]][0]
            # NOTE(review): the original printed each labels dict here —
            # removed as leftover debug output with no context.
        attach_labels(child, labeled_nodes)
null
13,565
import argparse
from datetime import datetime
from augur.io import read_metadata
import json


def get_recency(date_str, ref_date):
    """Bin a submission date into a human-readable recency category.

    ``date_str`` is a "YYYY-MM-DD" string; ``ref_date`` is a datetime used as
    "today". Returns one of: 'New', '1-2 days ago', '3-7 days ago',
    'One week ago', 'One month ago', 'Older'.
    """
    delta_days = ref_date.toordinal() - datetime.strptime(date_str, '%Y-%m-%d').toordinal()
    # Guard clauses from newest to oldest; the first matching bin wins.
    if delta_days <= 0:
        return 'New'
    if delta_days < 3:
        return '1-2 days ago'
    if delta_days < 8:
        return '3-7 days ago'
    if delta_days < 15:
        return 'One week ago'
    if delta_days < 31:
        return 'One month ago'
    return 'Older'
null
13,566
import argparse
import numpy as np
import pandas as pd
from datetime import datetime, timedelta


def isfloat(value):
    """Return True when ``float(value)`` succeeds; False on ValueError."""
    try:
        float(value)
    except ValueError:
        return False
    else:
        return True
null
13,567
import argparse
import numpy as np
import pandas as pd
from datetime import datetime, timedelta


def datestr_to_ordinal(x, minus_weeks=0):
    """Convert "YYYY-MM-DD" to an ordinal day, optionally shifted back by whole weeks.

    Parameters
    ----------
    x : str
        Date string in "%Y-%m-%d" format.
    minus_weeks : int
        Number of weeks to subtract before taking the ordinal.

    Returns
    -------
    int or float
        Ordinal day number, or ``np.nan`` for missing/malformed dates.
    """
    try:
        return (datetime.strptime(x, "%Y-%m-%d") - timedelta(weeks=minus_weeks)).toordinal()
    except (ValueError, TypeError):
        # Only the expected parse failures (bad format, non-string input);
        # the previous bare ``except`` also hid KeyboardInterrupt/SystemExit.
        return np.nan


def earliest_clade_date(Nextstrain_clade, clade_emergence_dates_filename, window_weeks=2):
    """Look up a clade's first-sequence date as an ordinal, minus a lead window.

    Parameters
    ----------
    Nextstrain_clade : str
        Clade name used as the index into the emergence-dates table.
    clade_emergence_dates_filename : str
        Tab-separated file with columns "Nextstrain_clade" and "first_sequence".
    window_weeks : int
        Number of weeks subtracted from the emergence date.

    Returns
    -------
    int or float
        Ordinal day number, or ``np.nan`` when the clade is absent or its
        date is unparseable.
    """
    clade_dates = pd.read_csv(clade_emergence_dates_filename, index_col="Nextstrain_clade", sep='\t')
    try:
        return datestr_to_ordinal(clade_dates.loc[Nextstrain_clade]['first_sequence'], minus_weeks=window_weeks)
    except KeyError:
        # Clade not present in the emergence-dates table; parse failures are
        # already converted to NaN inside datestr_to_ordinal.
        return np.nan
null
13,568
import argparse
from augur.io import open_file, read_sequences, write_sequences
import Bio
import Bio.SeqIO
from Bio.Seq import Seq


def mask_terminal_gaps(seq):
    """Replace terminal (leading and trailing) '-' gaps with 'N'.

    Internal gaps are left untouched; only runs of '-' at either end of the
    alignment string are masked.
    """
    total_length = len(seq)
    # Measure the leading gap run, then the trailing one, by successive strips.
    without_leading = seq.lstrip('-')
    n_leading = total_length - len(without_leading)
    core = without_leading.rstrip('-')
    n_trailing = total_length - len(core) - n_leading
    return "N" * n_leading + core + "N" * n_trailing
null
13,569
import argparse
import json


def update_strain_names(n):
    """Recursively prepend ``args.prefix`` to tree node names.

    Relies on a module-level ``args`` namespace (argparse result) being in
    scope. Internal "NODE_*" names and names that already contain the prefix
    are left untouched.
    """
    name = n["name"]
    if "NODE_" not in name and args.prefix not in name:
        n["name"] = args.prefix + name
    for child in n.get("children", []):
        update_strain_names(child)
null
13,570
import argparse
from augur.frequency_estimators import logit_transform
from augur.utils import annotate_parents_for_tree, read_node_data, read_tree, write_json
import Bio.Phylo
from collections import defaultdict
import json
import math
import numpy as np
from scipy.stats import linregress
import sys


# Task: implement read_frequencies (see docstring below).
def read_frequencies(frequencies_file):
    """Returns a dictionary of frequencies and their parameters indexed by
    strain name from a given auspice tip frequencies file.
    """
    with open(frequencies_file) as fh:
        frequencies_json = json.load(fh)

    # Records carrying a "frequencies" key are per-strain trajectories; every
    # other top-level key (e.g. pivots) is treated as a parameter.
    frequencies = {
        key: record["frequencies"]
        for key, record in frequencies_json.items()
        if "frequencies" in record
    }
    parameters = {
        key: record
        for key, record in frequencies_json.items()
        if "frequencies" not in record
    }
    return frequencies, parameters
Returns a dictionary of frequencies and their parameters indexed by strain name from a given auspice tip frequencies file.
13,571
import argparse, os, glob
from augur.io import open_file
from Bio import SeqIO, SeqFeature, Seq
from Bio.SeqIO.FastaIO import SimpleFastaParser
import numpy as np
import pandas as pd


def read_reference(fname, genemap):
    """Load a reference genome and translate each gene from a GFF-style genemap.

    Returns a dict {"nuc": <reference sequence string>,
    "translations": {<gene name>: <amino-acid string>}}.
    """
    try:
        ref = str(SeqIO.read(fname, 'fasta').seq)
    except:
        # NOTE(review): bare except is overly broad — it falls back to reading
        # the file as headerless plain text (joined, whitespace-stripped lines)
        # whenever FASTA parsing fails for any reason.
        with open(fname, 'r') as fh:
            ref = "".join([x.strip() for x in fh])

    translations = {}
    with open(genemap, 'r') as fh:
        for line in fh:
            # Skip GFF comment lines.
            if line[0]=='#':
                continue
            entries = [x.strip() for x in line.strip().split('\t')]
            # GFF columns: 4 = start (1-based), 5 = end, 7 = strand.
            start = int(entries[3])
            end = int(entries[4])
            strand = entries[6]
            # Column 9 holds ";"-separated key=value attributes; values may
            # themselves contain "=" so only the first split counts.
            attributes = {x.split('=')[0]:'='.join(x.split('=')[1:]) for x in entries[8].split(';')}
            if 'gene_name' in attributes:
                name = attributes['gene_name'].strip('"')
            else:
                name = None
            # Extract the (possibly reverse-strand) CDS and translate it;
            # start-1 converts the 1-based GFF coordinate to 0-based.
            translation = Seq.translate(SeqFeature.SeqFeature(SeqFeature.FeatureLocation(start-1, end, strand=-1 if strand=='-' else 1)).extract(ref))
            translations[name] = str(translation)

    return {"nuc":ref, "translations":translations}
null
13,572
import argparse, os, glob
from augur.io import open_file
from Bio import SeqIO, SeqFeature, Seq
from Bio.SeqIO.FastaIO import SimpleFastaParser
import numpy as np
import pandas as pd


def summarise_differences(ref, query, isAA):
    """
    Summarise the differences between a provided reference and a query
    (both of which are numpy arrays with dtype int8).
    Returns a string of comma-separated mutations.

    NOTE: ``query`` is modified in place (terminal gaps overwritten).
    """
    # in int8: A = 65  T = 84  C = 67  G = 71  N = 78  - = 45  X = 88
    ambiguous = 88 if isAA else 78 # 'X' or 'N'

    # replace all leading and trailing gaps with the ambiguous character
    idx_not_gaps = np.where(query!=45)[0] # 45 is '-' (gap)
    if idx_not_gaps.size:
        query[0:idx_not_gaps[0]] = ambiguous
        query[idx_not_gaps[-1]+1:len(query)] = ambiguous
    else:
        # the query is nothing but gaps! We don't report any mutations here
        return ""

    # sometimes the query length is longer than the reference. In this case we
    # preserve previous behavior by discarding extra characters in the query
    if query.size>ref.size:
        query = query[0:ref.size]

    # find indices where the query differs from the reference, and is not ambiguous
    changes = np.logical_and(ref!=query, query!=ambiguous).nonzero()[0]
    # save these as a comma-separated list of <from><base><to>, where the
    # base (position) is 1-based
    return ",".join([f"{chr(ref[idx])}{idx+1}{chr(query[idx])}" for idx in changes])


def to_numpy_array(input_string):
    # Uppercase, encode, and copy so the result is a writable int8 array
    # (frombuffer alone would give a read-only view of the bytes).
    return np.frombuffer(input_string.upper().encode('utf-8'), dtype=np.int8).copy()


def to_mutations(aln_file, ref, aa=False):
    """Map each aligned FASTA record name to its mutation string vs. *ref*."""
    res = {}
    ref_array = to_numpy_array(ref)
    with open_file(aln_file, 'r') as fh:
        for name, seq in SimpleFastaParser(fh):
            res[name] = summarise_differences(ref_array, to_numpy_array(seq), aa)
    return res
null
13,573
import argparse
from augur.io import read_sequences
from random import shuffle
from collections import defaultdict
import Bio
import numpy as np
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio import AlignIO, SeqIO
from scipy import sparse
import sys


def compactify_sequences(sparse_matrix, sequence_names):
    """Group sequence names by their identical sets of SNPs.

    Each row of *sparse_matrix* encodes one sequence's differences from the
    consensus; names whose rows share the exact same (position, value) pairs
    end up in the same group. Returns a defaultdict mapping that tuple of
    pairs (empty tuple for consensus-identical rows) to a list of names.
    """
    sequence_groups = defaultdict(list)
    for name, snp_row in zip(sequence_names, sparse_matrix):
        positions = snp_row.nonzero()
        values = np.array(snp_row[positions])
        if len(positions[1]):
            key = tuple(zip(positions[1], values[0]))
        else:
            key = tuple()
        sequence_groups[key].append(name)
    return sequence_groups
null
13,574
import argparse
from augur.io import read_sequences
from random import shuffle
from collections import defaultdict
import Bio
import numpy as np
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio import AlignIO, SeqIO
from scipy import sparse
import sys

# Initial capacity (and growth increment) for the COO-style buffers below.
INITIALISATION_LENGTH = 1000000


def sequence_to_int_array(s, fill_value=110, fill_gaps=True):
    """Encode a sequence string as int8, masking non-ACGT characters.

    Lower-case a/c/g/t map to their byte values (97/99/103/116); everything
    else becomes ``fill_value`` (default 110, i.e. 'n'). With
    ``fill_gaps=False`` the gap character '-' (45) is preserved.
    """
    seq = np.frombuffer(str(s).lower().encode('utf-8'), dtype=np.int8).copy()
    if fill_gaps:
        seq[(seq!=97) & (seq!=99) & (seq!=103) & (seq!=116)] = fill_value
    else:
        seq[(seq!=97) & (seq!=99) & (seq!=103) & (seq!=116) & (seq!=45)] = fill_value
    return seq


def calculate_snp_matrix(fastafile, consensus=None, zipped=False, fill_value=110, chunk_size=0, ignore_seqs=None):
    # This function generates a sparse matrix where differences to the
    # consensus are coded as integers (the differing base's int8 value).
    # NOTE(review): the ``zipped`` parameter is unused in this body — presumably
    # handled by the caller when opening ``fastafile``; confirm before removal.
    if ignore_seqs is None:
        ignore_seqs = []

    # COO-style (row, col, val) buffers, grown in INITIALISATION_LENGTH steps.
    row = np.empty(INITIALISATION_LENGTH)
    col = np.empty(INITIALISATION_LENGTH, dtype=np.int64)
    val = np.empty(INITIALISATION_LENGTH, dtype=np.int8)

    r = 0            # current row (sequence) index
    n_snps = 0       # number of SNP entries written so far
    nseqs = 0        # number of sequences consumed
    seq_names = []
    filled_positions = []   # per-sequence positions that were masked to fill_value
    current_length = INITIALISATION_LENGTH

    for record in fastafile:
        h = record.name
        s = str(record.seq)
        if h in ignore_seqs:
            continue
        if consensus is None:
            align_length = len(s)
            # Take consensus as first sequence
            consensus = sequence_to_int_array(s, fill_value=fill_value)
        else:
            align_length = len(consensus)

        nseqs +=1
        seq_names.append(h)

        if(len(s)!=align_length):
            raise ValueError('Fasta file appears to have sequences of different lengths!')

        s = sequence_to_int_array(s, fill_value=fill_value)
        # SNPs are positions differing from the consensus, excluding masked sites.
        snps = (consensus!=s) & (s!=fill_value)
        right = n_snps + np.sum(snps)
        filled_positions.append(np.where(s==fill_value)[0])

        # Grow buffers once the next write could exceed half the capacity.
        if right >= (current_length/2):
            current_length = current_length + INITIALISATION_LENGTH
            row.resize(current_length)
            col.resize(current_length)
            val.resize(current_length)

        row[n_snps:right] = r
        col[n_snps:right] = np.flatnonzero(snps)
        val[n_snps:right] = s[snps]
        r += 1
        n_snps = right
        # Optional chunking: stop after chunk_size sequences (0 disables).
        if chunk_size and chunk_size==nseqs:
            break

    if nseqs==0:
        return None

    # Trim buffers to the entries actually written before building the matrix.
    row = row[0:right]
    col = col[0:right]
    val = val[0:right]

    sparse_snps = sparse.csc_matrix((val, (row, col)), shape=(nseqs, align_length))

    return {'snps': sparse_snps, 'consensus': consensus, 'names': seq_names, 'filled_positions': filled_positions}
null
13,575
import argparse
from augur.io import read_sequences
from random import shuffle
from collections import defaultdict
import Bio
import numpy as np
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio import AlignIO, SeqIO
from scipy import sparse
import sys


def calculate_distance_matrix(sparse_matrix_A, sparse_matrix_B, consensus):
    """Pairwise SNP distances between rows of two sparse SNP matrices.

    Matrix entries are int8 base codes at positions differing from the
    consensus (a=97, c=99, g=103, t=116, n=110; 0 = matches consensus).
    NOTE(review): the ``consensus`` parameter is unused here — presumably kept
    for interface symmetry with the matrix-construction code; confirm.
    """
    n_seqs_A = sparse_matrix_A.shape[0]
    n_seqs_B = sparse_matrix_B.shape[0]

    # d: for each (i, j) pair, count positions where both sequences differ
    # from the consensus with the *same* base (these are not distances).
    d = (1*(sparse_matrix_A==97)) * (sparse_matrix_B.transpose()==97)
    d = d + (1*(sparse_matrix_A==99) * (sparse_matrix_B.transpose()==99))
    d = d + (1*(sparse_matrix_A==103) * (sparse_matrix_B.transpose()==103))
    d = d + (1*(sparse_matrix_A==116) * (sparse_matrix_B.transpose()==116))
    d = d.todense()
    # Positions where both sequences carry 'n' — also not a difference.
    n_comp = (1*(sparse_matrix_A==110) * ((sparse_matrix_B==110).transpose())).todense()
    d = d + n_comp

    # temp_total[i, j] = total SNP count of A_i plus total SNP count of B_j.
    temp_total = np.zeros((n_seqs_A, n_seqs_B))
    temp_total[:] = (1*(sparse_matrix_A>0)).sum(1)
    temp_total += (1*(sparse_matrix_B>0)).sum(1).transpose()

    # Positions where both sequences have *some* SNP (shared differing sites).
    total_differences_shared = (1*(sparse_matrix_A>0)) * (sparse_matrix_B.transpose()>0)

    # diff_n corrects for 'n' sites that appear in only one of the pair.
    n_total = np.zeros((n_seqs_A, n_seqs_B))
    n_sum = (1*(sparse_matrix_A==110)).sum(1)
    n_total[:] = n_sum
    n_total += (1*(sparse_matrix_B==110)).sum(1).transpose()
    diff_n = n_total - 2*n_comp

    # Distance = sites where exactly one differs from consensus
    # + shared sites with *different* bases, excluding n-vs-n and n-vs-base.
    d = temp_total - total_differences_shared.todense() - d - diff_n
    return d
null
13,576
import argparse
from augur.io import open_file, read_metadata
import csv
import os
from pathlib import Path
import pandas as pd
import re
import shutil
import sys
from tempfile import NamedTemporaryFile

from utils import extract_tar_file_contents


# Task: implement parse_new_column_names (see docstring below).
def parse_new_column_names(renaming_rules):
    """Parse the mapping of current to new column names from the given list of
    renaming rules.

    Parameters
    ----------
    renaming_rules : list[str]
        A list of strings mapping an old column name to a new one delimited by
        an equal symbol (e.g., "old_column=new_column").

    Returns
    -------
    dict :
        A mapping of new column names for each old column name.

    >>> parse_new_column_names(["old=new", "new=old"])
    {'old': 'new', 'new': 'old'}
    >>> parse_new_column_names(["old->new"])
    {}

    Only the first "=" separates old from new, so new names may contain "=".

    >>> parse_new_column_names(["old=new=name"])
    {'old': 'new=name'}
    """
    new_column_names = {}
    for rule in renaming_rules:
        if "=" in rule:
            # Split on the first "=" only; splitting on every "=" previously
            # raised an unhandled ValueError ("too many values to unpack")
            # for rules containing more than one "=".
            old_column, new_column = rule.split("=", 1)
            new_column_names[old_column] = new_column
        else:
            print(
                f"WARNING: missing mapping of old to new column in form of 'Virus name=strain' for rule: '{rule}'.",
                file=sys.stderr
            )

    return new_column_names
Parse the mapping of current to new column names from the given list of renaming rules. Parameters ---------- renaming_rules : list[str] A list of strings mapping an old column name to a new one delimited by an equal symbol (e.g., "old_column=new_column"). Returns ------- dict : A mapping of new column names for each old column name. >>> parse_new_column_names(["old=new", "new=old"]) {'old': 'new', 'new': 'old'} >>> parse_new_column_names(["old->new"]) {}
13,577
import argparse
from augur.io import open_file, read_metadata
import csv
import os
from pathlib import Path
import pandas as pd
import re
import shutil
import sys
from tempfile import NamedTemporaryFile

from utils import extract_tar_file_contents


# Task: implement parse_location_string (see docstring below).
def parse_location_string(location_string, location_fields):
    """Parse location string from GISAID into the given separate geographic
    scales and return a dictionary of parse values by scale.

    Parameters
    ----------
    location_string : str
    location_fields : list

    Returns
    -------
    dict :
        dictionary of geographic fields parsed from the given string

    >>> location_fields = ["region", "country", "division", "location"]
    >>> parse_location_string("Asia / Japan", location_fields)
    {'region': 'Asia', 'country': 'Japan', 'division': '?', 'location': '?'}
    >>> parse_location_string("Europe / Iceland / Reykjavik", location_fields)
    {'region': 'Europe', 'country': 'Iceland', 'division': 'Reykjavik', 'location': '?'}
    >>> parse_location_string("North America / USA / Washington / King County", location_fields)
    {'region': 'North America', 'country': 'USA', 'division': 'Washington', 'location': 'King County'}

    Additional location entries beyond what has been specified should be
    stripped from output.

    >>> parse_location_string("North America / USA / Washington / King County / Extra field", location_fields)
    {'region': 'North America', 'country': 'USA', 'division': 'Washington', 'location': 'King County'}

    Trailing location delimiters should be stripped from the output.

    >>> parse_location_string("North America / USA / Washington / King County / ", location_fields)
    {'region': 'North America', 'country': 'USA', 'division': 'Washington', 'location': 'King County'}

    Handle inconsistently delimited strings.

    >>> parse_location_string("North America/USA/New York/New York", location_fields)
    {'region': 'North America', 'country': 'USA', 'division': 'New York', 'location': 'New York'}
    >>> parse_location_string("Europe/ Lithuania", location_fields)
    {'region': 'Europe', 'country': 'Lithuania', 'division': '?', 'location': '?'}
    """
    # Split on "/" delimiters, tolerating any amount of surrounding spaces.
    values = re.split(r"[ ]*/[ ]*", location_string)

    # Start every field at the missing-value placeholder, then overwrite with
    # whatever scales the string actually provides; zip silently discards any
    # extra trailing entries beyond the requested fields.
    locations = dict.fromkeys(location_fields, "?")
    locations.update(zip(location_fields, values))
    return locations
Parse location string from GISAID into the given separate geographic scales and return a dictionary of parse values by scale. Parameters ---------- location_string : str location_fields : list Returns ------- dict : dictionary of geographic fields parsed from the given string >>> location_fields = ["region", "country", "division", "location"] >>> parse_location_string("Asia / Japan", location_fields) {'region': 'Asia', 'country': 'Japan', 'division': '?', 'location': '?'} >>> parse_location_string("Europe / Iceland / Reykjavik", location_fields) {'region': 'Europe', 'country': 'Iceland', 'division': 'Reykjavik', 'location': '?'} >>> parse_location_string("North America / USA / Washington / King County", location_fields) {'region': 'North America', 'country': 'USA', 'division': 'Washington', 'location': 'King County'} Additional location entries beyond what has been specified should be stripped from output. >>> parse_location_string("North America / USA / Washington / King County / Extra field", location_fields) {'region': 'North America', 'country': 'USA', 'division': 'Washington', 'location': 'King County'} Trailing location delimiters should be stripped from the output. >>> parse_location_string("North America / USA / Washington / King County / ", location_fields) {'region': 'North America', 'country': 'USA', 'division': 'Washington', 'location': 'King County'} Handle inconsistently delimited strings. >>> parse_location_string("North America/USA/New York/New York", location_fields) {'region': 'North America', 'country': 'USA', 'division': 'New York', 'location': 'New York'} >>> parse_location_string("Europe/ Lithuania", location_fields) {'region': 'Europe', 'country': 'Lithuania', 'division': '?', 'location': '?'}
13,578
import argparse from augur.io import open_file, read_metadata import csv import os from pathlib import Path import pandas as pd import re import shutil import sys from tempfile import NamedTemporaryFile from utils import extract_tar_file_contents class MissingColumnException(Exception): """An exception caused by a missing column that was expected in the metadata. """ pass class DuplicateException(Exception): """An exception caused by the presence of any duplicate metadata records by strain name. """ pass def strip_prefixes(strain_name, prefixes): """Strip the given prefixes from the given strain name. Parameters ---------- strain_name : str Name of a strain to be sanitized prefixes : list[str] A list of prefixes to be stripped from the strain name. Returns ------- str : Strain name without any of the given prefixes. >>> strip_prefixes("hCoV-19/RandomStrain/1/2020", ["hCoV-19/", "SARS-CoV-2/"]) 'RandomStrain/1/2020' >>> strip_prefixes("SARS-CoV-2/RandomStrain/2/2020", ["hCoV-19/", "SARS-CoV-2/"]) 'RandomStrain/2/2020' >>> strip_prefixes("hCoV-19/RandomStrain/1/2020", ["SARS-CoV-2/"]) 'hCoV-19/RandomStrain/1/2020' """ joined_prefixes = "|".join(prefixes) pattern = f"^({joined_prefixes})" return re.sub(pattern, "", strain_name) def sanitize_strain_names(metadata, prefixes_to_strip): """Remove and replace certain characters in strain names. Parameters ---------- metadata : pandas.DataFrame A data frame indexed by strain name. prefixes_to_strip : list[str] A list of prefixes to be stripped from the strain name. """ # Reset the data frame index, to make the "strain" column available # for transformation. strain_field = metadata.index.name metadata = metadata.reset_index() # Strip prefixes from strain names. 
if prefixes_to_strip: metadata[strain_field] = metadata[strain_field].apply( lambda strain: strip_prefixes(strain, prefixes_to_strip) ) # Replace whitespaces from strain names with nothing to match Nextstrain's # convention since whitespaces are not allowed in FASTA record names. metadata[strain_field] = metadata[strain_field].str.replace(" ", "") # Replace standard characters that are not accepted by all downstream # tools as valid FASTA names. metadata[strain_field] = metadata[strain_field].str.replace("'", "-") # Set the index back to the strain column. metadata = metadata.set_index(strain_field) return metadata The provided code snippet includes necessary dependencies for implementing the `get_database_ids_by_strain` function. Write a Python function `def get_database_ids_by_strain(metadata_file, metadata_id_columns, database_id_columns, metadata_chunk_size, error_on_duplicates=False)` to solve the following problem: Get a mapping of all database ids for each strain name. Parameters ---------- metadata_file : str or Path-like or file object Path or file object for a metadata file to process. metadata_id_columns : list[str] A list of potential id columns for strain names in the metadata. database_id_columns : list[str] A list of potential database id columns whose values can be used to deduplicate records with the same strain name. metadata_chunk_size : int Number of records to read into memory at once from the metadata. error_on_duplicates : bool Throw an error when duplicate records are detected. Returns ------- str or Path-like or file object or None : Path or file object containing the mapping of database ids for each strain name (one row per combination). Returns None, if no valid database ids were found and no duplicates exist. Raises ------ DuplicateException : When duplicates are detected and the caller has requested an error on duplicates. MissingColumnException : When none of the requested metadata id columns exist. 
Here is the function: def get_database_ids_by_strain(metadata_file, metadata_id_columns, database_id_columns, metadata_chunk_size, error_on_duplicates=False): """Get a mapping of all database ids for each strain name. Parameters ---------- metadata_file : str or Path-like or file object Path or file object for a metadata file to process. metadata_id_columns : list[str] A list of potential id columns for strain names in the metadata. database_id_columns : list[str] A list of potential database id columns whose values can be used to deduplicate records with the same strain name. metadata_chunk_size : int Number of records to read into memory at once from the metadata. error_on_duplicates : bool Throw an error when duplicate records are detected. Returns ------- str or Path-like or file object or None : Path or file object containing the mapping of database ids for each strain name (one row per combination). Returns None, if no valid database ids were found and no duplicates exist. Raises ------ DuplicateException : When duplicates are detected and the caller has requested an error on duplicates. MissingColumnException : When none of the requested metadata id columns exist. """ try: metadata_reader = read_metadata( metadata_file, id_columns=metadata_id_columns, chunk_size=metadata_chunk_size, ) except Exception as error: # Augur's `read_metadata` function can throw a generic Exception when # the input is missing id columns. This exception is not easily # distinguished from any other error, so we check the contents of the # error message and raise a more specific error for better handling of # unexpected errors. if "None of the possible id columns" in str(error): raise MissingColumnException(str(error)) from error else: raise # Track strains we have observed, so we can alert the caller to duplicate # strains when an error on duplicates has been requested. 
observed_strains = set() duplicate_strains = set() with NamedTemporaryFile(delete=False, mode="wt", encoding="utf-8", newline="") as mapping_file: mapping_path = mapping_file.name header = True for metadata in metadata_reader: metadata = sanitize_strain_names(metadata, args.strip_prefixes) # Check for database id columns. valid_database_id_columns = metadata.columns.intersection( database_id_columns ) if mapping_path and len(valid_database_id_columns) == 0: # Do not write out mapping of ids. Default to error on # duplicates, since we have no way to resolve duplicates # automatically. mapping_path = None error_on_duplicates = True print( "WARNING: Skipping deduplication of metadata records.", f"None of the possible database id columns ({database_id_columns}) were found in the metadata's columns {tuple([metadata.index.name] + metadata.columns.values.tolist())}", file=sys.stderr ) # Track duplicates in memory, as needed. if error_on_duplicates: for strain in metadata.index.values: if strain in observed_strains: duplicate_strains.add(strain) else: observed_strains.add(strain) if mapping_path: # Write mapping of database and strain ids to disk. metadata.loc[:, valid_database_id_columns].to_csv( mapping_file, sep="\t", header=header, index=True, ) header = False # Clean up temporary file. if mapping_path is None: os.unlink(mapping_file.name) if error_on_duplicates and len(duplicate_strains) > 0: duplicates_file = metadata_file + ".duplicates.txt" with open(duplicates_file, "w") as oh: for strain in duplicate_strains: oh.write(f"{strain}\n") raise DuplicateException(f"{len(duplicate_strains)} strains have duplicate records. See '{duplicates_file}' for more details.") return mapping_path
Get a mapping of all database ids for each strain name. Parameters ---------- metadata_file : str or Path-like or file object Path or file object for a metadata file to process. metadata_id_columns : list[str] A list of potential id columns for strain names in the metadata. database_id_columns : list[str] A list of potential database id columns whose values can be used to deduplicate records with the same strain name. metadata_chunk_size : int Number of records to read into memory at once from the metadata. error_on_duplicates : bool Throw an error when duplicate records are detected. Returns ------- str or Path-like or file object or None : Path or file object containing the mapping of database ids for each strain name (one row per combination). Returns None, if no valid database ids were found and no duplicates exist. Raises ------ DuplicateException : When duplicates are detected and the caller has requested an error on duplicates. MissingColumnException : When none of the requested metadata id columns exist.
13,579
import argparse from augur.io import open_file, read_metadata import csv import os from pathlib import Path import pandas as pd import re import shutil import sys from tempfile import NamedTemporaryFile from utils import extract_tar_file_contents The provided code snippet includes necessary dependencies for implementing the `filter_duplicates` function. Write a Python function `def filter_duplicates(metadata, database_ids_by_strain)` to solve the following problem: Filter duplicate records by the strain name in the given data frame index using the given file containing a mapping of strain names to database ids. Database ids allow us to identify duplicate records that need to be excluded. We prefer the latest record for a given strain name across all possible database ids and filter out all other records for that same strain name. Parameters ---------- metadata : pandas.DataFrame A data frame indexed by strain name. database_ids_by_strain : str or Path-like or file object Path or file object containing the mapping of database ids for each strain name (one row per combination). Returns ------- pandas.DataFrame : A filtered data frame with no duplicate records. Here is the function: def filter_duplicates(metadata, database_ids_by_strain): """Filter duplicate records by the strain name in the given data frame index using the given file containing a mapping of strain names to database ids. Database ids allow us to identify duplicate records that need to be excluded. We prefer the latest record for a given strain name across all possible database ids and filter out all other records for that same strain name. Parameters ---------- metadata : pandas.DataFrame A data frame indexed by strain name. database_ids_by_strain : str or Path-like or file object Path or file object containing the mapping of database ids for each strain name (one row per combination). Returns ------- pandas.DataFrame : A filtered data frame with no duplicate records. 
""" # Get strain names for the given metadata. strain_ids = set(metadata.index.values) # Get the mappings of database ids to strain names for the current strains. with open(database_ids_by_strain, "r", encoding="utf-8", newline="") as fh: reader = csv.DictReader(fh, delimiter="\t") # The mapping file stores the strain name in the first column. All other # fields are database ids. strain_field = reader.fieldnames[0] database_id_columns = reader.fieldnames[1:] # Keep only records matching the current strain ids. mappings = pd.DataFrame([ row for row in reader if row[strain_field] in strain_ids ]) # Check for duplicate strains in the given metadata or strains that do not # have any mappings. If there are none, return the metadata as it is. If # duplicates or strains without mappings exist, filter them out. if any(mappings.duplicated(strain_field)) or len(strain_ids) != mappings.shape[0]: # Create a list of database ids of records to keep. To this end, we sort by # database ids in descending order such that the latest record appears # first, then we take the first record for each strain name. records_to_keep = mappings.sort_values( database_id_columns, ascending=False ).groupby(strain_field).first() # Select metadata corresponding to database ids to keep. Database ids # may not be unique for different strains (e.g., "?"), so we need to # merge on strain name and database ids. Additionally, the same strain # may appear multiple times in the metadata with the same id. These # accidental duplicates will also produce a merge with records to keep # that is not a one-to-one merge. To handle this case, we need to drop # any remaining duplicate records by strain name. The order that we # resolve these duplicates does not matter, since the fields we would # use to resolve these contain identical values. 
merge_columns = sorted(set([strain_field]) | set(database_id_columns)) metadata = metadata.reset_index().merge( records_to_keep, on=merge_columns, ).drop_duplicates(subset=strain_field).set_index(strain_field) # Track strains that we've processed and drop these from the mappings file. # In this way, we can track strains that have been processed across multiple # chunks of metadata and avoid emiting duplicates that appear in different # chunks. with open(database_ids_by_strain, "r", encoding="utf-8", newline="") as fh: reader = csv.DictReader(fh, delimiter="\t") with NamedTemporaryFile(delete=False, mode="wt", encoding="utf-8", newline="") as new_mapping_file: new_mapping_path = new_mapping_file.name writer = csv.DictWriter( new_mapping_file, fieldnames=reader.fieldnames, delimiter="\t", lineterminator="\n", ) writer.writeheader() for row in reader: if row[strain_field] not in strain_ids: writer.writerow(row) # After writing out the new mapping of ids without strains we just # processed, copy the new mapping over the original file and delete the # temporary new mapping file. shutil.copyfile( new_mapping_path, database_ids_by_strain, ) os.unlink(new_mapping_path) return metadata
Filter duplicate records by the strain name in the given data frame index using the given file containing a mapping of strain names to database ids. Database ids allow us to identify duplicate records that need to be excluded. We prefer the latest record for a given strain name across all possible database ids and filter out all other records for that same strain name. Parameters ---------- metadata : pandas.DataFrame A data frame indexed by strain name. database_ids_by_strain : str or Path-like or file object Path or file object containing the mapping of database ids for each strain name (one row per combination). Returns ------- pandas.DataFrame : A filtered data frame with no duplicate records.
13,580
import argparse import json from Bio import Phylo, SeqIO from Bio.Align import MultipleSeqAlignment from treetime import TreeAnc from augur.utils import load_features def annotation_json(features, reference): annotations = {} for fname, feat in features.items(): annotations[fname] = {'seqid':reference.id, 'type':feat.type, 'start':int(feat.location.start)+1, 'end':int(feat.location.end), 'strand': '+' if feat.location.strand else '-'} annotations['nuc'] = {'seqid':reference.id, 'type':'source', 'start': 1, 'end': len(reference), 'strand': '+'} return annotations
null
13,581
from os import listdir from pathlib import Path import argparse from collections import defaultdict def cut(s): key = s.split(":")[0] content = ":".join(s.split(":")[1:])[1:] return (key, content) def read_data(path): additional_info = {} for file in sorted(listdir(path)): if file == '.DS_Store': continue id = "" if file.startswith("additional-info-changes"): with open(path + file) as f: data = f.readlines() added = False #only consider newly added additional info removed = False for i in range(len(data)): k = data[i].strip("\n") if k.endswith("info added"): added = True removed = False if k.endswith("info changed"): #skip changed info added = False removed = False if k.endswith("info removed"): #consider removed info added = False removed = True if ":" in k: if added or removed: (key, content) = cut(k) key = key.strip() if key == "gisaid_epi_isl": id = content if added: if id in additional_info: print("WARNING: additional info added two times for same strain! (" + id + ")") additional_info[id] = {} else: if added: additional_info[id][key] = content if removed: if id in additional_info: if key in additional_info[id]: additional_info[id].pop(key) if additional_info[id] == {}: additional_info.pop(id) return additional_info
null
13,582
from os import listdir from pathlib import Path import argparse from collections import defaultdict def bold(s): return('\033[1m' + s + '\033[0m') def read_simple_file(name): with open(name) as myfile: data_file = myfile.readlines() return [l.strip() for l in data_file] def read_dict(name): with open(name) as myfile: data_file = myfile.readlines() return {l.split("\t")[0]:l.split("\t")[1].strip() for l in data_file} def add_to_simple_file(file_name, line): with open(file_name) as myfile: data_file = myfile.readlines() if not data_file[-1].endswith("\n"): line = "\n" + line with open(file_name, "a") as myfile: myfile.write(line + "\n") def read_metadata(file_name, additional_info): with open(file_name) as myfile: data_file = myfile.readlines() metadata = {} header = data_file[0].strip().split("\t") for line in data_file[1:]: l = line.strip().split("\t") id = l[2] if id in additional_info: metadata[id] = {} for i in range(len(l)): type = header[i] if type == "gisaid_epi_isl": continue metadata[id][type] = l[i] for id in additional_info: if id not in metadata: print(bold("WARNING: " + id + " missing from metadata! 
Please download most recent metadata before running this script.")) return None return metadata def read_ordering_file(file_name): with open(file_name) as myfile: data_file = myfile.readlines() data = {"Asia": {}, "Oceania": {}, "Africa": {}, "Europe": {}, "South America": {}, "North America": {}} region = "" country = "" division = "" for line in data_file: if line == "\n": continue if line.startswith("###"): if len(line.split("### ")) > 1: #country place = line.strip().split("### ")[1] country = place if country not in data[region]: data[region][country] = {} else: if line.startswith("#"): if len(line.split("# ")) > 1: #region or division place = line.strip().split("# ")[1] if place in data: region = place else: division = place if division not in data[region][country]: data[region][country][division] = [] else: l = line.strip().split("\t") type = l[0] #location, division etc place = l[1] if type == "division": division = place if division not in data[region][country]: data[region][country][division] = [] if type == "location": location = place if location not in data[region][country][division]: data[region][country][division].append(location) return data def rearrange_additional_info(additional_info): sorted_info = {} for id in additional_info: info_found = 0 for key in additional_info[id]: #Special case: if additional_info[id]["additional_location_info"] == "Migrants ship" and "additional_host_info" in additional_info[id] and additional_info[id]["additional_host_info"] != "": content = additional_info[id]["additional_location_info"] + " " + additional_info[id]["additional_host_info"] print("Merge two additional info (host and location) to " + bold(content)) if content not in sorted_info: sorted_info[content] = [] sorted_info[content].append((id, additional_info[id]["strain"])) elif key == "additional_host_info" or key == "additional_location_info": content = additional_info[id][key] if content == "" or content == " ": continue info_found += 1 access = (content, 
key) if access not in sorted_info: sorted_info[access] = [] sorted_info[access].append((id, additional_info[id]["strain"])) if info_found > 1: if additional_info[id]["additional_location_info"] != additional_info[id]["additional_host_info"]: print("Warning: " + id + " has more than one relevant info (\"" + additional_info[id]["additional_host_info"] + "\" and \"" + additional_info[id]["additional_location_info"] + "\"). Possible conflict!") return sorted_info def find_place_in_ordering(place, ordering, variants): if place == "Unknown": return None place = apply_variant(place, variants) for region in ordering: for country in ordering[region]: for division in ordering[region][country]: for location in ordering[region][country][division]: if break_down(place) == break_down(location): return (region, country, division, location) for region in ordering: for country in ordering[region]: for division in ordering[region][country]: if break_down(place) == break_down(division): return (region, country, division, "") for region in ordering: for country in ordering[region]: if break_down(place) == break_down(country): # If given name is country, return as division also, leave location empty. 
(Useful for travel exposure) return (region, country, country, "") for region in ordering: if break_down(place) == break_down(region): return (region, region, region, "") return None def create_annontation(id, strain, new, old, travel, info, annotations_append, prev = True): (region, country, division, location) = new (region_original, country_original, division_original, location_original) = old info_str = " (" + info + ")" t = "" if travel: t = "_exposure" if region_original != region: p = "" if prev: p = " # previously " + region_original + info_str annotations_append.append(strain + "\t" + id + "\t" + "region" + t + "\t" + region + p) info_str = "" print("Adjust region" + t + " " + region_original + " to " + region + " for sequence " + strain) if country_original != country: p = "" if prev: p = " # previously " + country_original + info_str annotations_append.append(strain + "\t" + id + "\t" + "country" + t + "\t" + country + p) info_str = "" print("Adjust country" + t + " " + country_original + " to " + country + " for sequence " + strain) if division_original != division: p = "" if prev: p = " # previously " + division_original + info_str annotations_append.append(strain + "\t" + id + "\t" + "division" + t + "\t" + division + p) print("Adjust division" + t + " " + division_original + " to " + division + " for sequence " + strain) info_str = "" if location != None and location_original != location: p = "" if prev: p = " # previously " + location_original + info_str annotations_append.append(strain + "\t" + id + "\t" + "location" + t + "\t" + location + p) print("Adjust location" + t + " " + location_original + " to " + location + " for sequence " + strain) info_str = "" if info_str != "": print("No adjustments necessary for sequence " + strain) return annotations_append def check_additional_location(info, strain_list, location_pattern, ordering, metadata, annotations_append, variants, auto): place = find_longest_pattern(info, location_pattern) if place != 
None: return annotations_append, True else: return annotations_append, False if info.endswith(" (interpreted as patient residence)"): place = info.split(" (interpreted as patient residence)")[0] if place == None: # No pattern found - try whether found as location anyway pattern_found = False place = info print("Testing for single location name...") ordering_result = find_place_in_ordering(place, ordering, variants) if ordering_result == None: if auto: return annotations_append, True print("Location not found. Returning to manual processing...\n") return annotations_append, False else: pattern_found = True print("Known " + bold("patient residence") + " pattern found. Extracted location(s): " + bold(place)) place = "".join(place.split("_h")) place = " ".join(place.split("_")) place = adjust_caps(place) place_interpretation = interpret_location(place, ordering, variants, pattern_found) if place_interpretation is None and auto: return annotations_append, True while place_interpretation == None: s = "Could not identify " + bold(place) + ". You have the following options:" s += "\n- Enter different name (e.g. in case of typo) for repeated search" s += "\n- Enter desired region/country/division/location in this exact format" s += "\n- Leave answer empty to return to manual processing" s += "\nType your input here: " answer = input(s) if answer == "": print("Return to manual processing...\n") return annotations_append, False place = answer place_interpretation = interpret_location(place, ordering, variants, pattern_found) (region, country, division, location) = place_interpretation if region == "" and country == "" and division == "": if auto: return annotations_append, True if pattern_found: print("Location not found in ordering and no additional geography given. 
Assume new location " + bold(location)) regions = [] countries = [] divisions = [] locations = [] for (id, strain) in strain_list: if metadata[id]["region"] not in regions: regions.append(metadata[id]["region"]) if metadata[id]["country"] not in countries: countries.append(metadata[id]["country"]) if metadata[id]["division"] not in divisions: divisions.append(metadata[id]["division"]) if metadata[id]["location"] not in locations: locations.append(metadata[id]["location"]) if len(regions) == 1 and len(countries) == 1 and len(divisions) == 1: answer = input("Suggested pattern (please double check!): " + bold(metadata[id]["region"] + ", " + metadata[id]["country"] + ", " + metadata[id]["division"] + ", " + place) + ". Leave empty to approve, otherwise press any key to return to manual processing: ") if answer != "": print("Return to manual processing...\n") return annotations_append, False else: region = metadata[id]["region"] country = metadata[id]["country"] division = metadata[id]["division"] location = place for (id, strain) in strain_list: new = (region, country, division, location) old = (metadata[id]["region"], metadata[id]["country"], metadata[id]["division"], metadata[id]["location"]) create_annontation(id, strain, new, old, False, info, annotations_append) return annotations_append, True else: print("Could not correctly interpret " + info + " (unknown description " + place + "). No suggestions possible (several countries/divisions with this info). Returning to manual processing...\n") return annotations_append, False else: answer = "" if auto else input("Interpreted " + bold(info) + " as " + bold(region + "/" + country + "/" + division + "/" + location) + ". 
Press enter to approve, press any other key to return to manual processing: ") if answer != "": print("Return to manual processing...\n") return annotations_append, False for (id, strain) in strain_list: new = (region, country, division, location) old = (metadata[id]["region"], metadata[id]["country"], metadata[id]["division"], metadata[id]["location"]) create_annontation(id, strain, new, old, False, info, annotations_append) return annotations_append, True return annotations_append, False def check_travel_history(info, strain_list, travel_pattern, ordering, metadata, annotations_append, variants, auto): place = find_longest_pattern(info, travel_pattern) if info.endswith(" (interpreted as travel exposure)"): place = info.split(" (interpreted as travel exposure)")[0] if place != None: print("Known " + bold("travel exposure") + " pattern found. Extracted location(s): " + bold(place)) if "," in place: places = [p.strip() for p in place.split(",")] print("Several location names found. Process each individually: " + bold("[" + ", ".join(places) + "]")) elif " and " in place: places = [p.strip() for p in place.split(" and ")] print("Several location names found. Process each individually: " + bold("[" + ", ".join(places) + "]")) else: places = [place] results = {"region_exposure": [], "country_exposure": [], "division_exposure": []} # In case several travels are listed, check all of them and find overlaps for place in places: if "/" in place: place = place.split("/")[-1].strip() ordering_result = find_place_in_ordering(place, ordering, variants) if ordering_result is None and auto: return annotations_append, True while ordering_result == None: s = "Could not identify " + bold(place) + ". You have the following options:" s += "\n- Enter different name (e.g. 
in case of typo) for repeated search" s += "\n- Enter desired region/country/division in this exact format" s += "\n- Leave answer empty to return to manual processing" s += "\nType your input here: " answer = input(s) if answer == "": print("Return to manual processing...\n") return annotations_append, False if answer.count("/") == 2: ordering_result = (answer.split("/")[0], answer.split("/")[1], answer.split("/")[2], "") else: place = answer ordering_result = find_place_in_ordering(place, ordering, variants) (region_exposure, country_exposure, division_exposure, location_exposure) = ordering_result if region_exposure not in results["region_exposure"]: results["region_exposure"].append(region_exposure) if country_exposure not in results["country_exposure"]: results["country_exposure"].append(country_exposure) if division_exposure not in results["division_exposure"]: results["division_exposure"].append(division_exposure) if len(results["region_exposure"]) > 1: answer = "" if auto else input("Contains exposures from different regions. No annotation possible. Press " + bold("ENTER") + " to skip this info, otherwise press any key to return to manual processing: ") if answer == "": return annotations_append, True else: print("Return to manual processing...\n") return annotations_append, False final_region = results["region_exposure"][0] if len(results["country_exposure"]) > 1: final_country = final_region else: final_country = results["country_exposure"][0] if len(results["division_exposure"]) > 1: final_division = final_country else: final_division = results["division_exposure"][0] answer = "" if auto else input("Interpreted as " + bold(final_region + ", " + final_country + ", " + final_division) + ". 
Press " + bold("ENTER") + " to approve, otherwise press any key to return to manual processing: ") if answer != "": print("Return to manual processing...\n") return annotations_append, False for (id, strain) in strain_list: new = (final_region, final_country, final_division, None) old = (metadata[id]["region"], metadata[id]["country"], metadata[id]["division"], metadata[id]["location"]) create_annontation(id, strain, new, old, True, info, annotations_append) return annotations_append, True return annotations_append, False path_to_nextstrain = "../" path_to_outputs = "scripts/curate_metadata/outputs_new_sequences/" def check_additional_info(additional_info, path_to_config_files, auto): metadata = read_metadata(path_to_nextstrain + "ncov/data/downloaded_gisaid.tsv", additional_info) if metadata == None: return [] ordering = read_ordering_file(path_to_nextstrain + "ncov/defaults/color_ordering.tsv") variants = read_dict(path_to_config_files + "variants.txt") sorted_info = rearrange_additional_info(additional_info) # Collected patterns info_ignore = read_simple_file(path_to_config_files + "info_ignore.txt") location_pattern = read_simple_file(path_to_config_files + "location_pattern.txt") travel_pattern = read_simple_file(path_to_config_files + "travel_pattern.txt") purpose_of_sequencing = read_dict(path_to_config_files + "purpose_of_sequencing.txt") print("\n##########################################\n") annotations_append = [] for key in sorted_info: (info, info_type) = key strain_list = sorted_info[key] info_found = False if info.startswith("Other: "): info = info[7:] print("Treating \"Other: " + info + "\" as \"" + info + "\"") if info.startswith("other: "): info = info[7:] print("Treating \"other: " + info + "\" as \"" + info + "\"") print("Processing " + bold(info) + ":") while True: auto_comments = ["travel surveillance"] # Special case: if (info.startswith("Resident of ") or info.startswith("resident of ")) and " tested in " in info: if 
info.startswith("Resident of "): division = ((info.split(" tested in ")[0].strip(",")).strip(";")).split("Resident of ")[1].strip() if info.startswith("resident of "): division = ((info.split(" tested in ")[0].strip(",")).strip(";")).split("resident of ")[1].strip() division_exposure = info.split(" tested in ")[1].strip() ordering_result = find_place_in_ordering(division, ordering, variants) if ordering_result != None: (region, country, division, location) = ordering_result ordering_result = find_place_in_ordering(division_exposure, ordering, variants) if ordering_result != None: (region_exposure, country_exposure, division_exposure, location) = ordering_result answer = "" if auto else input("Interpret " + bold(info) + " as special case with division " + bold(division) + " and exposure " + bold(division_exposure) + ". Leave empty to approve, or press any key to continue with manual processing: ") if answer == "": for (id, strain) in strain_list: new_residence = (region, country, division, None) new_exposure = (region_exposure, country_exposure, division_exposure, None) old = (metadata[id]["region"], metadata[id]["country"], metadata[id]["division"],metadata[id]["location"]) create_annontation(id, strain, new_residence, old, False, info, annotations_append) # Change residence create_annontation(id, strain, new_exposure, new_residence, True, info, annotations_append, prev = False) # Change exposure right back info_found = True if info_found: break if info in purpose_of_sequencing: answer = "" if auto else input("Identified as \"purpose_of_sequencing\". Press " + bold("ENTER") + " to approve, otherwise press any key: ") if answer == "": for (id, strain) in strain_list: annotations_append.append(strain + "\t" + id + "\t" + "sampling_strategy" + "\t" + purpose_of_sequencing[info] + " # " + info) print("purpose_of_sequencing info added as annotation for strain " + id) break if info in info_ignore: answer = "" if auto else input("Identified as \"Ignore\". 
Press " + bold("ENTER") + " to approve, otherwise press any key: ") if answer == "": break annotations_append, info_found = check_travel_history(info, strain_list, travel_pattern, ordering, metadata, annotations_append, variants, auto) if info_found: break annotations_append, info_found = check_additional_location(info, strain_list, location_pattern, ordering, metadata, annotations_append, variants, auto) if info_found or auto: print("Location pattern found. Skipping entry...") break s = bold(info) + " did not contain known pattern or could not be interpreted. You have the following options:" s += "\n" + bold("l") + " - force interpretation as " + bold("patient residence") s += "\n" + bold("t") + " - force interpretation as " + bold("travel exposure") s += "\n" + bold("i") + " - add info to " + bold("ignore") s += "\n" + bold("a") + " - add to annotations as a " + bold("comment") s += "\n" + bold("nl") + " - add new " + bold("patient residence") + " pattern" s += "\n" + bold("nt") + " - add new " + bold("travel exposure") + " pattern" s += "\n" + bold("ns") + " - add new " + bold("sequencing purpose") + " pattern" s += "\n" + bold("s") + " - " + bold("skip") + " this additional info" s += "\n" + bold("r") + " - " + bold("repeat") + " as before" s += "\nType input here: " answer = input(s) if answer == "a": for (id, strain) in strain_list: annotations_append.append("# " + strain + "\t" + id + "\t" + info_type + ": " + info) print("Add comment for " + id) break if answer == "i": add_to_simple_file(path_to_config_files + "info_ignore.txt", info) break elif answer == "l": print("Process " + bold(info) + " now as " + bold(info + " (interpreted as patient residence)")) info = info + " (interpreted as patient residence)" elif answer == "t": print("Process " + bold(info) + " now as " + bold(info + " (interpreted as travel exposure)")) info = info + " (interpreted as travel exposure)" elif answer == "nl": pattern = input("Type pattern here (don't forget XXX as placeholder): 
") add_to_simple_file(path_to_config_files + "location_pattern.txt", pattern) location_pattern.append(pattern) elif answer == "nt": pattern = input("Type pattern here (don't forget XXX as placeholder): ") add_to_simple_file(path_to_config_files + "travel_pattern.txt", pattern) travel_pattern.append(pattern) elif answer == "ns": pattern1 = input("Type additional info pattern here: ") pattern2 = input("Type desired metadata entry here: ") add_to_simple_file(path_to_config_files + "purpose_of_sequencing.txt", pattern1 + "\t" + pattern2) purpose_of_sequencing[pattern1] = pattern2 elif answer == "s": break print("\n") print("\n-------\n") with open(path_to_outputs + "omicron_additional_info.txt", "w") as out: for key in sorted_info: (info, info_type) = key strain_list = sorted_info[key] for (id, strain) in strain_list: if metadata[id]["Nextstrain_clade"] == "21K (Omicron)": out.write(id + ": " + info + "\n") return annotations_append
null
13,583
import sys
import datetime
import pandas as pd
from pathlib import Path
import os


def bold(s):
    """Wrap *s* in ANSI escape codes so it prints in bold on a terminal."""
    return '\033[1m' + s + '\033[0m'


def read_excel_lab_file(table_file_name):
    """Load the lab-to-twitter-handle spreadsheet.

    Returns a nested dict ``country -> {lab description (lowercased) -> handle}``,
    or ``None`` if the file does not exist. Empty cells become the string
    ``"empty?"``. A warning is printed when the same description appears twice
    for one country (the later row wins).
    """
    if not os.path.exists(table_file_name):
        print(bold("Missing input file: " + table_file_name))
        return None
    table = pd.read_excel(table_file_name, index_col=0, skiprows=1).fillna("empty?")
    lab_dictionary = {}
    for country, row in table.iterrows():
        description = row["Who"]
        handle = row["Who to tag"]
        per_country = lab_dictionary.setdefault(country, {})
        # Note: the duplicate check uses the raw description against lowered keys,
        # exactly as before.
        if description in per_country:
            print("Warning: lab description is found two times in excel table in same country (" + str(country) + ", " + str(description) + ")")
        per_country[str(description).lower()] = handle
    return lab_dictionary
null
13,584
import sys
import datetime
import pandas as pd
from pathlib import Path
import os

def read_metadata(filename, date_g, tweet):
    """Scan a GISAID metadata TSV and collect submitting labs per region/country.

    filename -- path to the tab-separated metadata file
    date_g   -- month of interest as an ISO date prefix ("YYYY-MM-...")
    tweet    -- if True, also collect labs/countries from months BEFORE the
                month of interest (used to detect first-time submitters)

    Returns (labs, labs_old, new_countries, new_seqs_count):
      labs           -- region -> country -> {"submitting_lab"/"originating_lab"/"authors": [...]}
                        for the month of interest
      labs_old       -- same structure for earlier months (only filled when tweet=True)
      new_countries  -- region -> [countries] present in labs but never seen before
      new_seqs_count -- number of sequences submitted in the month of interest
    """
    # UK divisions are handled via a single consortium handle elsewhere, so
    # individual UK labs are not collected below.
    uk_divisions = ["England", "Wales", "Northern Ireland", "Scotland", "United Kingdom"]
    year_g = date_g[:4]
    month_g = date_g[5:7]
    # Compute the two months following the month of interest (with year
    # roll-over); submissions in those months are excluded from the "old"
    # collection, in case the tweet is sent late.
    if month_g == "12":
        year_gplus = str(int(year_g) + 1)
        year_gplus2 = str(int(year_g) + 1)
        month_gplus = "01"
        month_gplus2 = "02"
    elif month_g == "11":
        year_gplus = year_g
        month_gplus = str(int(month_g) + 1)
        year_gplus2 = str(int(year_g) + 1)
        month_gplus2 = "01"
    else:
        year_gplus = year_g
        month_gplus = str(int(month_g) + 1)
        year_gplus2 = year_g
        month_gplus2 = str(int(month_g) + 2)
    # Zero-pad the computed months back to two digits so they compare
    # correctly against the "MM" slices taken from dates.
    if len(month_gplus) == 1:
        month_gplus = "0" + month_gplus
    if len(month_gplus2) == 1:
        month_gplus2 = "0" + month_gplus2
    countries_old = {"Africa": [], "Asia": [], "Europe": [], "North America": [], "Oceania": [], "South America": []}
    countries = {"Africa": [], "Asia": [], "Europe": [], "North America": [], "Oceania": [], "South America": []}
    labs_old = {"Africa": {}, "Asia": {}, "Europe": {}, "North America": {}, "Oceania": {}, "South America": {}}
    labs = {"Africa": {}, "Asia": {}, "Europe": {}, "North America": {}, "Oceania": {}, "South America": {}}
    new_seqs_count = 0
    new_seqs_count_regions = {"Africa": 0, "Asia": 0, "Europe": 0, "North America": 0, "Oceania": 0, "South America": 0}
    with open(filename) as f:
        header = f.readline().strip().split("\t")
        country_i = header.index("country")
        region_i = header.index("region")
        division_i = header.index("division")
        subm_date_i = header.index("date_submitted")
        sampl_date_i = header.index("date")
        subm_lab_i = header.index("submitting_lab")
        orig_lab_i = header.index("originating_lab")
        author_i = header.index("authors")
        # NOTE(review): the "clock deviation" index points at the rare_mutations
        # column -- confirm this is intentional.
        clock_deviation_i = header.index("rare_mutations")
        pango_lineage_i = header.index("pango_lineage")
        sra_accession_i = header.index("sra_accession")
        strain_i = header.index("strain")
        gisaid_epi_isl_i = header.index("gisaid_epi_isl")
        genbank_accession_i = header.index("genbank_accession")
        line = f.readline()
        while line:
            l = line.split("\t")
            country = l[country_i]
            region = l[region_i]
            division = l[division_i]
            lab = l[subm_lab_i]
            orig_lab = l[orig_lab_i]
            author = l[author_i]
            date = l[subm_date_i]
            # Special case: presumably strips a leading "1. "-style numbering
            # prefix from this one consortium name -- TODO confirm.
            if lab[5:] == "Tricity SARS-CoV-2 sequencing consortium: University of Gdansk, Medical University of Gdansk, Vaxican Ltd., Invicta Ltd. 2. National Institute of Public Health - National Institute of Hygiene, Warsaw, Poland":
                lab = lab[5:]
            # Skip all entries with invalid dates
            if len(l[sampl_date_i]) != 10:
                line = f.readline()
                continue
            year = date[:4]
            month = date[5:7]
            # Collect all labs and countries from the specified month
            if year == year_g and month == month_g:
                new_seqs_count += 1
                new_seqs_count_regions[region] += 1
                if country not in countries[region]:
                    countries[region].append(country)
                    labs[region][country] = {"submitting_lab": [], "originating_lab": [], "authors": []}
                if country != "United Kingdom" or division not in uk_divisions:  # Skip all UK entries that are not overseas territories
                    if lab == "?":
                        # Since only submitting labs are considered for the tweet, replace unknown submitting labs with originating labs or author
                        if not orig_lab == "?":
                            if orig_lab not in labs[region][country]["submitting_lab"]:
                                labs[region][country]["submitting_lab"].append(orig_lab)
                        else:
                            if author not in labs[region][country]["submitting_lab"]:
                                labs[region][country]["submitting_lab"].append(author)
                    else:
                        if lab not in labs[region][country]["submitting_lab"]:
                            labs[region][country]["submitting_lab"].append(lab)
                    if orig_lab not in labs[region][country]["originating_lab"]:
                        labs[region][country]["originating_lab"].append(orig_lab)
                    if author not in labs[region][country]["authors"]:
                        labs[region][country]["authors"].append(author)
            else:
                if tweet:
                    # Also check for next month in case we're late with tweeting
                    if not (month == month_gplus and year == year_gplus) and not (month == month_gplus2 and year == year_gplus2):
                        # Collect all old labs and countries
                        if country not in countries_old[region]:
                            countries_old[region].append(country)
                            labs_old[region][country] = {"submitting_lab": [], "originating_lab": [], "authors": []}
                        if country != "United Kingdom" or division not in uk_divisions:
                            if lab not in labs_old[region][country]["submitting_lab"]:
                                labs_old[region][country]["submitting_lab"].append(lab)
                            if orig_lab not in labs_old[region][country]["originating_lab"]:
                                labs_old[region][country]["originating_lab"].append(orig_lab)
                            if author not in labs_old[region][country]["authors"]:
                                labs_old[region][country]["authors"].append(author)
            line = f.readline()
    print(new_seqs_count_regions)
    # Countries present this month but never seen in any earlier month.
    new_countries = {}
    for region in countries:
        for country in countries[region]:
            if country not in countries_old[region]:
                if region not in new_countries:
                    new_countries[region] = []
                new_countries[region].append(country)
    return labs, labs_old, new_countries, new_seqs_count
null
13,585
import sys
import datetime
import pandas as pd
from pathlib import Path
import os


def collect_labs(labs, lab_dictionary, old):
    """Map each submitting lab to its twitter handle.

    labs           -- region -> country -> {"submitting_lab": [...], ...}
                      (only "submitting_lab" is consulted so far)
    lab_dictionary -- country -> {lab description (lowercased) -> handle}
    old            -- if True, drop labs without a known handle instead of
                      keeping them with a "?" placeholder

    Returns region -> country -> {lab: handle}, with countries that ended up
    empty removed entirely.

    Fixes applied: removed a large block of commented-out dead code (it
    referenced an undefined name ``data``) and the redundant re-test of the
    complementary "unknown lab" condition after the "known lab" branch.
    """
    lab_collection = {}
    for region in labs:
        lab_collection[region] = {}
        for country in sorted(labs[region]):
            lab_collection[region][country] = {}
            # Only consider submitting labs so far.
            for lab in labs[region][country]["submitting_lab"]:
                if country in lab_dictionary and lab.lower() in lab_dictionary[country]:
                    # Known lab: record its handle(s).
                    lab_collection[region][country][lab] = lab_dictionary[country][lab.lower()]
                elif not old:
                    # Unknown labs are kept (with a "?" placeholder) only for
                    # the current month's collection.
                    lab_collection[region][country][lab] = "?"
    # Drop countries that ended up without any labs.
    lab_collection_clean = {}
    for region in lab_collection:
        for country in lab_collection[region]:
            if lab_collection[region][country]:
                lab_collection_clean.setdefault(region, {})[country] = lab_collection[region][country]
    return lab_collection_clean
null
13,586
import sys
import datetime
import pandas as pd
from pathlib import Path
import os

path_to_outputs = "scripts/curate_metadata/outputs_new_sequences/"


def print_labs(lab_collection, data):
    """Write every lab and its twitter handle to a per-dataset text file.

    lab_collection -- region -> country -> {lab: handle}
    data           -- dataset label used in the output file name

    Labs whose handle is still "?" are additionally echoed to stdout so they
    can be filled into the spreadsheet.
    """
    output_file = path_to_outputs + "twitter_handles_" + data + ".txt"
    with open(output_file, "w") as out:
        for region in lab_collection:
            for country, country_labs in lab_collection[region].items():
                out.write(country + "\n")
                unresolved = country + ":\n"
                for lab, handle in country_labs.items():
                    out.write(lab + ": " + handle + "\n")
                    if handle == "?":
                        unresolved += lab + ": ?\n"
                # Only print the country block if at least one handle is unknown.
                if unresolved != country + ":\n":
                    print(unresolved)
                out.write("\n")
    print("All labs and handles written out to " + output_file)
null
13,587
import sys
import datetime
import pandas as pd
from pathlib import Path
import os

path_to_outputs = "scripts/curate_metadata/outputs_new_sequences/"

def generate_tweet(new_seqs_count, lab_collection, lab_collection_old, new_countries, data):
    """Assemble the monthly thank-you tweet thread and write it to a file.

    new_seqs_count     -- total number of new sequences this month
    lab_collection     -- region -> country -> {lab: handle} for this month
    lab_collection_old -- same structure for earlier months (known submitters)
    new_countries      -- region -> [countries] submitting for the first time
    data               -- dataset label used in the output file name

    Output: <path_to_outputs><data>_tweet.txt, one numbered tweet per block.
    """
    # Handles already thanked in earlier months -- these are skipped below.
    known_handles = []
    for region in lab_collection_old:
        for country in lab_collection_old[region]:
            for lab in lab_collection_old[region][country]:
                # NOTE(review): this compares the lab *name* against a list of
                # *handles*; presumably it should test the handle -- confirm.
                if lab not in known_handles:
                    for handle in lab_collection_old[region][country][lab].split(", "):
                        known_handles.append(handle)
    tweet = []
    # Maximum characters packed into one tweet of the thread.
    char_total = 260
    links = {
        "Africa": "nextstrain.org/ncov/gisaid/africa",
        "Asia": "nextstrain.org/ncov/gisaid/asia",
        "Europe": "nextstrain.org/ncov/gisaid/europe",
        "North America": "nextstrain.org/ncov/gisaid/north-america",
        "Oceania": "nextstrain.org/ncov/gisaid/oceania",
        "South America": "nextstrain.org/ncov/gisaid/south-america"
    }
    tweet.append("Thanks to #opendata sharing via @GISAID, we've updated nextstrain.org/ncov/gisaid with " + str(new_seqs_count) + " new #COVID19 #SARSCoV2 sequences during the last month!")
    if len(new_countries) > 0:
        # Second tweet: welcome countries that submitted for the first time.
        countries = [country for region in new_countries for country in new_countries[region]]
        # NOTE(review): one link per *region* vs one country name per *country*;
        # with several new countries in one region the two lists diverge -- confirm.
        countries_links = [links[region] for region in new_countries]
        if len(countries) > 1:
            c = ", ".join(countries[:-1]) + " and " + countries[-1]
            l = ", ".join(countries_links[:-1]) + " and " + countries_links[-1]
        else:
            c = countries[0]
            l = countries_links[0]
        tweet.append("We have received our first sequences from " + c + ". Check them out on " + l + "!")
    # create simple list of all labs without duplicates
    labs = []
    for region in lab_collection:
        for country in lab_collection[region]:
            for lab in lab_collection[region][country]:
                for handle in lab_collection[region][country][lab].split(", "):
                    if handle == "?":
                        # Unknown handles become a visible placeholder (kept
                        # even if duplicated).
                        labs.append("???")
                    else:
                        if handle not in labs and handle not in known_handles:
                            labs.append(handle)
    # Pack the handles into tweets of at most char_total characters each.
    # NOTE(review): raises IndexError when labs is empty -- confirm that at
    # least one new submitter is always present.
    t = "Thanks to all new submitters:\n\n" + labs[0]
    for i in range(1,len(labs)):
        if len(t) + len(labs[i]) <= char_total:
            t += ", " + labs[i]
        else:
            tweet.append(t)
            t = labs[i]
    tweet.append(t)
    # Write the thread, numbering each tweet "i/total".
    with open(path_to_outputs + data + "_tweet.txt", "w") as out:
        for i, t in enumerate(tweet):
            out.write(t + "\n\n" + str(i+1) + "/" + str(len(tweet)) + "\n\n\n")
null
13,588
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime


def cut(s):
    """Split a "key: content" line into (key, content).

    Only the first colon separates key from content; the single character
    after it (the space) is dropped.
    """
    key = s.split(":")[0]
    content = ":".join(s.split(":")[1:])[1:]
    return (key, content)


def read_data(path):
    """Parse metadata-changes / metadata-additions files found in *path*.

    path must end with a path separator (file names are appended directly).

    Returns (data, list_of_strains):
      data            -- gisaid_epi_isl id -> {field: value} for every added
                         sequence, updated with any later changes
      list_of_strains -- every id seen in a "sequences added" section

    Bug fix: the additions loop previously tested ``header != "gisaid_epi_isl"``,
    comparing the whole header *list* to a string (always True), so the id
    column was wrongly copied into every record; now compares ``header[i]``.
    """
    data = {}  # added and changed sequences
    list_of_strains = []
    for file in sorted(os.listdir(path)):
        if file == '.DS_Store':
            continue
        if file.startswith("metadata-changes"):
            with open(path + file) as f:
                metadata_changes = f.readlines()
            # State machine over the diff file: section headers switch between
            # added / changed / removed mode.
            added = False
            changed = False
            for i in range(len(metadata_changes)):
                k = metadata_changes[i].strip()
                if k.endswith("sequences added") or k.endswith("sequence added"):
                    added = True
                    changed = False
                if k.endswith("sequences changed") or k.endswith("sequence changed"):
                    added = False
                    changed = True
                if k.endswith("sequences removed") or k.endswith("sequence removed"):
                    added = False
                    changed = False
                if k.startswith("gisaid_epi_isl"):
                    (key, id) = cut(k)
                    if added:
                        list_of_strains.append(id)
                        if id in data:
                            print("Attention, same sequence added two times! (" + id + ")")
                        data[id] = {}
                        # Fields of the record start two lines above the id line
                        # and run until the next blank line.
                        j = -2
                        while (i + j) < len(metadata_changes) and metadata_changes[i + j] != "\n":
                            k = metadata_changes[i + j].strip()
                            (key, content) = cut(k)
                            if key != "gisaid_epi_isl":
                                data[id][key] = content
                            j += 1
                    elif changed:
                        # Only apply changes to sequences we have seen added.
                        if id in data:
                            j = 1
                            k = metadata_changes[i + j]
                            while k != "\n":
                                (key, content) = cut(k.strip())
                                # overwrite with latest changes ("old => new")
                                data[id][key] = content.split("=>")[1].strip().strip("\"")
                                j += 1
                                if (i + j) >= len(metadata_changes):
                                    break
                                k = metadata_changes[i + j]
        if file.startswith("metadata-additions"):
            with open(path + file) as f:
                metadata_additions = f.readlines()
            header = metadata_additions[0].strip().split("\t")
            for line in metadata_additions[1:]:
                l = line.strip().split("\t")
                id = l[2]
                if id in data:
                    print("Attention, same sequence added two times! (" + id + ")")
                data[id] = {}
                for i in range(len(header)):
                    # Fixed: compare the column name, not the whole header list.
                    if header[i] != "gisaid_epi_isl":
                        data[id][header[i]] = l[i]
    return (data, list_of_strains)
null
13,589
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime

def bold(s):
    # Wrap s in ANSI escape codes so it prints in bold.
    return('\033[1m' + s + '\033[0m')

def read_excel_lab_file(table_file_name):
    """Load country -> {lab description (lowercased) -> handle} from the spreadsheet.

    Unlike other variants of this helper, no existence check: a missing file
    raises from pandas.
    """
    excel_table = pd.read_excel(table_file_name, index_col=0, skiprows=1)
    excel_table = excel_table.fillna("empty?")
    lab_dictionary = {}
    for country, row in excel_table.iterrows():
        description = row["Who"]
        handle = row["Who to tag"]
        if country not in lab_dictionary:
            lab_dictionary[country] = {}
        if description in lab_dictionary[country]:
            print("Warning: lab description is found two times in excel table in same country (" + str(country) + ", " + str(description) + ")")
        lab_dictionary[country][description.lower()] = handle
    return lab_dictionary

# Today's date as "YYYY-MM-DD" (module import time).
today = str(datetime.datetime.now())[:10]

def check_for_recency(counts, list_of_strains, lab_collection, path_to_metadata, table_file_name):
    """Find countries and labs that have NOT submitted recently.

    counts           -- country -> division -> count of new sequences
    list_of_strains  -- ids of the newly added sequences (excluded from the
                        "recent" tally so only *other* recent activity counts)
    lab_collection   -- region -> country -> [handles] for the new sequences
    path_to_metadata -- directory containing downloaded_gisaid.tsv
    table_file_name  -- lab/handle spreadsheet

    Returns (rare_countries, rare_labs): countries with <= 20 recent sequences
    and handles with no recent submissions; both get included in the tweet.
    """
    print("\n----------------------------------------------\n")
    countries = {}
    subm_labs = {}
    origlab_authors = {}
    # last XX days
    cutoff_date = datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time()) - datetime.timedelta(days=30)
    with open(path_to_metadata + "downloaded_gisaid.tsv") as f:
        header = f.readline().split("\t")
        country_i = header.index("country")
        subm_date_i = header.index("date_submitted")
        strain_i = header.index("gisaid_epi_isl")
        subm_lab_i = header.index("submitting_lab")
        orig_lab_i = header.index("originating_lab")
        author_i = header.index("authors")
        print("Collecting all labs from the last month from metadata... (this may take a while)")
        line = f.readline()
        while line:
            l = line.split("\t")
            country = l[country_i]
            lab = l[subm_lab_i]
            orig_lab = l[orig_lab_i]
            author = l[author_i]
            # UK is handled via a fixed consortium handle further below.
            if country == "United Kingdom":
                line = f.readline()
                continue
            if country in counts:
                # Short-circuit once this row's lab/orig-lab/author are all
                # already counted well past the threshold (saves date parsing).
                if country in subm_labs and lab in subm_labs[country] and subm_labs[country][lab] > 20 and orig_lab in origlab_authors[country] and origlab_authors[country][orig_lab] > 20 and author in origlab_authors[country] and origlab_authors[country][author] > 20:
                    line = f.readline()
                    continue
                date = datetime.datetime.fromisoformat(l[subm_date_i])
                if date > cutoff_date:
                    strain = l[strain_i]
                    # Only count sequences that are not among the new additions.
                    if strain not in list_of_strains:
                        if country not in countries:
                            print(country)
                            countries[country] = 0
                        countries[country] += 1
                        if country not in subm_labs:
                            subm_labs[country] = {}
                            origlab_authors[country] = {}
                        if lab not in subm_labs[country]:
                            subm_labs[country][lab] = 0
                        subm_labs[country][lab] += 1
                        if author not in origlab_authors[country]:
                            origlab_authors[country][author] = 0
                        origlab_authors[country][author] += 1
                        if orig_lab not in origlab_authors[country]:
                            origlab_authors[country][orig_lab] = 0
                        origlab_authors[country][orig_lab] += 1
            line = f.readline()
    print("\nSearching for twitter handles... ")
    # Countries of interest with few (or no) recent sequences.
    rare_countries = []
    for c in counts:
        if c != "United Kingdom":
            if c not in countries or countries[c] <= 20:
                rare_countries.append(c)
    lab_dictionary = read_excel_lab_file(table_file_name)
    # handle -> number of recent sequences, per country.
    lab_collection_present = {}
    for country in subm_labs:
        if country not in lab_collection_present:
            lab_collection_present[country] = {}
        for lab in subm_labs[country]:
            n = subm_labs[country][lab]
            if country in lab_dictionary and lab.lower() in lab_dictionary[country]:
                k = lab_dictionary[country][lab.lower()]
                for l in k.split(", "):
                    if l not in lab_collection_present[country]:
                        lab_collection_present[country][l] = 0
                    lab_collection_present[country][l] += n
            else:
                print("Lab " + bold(lab) + " (" + country + ") not found and will be excluded from future steps. Please fill into excel table to include it.")
        for lab in origlab_authors[country]:
            n = origlab_authors[country][lab]
            if country in lab_dictionary and lab.lower() in lab_dictionary[country]:
                k = lab_dictionary[country][lab.lower()]
                for l in k.split(", "):
                    # NOTE(review): duplicated membership test below; as written,
                    # counts from originating labs/authors are only added for
                    # handles not already present -- confirm intended.
                    if l not in lab_collection_present[country]:
                        if l not in lab_collection_present[country]:
                            lab_collection_present[country][l] = 0
                        lab_collection_present[country][l] += n
    # UK is always treated as recently active via its consortium handle.
    lab_collection_present["United Kingdom"] = {"@CovidGenomicsUK": 1000}
    # Handles among the new submitters with no recent activity (or from a
    # rare country) -- these go into the tweet.
    rare_labs = {}
    for region in lab_collection:
        for country in lab_collection[region]:
            for lab in lab_collection[region][country]:
                if country not in lab_collection_present or lab not in lab_collection_present[country] or country in rare_countries:
                    if region not in rare_labs:
                        rare_labs[region] = {}
                    if country not in rare_labs[region]:
                        rare_labs[region][country] = []
                    rare_labs[region][country].append(lab)
    print("\nCountries that have submitted < 20 sequences last month (all of these will be included in the tweet):")
    print(rare_countries)
    print("\nSubmitters that have not submitted last month (all of these will be included in the tweet):")
    print(rare_labs)
    return rare_countries, rare_labs
null
13,590
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime


def check_dates(data, today):
    """Drop sequences whose sampling date is implausible.

    Removes (in place) entries whose date string has the wrong length, lies
    before December 2019, lies in the future relative to *today*
    ("YYYY-MM-DD"), or predates the emergence of the sequence's Nextstrain
    clade. Prints a summary of everything removed and returns *data*.
    """
    # First plausible sampling date for each Nextstrain clade.
    clade_dates = {
        "19A": "2019-12-01",
        "19B": "2019-12-01",
        "20A": "2020-01-20",
        "20A.EU2": "2020-02-15",
        "20B": "2020-02-14",
        "20C": "2020-02-25",
        "20D": "2020-03-12",
        "20E (EU1)": "2020-05-27",
        "20F": "2020-05-24",
        "20G": "2020-06-11",
        "20H (Beta, V2)": "2020-08-10",
        "20I (Alpha, V1)": "2020-09-20",
        "20J (Gamma, V3)": "2020-10-29",
        "21A (Delta)": "2020-10-30",
        "21B (Kappa)": "2020-10-30",
        "21C (Epsilon)": "2020-08-03",
        "21D (Eta)": "2020-11-21",
        "21E (Theta)": "2021-01-10",
        "21F (Iota)": "2020-11-20",
        "21G (Lambda)": "2021-01-05",
        "21H": "2021-01-05",
    }

    def as_ymd(iso):
        # "YYYY-MM-DD" -> (year, month, day) for lexicographic comparison.
        return (int(iso[:4]), int(iso[5:7]), int(iso[8:]))

    invalid_sample_date = {}
    suspicious_sample_date = {}
    today_ymd = as_ymd(today)
    for seq_id in list(data.keys()):
        entry = data[seq_id]
        date = entry["date"]
        strain = entry["strain"]
        country = entry["country"]
        # Malformed date string (wrong length).
        if len(date) != len(today):
            invalid_sample_date[strain] = (date, country)
            del data[seq_id]
            continue
        sample_ymd = as_ymd(date)
        # Before the pandemic (earlier than 2019-12) or in the future.
        if sample_ymd[:2] < (2019, 12) or sample_ymd > today_ymd:
            invalid_sample_date[strain] = (date, country)
            del data[seq_id]
            continue
        clade = entry["Nextstrain_clade"]
        dev = entry["clock_deviation"]
        if clade == "":
            print("Clade missing for sequence " + seq_id)
        elif clade not in clade_dates:
            print("Unknown clade " + clade + " for sequence " + seq_id)
        elif sample_ymd < as_ymd(clade_dates[clade]):
            # Sampled before the clade existed -- flag and drop.
            suspicious_sample_date[strain] = date + " (" + clade + ", clock deviation = " + dev + ")"
            del data[seq_id]
            continue
    # Summarize the removed entries by country and date.
    invalid_dates_by_country = {}
    for strain, (date, country) in invalid_sample_date.items():
        per_country = invalid_dates_by_country.setdefault(country, {})
        per_country[date] = per_country.get(date, 0) + 1
    print("\n----------------------------------------------\n")
    print("Invalid sample dates (automatically excluded from total counts):")
    for country in invalid_dates_by_country:
        print(country)
        for date in invalid_dates_by_country[country]:
            print(date + " (" + str(invalid_dates_by_country[country][date]) + ")")
        print("")
    print("\nSample date before clade (automatically excluded from total counts):")
    for strain in suspicious_sample_date:
        print(strain + ": " + suspicious_sample_date[strain])
    return data
null
13,591
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime


def bold(s):
    """Wrap *s* in ANSI escape codes so it prints in bold."""
    return '\033[1m' + s + '\033[0m'


# Metadata fields and the exact values that mark a sequence for exclusion.
flagged_properties = {"originating_lab": ["Synlab Haut de France"]}
path_to_outputs = "scripts/curate_metadata/outputs_new_sequences/"


def check_flagged_properties(data):
    """Remove sequences whose metadata matches a flagged value.

    Every entry in *data* whose field equals one of the values listed in
    ``flagged_properties`` is dropped (in place) and its strain name written
    to sequences_exclude.txt so it can be added to the exclude list.
    Returns the (mutated) *data*.
    """
    flagged_strains = {prop: {value: [] for value in values}
                       for prop, values in flagged_properties.items()}
    seqs_found = False
    for seq_id in list(data.keys()):
        strain = data[seq_id]["strain"]
        exclude = False
        for prop, values in flagged_properties.items():
            observed = data[seq_id][prop]
            for value in values:
                if observed == value:
                    flagged_strains[prop][value].append(strain)
                    seqs_found = True
                    exclude = True
        if exclude:
            del data[seq_id]
    if seqs_found:
        print(bold("\nFlagged properties found! Please check outputs_new_sequences/sequences_exclude.txt for strain names to exclude") + " (automatically excluded from total counts).\n")
    else:
        print("\nNo flagged properties found.\n")
    # Always (re)write the exclusion report, even when nothing was flagged.
    with open(path_to_outputs + "sequences_exclude.txt", "w") as out:
        out.write("\n\nStrains to add to exclude (based on flagged properties):\n")
        for prop in flagged_strains:
            for value in flagged_properties[prop]:
                out.write(prop + " = \"" + value + "\":\n")
                for strain in flagged_strains[prop][value]:
                    out.write(strain + "\n")
                out.write("\n")
    return data
null
13,592
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime

def plot_dates(data, path):
    """Save one bar chart per country showing sequence counts by sampling date.

    data -- id -> {"country": ..., "date": "YYYY-MM-DD", ...}
    path -- output directory (must end with a path separator); existing
            "dates_*" plots in it are deleted first
    """
    # country -> {sampling datetime -> count}
    dates_by_country = {}
    for id in data:
        country = data[id]["country"]
        date = datetime.datetime.strptime(data[id]["date"], '%Y-%m-%d')
        if country not in dates_by_country:
            dates_by_country[country] = {}
        if date not in dates_by_country[country]:
            dates_by_country[country][date] = 0
        dates_by_country[country][date] += 1
    #remove old plots
    for f in os.listdir(path):
        if f.startswith("dates_"):
            os.remove(path + f)
    for country in dates_by_country:
        dates = list(dates_by_country[country].keys())
        values = list(dates_by_country[country].values())
        plt.figure()
        plt.bar(dates, values)
        plt.title(country)
        plt.xticks(rotation=45, ha="right", size = 7)
        # Dots are stripped from the file name (e.g. for abbreviations) so the
        # saved name keeps a single extension.
        plt.savefig(path + "dates_" + country.replace(".", ""))
        plt.close()
null
13,593
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime

path_to_outputs = "scripts/curate_metadata/outputs_new_sequences/"


def print_counts(data):
    """Tally new sequences per country and division.

    Prints one summary line per country, writes the same lines to
    tweet_resources.txt, and returns the nested counts
    (country -> division -> count).
    """
    counts = {}
    for seq_id in data:
        country = data[seq_id]["country"]
        division = data[seq_id]["division"]
        per_country = counts.setdefault(country, {})
        per_country[division] = per_country.get(division, 0) + 1
    # Grand total over every country and division.
    sum_total = sum(n for divisions in counts.values() for n in divisions.values())
    print("\n----------------------------------------------\n")
    print("Total counts: " + str(sum_total))
    with open(path_to_outputs + "tweet_resources.txt", "w") as out:
        for country, divisions in counts.items():
            sum_country = sum(divisions.values())
            s = country + ": " + str(sum_country)
            if len(divisions) == 1:
                # Single division: name it without a per-division count.
                only_division = next(iter(divisions))
                s = s + " (" + only_division + ")"
            else:
                s = s + " (" + ", ".join(str(divisions[d]) + " " + d for d in divisions) + ")"
            print(s)
            out.write(s + "\n")
        out.write("\n\n\n")
    return counts
null
13,594
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime

def bold(s):
    # Wrap s in ANSI escape codes so it prints in bold.
    return('\033[1m' + s + '\033[0m')

def strike(s):
    # Render s with combining strikethrough characters.
    return('\u0336'.join(s) + '\u0336')

def read_excel_lab_file(table_file_name):
    """Load country -> {lab description (lowercased) -> handle} from the spreadsheet."""
    excel_table = pd.read_excel(table_file_name, index_col=0, skiprows=1)
    excel_table = excel_table.fillna("empty?")
    lab_dictionary = {}
    for country, row in excel_table.iterrows():
        description = row["Who"]
        handle = row["Who to tag"]
        if country not in lab_dictionary:
            lab_dictionary[country] = {}
        if description in lab_dictionary[country]:
            print("Warning: lab description is found two times in excel table in same country (" + str(country) + ", " + str(description) + ")")
        lab_dictionary[country][description.lower()] = handle
    return lab_dictionary

def collect_labs(data, table_file_name):
    """Collect twitter handles for submitting labs, originating labs and authors.

    data            -- id -> metadata dict (region/country/submitting_lab/
                       originating_lab/authors)
    table_file_name -- lab/handle spreadsheet

    Prints a report of every lab and its resolved handle (unknown submitting
    labs get "?"), and returns region -> country -> [handles]; unknown
    submitting labs contribute a "???" placeholder. The whole UK is collapsed
    onto the COG-UK consortium handle.
    """
    print("\n----------------------------------------------")
    # First pass: de-duplicated lists of labs/authors per region and country.
    submitting_labs = {}
    originating_labs = {}
    authors = {}
    for id in data:
        region = data[id]["region"]
        country = data[id]["country"]
        submitting_lab = data[id]["submitting_lab"]
        originating_lab = data[id]["originating_lab"]
        author = data[id]["authors"]
        if region not in submitting_labs:
            submitting_labs[region] = {}
        if country not in submitting_labs[region]:
            submitting_labs[region][country] = []
        if submitting_lab not in submitting_labs[region][country]:
            submitting_labs[region][country].append(submitting_lab)
        if region not in originating_labs:
            originating_labs[region] = {}
        if country not in originating_labs[region]:
            originating_labs[region][country] = []
        # Originating lab only kept when it differs from the submitting lab.
        if originating_lab not in originating_labs[region][country] and originating_lab != submitting_lab:
            originating_labs[region][country].append(originating_lab)
        if region not in authors:
            authors[region] = {}
        if country not in authors[region]:
            authors[region][country] = []
        if author not in authors[region][country]:
            authors[region][country].append(author)
    lab_dictionary = read_excel_lab_file(table_file_name)
    # The single handle used for every UK lab.
    lab_UK = lab_dictionary["United Kingdom"]["COVID-19 Genomics UK Consortium".lower()]
    lab_collection = {}
    print("\nSubmitting labs:\n(Note: small differences in spelling might cause lab to not be identified. Consider adjusting the spelling in the spreadsheet!)\n")
    for region in submitting_labs:
        if region not in lab_collection:
            lab_collection[region] = {}
        for country in sorted(submitting_labs[region]):
            if country not in lab_collection[region]:
                lab_collection[region][country] = []
            s = country + ":\n"
            for lab in submitting_labs[region][country]:
                s += lab + ": "
                if country in lab_dictionary and lab.lower() in lab_dictionary[country]:
                    k = lab_dictionary[country][lab.lower()]
                    # A spreadsheet cell may hold several comma-separated handles.
                    for l in lab_dictionary[country][lab.lower()].split(", "):
                        if l not in lab_collection[region][country]:
                            lab_collection[region][country].append(l)
                else:
                    k = "?"
                    lab_collection[region][country].append("???")
                if country == "United Kingdom":
                    # Show the individual handle struck through, replaced by COG-UK.
                    k = strike(k) + " " + lab_UK
                s += bold(k) + "\n"
            print(s)
    print("----------------------------------------------\n")
    print("Originating labs (only printed if found in excel sheet):\n")
    for region in originating_labs:
        for country in originating_labs[region]:
            s = country + ":\n"
            for lab in originating_labs[region][country]:
                if country in lab_dictionary and lab.lower() in lab_dictionary[country]:
                    s += lab
                    s += ": "
                    k = lab_dictionary[country][lab.lower()]
                    if country == "United Kingdom":
                        k = strike(k) + " " + lab_UK
                    s += bold(k)
                    for l in lab_dictionary[country][lab.lower()].split(", "):
                        if l not in lab_collection[region][country]:
                            lab_collection[region][country].append(l)
                    s += "\n"
            # Only print countries that contributed at least one line.
            if s != country + ":\n":
                print(s)
    print("----------------------------------------------\n")
    print("Authors (only printed if found in excel sheet):\n")
    for region in authors:
        for country in authors[region]:
            s = country + ":\n"
            for author in authors[region][country]:
                if country in lab_dictionary and author.lower() in lab_dictionary[country]:
                    s += author
                    s += ": "
                    k = lab_dictionary[country][author.lower()]
                    if country == "United Kingdom":
                        k = strike(k) + " " + lab_UK
                    s += bold(k)
                    for a in lab_dictionary[country][author.lower()].split(", "):
                        if a not in lab_collection[region][country]:
                            lab_collection[region][country].append(a)
                    s += "\n"
            if s != country + ":\n":
                print(s)
    # Collapse all UK handles onto the single consortium handle.
    if "Europe" in lab_collection:
        if "United Kingdom" in lab_collection["Europe"]:
            lab_collection["Europe"]["United Kingdom"] = [lab_UK]
    return lab_collection
null
13,595
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime


def overview_with_dates(data, file_name):
    """Write a per-country, tab-separated overview of strains with their dates.

    data: dict keyed by sequence id; each record must provide the keys
        "strain", "date" (sampling date), "date_submitted" and "country".
    file_name: path of the overview file to (over)write.
    """
    # Group one "strain<TAB>sampling<TAB>submission" line per sequence by country
    # so the output file lists one country section after another.
    by_country = {}
    for record_id in data:
        record = data[record_id]
        row = record["strain"] + "\t" + record["date"] + "\t" + record["date_submitted"]
        by_country.setdefault(record["country"], []).append(row)

    with open(file_name, "w") as out:
        out.write("strain\tsampling date\tsubmission date\n")
        for country, rows in by_country.items():
            out.write(country + "\n")
            for row in rows:
                out.write(row + "\n")
            out.write("\n")
null
13,596
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime


def filter_for_date_region(data, path_to_outputs, params):
    """Count sequences from one region sampled in or after a given month and
    write the per-country, per-month counts to a "special_check" report file.

    data: dict keyed by sequence id with at least "date" (ISO "YYYY-MM-DD"),
        "region" and "country" per record.
    path_to_outputs: directory prefix for the report file.
    params: tuple (region, month) — month is an int compared against the
        "MM" part of the sampling date.
    """
    region, month = params
    monthly_counts = {}
    for seq_id in data:
        record = data[seq_id]
        date = record["date"]
        # date[5:7] is the month of an ISO date string
        if int(date[5:7]) >= month and region == record["region"]:
            per_country = monthly_counts.setdefault(record["country"], {})
            month_key = date[:7]  # "YYYY-MM"
            per_country[month_key] = per_country.get(month_key, 0) + 1

    report_path = path_to_outputs + "special_check_" + region + "_" + str(month) + ".txt"
    with open(report_path, "w") as myfile:
        myfile.write("New sequences from " + region + " after month " + str(month) + "\n\n")
        for country in monthly_counts:
            myfile.write(country + "\n")
            for month_key in sorted(monthly_counts[country]):
                myfile.write(month_key + ": " + str(monthly_counts[country][month_key]) + "\n")
            myfile.write("\n")
null
13,597
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime

path_to_outputs = "scripts/curate_metadata/outputs_new_sequences/"


def prepare_tweet(counts, total_lab_collection, lab_collection):
    """Assemble a tweet thread announcing new sequences and append it to
    tweet_resources.txt.

    counts: per-country dict of count dicts (summed for the grand total).
    total_lab_collection: region -> country -> labs, used only for the total.
    lab_collection: region -> country -> list of lab handles to thank.

    Regions whose country+lab text fits into one tweet are paired greedily;
    oversized regions are split across several tweets ("Europe", "Europe2", ...).
    """
    links = {
        "Africa": "nextstrain.org/ncov/gisaid/africa",
        "Asia": "nextstrain.org/ncov/gisaid/asia",
        "Europe": "nextstrain.org/ncov/gisaid/europe",
        "North America": "nextstrain.org/ncov/gisaid/north-america",
        "Oceania": "nextstrain.org/ncov/gisaid/oceania",
        "South America": "nextstrain.org/ncov/gisaid/south-america"
    }
    # Random phrasing variants; *_split variants are for continuation tweets
    # that have no build link of their own.
    starters = [
        ("Check out new sequences from ", " on "),
        ("New sequences from ", " can be found on "),
        ("You can see new sequences from ", " on "),
        ("You can find new sequences from ", " on "),
        ("New sequences from ", " can be seen on ")
    ]
    starters_split = [
        ("Check out new sequences from ", " below"),
        ("New sequences from ", " can be found below"),
        ("You can see new sequences from ", " below"),
        ("You can find new sequences from ", " below"),
        ("New sequences from ", " can be seen below")
    ]
    # Countries that read better with a leading "the".
    the = ["USA", "United Kingdom", "Democratic Republic of the Congo"]

    counts_country = {region: {country: sum(counts[country].values())
                               for country in total_lab_collection[region]}
                      for region in total_lab_collection}
    total = sum([sum(counts_country[region].values()) for region in counts_country])
    start_tweet = "Thanks to #opendata sharing via @GISAID, we've updated nextstrain.org/ncov/gisaid with " + str(
        total) + " new #COVID19 #SARSCoV2 sequences!"

    # Character budget per tweet after subtracting the fixed phrasing parts.
    char_total = 230
    char_available = char_total - len("Check out the new sequences from on ") - len("(Thanks to )") - len("1/1")

    tweet_collection_full = {}   # regions that fit into a single tweet
    tweet_collection_split = {}  # oversized regions, chunked
    lengths = {}                 # predicted text length per full region

    for region in lab_collection:
        countries_list = list(lab_collection[region].keys())
        length_prediction = [len(country) + len(", ".join(lab_collection[region][country]))
                             for country in lab_collection[region]]
        if sum(length_prediction) > char_available:
            # Region too large for one tweet: peel off countries that alone
            # exceed the budget, then chunk the rest by cumulative length.
            countries_extra = []  # extra large countries
            while len(length_prediction) > 0 and max(length_prediction) > char_available:
                i = np.argmax(length_prediction)
                countries_extra.append([countries_list[i]])
                countries_list.pop(i)
                length_prediction.pop(i)
            if len(countries_list) > 0:
                countries = []
                while sum(length_prediction) > char_available:
                    length_prediction_sum = np.cumsum(length_prediction)
                    k = np.argmax(length_prediction_sum > char_available)
                    countries.append(countries_list[:k])
                    countries_list = countries_list[k:]
                    length_prediction = length_prediction[k:]
                countries.append(countries_list)
                countries = countries + countries_extra
            else:
                countries = countries_extra
            i = 1
            for countries_list in countries:
                h = []
                for country in countries_list:
                    for l in lab_collection[region][country]:
                        if l not in h:
                            h.append(l)
                c = ["the " + country if country in the else country for country in countries_list]
                r = region
                if i > 1:
                    r += str(i)  # e.g. "Europe2" for the second chunk
                tweet_collection_split[r] = (c, h)
                i += 1
        else:
            h = []
            for country in lab_collection[region]:
                for l in lab_collection[region][country]:
                    if l not in h:
                        h.append(l)
            c = ["the " + country if country in the else country for country in countries_list]
            tweet_collection_full[region] = (c, h)
            lengths[region] = len(", ".join(c)) + len(", ".join(h)) + len(links.get(region, ""))

    tweet = []
    tweet.append((start_tweet + "\n\n", "\n\n[pic_Global]"))

    # Greedily pair the shortest full region with the largest partner that
    # still fits into one tweet.
    while len(lengths) > 0:
        current_region = min(lengths, key=lengths.get)
        best_partner = ""
        current_length = lengths[current_region]
        for region, length in sorted(lengths.items(), key=lambda x: x[1]):
            if region == current_region:
                continue
            if current_length + length > char_available:
                break
            best_partner = region
        lengths.pop(current_region)
        c = tweet_collection_full[current_region][0]
        h = tweet_collection_full[current_region][1]
        p = "[pic_" + current_region.replace(" ", "") + "]"
        l = links.get(current_region, "")
        if best_partner != "":
            current_length += lengths[best_partner]
            lengths.pop(best_partner)
            c += tweet_collection_full[best_partner][0]
            h += tweet_collection_full[best_partner][1]
            l += " and " + links[best_partner]
            p += " " + "[pic_" + best_partner.replace(" ", "") + "]"
        if len(c) > 1:
            c = ", ".join(c[:-1]) + " and " + c[-1]
        else:
            c = c[0]
        # Drop commas between lab handles when the tweet would overflow.
        if current_length > char_available:
            h = " ".join(h)
        else:
            h = ", ".join(h)
        starter = random.choice(starters)
        s = starter[0] + c + starter[1] + l + ".\n\n"
        s += "(Thanks to " + h + ")\n\n"
        tweet.append((s, "\n\n" + p))

    # Continuation tweets for the split (oversized) regions.
    for region in tweet_collection_split:
        c = tweet_collection_split[region][0]
        h = tweet_collection_split[region][1]
        p = "[pic_" + region.replace(" ", "") + "]"
        if region in links:
            starter = random.choice(starters)
            l = links[region]
        else:
            starter = random.choice(starters_split)
            l = ""
        if len(c) > 1:
            c = ", ".join(c[:-1]) + " and " + c[-1]
        else:
            c = c[0]
        if len(", ".join(c)) + len(", ".join(h)) + len(l) > char_available:
            h = " ".join(h)
        else:
            h = ", ".join(h)
        s = starter[0] + c + starter[1] + l + ".\n\n"
        s += "(Thanks to " + h + ")\n\n"
        tweet.append((s, "\n\n" + p))

    with open(path_to_outputs + "tweet_resources.txt", "a") as out:
        out.write("===============================\n\n")
        for i, t in enumerate(tweet):
            (s, p) = t
            out.write(s + str(i+1) + "/" + str(len(tweet)) + p + "\n\n\n")
null
13,598
import os
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import random
import numpy as np
import math
import datetime


def prepare_tweet_new_format(counts, rare_labs):
    """Set up the character budget for the new-format update tweet.

    counts: per-country dict of count dicts (summed for the grand total).
    rare_labs: region -> country mapping used to select which countries
        contribute to the total.

    NOTE(review): this function appears unfinished — it computes the opening
    tweet and the character budgets but produces no output and returns None.
    """
    links = {
        "Africa": "nextstrain.org/ncov/africa",
        "Asia": "nextstrain.org/ncov/asia",
        "Europe": "nextstrain.org/ncov/europe",
        "North America": "nextstrain.org/ncov/north-america",
        "Oceania": "nextstrain.org/ncov/oceania",
        "South America": "nextstrain.org/ncov/south-america"
    }
    # BUGFIX: the body referenced an undefined name `lab_collection`
    # (NameError at call time); the parameter is called `rare_labs` and the
    # parallel prepare_tweet() uses its lab collection the same way — confirm
    # that `rare_labs` shares the region -> country -> labs shape.
    counts_country = {region: {country: sum(counts[country].values()) for country in rare_labs[region]}
                      for region in rare_labs}
    total = sum([sum(counts_country[region].values()) for region in counts_country])
    start_tweet = "Thanks to #opendata sharing by @GISAID, we've updated nextstrain.org/ncov with " + str(
        total) + " new #COVID19 #SARSCoV2 sequences!"
    # Budget per tweet after subtracting the fixed phrasing parts; the first
    # tweet additionally carries the opening sentence.
    char_total = 260
    char_available = char_total - len("Check out the new sequences from on ") - len("(Thanks to )") - len("1/1")
    char_available_first = char_available - len(start_tweet)
null
13,599
import os
from difflib import SequenceMatcher
from pathlib import Path
from geopy.geocoders import Nominatim
from collections import Counter

path_to_metadata = "data/"


def read_metadata(metadata_filename, data, geo_location_occurences, genbank=False):
    """Read a metadata TSV and collect its geography hierarchy.

    Fills `data` (region -> country -> division -> [locations]) and bumps the
    occurrence counters per geography name. Returns the updated pair
    (data, geo_location_occurences).

    NOTE(review): assumes the values of geo_location_occurences are
    collections.Counter objects (Counter.update() adds counts; a plain dict's
    update() would overwrite) — confirm at the caller.
    """
    with open(path_to_metadata + metadata_filename) as f:
        header = f.readline().split("\t")
        region_col = header.index("region")
        country_col = header.index("country")
        division_col = header.index("division")
        location_col = header.index("location")
        for line in f:
            fields = line.split("\t")
            region = fields[region_col]
            country = fields[country_col]
            division = fields[division_col]
            location = fields[location_col]
            # automatically increment genbank locations to the threshold since we
            # don't want to skip any for now.
            increment = 20 if genbank else 1
            geo_location_occurences["region"].update({region: increment})
            geo_location_occurences["country"].update({country: increment})
            geo_location_occurences["division"].update({division: increment})
            geo_location_occurences["location"].update({location: increment})
            locations = data.setdefault(region, {}).setdefault(country, {}).setdefault(division, [])
            if location not in locations:
                locations.append(location)
    return data, geo_location_occurences
null
13,600
import os
from difflib import SequenceMatcher
from pathlib import Path
from geopy.geocoders import Nominatim
from collections import Counter


def bold(s):
    """Wrap *s* in ANSI escape codes so terminals render it bold."""
    return('\033[1m' + s + '\033[0m')


def read_local_file(file_name):
    """Parse one of the curation config files, dispatching on the file name.

    Returns a list for the accepted-additions file, a nested dict for the
    abbreviations file, and a flat rule dict for the geo-location-rule files.
    Relies on the module-level *_file name constants and path_to_config_files.
    """
    path_file_name = path_to_config_files + file_name
    with open(path_file_name) as myfile:
        file_content = myfile.readlines()

    list_format = [accepted_additions_file]
    if file_name in list_format:  # simple list (first line is a header)
        return [line.strip() for line in file_content[1:]]

    abbreviations_format = [abbreviations_file]
    if file_name in abbreviations_format:  # dictionary, keys seperated from content with tabs
        content = {}
        for line in file_content:
            if line == "\n":
                continue
            if line.startswith("#"):  # section header, e.g. "# country"
                key = line.strip().split("# ")[1]
                content[key] = {}
                continue
            l = line.strip().split("\t")
            if l[0] in content[key]:
                print("Attention, duplicate found while reading " + file_name + ": " + l[0] + " -> " + l[1] + ", " + content[key][l[0]])
            content[key][l[0]] = l[1]
        return content

    geoLocationRule_format = [geoLocationRules_file, manualAnnotationRules_file, internationalExceptions_file]
    if file_name in geoLocationRule_format:  # Read as simple dictionary
        content = {}
        for line in file_content:
            if line == "\n":
                continue
            l = line.strip().split("\t")
            k = l[0]
            c = l[1]
            if k in content:
                print("Attention, duplicate found while reading " + file_name + ": " + k + " -> " + c + ", " + content[k])
            content[k] = c
        return content


path_to_metadata = "data/"


def read_exposure(data, metadata_filename, accepted_additions_file):
    """Validate the *_exposure columns of a metadata TSV against `data`.

    Accepted but still-unknown countries/divisions are added to `data`;
    everything else prints a correction hint. Returns the updated `data`.
    """
    # divisions and countries that are accepted additions to the metadata
    accepted_exposure = read_local_file(accepted_additions_file)

    # Check given accepted exposures and print warning if already included in the data
    # (e.g. if the country was unknown when the exposure was registered, but had
    # sequences added in the meantime)
    for region in data:
        for country in data[region]:
            if country + " (" + region + ")" in accepted_exposure:
                print("Specified exposure " + bold(country + " (" + region + ")") + " is no longer needed and can be removed from " + accepted_additions_file + ".")
            # BUGFIX: iterate the divisions of this country (data[region][country]);
            # the original iterated data[region], i.e. country names, so stale
            # division exposures were never detected.
            for division in data[region][country]:
                if division + " (" + country + ", " + region + ")" in accepted_exposure:
                    print("Specified exposure " + bold(division + " (" + country + ", " + region + ")") + " is no longer needed and can be removed from " + accepted_additions_file + ".")

    with open(path_to_metadata + metadata_filename) as f:
        header = f.readline().split("\t")
        region_i = header.index("region_exposure")
        country_i = header.index("country_exposure")
        division_i = header.index("division_exposure")
        epi_i = header.index("gisaid_epi_isl")
        line = f.readline()
        while line:
            l = line.split("\t")
            region = l[region_i]
            country = l[country_i]
            division = l[division_i]
            epi = l[epi_i]
            if region not in data:
                print("Strain " + epi + " has unknown region_exposure " + bold(region) + ". Please correct!")
            else:
                s1 = country + " (" + region + ")"
                country_present = True
                if country not in data[region]:
                    country_present = False
                    # Accepted additions (or region-as-country) are created on the fly.
                    if s1 in accepted_exposure or country == region:
                        data[region][country] = {}
                        country_present = True
                    else:
                        print("Strain " + epi + " has unknown country_exposure " + bold(country) + ". Please correct or consider adding " + bold(s1) + " to " + accepted_additions_file + "!")
                if country_present:
                    s2 = division + " (" + country + ", " + region + ")"
                    if division not in data[region][country]:
                        if s2 in accepted_exposure or division == country:
                            data[region][country][division] = [""]
                        else:
                            print("Strain " + epi + " has unknown division_exposure " + bold(division) + ". Please correct or consider adding " + bold(s2) + " to " + accepted_additions_file + "!")
            line = f.readline()
    return data
null
13,601
import os
from difflib import SequenceMatcher
from pathlib import Path
from geopy.geocoders import Nominatim
from collections import Counter


def bold(s):
    """Wrap *s* in ANSI escape codes so terminals render it bold."""
    return('\033[1m' + s + '\033[0m')


def read_local_file(file_name):
    """Parse one of the curation config files, dispatching on the file name.

    Relies on the module-level *_file name constants and path_to_config_files.
    """
    path_file_name = path_to_config_files + file_name
    with open(path_file_name) as myfile:
        file_content = myfile.readlines()

    list_format = [accepted_additions_file]
    if file_name in list_format:  # simple list (first line is a header)
        return [line.strip() for line in file_content[1:]]

    abbreviations_format = [abbreviations_file]
    if file_name in abbreviations_format:  # dictionary, keys seperated from content with tabs
        content = {}
        for line in file_content:
            if line == "\n":
                continue
            if line.startswith("#"):  # section header, e.g. "# country"
                key = line.strip().split("# ")[1]
                content[key] = {}
                continue
            l = line.strip().split("\t")
            if l[0] in content[key]:
                print("Attention, duplicate found while reading " + file_name + ": " + l[0] + " -> " + l[1] + ", " + content[key][l[0]])
            content[key][l[0]] = l[1]
        return content

    geoLocationRule_format = [geoLocationRules_file, manualAnnotationRules_file, internationalExceptions_file]
    if file_name in geoLocationRule_format:  # Read as simple dictionary
        content = {}
        for line in file_content:
            if line == "\n":
                continue
            l = line.strip().split("\t")
            k = l[0]
            c = l[1]
            if k in content:
                print("Attention, duplicate found while reading " + file_name + ": " + k + " -> " + c + ", " + content[k])
            content[k] = c
        return content


def correct_data(data, corrections):
    """Apply a list of 8-tuples (old region/country/division/location followed
    by the corrected four) to the nested `data` hierarchy in place."""
    for (region, country, division, location,
         region_correct, country_correct, division_correct, location_correct) in corrections:
        if country_correct not in data[region_correct]:
            data[region_correct][country_correct] = {}
        if division_correct not in data[region_correct][country_correct]:
            data[region_correct][country_correct][division_correct] = []
        if location_correct not in data[region_correct][country_correct][division_correct]:
            data[region_correct][country_correct][division_correct].append(location_correct)
        # If no entries are contained downstream, assume geography level is now
        # obsolete and delete from the data dictionary (e.g. if a misspelled
        # region is corrected, delete it from data after copying over all
        # downstream countries, divisions and locations)
        data[region][country][division].remove(location)
        if data[region][country][division] == []:
            del data[region][country][division]
        if data[region][country] == {}:
            del data[region][country]
        if data[region] == {}:
            del data[region]
    return data


def formulate_correction(given, before, correct):
    """Match one concrete (region, country, division, location) against a rule.

    `before` / `correct` may contain "*" wildcards ("*" in `correct` means
    "keep the given value"). Returns an 8-tuple (given four + corrected four)
    when the rule matches, else None.
    """
    (region, country, division, location) = given
    (region_before, country_before, division_before, location_before) = before
    (region_correct, country_correct, division_correct, location_correct) = correct

    region2 = region if region_correct == "*" else region_correct
    country2 = country if country_correct == "*" else country_correct
    division2 = division if division_correct == "*" else division_correct
    location2 = location if location_correct == "*" else location_correct

    if (region == region_before or region_before == "*") \
            and (country == country_before or country_before == "*") \
            and (division == division_before or division_before == "*") \
            and (location == location_before or location_before == "*"):
        return (region, country, division, location, region2, country2, division2, location2)
    return None


def apply_rules(data, ruleSet, delimiter=("/",), print_rules=True):
    """Apply the geo-location rules in config file `ruleSet` to `data`.

    delimiter: iterable of separator characters tried for each rule
        (BUGFIX: default changed from the mutable list ["/"] to an immutable
        tuple; the value is only iterated, so callers are unaffected).
    print_rules: if True, print every applied correction.

    Returns (data, applied_rules) where applied_rules maps each matched
    original 4-tuple to its corrected 4-tuple.
    """
    rules = read_local_file(ruleSet)
    applied_rules = {}
    for g in rules:
        for d in delimiter:
            rules_apply = []
            if d not in g or d not in rules[g]:
                continue
            (region_before, country_before, division_before, location_before) = g.split(d)
            (region_correct, country_correct, division_correct, location_correct) = rules[g].split(d)
            # Due to reoccuring bug: Since empty divisions are automatically
            # filled with the country name later in the ncov-ingest pipeline,
            # give a warning when detecting a rule that might be affected
            if country_before == division_before:
                recommended_rule = d.join([region_before, country_before, "", ""])
                if recommended_rule not in rules:
                    print(bold("Attention: Consider automatic division filler applied after geoLocationRules (Hint: add [" + recommended_rule + "\t" + rules[g] + "])"))
            for region in data:
                for country in data[region]:
                    for division in data[region][country]:
                        for location in data[region][country][division]:
                            correction = formulate_correction(
                                (region, country, division, location),
                                (region_before, country_before, division_before, location_before),
                                (region_correct, country_correct, division_correct, location_correct))
                            if correction is not None:
                                rules_apply.append(correction)
                                if print_rules:
                                    print("/".join(correction[:4]) + "\t" + "/".join(correction[4:]))
                                applied_rules[correction[:4]] = correction[4:]
            data = correct_data(data, rules_apply)
    # Also return all rules that were applied to the data to make detection of
    # conflicting annotations easier later
    return data, applied_rules
null
13,602
import os
from difflib import SequenceMatcher
from pathlib import Path
from geopy.geocoders import Nominatim
from collections import Counter


def bold(s):
    """Wrap *s* in ANSI escape codes so terminals render it bold."""
    return('\033[1m' + s + '\033[0m')


def _location_summary(locations):
    # Spell out at most ten locations; summarise the remainder.
    if len(locations) > 10:
        return ", ".join(locations[:10]) + "... (plus " + str(len(locations) - 10) + " more)"
    return ", ".join(locations)


def check_division_inconsistency(data):
    """Report names that appear both as a division and as a location inside
    another division of the same country.

    If the name-as-division has its own locations the conflict needs manual
    review (a correction template is printed); otherwise a ready-made
    geo-location rule demoting it to a location is printed.
    """
    for region in data:
        for country in data[region]:
            for division in data[region][country]:
                if division == "":
                    continue
                for location in data[region][country][division]:
                    if location == "":
                        continue
                    if location not in data[region][country] or location == division:
                        continue
                    print(bold(location) + " found as both division and location within division " + bold(division) + ".")
                    if list(data[region][country][location]) != [""]:
                        # Locations found below both divisions - needs manual
                        # review which division is proper
                        print("Conflict found: Both divisions contain locations:")
                        print("division " + bold(location) + ": location(s) " + _location_summary(data[region][country][location]))
                        print("division " + bold(division) + ": location(s) " + _location_summary(data[region][country][division]))
                        print("(Template for correction" + "[" + "/".join([region, country, location, "?"]) + "\t" + "/".join([region, country, division, "?"]) + "])\n")
                    else:
                        # No location found below the affected location/division
                        # - change to proper level
                        print("/".join([region, country, location, ""]) + "\t" + "/".join([region, country, division, location]) + "\n")
null
13,603
import os
from difflib import SequenceMatcher
from pathlib import Path
from geopy.geocoders import Nominatim
from collections import Counter


# BUGFIX: the bodies of bold() and read_local_file() were elided here,
# leaving the module syntactically broken; restored from the identical
# definitions used alongside read_exposure()/apply_rules().
def bold(s):
    """Wrap *s* in ANSI escape codes so terminals render it bold."""
    return('\033[1m' + s + '\033[0m')


def read_local_file(file_name):
    """Parse one of the curation config files, dispatching on the file name.

    Relies on the module-level *_file name constants and path_to_config_files.
    """
    path_file_name = path_to_config_files + file_name
    with open(path_file_name) as myfile:
        file_content = myfile.readlines()

    list_format = [accepted_additions_file]
    if file_name in list_format:  # simple list (first line is a header)
        return [line.strip() for line in file_content[1:]]

    abbreviations_format = [abbreviations_file]
    if file_name in abbreviations_format:  # dictionary, keys seperated from content with tabs
        content = {}
        for line in file_content:
            if line == "\n":
                continue
            if line.startswith("#"):  # section header, e.g. "# country"
                key = line.strip().split("# ")[1]
                content[key] = {}
                continue
            l = line.strip().split("\t")
            if l[0] in content[key]:
                print("Attention, duplicate found while reading " + file_name + ": " + l[0] + " -> " + l[1] + ", " + content[key][l[0]])
            content[key][l[0]] = l[1]
        return content

    geoLocationRule_format = [geoLocationRules_file, manualAnnotationRules_file, internationalExceptions_file]
    if file_name in geoLocationRule_format:  # Read as simple dictionary
        content = {}
        for line in file_content:
            if line == "\n":
                continue
            l = line.strip().split("\t")
            k = l[0]
            c = l[1]
            if k in content:
                print("Attention, duplicate found while reading " + file_name + ": " + k + " -> " + c + ", " + content[k])
            content[k] = c
        return content


region_order = ["Asia", "Oceania", "Africa", "Europe", "South America", "North America"]


def check_duplicates(data, abbreviations_file):
    """Detect duplicate location, division and country names and print the
    geo-location rules needed to disambiguate them.

    Returns the dict of duplicate locations mapped to their reduced list of
    (region, country, division) origins.
    """
    abbreviations = read_local_file(abbreviations_file)

    ### LOCATION ###
    # Collect all locations and their (region, country, division) origin.
    # Cut away all present duplicate specifiers (e.g. 'Guadalupe ES' ->
    # 'Guadalupe') - data is treated as if no duplicate adjustment has happened
    # before. This way, changes in duplicates can be detected and properly
    # treated (e.g. if another duplicate appears in the same country but
    # different division, adjust the duplicate specifier from country
    # abbreviation to division).
    location_origin = {}
    for region in data:
        if region not in region_order:
            continue
        for country in data[region]:
            if country == region:
                continue
            if country not in abbreviations["country"]:
                print("Abbreviation missing for " + country + ". Please add to " + abbreviations_file)
                continue
            for division in data[region][country]:
                if country == "USA" and division not in abbreviations["division"]:
                    print("Abbreviation missing for US state " + division + ". Please add to " + abbreviations_file)
                    continue
                for location in data[region][country][division]:
                    if location == "":
                        continue
                    if location.endswith(" (" + division + ")"):  # Cut away already existing duplicate specifiers
                        location = location.split(" (" + division + ")")[0]
                    elif location.endswith(" " + abbreviations["country"][country]):
                        location = location.split(" " + abbreviations["country"][country])[0]
                    elif country == "USA" and location.endswith(" " + abbreviations["division"][division]):
                        location = location.split(" " + abbreviations["division"][division])[0]
                    elif len(location.split(" ")[-1]) > 1 and location.upper().split(" ")[-1] == location.split(" ")[-1] or "(" in location:
                        # If parentheses or caps found at the end of the location,
                        # consider potential invalid duplicate specifier
                        if not location.split(" ")[-1].isnumeric():
                            print("Potential duplicate inconsistent with current rules: " + location)
                    if location not in location_origin:
                        location_origin[location] = []
                    location_origin[location].append((region, country, division))
    print()

    # Filter for duplicates (locations that have more than one
    # (region, country, division) origin set)
    locations_duplicates = {}
    for location in location_origin:
        if len(location_origin[location]) > 1:  # more than one (region, country, division) origin
            reduced = []
            countries = []
            for combination in location_origin[location]:
                if combination not in reduced:
                    countries.append(combination[1])
                    reduced.append(combination)
            # If, after reducing, only one set of (region, country, division) is
            # left, the location was unnecessarily specified as a duplicate -
            # warn so rules/annotations can be corrected. Exception: USA, where
            # the automatic county assignment already considers duplicates.
            if len(reduced) == 1 and countries != ["USA"]:
                print("Unnecessary duplicate: " + bold(location) + "\n")
            else:
                locations_duplicates[location] = reduced

    # Apply duplicates rules
    for location in locations_duplicates:
        printed_message = []
        divisions = {}
        for (region, country, division) in locations_duplicates[location]:
            if country not in divisions:
                divisions[country] = []
            divisions[country].append(division)
        for (region, country, division) in locations_duplicates[location]:
            if country == "USA":
                # For locations in the USA: always use state abbreviation
                location_new = location + " " + abbreviations["division"][division]
                if location in data[region][country][division]:
                    printed_message.append("/".join([region, country, division, location]) + "\t" + "/".join([region, country, division, location_new]))
                if location + " (" + division + ")" in data[region][country][division]:
                    printed_message.append("/".join([region, country, division, location + " (" + division + ")"]) + "\t" + "/".join([region, country, division, location_new]))
            elif len(divisions[country]) == 1:
                # Among-country duplicate - use country abbreviation
                location_new = location + " " + abbreviations["country"][country]
                if location in data[region][country][division]:
                    printed_message.append("/".join([region, country, division, location]) + "\t" + "/".join([region, country, division, location_new]))
                if location + " (" + division + ")" in data[region][country][division]:
                    printed_message.append("/".join([region, country, division, location + " (" + division + ")"]) + "\t" + "/".join([region, country, division, location_new]))
            else:
                # Within-country duplicate - use division as unique identifier
                location_new = location + " (" + division + ")"
                if location in data[region][country][division]:
                    printed_message.append("/".join([region, country, division, location]) + "\t" + "/".join([region, country, division, location_new]))
                if location + " " + abbreviations["country"][country] in data[region][country][division]:
                    printed_message.append("/".join([region, country, division, location + " " + abbreviations["country"][country]]) + "\t" + "/".join([region, country, division, location_new]))
        if printed_message != []:
            for l in printed_message:
                print(l)

    ### DIVISION ###
    print("\n----------\n")
    print("Checking for division duplicates...\n")
    # Collect all divisions and their (region, country) origin
    division_origin = {}
    for region in data:
        if region not in region_order:
            continue
        for country in data[region]:
            if country == region or country not in abbreviations["country"]:
                continue
            for division in data[region][country]:
                if division.endswith(" " + abbreviations["country"][country]):
                    division = division.split(" " + abbreviations["country"][country])[0]
                if division not in division_origin:
                    division_origin[division] = []
                # Special cases where a duplicate specification seems out of
                # place for one of the countries (e.g. US states)
                if (division == "Montana" or division == "Maryland") and country == "USA":
                    print("(Ignoring duplicate division " + division + " in favor of the USA.)")
                elif country == "Luxembourg" and division == "Luxembourg":
                    print("(Ignoring duplicate division " + division + " in favor of the country Luxembourg.)")
                else:
                    division_origin[division].append((region, country))

    # Filter for duplicates
    division_duplicates = {}
    for division in division_origin:
        if len(division_origin[division]) > 1:
            reduced = []
            countries = []
            for combination in division_origin[division]:
                if combination not in reduced:
                    countries.append(combination[1])
                    reduced.append(combination)
            if len(reduced) == 1:
                print("Unnecessary duplicate: " + bold(division) + "\n")
            else:
                division_duplicates[division] = reduced
    print()

    # Apply duplicates rules
    for division in division_duplicates:
        printed_message = []
        for (region, country) in division_duplicates[division]:
            division_new = division + " " + abbreviations["country"][country]
            if division in data[region][country]:
                printed_message.append("/".join([region, country, division, "*"]) + "\t" + "/".join([region, country, division_new, "*"]))
        if printed_message != []:
            print("Duplicate found: " + bold(division))
            for l in printed_message:
                print(l)
            print()

    ### COUNTRY ###
    print("\n----------\n")
    print("Checking for country duplicates...\n")
    country_origin = {}
    for region in data:
        if region not in region_order:
            continue
        for country in data[region]:
            if country not in country_origin:
                country_origin[country] = []
            country_origin[country].append(region)
    for country in country_origin:
        if len(country_origin[country]) > 1:
            print("Duplicate country found: " + bold(country) + " within " + bold(", ".join(country_origin[country])))
            if len(country_origin[country]) == 2:
                print("/".join([country_origin[country][0], country, "*", "*"]) + " <-> " + "/".join([country_origin[country][1], country, "*", "*"]))

    return locations_duplicates
null
13,604
import os
from difflib import SequenceMatcher
from pathlib import Path
from geopy.geocoders import Nominatim
from collections import Counter


def read_latlongs(path):
    """Read the lat_longs file under *path* into a dict of
    {"location"/"division"/"country"/"region": {name: (lat, long)}}.

    Coordinates are stored as floats so they can be sorted numerically;
    duplicate entries are reported and the first occurrence wins.
    """
    latlongs = {"location": {}, "division": {}, "country": {}, "region": {}}
    with open(path + latlongs_file) as myfile:
        for line in myfile:
            if line == "\n" or line.startswith("#"):
                continue
            fields = line.strip().split("\t")
            geo_level = fields[0]  # location, division etc.
            name = fields[1]
            if name in latlongs[geo_level]:
                print("Duplicate in lat_longs? (" + fields[0] + " " + fields[1] + ")\n")
            else:
                # Store as float to enable sorting by lat_longs
                latlongs[geo_level][name] = (float(fields[2]), float(fields[3]))
    return latlongs


region_order = ["Asia", "Oceania", "Africa", "Europe", "South America", "North America"]


def missing_coordinates(data, path, geo_location_occurences):
    """Collect every region, country and division in `data` that has no
    coordinates in the lat_longs file.

    Divisions are only reported once they have been seen at least 20 times
    (geo_location_occurences threshold). Returns a nested dict keyed by
    geography level.
    """
    missing_latlongs = {"region": [], "country": {}, "division": {}, "location": {}}
    latlongs = read_latlongs(path)
    for region in data:
        if region not in region_order:
            missing_latlongs["region"].append(region)
        for country in data[region]:
            if country not in latlongs["country"]:
                missing_latlongs["country"].setdefault(region, []).append(country)
            for division in data[region][country]:
                # Skip known divisions and those below the rarity threshold.
                if division in latlongs["division"] or geo_location_occurences["division"][division] < 20:
                    continue
                missing_latlongs["division"].setdefault(region, {}).setdefault(country, []).append(division)
    return missing_latlongs
null
13,605
import os
from difflib import SequenceMatcher
from pathlib import Path
from geopy.geocoders import Nominatim
from collections import Counter


def bold(s):
    """Wrap *s* in ANSI escape codes so terminals render it bold."""
    return('\033[1m' + s + '\033[0m')


def print_missing_places(missing_latlongs):
    """Pretty-print the divisions, countries and regions that are missing
    coordinates, in ready-to-paste lat_longs format.

    missing_latlongs: output of missing_coordinates() — "division" is
    region -> country -> [divisions], "country" is region -> [countries],
    "region" is a flat list.
    """
    ### DIVISION ###
    print("\n----------\n")
    if not missing_latlongs['division']:
        print("No missing divisions")
    else:
        print("Missing divisions:")
        for region, countries in missing_latlongs["division"].items():
            print("# " + region + " #")
            for country, divisions in countries.items():
                print(country)
                for division in divisions:
                    print("\tdivision\t" + bold(division))
            print()

    ### COUNTRY ###
    print("\n----------\n")
    if not missing_latlongs['country']:
        print("No missing countries")
    else:
        print("\nMissing countries:")
        for region, countries in missing_latlongs["country"].items():
            print("# " + region + " #")
            for country in countries:
                print("\tcountry\t" + bold(country))

    ### REGION ###
    if missing_latlongs['region']:
        print("\n----------\n")
        print("\nMissing regions:")
        for region in missing_latlongs["region"]:
            print("\tregion\t" + bold(region))
null
13,606
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter def bold(s): return('\033[1m' + s + '\033[0m') def clean_string(s): s = s.lower() for c in replace_special_char: s = s.replace(c, replace_special_char[c]) return s def read_local_file(file_name): path_file_name = path_to_config_files + file_name with open(path_file_name) as myfile: file_content = myfile.readlines() list_format = [accepted_additions_file] if file_name in list_format: # simple list return [line.strip() for line in file_content[1:]] abbreviations_format = [abbreviations_file] if file_name in abbreviations_format: # dictionary, keys seperated from content with tabs content = {} for line in file_content: if line == "\n": continue if line.startswith("#"): key = line.strip().split("# ")[1] content[key] = {} continue l = line.strip().split("\t") if l[0] in content[key]: print("Attention, duplicate found while reading " + file_name + ": " + l[0] + " -> " + l[1] + ", " + content[key][l[0]]) content[key][l[0]] = l[1] return content geoLocationRule_format = [geoLocationRules_file, manualAnnotationRules_file, internationalExceptions_file] if file_name in geoLocationRule_format: # Read as simple dictionary content = {} for line in file_content: if line == "\n": continue l = line.strip().split("\t") k = l[0] c = l[1] if k in content: print("Attention, duplicate found while reading " + file_name + ": " + k + " -> " + c + ", " + content[k]) content[k] = c return content abbreviations_file = "abbreviations.txt" def search_similar_names(data, missing_latlongs, locations_duplicates): abbreviations = read_local_file(abbreviations_file) ### DIVISION ### print("\n----------\n") identical = [] similar = {} for region in missing_latlongs["division"]: for country in missing_latlongs["division"][region]: for division in missing_latlongs["division"][region][country]: similarity_score = 0 identical_hit = False best_match = None for 
division2 in data[region][country]: if division2 == division: continue if clean_string(division) == clean_string(division2): # Identical except for alternative chars identical.append("/".join([region, country, bold(division), "*"]) + "\t" + "/".join([region, country, bold(division2), "*"])) identical_hit = True break diff = SequenceMatcher(None, division, division2).ratio() # Similarity score if not perfect hit if diff > 0.6: if diff > similarity_score: similarity_score = diff best_match = division2 if not identical_hit and best_match is not None: while similarity_score in similar: similarity_score += 0.000000000000001 similar[similarity_score] = "/".join([region, country, bold(division), "*"]) + "\t" + "/".join([region, country, bold(best_match), "*"]) if identical: print("Identical divisions:") for l in identical: print(l) if similar: print("\nSimilar divisions (sorted by descending similarity):") for l in sorted(similar, reverse=True): print(similar[l]) ### COUNTRY ### print("\n----------\n") identical = [] similar = {} for region in missing_latlongs["country"]: for country in missing_latlongs["country"][region]: similarity_score = 0 identical_hit = False best_match = None for country2 in data[region]: if country2 == country: continue if clean_string(country) == clean_string(country2): # Identical except for alternative chars identical.append("/".join([region, bold(country), "*", "*"]) + "\t" + "/".join([region, bold(country2), "*", "*"])) identical_hit = True break diff = SequenceMatcher(None, country, country2).ratio() # Similarity score if not perfect hit if diff > 0.6: if diff > similarity_score: similarity_score = diff best_match = country2 if not identical_hit and best_match is not None: while similarity_score in similar: similarity_score += 0.000000000000001 similar[similarity_score] = "/".join([region, bold(country), "*", "*"]) + "\t" + "/".join([region, bold(best_match), "*", "*"]) if identical: print("Identical countries:") for l in identical: print(l) 
if similar: print("\nSimilar countries (sorted by descending similarity):") for l in sorted(similar, reverse=True): print(similar[l]) ### REGION ### print("\n----------\n") identical = [] similar = {} for region in missing_latlongs["region"]: similarity_score = 0 identical_hit = False best_match = None for region2 in data: if region2 == region: continue if clean_string(region) == clean_string(region2): # Identical except for alternative chars identical.append("/".join([bold(region), "*", "*", "*"]) + "\t" + "/".join([bold(region2), "*", "*", "*"])) identical_hit = True break diff = SequenceMatcher(None, region, region2).ratio() # Similarity score if not perfect hit if diff > 0.6: if diff > similarity_score: similarity_score = diff best_match = region2 if not identical_hit and best_match is not None: while similarity_score in similar: similarity_score += 0.000000000000001 similar[similarity_score] = "/".join([bold(region), "*", "*", "*"]) + "\t" + "/".join([bold(best_match), "*", "*", "*"]) if identical: print("Identical regions:") for l in identical: print(l) if similar: print("\nSimilar regions (sorted by descending similarity):") for l in sorted(similar, reverse=True): print(similar[l])
null
13,607
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter def find_place(geo_level, place, full_place, geolocator, region = "*"): typed_place = full_place redo = True tries = 0 while redo == True: if tries < 5: try: new_place = geolocator.geocode(typed_place, language='en') except: tries += 1 continue else: new_place = None tries = 0 if str(new_place) == 'None': print("\nCurrent place for missing {}:\t".format(geo_level) + full_place) print("The place as currently written could not be found.") answer = 'n' else: new_place_string = new_place.address full_place_string = full_place for level in full_place.split(", "): if clean_string(level) in clean_string(new_place_string): new_place_string = bold(level).join(new_place_string.split(level)) full_place_string = bold(level).join(full_place_string.split(level)) for level in new_place.address.split(", "): if clean_string(level) in clean_string(full_place_string): full_place_string = bold(level).join(full_place_string.split(level)) new_place_string = bold(level).join(new_place_string.split(level)) print("\nCurrent place for missing {}:\t".format(geo_level) + full_place_string) print("Geopy suggestion: "+ new_place_string) if geo_level != "division": answer = input('Is this the right place [y/n]? ') else: answer = input('Is this the right place (a - alter division level) [y/n/a]? 
') if answer.lower() == 'y': coordinates = (geo_level + "\t" + place + "\t" + str(new_place.latitude) + "\t" + str(new_place.longitude)) redo = False elif geo_level == "division" and answer.lower() == "a": division2 = input("Type correct division to produce corrective rule: ") (division, country) = full_place.split(", ") print(bold("/".join([region, country, division, ""]) + "\t" + "/".join([region, country, division2, division]))) redo = False coordinates = ("location" + "\t" + place + "\t") else: # Let the user correct/have more detail for what's typed print("For: "+full_place) typed_place = input("Type a more specific place name or 'NA' to leave blank: ") if typed_place.lower() == 'na': coordinates = (geo_level + "\t" + place + "\t") redo = False #print(coordinates) return coordinates def auto_add_lat_longs(new_lat_longs): with open("defaults/lat_longs.tsv") as f: lat_longs_old = f.readlines() lat_longs = lat_longs_old + [l + "\n" for l in new_lat_longs if len(l.split("\t")) == 4] dataset = {"location": [], "division": [], "country": [], "region": []} for line in lat_longs: if line == "\n": continue dataset[line.split("\t")[0]].append(line) lat_longs_sorted = [] regions_list = [] for type in dataset: no_special_char = {clean_string(dataset[type][i].split("\t")[1]): i for i in range(len(dataset[type]))} for line in sorted(no_special_char): i = no_special_char[line] line_orig = dataset[type][i] if line_orig.startswith("country") and line_orig.split("\t")[1] in region_order: regions_list.append(line_orig) continue lat_longs_sorted.append(line_orig) if type == "country": lat_longs_sorted.append("\n") lat_longs_sorted += regions_list lat_longs_sorted.append("\n") if lat_longs_sorted != lat_longs_old: with open(path_to_output_files + latlongs_file, "w") as f: for line in lat_longs_sorted: f.write(line) print(bold("\nNew lat_longs written out to " + path_to_output_files + latlongs_file + ". 
Remember to replace the old file in " + path_to_default_files + ".")) else: print("No changes to " + latlongs_file + ".") def search_missing_latlongs(missing_latlongs): geolocator = Nominatim(user_agent="hello@nextstrain.org") new_lat_longs = [] for country in missing_latlongs["location"]: print("# " + country + " #") for division in missing_latlongs["location"][country]: print("\ndivision: " + division) for location in missing_latlongs["location"][country][division]: full_location = location + ", " + division + ", " + country new_lat_longs.append(find_place("location", location, full_location, geolocator)) print() for region in missing_latlongs["division"]: for country in missing_latlongs["division"][region]: print("# " + country + " #") for division in missing_latlongs["division"][region][country]: full_division = division + ", " + country new_lat_longs.append(find_place("division", division, full_division, geolocator, region)) print() for region in missing_latlongs["country"]: for country in missing_latlongs["country"][region]: new_lat_longs.append(find_place("country", country, country, geolocator)) auto_add_lat_longs(new_lat_longs)
null
13,608
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter def bold(s): def read_ordering(path): def read_latlongs(path): def sort_by_coordinates(data, coordinates): path_to_default_files = "defaults/" path_to_output_files = "scripts/curate_metadata/output_curate_metadata/" ordering_file = "color_ordering.tsv" region_order = ["Asia", "Oceania", "Africa", "Europe", "South America", "North America"] def build_ordering(data, new_latlongs): ordering = read_ordering(path_to_default_files) if new_latlongs: latlongs_path = path_to_output_files else: latlongs_path = path_to_default_files latlongs = read_latlongs(latlongs_path) # Drop all empty locations data_clean = {} for region in data: data_clean[region] = {} for country in data[region]: data_clean[region][country] = {} for division in data[region][country]: data_clean[region][country][division] = [] for location in data[region][country][division]: if location != "": data_clean[region][country][division].append(location) with open(path_to_output_files + ordering_file, "w") as out: for hierarchy in ordering: if hierarchy not in ["region", "country", "division", "location"]: for l in ordering[hierarchy]: out.write(hierarchy + "\t" + l + "\n") else: for region in region_order: if hierarchy == "region": out.write("region\t" + region + "\n") else: out.write("\n# " + region + "\n") for country in sort_by_coordinates(data_clean[region], latlongs["country"]): if hierarchy == "country": out.write("country\t" + country + "\n") else: if hierarchy == "location": if sum([len(data_clean[region][country][d]) for d in data_clean[region][country]]) > 0: # only write country as a comment if there is data following it out.write("\n### " + country) if hierarchy == "division": if len(data_clean[region][country]) > 0: out.write("\n### " + country + "\n") for division in sort_by_coordinates(data_clean[region][country], latlongs["division"]): if hierarchy == 
"division": out.write("division\t" + division + "\n") continue if len(data_clean[region][country][division]) > 0: # only write division as a comment if there is data following it out.write("\n# " + division + "\n") for location in sorted(data_clean[region][country][division]): out.write("location\t" + location + "\n") if hierarchy == "location" or hierarchy == "division": out.write("\n################\n") out.write("\n################\n\n\n") new_ordering = read_ordering(path_to_output_files) if not new_ordering == ordering: print(bold("Attention: " + ordering_file + " was altered! Remember to replace the old file in " + path_to_default_files + ".")) else: print("No changes to " + ordering_file + ".")
null
13,609
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter path_to_annotations = "../ncov-ingest/source-data/" def read_annotations(annotationsFile, gisaid): types = {"geography": ["location", "division", "country", "region", "division_exposure", "country_exposure", "region_exposure"], "special": ["sampling_strategy", "date", "host", "strain"], "paper": ["title", "paper_url"]} types_inverted = {t:section for section, type in types.items() for t in type} annotations = {"comments": [], "geography": {}, "special": {}, "paper": {}} with open(path_to_annotations + annotationsFile) as f: line = f.readline() while line: if line.startswith("#"): annotations["comments"].append(line.strip()) else: l = line.strip().split("\t") if line.endswith("\t\n"): l.append("") if gisaid: if len(l) != 4: print("Invalid annotation length (annotation deleted): " + line.strip()) line = f.readline() continue else: id = l[0] + "\t" + l[1] type = l[2] content = l[3] else: if len(l) != 3: print("Invalid annotation: " + line.strip()) line = f.readline() continue else: id = l[0] type = l[1] content = l[2] if type not in types_inverted: print("Invalid annotation type (annotation deleted): " + line.strip()) else: section = types_inverted[type] if id not in annotations[section]: annotations[section][id] = {} if type in annotations[section][id]: print("Duplicate annotation (first annotation deleted): " + line.strip() + " vs. " + type + "\t" + annotations[section][id][type]) annotations[section][id][type] = content line = f.readline() return annotations
null
13,610
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter path_to_metadata = "data/" def create_annotations(metadata_filename, applied_rules_geoLocation, applied_rules_manual, gisaid): geoLocationAnnotations = {} manualAnnotations = {} with open(path_to_metadata + metadata_filename) as f: header = f.readline().split("\t") country_i = header.index("country") region_i = header.index("region") division_i = header.index("division") location_i = header.index("location") strain_i = header.index("strain") gisaid_epi_isl_i = header.index("gisaid_epi_isl") genbank_accession_i = header.index("genbank_accession") host_i = header.index("host") line = f.readline() while line: l = line.split("\t") country = l[country_i] region = l[region_i] division = l[division_i] location = l[location_i] strain = l[strain_i] if gisaid: id = l[gisaid_epi_isl_i] else: id = l[genbank_accession_i] if (region, country, division, location) in applied_rules_geoLocation: geoLocationAnnotations[id] = (region, country, division, location), applied_rules_geoLocation[(region, country, division, location)], strain if (region, country, division, location) in applied_rules_manual: manualAnnotations[id] = (region, country, division, location), applied_rules_manual[(region, country, division, location)], strain line = f.readline() return geoLocationAnnotations, manualAnnotations
null
13,611
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter def bold(s): return('\033[1m' + s + '\033[0m') def find_conflicting_annotations(annotations, geoLocationAnnotations, manualAnnotations, gisaid): for id in annotations["geography"]: EPI_ISL = id.split("\t")[-1] for ruleSet in [geoLocationAnnotations, manualAnnotations]: if EPI_ISL in ruleSet: (region2, country2, division2, location2) = ruleSet[EPI_ISL][1] annotations_correct = {"region": region2, "country": country2, "division": division2, "location": location2} for type in annotations_correct: if type in annotations["geography"][id]: name0 = annotations["geography"][id][type] comment = "" if "#" in name0: (name0, comment) = name0.split(" #") if name0 != annotations_correct[type]: print(f"Conflicting annotation: {id}\t{bold(type + ' ' + name0)} will be replaced with {bold(annotations_correct[type])}") annotations["geography"][id][type] = annotations_correct[type] if comment != "": annotations["geography"][id][type] += " #" + comment for EPI_ISL in manualAnnotations: (region, country, division, location) = manualAnnotations[EPI_ISL][0] (region2, country2, division2, location2) = manualAnnotations[EPI_ISL][1] strain = manualAnnotations[EPI_ISL][2] if gisaid: id = strain + "\t" + EPI_ISL else: id = EPI_ISL annotations_correct = {"region": (region, region2), "country": (country, country2), "division": (division, division2), "location": (location, location2)} for type in annotations_correct: if annotations_correct[type][0] != annotations_correct[type][1]: if id not in annotations["geography"]: annotations["geography"][id] = {} if type not in annotations["geography"][id]: annotations["geography"][id][type] = annotations_correct[type][1] + " # previously " + annotations_correct[type][0] return annotations
null
13,612
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter def bold(s): path_to_metadata = "data/" def special_metadata_checks(metadata_filename, annotations, gisaid): special_annotations = {} unknown_clades = [] with open(path_to_metadata + metadata_filename) as f: header = f.readline().strip().split("\t") strain_i = header.index("strain") gisaid_epi_isl_i = header.index("gisaid_epi_isl") genbank_accession_i = header.index("genbank_accession") host_i = header.index("host") clade_i = header.index("Nextstrain_clade") date_i = header.index("date") clock_deviation_i = header.index("clock_deviation") line = f.readline() while line: l = line.strip().split("\t") host = l[host_i] strain = l[strain_i] if gisaid: id = strain + "\t" + l[gisaid_epi_isl_i] else: id = l[genbank_accession_i] # Check for special cases where annotations need to be introduced, e.g. special characters in strain names, or adjustment to "Mink" if host == "Neovison vison" or host == "Mustela lutreola": print("Adjust host " + host + " to Mink") if id not in special_annotations: special_annotations[id] = {} special_annotations[id]["host"] = "Mink # previously " + host problematic_char = ["'", "`"] for c in problematic_char: if c in strain: strain2 = strain.replace(c, "-") print("Adjust strain " + strain + " to " + strain2) if id not in special_annotations: special_annotations[id] = {} special_annotations[id]["strain"] = strain2 + " # previously " + strain line = f.readline() for id in special_annotations: if id not in annotations["special"]: annotations["special"][id] = {} for type in special_annotations[id]: if type in annotations["special"][id]: if annotations["special"][id][type] != special_annotations[id][type]: print("Conflicting annotation: " + id + "\t" + bold(type + " " + annotations["special"][id][type]) + " will be replaced with " + bold(special_annotations[id][type])) annotations["special"][id][type] = 
special_annotations[id][type] return annotations
null
13,613
import os from difflib import SequenceMatcher from pathlib import Path from geopy.geocoders import Nominatim from collections import Counter path_to_output_files = "scripts/curate_metadata/output_curate_metadata/" def write_annotations(annotations, annotationsFile): with open(path_to_output_files + annotationsFile, "w") as out: for section in annotations: if section == "comments": for line in sorted(annotations[section]): out.write(line + "\n") else: for id in sorted(annotations[section]): for type in sorted(annotations[section][id]): out.write(id + "\t" + type + "\t" + annotations[section][id][type] + "\n")
null
13,614
from datetime import datetime import math import struct import sys def scramble(data): acc = 0 nacc = 0 t = 0x2953 u = 0xD9C2 v = 0x3FF1 x = 1 it = 0 while it < len(data): t0 = t & 1 t1 = (t >> 1) & 1 u0 = u & 1 u1 = (u >> 1) & 1 v0 = v & 1 x ^= t1 ^ v0 x ^= u0 | u1 x ^= (t0 ^ u1 ^ v0) & (t0 ^ u0) if t0 == u0: v >>= 1 if v0: v ^= 0xB3D0 if t0 == 0: u >>= 1 if u0: u ^= 0xFB10 t >>= 1 if t0: t ^= 0xA740 nacc = (nacc + 1) % 256 acc = (acc * 2 + x) % 256 if nacc == 8: data[it] ^= acc nacc = 0 it += 1 return data
null
13,615
from datetime import datetime import math import struct import sys def flatten_dol(data): header = struct.unpack(">64I", data[:256]) dol_min = min(a for a in header[18:36] if a) dol_max = max(a + s for a, s in zip(header[18:36], header[36:54])) img = bytearray(dol_max - dol_min) for offset, address, length in zip(header[:18], header[18:36], header[36:54]): img[address - dol_min:address + length - dol_min] = data[offset:offset + length] # Entry point, load address, memory image return header[56], dol_min, img
null
13,616
from datetime import datetime import math import struct import sys def bytes_to_c_array(data): p_list = [data[i:i + 4] for i in range(0, len(data), 4)] return ["0x%08x" % int.from_bytes(b, byteorder='big', signed=False) for b in p_list]
null
13,617
from datetime import datetime import math import struct import sys def generate_header_file(elements, executable, input_file, output_file, size): output = '#include <stdio.h>\n\n' output += '//\n' output += '// Command: {0} {1} {2}\n'.format(executable, input_file, output_file) output += '//\n' output += '// File: {0}, size: {1} bytes\n'.format(input_file, size) output += '//\n' output += '// File generated on {0}\n'.format(datetime.now().strftime("%d.%m.%Y %H:%M:%S")) output += '//\n\n' output += 'uint32_t __in_flash("ipl_data") ipl[] = {\n\t' for num in range(len(elements)): if num > 0 and num % 4 == 0: output += '\n\t' output += elements[num] if num != len(elements): output += ', ' output += '\n};\n' return output
null
13,618
from datetime import datetime import math import struct import sys The provided code snippet includes necessary dependencies for implementing the `process_scrambled_ipl` function. Write a Python function `def process_scrambled_ipl(ipl, size)` to solve the following problem: Does additional processing to scrambled IPL payload. Payload used by PicoBoot has to be preprocessed. Whole payload has to be aligned to 1K blocks then every bit needs to be duplicated 4 times. Here is the function: def process_scrambled_ipl(ipl, size): """Does additional processing to scrambled IPL payload. Payload used by PicoBoot has to be preprocessed. Whole payload has to be aligned to 1K blocks then every bit needs to be duplicated 4 times. """ out2 = int.from_bytes(ipl, byteorder='big', signed=False) out2 = out2 << 1 binary = ''.join([char * 4 for char in format(out2, 'b')]) binary = int(binary, 2) payload = binary.to_bytes(size * 4, 'big') return payload
Does additional processing to scrambled IPL payload. Payload used by PicoBoot has to be preprocessed. Whole payload has to be aligned to 1K blocks then every bit needs to be duplicated 4 times.
13,619
import os import sys import torch from d2l import torch as d2l from torch import nn import d2lutil.common as common def dropout_layer(X, dropout): assert 0 <= dropout <= 1 # 在本情况中,所有元素都被丢弃。 if dropout == 1: return torch.zeros_like(X) # 在本情况中,所有元素都被保留。 if dropout == 0: return X mask = (torch.Tensor(X.shape).uniform_(0, 1) > dropout).float() return mask * X / (1.0 - dropout)
null
13,620
import os import sys import torch from torch import nn import d2lutil.common as common batch_size = min(10, train_labels.shape[0]) dataset = torch.utils.data.TensorDataset(train_features, train_labels) train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True) net = nn.Linear(train_features.shape[-1], 1) loss = nn.MSELoss() optimizer = torch.optim.SGD(net.parameters(), lr=0.01) train_ls, test_ls = [], [] for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y.reshape(-1, 1)) optimizer.zero_grad() l.backward() optimizer.step() train_ls.append(loss(net(train_features), train_labels.reshape(-1, 1)).item()) test_ls.append(loss(net(test_features), test_labels.reshape(-1, 1)).item()) common.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss', range(1, num_epochs + 1), test_ls, ['train', 'test']) print('weight:', net.weight.data, 'bias:', net.bias.data def fit_and_plot(train_features, train_labels, test_features, test_labels): batch_size = min(10, train_labels.shape[0]) dataset = torch.utils.data.TensorDataset(train_features, train_labels) train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True) net = nn.Linear(train_features.shape[-1], 1) loss = nn.MSELoss() optimizer = torch.optim.SGD(net.parameters(), lr=0.01) train_ls, test_ls = [], [] for epoch in range(num_epochs): for X, y in train_iter: l = loss(net(X), y.reshape(-1, 1)) optimizer.zero_grad() l.backward() optimizer.step() train_ls.append(loss(net(train_features), train_labels.reshape(-1, 1)).item()) test_ls.append(loss(net(test_features), test_labels.reshape(-1, 1)).item()) common.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss', range(1, num_epochs + 1), test_ls, ['train', 'test']) print('weight:', net.weight.data, 'bias:', net.bias.data)
null
13,621
import os import sys import torch from d2l import torch as d2l from torch import nn def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01)
null
13,622
import hashlib import os import tarfile import zipfile import requests import numpy as np import pandas as pd import torch from torch import nn from d2l import torch as d2l def download(name, cache_dir=os.path.join('..', 'data')): # @save """下载一个DATA_HUB中的文件,返回本地文件名""" assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}" url, sha1_hash = DATA_HUB[name] os.makedirs(cache_dir, exist_ok=True) fname = os.path.join(cache_dir, url.split('/')[-1]) if os.path.exists(fname): sha1 = hashlib.sha1() with open(fname, 'rb') as f: while True: data = f.read(1048576) if not data: break sha1.update(data) if sha1.hexdigest() == sha1_hash: return fname # 命中缓存 print(f'正在从{url}下载{fname}...') r = requests.get(url, stream=True, verify=True) with open(fname, 'wb') as f: f.write(r.content) return fname The provided code snippet includes necessary dependencies for implementing the `download_extract` function. Write a Python function `def download_extract(name, folder=None)` to solve the following problem: 下载并解压zip/tar文件 Here is the function: def download_extract(name, folder=None): # @save """下载并解压zip/tar文件""" fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.tar', '.gz'): fp = tarfile.open(fname, 'r') else: assert False, '只有zip/tar文件可以被解压缩' fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir
下载并解压zip/tar文件
13,623
import hashlib import os import tarfile import zipfile import requests import numpy as np import pandas as pd import torch from torch import nn from d2l import torch as d2l DATA_HUB = dict() def download(name, cache_dir=os.path.join('..', 'data')): # @save """下载一个DATA_HUB中的文件,返回本地文件名""" assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}" url, sha1_hash = DATA_HUB[name] os.makedirs(cache_dir, exist_ok=True) fname = os.path.join(cache_dir, url.split('/')[-1]) if os.path.exists(fname): sha1 = hashlib.sha1() with open(fname, 'rb') as f: while True: data = f.read(1048576) if not data: break sha1.update(data) if sha1.hexdigest() == sha1_hash: return fname # 命中缓存 print(f'正在从{url}下载{fname}...') r = requests.get(url, stream=True, verify=True) with open(fname, 'wb') as f: f.write(r.content) return fname DATA_HUB['kaggle_house_test'] = ( # @save DATA_URL + 'kaggle_house_pred_test.csv', 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90') The provided code snippet includes necessary dependencies for implementing the `download_all` function. Write a Python function `def download_all()` to solve the following problem: 下载DATA_HUB中的所有文件 Here is the function: def download_all(): # @save """下载DATA_HUB中的所有文件""" for name in DATA_HUB: download(name)
下载DATA_HUB中的所有文件
13,625
import os import sys import numpy as np import torch from d2l import torch as d2l import d2lutil.common as common train_features, test_features = features[:n_train, :], features[n_train:, :] train_labels, test_labels = labels[:n_train], labels[n_train:] train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True) def init_params(): w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True) b = torch.zeros(1, requires_grad=True) return [w, b] def l2_penalty(w): return torch.sum(w.pow(2)) / 2 net, loss = d2l.linreg, d2l.squared_loss def fit_and_plot(lambd): w, b = init_params() train_ls, test_ls = [], [] for _ in range(num_epochs): for X, y in train_iter: # 添加了L2范数惩罚项 l = loss(net(X, w, b), y) + lambd * l2_penalty(w) l = l.sum() if w.grad is not None: w.grad.data.zero_() b.grad.data.zero_() l.backward() d2l.sgd([w, b], lr, batch_size) train_ls.append(loss(net(train_features, w, b), train_labels).mean().item()) test_ls.append(loss(net(test_features, w, b), test_labels).mean().item()) common.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss', range(1, num_epochs + 1), test_ls, ['train', 'test']) print('L2 norm of w:', w.norm().item())
null
13,627
import os import sys import numpy as np import torch from d2l import torch as d2l import d2lutil.common as common train_features, test_features = features[:n_train, :], features[n_train:, :] train_labels, test_labels = labels[:n_train], labels[n_train:] def init_params(): w = torch.randn((num_inputs, 1), requires_grad=True) b = torch.zeros(1, requires_grad=True) return [w, b] def l2_penalty(w): return (w ** 2).sum() / 2 net, loss = d2l.linreg, d2l.squared_loss train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True) def fit_and_plot(lambd): w, b = init_params() train_ls, test_ls = [], [] for _ in range(num_epochs): for X, y in train_iter: # 添加了L2范数惩罚项 l = loss(net(X, w, b), y) + lambd * l2_penalty(w) l = l.sum() if w.grad is not None: w.grad.data.zero_() b.grad.data.zero_() l.backward() d2l.sgd([w, b], lr, batch_size) train_ls.append(loss(net(train_features, w, b), train_labels).mean().item()) test_ls.append(loss(net(test_features, w, b), test_labels).mean().item()) common.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss', range(1, num_epochs + 1), test_ls, ['train', 'test']) print('L2 norm of w:', w.norm().item())
null
13,628
import math import os import numpy as np import torch from d2l import torch as d2l def normal(x, mu, sigma): p = 1 / math.sqrt(2 * math.pi * sigma ** 2) return p * np.exp((- 0.5 / sigma ** 2) * (x - mu) ** 2)
null
13,629
import sys import os import matplotlib.pyplot as plt import torch import torchvision from torchvision import transforms from torch.utils import data from d2l import torch as d2l import d2lutil.common as common -shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] return [text_labels[int(i)] for i in labels The provided code snippet includes necessary dependencies for implementing the `get_fashion_mnist_labels` function. Write a Python function `def get_fashion_mnist_labels(labels)` to solve the following problem: 返回Fashion-MNIST数据集的文本标签。 Here is the function: def get_fashion_mnist_labels(labels): # @save """返回Fashion-MNIST数据集的文本标签。""" text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'] return [text_labels[int(i)] for i in labels]
返回Fashion-MNIST数据集的文本标签。
13,630
import sys import os import matplotlib.pyplot as plt import torch import torchvision from torchvision import transforms from torch.utils import data from d2l import torch as d2l import d2lutil.common as common d2l.use_svg_display() (images), figsize=(12, 12)) for f, img, lbl in zip(figs, images, labels): f.imshow(img.view((28, 28)).numpy()) f.set_title(lbl) f.axes.get_xaxis().set_visible(False) f.axes.get_yaxis().set_visible(False) plt.show( def show_fashion_mnist(images, labels): d2l.use_svg_display() # 这里的_表示我们忽略(不使用)的变量 _, figs = plt.subplots(1, len(images), figsize=(12, 12)) for f, img, lbl in zip(figs, images, labels): f.imshow(img.view((28, 28)).numpy()) f.set_title(lbl) f.axes.get_xaxis().set_visible(False) f.axes.get_yaxis().set_visible(False) plt.show()
null
13,631
import sys import os import matplotlib.pyplot as plt import torch import torchvision from torchvision import transforms from torch.utils import data from d2l import torch as d2l import d2lutil.common as common W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True) b = torch.zeros(num_outputs, requires_grad=True) return softmax(torch.matmul(X.reshape(-1, num_inputs), W) + b def net(X): return softmax(torch.matmul(X.reshape(-1, num_inputs), W) + b)
null