id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
160,463 | from typing import Callable
import random
import aiohttp, random
from lxml import html
import logging
async def generate_4chan_url(__keyword__: str):
    """Produce the fixed 4chan target URL.

    The keyword argument exists for interface parity with the other URL
    generators but is not used: collection always targets /biz/.
    """
    logging.info("[Pre-collect] generating 4chan target URL.")
    board_url = "https://boards.4channel.org/biz/"
    return board_url
160,464 | from typing import Callable
import random
import aiohttp, random
from lxml import html
import logging
The provided code snippet includes necessary dependencies for implementing the `generate_reddit_url` function. Write a Python function `async def generate_reddit_url(keyword: str)` to solve the following problem:
Generate a subreddit URL using the search tool with `keyword`. It randomly chooses one of the resulting subreddits.
Here is the function:
async def generate_reddit_url(keyword: str):
    """
    Generate a subreddit URL using the search tool with `keyword`.
    It randomly chooses one of the resulting subreddits.

    Raises:
        ValueError: when the search page yields no subreddit link
            (clearer than the IndexError `random.choice` would raise
            on an empty list).
    """
    logging.info("[Pre-collect] generating Reddit target URL.")
    async with aiohttp.ClientSession() as session:
        async with session.get(
            f"https://www.reddit.com/search/?q={keyword}&type=sr"
        ) as response:
            html_content = await response.text()
    tree = html.fromstring(html_content)
    # Collect subreddit links, excluding the ever-present /r/popular entry.
    urls = [
        url
        for url in tree.xpath('//a[contains(@href, "/r/")]//@href')
        if "/r/popular" not in url
    ]
    if not urls:
        raise ValueError(f"no subreddit found for keyword: {keyword!r}")
    # Append "new" to the chosen href — assumes hrefs end with "/" so this
    # lands on the newest-posts listing; TODO confirm against live markup.
    result = f"https://old.reddit.com{random.choice(urls)}new"
    return result
160,465 | from typing import Callable
import random
import aiohttp, random
from lxml import html
import logging
def convert_spaces_to_percent20(input_string):
    """Percent-encode the space characters of `input_string` (" " -> "%20")."""
    return "%20".join(input_string.split(" "))
async def generate_twitter_url(keyword: str, live_mode=True):
    """Build a Twitter search URL for `keyword`.

    Spaces are percent-encoded in the query; when `live_mode` is truthy the
    "latest" filter (&f=live) is appended.
    """
    logging.info("[Pre-collect] generating Twitter target URL.")
    encoded_query = keyword.replace(" ", "%20")
    search_url = f"https://twitter.com/search?q={encoded_query}&src=typed_query"
    if not live_mode:
        return search_url
    return search_url + "&f=live"
160,466 | from typing import Callable
import random
import aiohttp, random
from lxml import html
import logging
# Weighted pool of URL generators: [generator_coroutine, 0, weight].
# The weights (60/35/5) set the relative odds of each platform being drawn
# by generate_url via random.choices. The middle element is not read in the
# visible code — presumably a reserved counter/slot; TODO confirm.
url_generators: list[list] = [
    [generate_twitter_url, 0, 60],
    [generate_reddit_url, 0, 35],
    [generate_4chan_url, 0, 5],
]
async def generate_url(keyword: str):
    """Draw a weighted-random generator from ``url_generators`` and return
    the URL it builds for ``keyword``.

    Loops until one generator succeeds: a failing generator (e.g. the
    Reddit search request erroring out) is logged and a new draw is made.
    """
    while True:
        random_generator, _, _ = random.choices(
            url_generators, weights=[item[2] for item in url_generators]
        )[0]
        try:
            url = await random_generator(keyword)
            return url
        except Exception:
            # Was a bare `except:`, which also swallows asyncio.CancelledError
            # (and KeyboardInterrupt), making this task uncancellable — only
            # ordinary errors should trigger a retry.
            logging.exception(" [!] An error occured in generate_url [!]")
160,467 | import logging
import argparse
from typing import Callable
from exorde.persist import PersistedDict
from exorde.notification import send_notification
from exorde.models import LiveConfiguration
# Module-level singleton: coroutine that forwards a changed
# "last_notification" message at most once (built by build_last_notification).
last_notification = build_last_notification()
class PersistedDict:
    """A dict-like mapping transparently persisted to a JSON file.

    Reads are served from the in-memory ``data`` dict; every mutation
    schedules an asynchronous write-back to ``file_path`` (unless
    ``hold_persist`` is set, as during ``deep_merge``). Optional
    (de)serialization hooks allow non-JSON values such as datetimes.
    """

    def __init__(
        self,
        file_path: str,
        serializer: Union[Callable, None] = None,
        custom_object_hook: Union[Callable, None] = None,
    ):
        self.file_path = file_path
        self.serializer = serializer
        self.custom_object_hook = custom_object_hook
        self.data = self._load()
        # When True, __setitem__/__delitem__ skip persistence so that
        # deep_merge can batch many updates into a single write.
        self.hold_persist: bool = False

    async def _persist(self):
        """Write the current in-memory state to disk."""
        await persist(
            self.data, self.file_path, custom_serializer=self.serializer
        )

    def _load(self):
        """Load the persisted state from disk."""
        return load(self.file_path, custom_object_hook=self.custom_object_hook)

    def __getitem__(self, key):
        # Missing keys yield None instead of raising, so callers can probe
        # freely (e.g. persisted["last_notification"]).
        try:
            return self.data[key]
        except KeyError:  # was a bare except; only a missing key is expected
            return None

    def __setitem__(self, key, value):
        self.data[key] = value
        if not self.hold_persist:
            # Fire-and-forget write-back; requires a running event loop.
            asyncio.create_task(self._persist())

    def __delitem__(self, key):
        del self.data[key]
        if not self.hold_persist:
            asyncio.create_task(self._persist())

    async def deep_merge(self, update_context: dict) -> None:
        """
        Merge the update_context dictionary into the PersistedDict object deeply.
        Args:
            update_context (dict): The dictionary to merge into the PersistedDict object.
        """
        self.hold_persist = True
        self.data = self._deep_merge_dicts(self.data, update_context)
        self.hold_persist = False
        await self._persist()
        await asyncio.sleep(0.01)

    def _deep_merge_dicts(self, target, source):
        # Recursively merge `source` into `target`: nested dicts are merged
        # key by key, any other value type is overwritten.
        for key, value in source.items():
            if (
                key in target
                and isinstance(target[key], dict)
                and isinstance(value, dict)
            ):
                target[key] = self._deep_merge_dicts(target[key], value)
            else:
                target[key] = value
        return target

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    def keys(self):
        return self.data.keys()

    def values(self):
        return self.data.values()

    def items(self):
        return self.data.items()

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return repr(self.data)
async def send_notification(
    command_line_arguments: argparse.Namespace, data: str
):
    """Push `data` to the ntfy.sh topic named by the `ntfy` CLI argument
    and return ntfy's response body.

    In exorde-client the topic is supplied via the `ntfy` key on the
    command-line arguments namespace.
    """
    # Courtesy message logged before each push.
    logging.info(
        "If you like ntfy, please consider sponsoring me via GitHub Sponsors or Liberapay , or subscribing to ntfy Pro."
    )
    topic_url = f"https://ntfy.sh/{command_line_arguments.ntfy}"
    encoded_body = data.encode(encoding="utf-8")
    async with aiohttp.ClientSession() as session:
        async with session.post(topic_url, data=encoded_body) as response:
            return await response.text()
class LiveConfiguration(dict):
    """
    Remotely-fetched runtime configuration for the worker.

    Configuration is not a MadType because we do not want to break the
    configuration instantiation if a key is not defined in the python
    code.
    ! it therefore requires manual checking ; what happens when the user
    is unable to reach the configuration but the protocol is still online ?
    """

    remote_kill: bool  # presumably a remote kill-switch — confirm semantics
    online: bool
    batch_size: int  # items per spotting batch (see prepare_batch)
    last_info: Optional[str]
    worker_version: Optional[str]
    protocol_version: Optional[str]
    expiration_delta: Optional[int]  # data freshness
    target: Optional[str]
    default_gas_price: Optional[int]  # used when building transactions
    default_gas_amount: Optional[int]
    gas_cap_min: Optional[int]
    inter_spot_delay_seconds: int
    last_notification: str  # message forwarded once via ntfy (see build_last_notification)
def build_last_notification() -> Callable:
    """Build the `last_notification` coroutine with a persisted memo of the
    last message already sent, so a given notification text is forwarded to
    ntfy at most once across restarts.
    """
    persisted_last_notification = PersistedDict(
        "/tmp/exorde/last_notification.json"
    )

    async def last_notification(
        live_configuration: LiveConfiguration,
        command_line_arguments: argparse.Namespace,
    ) -> None:
        """Send live_configuration["last_notification"] if it changed."""
        last_notification = live_configuration.get("last_notification", None)
        if not last_notification:
            logging.warning("no last_notification found in LiveConfiguration")
            return
        nonlocal persisted_last_notification
        # PersistedDict.__getitem__ returns None for a missing key, so the
        # first branch also covers the very first run.
        if (
            persisted_last_notification["last_notification"] is None
            or persisted_last_notification["last_notification"]
            != last_notification
        ):
            await send_notification(command_line_arguments, last_notification)
            persisted_last_notification[
                "last_notification"
            ] = last_notification

    return last_notification
160,468 | import argparse
from exorde.counter import AsyncItemCounter
from exorde.models import Ponderation
from exorde.notification import send_notification
class AsyncItemCounter:
    """Sliding-window event counter keyed by string, persisted to disk.

    Each key maps to a deque of occurrence datetimes; entries older than
    24 hours are pruned lazily on read.
    """

    def __init__(self):
        # State is restored from STATS_FILE_PATH so counts survive restarts.
        self.data: Dict[str, deque] = load(
            STATS_FILE_PATH, ItemCounterObjectHook
        )

    async def increment(self, key: str) -> None:
        """Record one occurrence of `key` (timestamped now) and persist."""
        occurrences = self.data.get(key, deque())
        occurrences.append(datetime.now())
        self.data[key] = occurrences
        await persist(
            self.data, STATS_FILE_PATH, custom_serializer=ItemCounterSerializer
        )

    async def count_last_n_items(self, n_items: int) -> Dict[str, int]:
        """Return, per key, how many occurrences fall within its last
        `n_items` recorded entries (capped by the number recorded)."""
        result = {}
        for key in self.data:
            occurrences = self.data.get(key, deque())
            # Convert to list and take the last n_items
            result[key] = len(list(occurrences)[-n_items:])
        return result

    async def count_occurrences(
        self, key: str, time_period: timedelta = timedelta(hours=24)
    ) -> int:
        """Count occurrences of `key` within `time_period`.

        Periods longer than 24h are ineffective because entries older than
        24h are pruned here regardless of `time_period`.
        """
        now = datetime.now()
        # Cleaning up is always enforced on static 24h
        valid_time_cleanup = now - timedelta(hours=24)
        occurrences = self.data.get(key, deque())
        # Remove dates older than 24 hours
        while occurrences and occurrences[0] < valid_time_cleanup:
            occurrences.popleft()
        # Count occurrences within the specified time period
        valid_time_count = now - time_period
        count = sum(1 for occ in occurrences if occ >= valid_time_count)
        return count
class Ponderation:
    """Remote scraping-ponderation configuration: which modules are enabled,
    their parameters, selection weights and language support."""

    enabled_modules: Dict[str, List[str]]
    generic_modules_parameters: Dict[str, Union[int, str, bool]]
    specific_modules_parameters: Dict[str, Dict[str, Union[int, str, bool]]]
    weights: Dict[str, float]  # per-module weight; keys iterated by inactivity_notification
    lang_map: Dict[str, list]  # module_name as key
    new_keyword_alg: int  # weight for #986
async def send_notification(
    command_line_arguments: argparse.Namespace, data: str
):
    """
    POST `data` to the operator's ntfy.sh topic and return the response body.

    - In exorde-client, the `topic` is passed using the `ntfy` key and
    retrieved here using the command_line_arguments variable.
    """
    # Courtesy message logged before each push.
    logging.info(
        "If you like ntfy, please consider sponsoring me via GitHub Sponsors or Liberapay , or subscribing to ntfy Pro."
    )
    async with aiohttp.ClientSession() as session:
        url = f"https://ntfy.sh/{command_line_arguments.ntfy}"
        payload = data.encode(encoding="utf-8")
        async with session.post(url, data=payload) as response:
            response_text = await response.text()
            return response_text
async def inactivity_notification(
    ponderation: Ponderation,
    counter: AsyncItemCounter,
    command_line_arguments: argparse.Namespace,
) -> None:
    """Notify the operator when no post at all has been collected.

    NOTE(review): the message says "last 30 minutes" but count_occurrences
    defaults to a 24-hour window — confirm which period is intended.
    """
    rep = 0
    # Sum REP counters across every weighted module, plus the catch-all
    # "other" bucket used for unaliased domains.
    for item in ponderation.weights:
        rep += await counter.count_occurrences("rep_" + item)
    rep += await counter.count_occurrences("other")
    if rep == 0:
        await send_notification(
            command_line_arguments,
            f"You didn't collect any post over the last 30 minutes",
        )
160,469 | import asyncio
from collections import deque
from datetime import datetime, timedelta
from typing import Dict
from exorde.persist import persist, load
def ItemCounterSerializer(obj):
    """JSON serializer hook: encode datetimes and deques as tagged dicts
    that ItemCounterObjectHook can decode back; anything else passes through.
    """
    if isinstance(obj, deque):
        return {"__deque__": True, "value": list(obj)}
    if isinstance(obj, datetime):
        return {"__datetime__": True, "value": obj.timestamp()}
    return obj
160,470 | import asyncio
from collections import deque
from datetime import datetime, timedelta
from typing import Dict
from exorde.persist import persist, load
def ItemCounterObjectHook(obj):
    """json object_hook: decode the tagged dicts written by
    ItemCounterSerializer back into datetime/deque values.

    The datetime tag is checked first, matching the encoder's precedence.
    """
    decoders = (
        ("__datetime__", datetime.fromtimestamp),
        ("__deque__", deque),
    )
    for tag, decode in decoders:
        if tag in obj:
            return decode(obj["value"])
    return obj
160,471 | import logging
import argparse
import os
from exorde.models import Processed
from typing import Union
from exorde.prepare_batch import prepare_batch
from exorde.process_batch import process_batch, Batch
from exorde.spot_data import spot_data
from exorde.get_transaction_receipt import get_transaction_receipt
from exorde.ipfs import download_ipfs_file, upload_to_ipfs
from exorde.models import LiveConfiguration, StaticConfiguration
from exorde.counter import AsyncItemCounter
import json
import logging
import argparse
import aiohttp
from typing import Callable
from exorde.counter import AsyncItemCounter
from datetime import datetime, timedelta
from uuid import uuid4, UUID
from exorde.create_error_identifier import create_error_identifier
import traceback
async def _get_alias() -> dict[str, str]:
    """Fetch the domain -> platform-alias map from ALIASES_URL.

    Falls back to a hard-coded map when the response body is not valid
    JSON (the raw body is logged for diagnosis). Network errors propagate
    via raise_for_status / aiohttp exceptions.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(ALIASES_URL) as response:
            response.raise_for_status()
            raw_data: str = await response.text()
            try:
                json_data = json.loads(raw_data)
            except Exception:
                logging.exception(raw_data)
                # Static fallback alias table.
                return {
                    "4chan": "4chan",
                    "4channel.org": "4chan",
                    "reddit.com": "reddit",
                    "twitter.com": "twitter",
                    "t.co": "twitter",
                    "x.com": "twitter",
                    "youtube.com": "youtube",
                    "yt.co": "youtube",
                    "mastodon.social": "mastodon",
                    "mastodon": "mastodon",
                    "weibo.com": "weibo",
                    "weibo.org": "weibo",
                    "nostr.social": "nostr",
                    # NOTE(review): "nostr.com" -> "forocoches" looks like a
                    # copy-paste slip — confirm it should not be "nostr".
                    "nostr.com": "forocoches",
                    "jeuxvideo.com": "jvc",
                    "forocoches.com": "forocoches",
                    "bitcointalk.org": "bitcointalk",
                    "ycombinator.com": "hackernews",
                    "tradingview.com": "tradingview",
                    "followin.in": "followin",
                    "seekingalpha.io": "seekingalpha",
                }
            return json_data
def alias_geter() -> Callable:
    """Build a cached alias fetcher.

    The returned coroutine memoises the result of `_get_alias` and
    refreshes it at most once per minute.
    """
    cached = None
    fetched_at = datetime.now()

    async def get_alias_wrapper() -> dict[str, str]:
        nonlocal cached, fetched_at
        stale = (datetime.now() - fetched_at) > timedelta(minutes=1)
        if not cached or stale:
            fetched_at = datetime.now()
            cached = await _get_alias()
        return cached

    return get_alias_wrapper
160,472 | import logging
import argparse
import os
from exorde.models import Processed
from typing import Union
from exorde.prepare_batch import prepare_batch
from exorde.process_batch import process_batch, Batch
from exorde.spot_data import spot_data
from exorde.get_transaction_receipt import get_transaction_receipt
from exorde.ipfs import download_ipfs_file, upload_to_ipfs
from exorde.models import LiveConfiguration, StaticConfiguration
from exorde.counter import AsyncItemCounter
import json
import logging
import argparse
import aiohttp
from typing import Callable
from exorde.counter import AsyncItemCounter
from datetime import datetime, timedelta
from uuid import uuid4, UUID
from exorde.create_error_identifier import create_error_identifier
import traceback
async def count_rep_for_each_domain(
    counter: AsyncItemCounter, batch: dict
) -> None:
    """Record the REP gained per source into the shared counter.

    Rather than spawning a dedicated counter, each domain's alias is
    prefixed with "rep_" so the single AsyncItemCounter instance can be
    reused. One REP is credited per item the protocol processed, so every
    entry of the post-upload file increments its source's counter; unknown
    domains fall into the "other" bucket.
    """
    global get_aliases
    aliases = await get_aliases()
    for entry in batch["items"]:
        source = aliases.get(entry["item"]["domain"], "other")
        await counter.increment(f"rep_{source}")
class StaticConfiguration(dict):
    """Immutable per-run configuration bundle (accounts, contracts, web3
    clients and NLP lab configuration)."""

    main_address: str
    worker_account: LocalAccount
    protocol_configuration: dict
    network_configuration: dict
    contracts_and_abi: dict
    contracts: dict
    read_web3: AsyncWeb3  # client used for reads (nonce, receipts)
    write_web3: AsyncWeb3  # client used to send raw transactions
    lab_configuration: dict  # NLP models configuration
    gas_cache: dict  # mutable cache shared with gas estimation
class LiveConfiguration(dict):
    """
    Runtime configuration fetched from the network while the worker runs.

    Configuration is not a MadType because we do not want to break the
    configuration instantiation if a key is not defined in the python
    code.
    ! it therefore requires manual checking ; what happens when the user
    is unable to reach the configuration but the protocol is still online ?
    """

    remote_kill: bool  # presumably a remote kill-switch — confirm semantics
    online: bool
    batch_size: int  # items per spotting batch (see prepare_batch)
    last_info: Optional[str]
    worker_version: Optional[str]
    protocol_version: Optional[str]
    expiration_delta: Optional[int]  # data freshness
    target: Optional[str]
    default_gas_price: Optional[int]  # used when building transactions
    default_gas_amount: Optional[int]
    gas_cap_min: Optional[int]
    inter_spot_delay_seconds: int
    last_notification: str  # message forwarded once via ntfy
class Processed(dict, metaclass=MadType):
    """A fully pre-processed item: the original item together with its
    translation, extracted keywords and classification."""

    translation: Translation
    top_keywords: Keywords
    classification: Classification
    item: Item
async def prepare_batch(
    static_configuration: StaticConfiguration,
    live_configuration: LiveConfiguration,
    command_line_arguments: argparse.Namespace,
    counter: AsyncItemCounter,
    websocket_send: Callable,
    spotting_identifier: str,
) -> list[tuple[int, Processed]]:
    """Pull items from the collection generator and process them into a
    batch of (item_id, Processed) pairs.

    The batch is returned as soon as one trigger fires:
    - gathering slowed down (the last item took > 90 s) while >= 5 items
      are already held (early stop to prevent data aging),
    - the cumulative token count exceeds batch_size * max_token_count,
    - the item count reaches the selected batch size.
    Oversized items (TooBigError) are split into chunks that all share the
    same item_id. Per-item processing errors are logged and skipped.
    """
    max_depth_classification: int = live_configuration["max_depth"]
    batch: list[tuple[int, Processed]] = []  # id, item
    generator: AsyncGenerator[Item, None] = get_item(
        command_line_arguments, counter, websocket_send
    )
    lab_configuration: dict = static_configuration["lab_configuration"]
    item_id = -1
    # CLI override takes precedence over the live configuration value.
    selected_batch_size = (
        command_line_arguments.custom_batch_size
        if command_line_arguments.custom_batch_size
        else live_configuration["batch_size"]
    )
    gather_time = time.time()
    times = [time.time()]  # [prepare_batch_start_time, ... item.recolt_time]
    async for item in generator:
        diff = time.time() - gather_time  # seconds spent gathering this item
        gather_time = time.time()
        times.append(gather_time)
        item_id = item_id + 1
        try:
            start_time: float = time.perf_counter()
            splitted_mode = False
            try:
                processed_item: Processed = await process(
                    item, lab_configuration, max_depth_classification
                )
                batch.append((item_id, processed_item))
                # Report the collected item to the monitoring websocket.
                await websocket_send(
                    {
                        "jobs": {
                            spotting_identifier: {
                                "items": {
                                    str(item_id): {
                                        "collection_time": datetime.datetime.now().strftime(
                                            "%Y-%m-%d %H:%M:%S"
                                        ),
                                        "domain": str(item.domain),
                                        "url": str(item.url),
                                    }
                                }
                            }
                        }
                    }
                )
            except TooBigError:
                # Item exceeds the token budget: split it into paragraph
                # chunks and process each one under the same item_id.
                logging.info("\n_________ Paragraph maker __________________")
                splitted: list[Item] = split_item(
                    item, live_configuration["max_token_count"]
                )
                splitted_mode = True
                # print all splitted items with index
                for i, item in enumerate(splitted):
                    logging.info(
                        f"\t\t[Paragraph] Sub-split item {i} = {item}"
                    )
                for chunk in splitted:
                    processed_chunk: Processed = await process(
                        chunk, lab_configuration, max_depth_classification
                    )
                    batch.append((item_id, processed_chunk))
                    await websocket_send(
                        {
                            "jobs": {
                                spotting_identifier: {
                                    "items": {
                                        str(item_id): {
                                            "collection_time": datetime.datetime.now().strftime(
                                                "%Y-%m-%d %H:%M:%S"
                                            ),
                                            "domain": str(item.domain),
                                            "url": str(item.url),
                                        }
                                    }
                                }
                            }
                        }
                    )
                    item_token_count_ = evaluate_token_count(
                        str(chunk.content)
                    )
                    end_time: float = time.perf_counter()
                    exec_time_s: float = end_time - start_time
                    logging.info(
                        f"[PARAGRAPH MODE] + A new sub-item has been processed {len(batch)}/{selected_batch_size} - ({exec_time_s} s) - Source = {str(item['domain'])} - token count = {item_token_count_}"
                    )
            if splitted_mode == False:
                end_time: float = time.perf_counter()
                item_token_count = evaluate_token_count(str(item.content))
                exec_time_s: float = end_time - start_time
                logging.info(
                    f" + A new item has been processed {len(batch)}/{selected_batch_size} - ({exec_time_s} s) - Source = {str(item['domain'])} - token count = {item_token_count}"
                )
            if diff > 90 and len(batch) >= 5:
                logging.info("Early-Stop current batch to prevent data-aging")
                return batch
            # Evaluate the maximum allowed cumulated token count in batch
            try:
                max_batch_total_tokens_ = int(
                    live_configuration["batch_size"]
                ) * int(live_configuration["max_token_count"])
            except:
                max_batch_total_tokens_ = 30000  # default value
            # Evaluate the cumulated number of tokens in the batch
            try:
                cumulative_token_size = sum(
                    [
                        evaluate_token_count(str(item.item.content))
                        for (__id__, item) in batch
                    ]
                )
            except:
                cumulative_token_size = 150 * len(batch)
            if (
                # If we have enough items of each enough tokens
                cumulative_token_size > max_batch_total_tokens_
                # Or If we have enough items overall
                or len(batch) >= selected_batch_size  # add spent_time notion
            ):
                return batch
        except Exception as e:
            logging.exception("An error occured while processing an item")
    # Generator exhausted without reaching a batch trigger — presumably
    # discard the partial batch; TODO confirm intended behavior.
    return []
async def process_batch(
    batch: list[tuple[int, Processed]], static_configuration
) -> Batch:
    """Run the NLP analysis over a prepared batch and assemble the
    protocol-level Batch payload.

    Chunks sharing the same item id (paragraph-split items) are merged back
    into a single aggregated item via merge_chunks; merge results of None
    are dropped.
    """
    lab_configuration: dict = static_configuration["lab_configuration"]
    logging.info(f"running batch for {len(batch)}")
    # One Analysis per processed item, computed on the translated text.
    analysis_results: list[Analysis] = tag(
        [processed.translation.translation for (__id__, processed) in batch],
        lab_configuration,
    )
    complete_processes: dict[int, list[ProcessedItem]] = {}
    for (id, processed), analysis in zip(batch, analysis_results):
        prot_item: ProtocolItem = ProtocolItem(
            created_at=processed.item.created_at,
            domain=processed.item.domain,
            url=Url(processed.item.url),
            language=processed.translation.language,
        )
        # Optional fields are only set when present on the source item.
        if processed.item.title:
            prot_item.title = processed.item.title
        if processed.item.summary:
            prot_item.summary = processed.item.summary
        if processed.item.picture:
            prot_item.picture = processed.item.picture
        if processed.item.author:
            prot_item.author = processed.item.author
        if processed.item.external_id:
            prot_item.external_id = processed.item.external_id
        if processed.item.external_parent_id:
            prot_item.external_parent_id = processed.item.external_parent_id
        completed: ProcessedItem = ProcessedItem(
            item=prot_item,
            analysis=ProtocolAnalysis(
                classification=processed.classification,
                top_keywords=processed.top_keywords,
                language_score=analysis.language_score,
                gender=analysis.gender,
                sentiment=analysis.sentiment,
                embedding=analysis.embedding,
                source_type=get_source_type(prot_item),
                text_type=analysis.text_type,
                emotion=analysis.emotion,
                irony=analysis.irony,
                age=analysis.age,
            ),
            collection_client_version=CollectionClientVersion(
                f"exorde:v.{metadata.version('exorde_data')}"
            ),
            collection_module=CollectionModule("unknown"),
            collected_at=CollectedAt(datetime.now().isoformat() + "Z"),
        )
        # Group chunks by their originating item id.
        if not complete_processes.get(id, {}):
            complete_processes[id] = []
        complete_processes[id].append(completed)
    aggregated = []
    for __key__, values in complete_processes.items():
        merged_ = merge_chunks(values)
        if merged_ is not None:
            aggregated.append(merged_)
    result_batch: Batch = Batch(items=aggregated, kind=BatchKindEnum.SPOTTING)
    return result_batch
async def spot_data(
    cid,
    item_count_,
    worker_account,
    configuration,
    gas_cache,
    contracts,
    read_web3,
    write_web3,
    static_configuration,
):
    """Submit a DataSpotting.SpotData transaction referencing the IPFS `cid`.

    Retries up to 5 times; a "balance is too low" ValueError triggers the
    faucet (itself retried 3 times) before the next attempt.

    Returns:
        (transaction_hash, previous_nonce) on success.
    Raises:
        SpottingError: when every attempt failed.
    """
    for i in range(0, 5):
        try:
            logging.info(f"[Spot Data] transaction attempt ({i}/5)")
            previous_nonce = await read_web3.eth.get_transaction_count(
                worker_account.address
            )
            # Never claim more items than the configured batch size.
            item_count = min(
                int(item_count_), int(configuration["batch_size"])
            )
            assert isinstance(cid, str)
            transaction = await (
                contracts["DataSpotting"]
                .functions.SpotData([cid], [""], [item_count], "")
                .build_transaction(
                    {
                        "nonce": previous_nonce,
                        "from": worker_account.address,
                        "gasPrice": configuration["default_gas_price"],
                    }
                )
            )
            estimated_transaction = await estimate_gas(
                transaction, read_web3, gas_cache, configuration
            )
            signed_transaction = read_web3.eth.account.sign_transaction(
                estimated_transaction, worker_account.key.hex()
            )
            transaction_hash = await write_web3.eth.send_raw_transaction(
                signed_transaction.rawTransaction
            )
            logging.info(f"[Spot Data] transaction sent")
            return transaction_hash, previous_nonce
        except ValueError as ve:
            if "balance is too low" in ve.args[0].get("message", ""):
                # Account balance is too low
                # NOTE(review): this inner loop reuses `i`, clobbering the
                # outer attempt number in later log lines — confirm intent.
                for i in range(0, 3):
                    try:
                        await faucet(static_configuration)
                        break
                    except:
                        timeout = i * 1.5 + 1
                        logging.exception(
                            f"An error occured during faucet (attempt {i}) (retry in {timeout})"
                        )
                        await asyncio.sleep(timeout)
        except Exception as e:
            # Backoff grows with the attempt number.
            await asyncio.sleep(i * 1.5 + 1)
            logging.exception(
                f"[Spot Data] An error occured during spot_data ({i}/5)"
            )
    raise SpottingError()
async def get_transaction_receipt(
    transaction_hash, previous_nonce, static_configuration
):
    """Wait for the transaction to be mined and return its receipt.

    `previous_nonce` is currently unused; the dead nonce-polling
    confirmation strategy it served was replaced by
    wait_for_transaction_receipt.
    """
    read_web3 = static_configuration["read_web3"]
    # Give the transaction a moment to propagate before polling.
    await asyncio.sleep(2)
    logging.info("Waiting for transaction confirmation")
    receipt = await read_web3.eth.wait_for_transaction_receipt(
        transaction_hash, timeout=30, poll_latency=6
    )
    return receipt
async def upload_to_ipfs(
    value,
    job_id: str,
    websocket_send: Callable,
    ipfs_path="http://ipfs-api.exorde.network/add",
) -> Union[str, None]:
    """POST `value` (JSON-serialized) to the exorde IPFS API and return the
    resulting CID.

    Retries up to 5 times with growing backoff; every failed attempt is
    reported to the monitoring websocket. An "empty content" rejection
    (items too old) aborts the retry loop immediately.

    Raises:
        Exception: when no attempt yielded a CID.
    """
    empty_content_flag = False
    for i in range(5):  # Retry up to 5 times
        try:
            async with aiohttp.ClientSession() as session:
                _value = json.dumps(
                    value, cls=EnumEncoder
                )  # Make sure EnumEncoder is defined
                async with session.post(
                    ipfs_path,
                    data=_value,
                    headers={"Content-Type": "application/json"},
                    timeout=90,  # Set a timeout for the request
                ) as resp:
                    logging.info(f"[IPFS API Initial trace] Response status = {resp.status}, content = {await resp.text()}")
                    # if empty content in response, raise exception
                    if "empty content" in await resp.text():
                        empty_content_flag = True
                        raise Exception(
                            "[IPFS API] Upload failed because items are too old"
                        )
                    if resp.status == 200:
                        logging.debug("Upload to IPFS succeeded")
                        response = await resp.json()
                        logging.info(
                            f"[IPFS API] Success, response = {response}"
                        )
                        return response["cid"]
                    if resp.status == 500:
                        # Server-side rejection: report it, back off, retry.
                        text = await resp.text()
                        error_identifier = create_error_identifier([text])
                        await websocket_send(
                            {
                                "jobs": {job_id: {"steps": {"ipfs_upload": {"attempts": {i: {"status": resp.status, "text": text}}}}}},
                                "errors": {error_identifier: {"traceback": [text], "module": "upload_to_ipfs", "intents": {job_id: {datetime.now().strftime("%Y-%m-%d %H:%M:%S"): {}}}}},
                            }
                        )
                        logging.error(
                            f"[IPFS API - Error 500] API rejection: {text}"
                        )
                        if text == "empty content":
                            empty_content_flag = True
                            raise Exception(
                                "[IPFS API] Upload failed because items are too old"
                            )
                        await asyncio.sleep(i * 1.5)  # Adjust sleep factor
                        logging.info(
                            f"Failed upload, retrying ({i + 1}/5)"
                        )  # Update retry count
                        continue  # Retry after handling the error
                    else:
                        # Any other non-200 status: report it, then fall
                        # through to the next attempt.
                        error_text = await resp.text()
                        error_identifier = create_error_identifier(
                            [error_text]
                        )
                        await websocket_send(
                            {
                                "jobs": {job_id: {"steps": {"ipfs_upload": {"attempts": {i: {"status": resp.status, "text": error_text}}}}}},
                                "errors": {error_identifier: {"traceback": [error_text], "module": "upload_to_ipfs", "intents": {job_id: {datetime.now().strftime("%Y-%m-%d %H:%M:%S"): {}}}}},
                            }
                        )
                        logging.info(
                            f"[IPFS API] Failed, response status = {resp.status}, text = {error_text}"
                        )
        except Exception as e:
            if empty_content_flag:
                # Deliberate abort: retrying cannot help with too-old items.
                break
            logging.exception(f"[IPFS API] Error: {e}")
            await asyncio.sleep(i * 1.5)  # Adjust sleep factor
            # Retrieve and format the traceback as a list of strings
            traceback_list = traceback.format_exception(
                type(e), e, e.__traceback__
            )
            error_identifier = create_error_identifier(traceback_list)
            await websocket_send(
                {
                    "jobs": {job_id: {"steps": {"ipfs_upload": {"attempts": {i: {"status": "error", "error": error_identifier}}}}}},
                    "errors": {error_identifier: {"traceback": traceback_list, "module": "upload_to_ipfs", "intents": {job_id: {datetime.now().strftime("%Y-%m-%d %H:%M:%S"): {}}}}},
                }
            )
            logging.info(
                f"Failed upload, retrying ({i + 1}/5)"
            )  # Update retry count
    if empty_content_flag == False:
        await websocket_send(
            {"jobs": {job_id: {"steps": {"ipfs_upload": {"failed": "will not retry"}}}}}
        )
    raise Exception("Failed to upload to IPFS")
async def download_ipfs_file(cid: str, max_attempts: int = 5) -> dict:
    """Fetch and JSON-decode the IPFS document `cid`.

    Tries up to `max_attempts` gateways (one per attempt) with growing
    backoff between attempts.

    Raises:
        DownloadError: when every attempt failed.
    """
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.146 Safari/537.36",
        "Connection": "close",
    }
    # presumably an endless iterator over gateway base URLs — confirm
    gateways = rotate_gateways()
    async with ClientSession(headers=headers) as session:
        for i in range(max_attempts):
            url = next(gateways) + cid
            logging.info("[IPFS Download] download of %s (%s)", url, i)
            try:
                async with session.get(
                    url, timeout=45, allow_redirects=True
                ) as response:
                    if response.status == 200:
                        logging.info("download of %s OK after (%s)", url, i)
                        return await response.json()
                    else:
                        logging.info(
                            "[IPFS Download] Failed download attempt %s of %s, status code: %s",
                            i + 1,
                            max_attempts,
                            response.status,
                        )
            except Exception as error:
                logging.info(
                    "[IPFS Download] Failed to download from %s: %s (%s)",
                    url,
                    error.__class__.__name__,
                    error,
                )
            await asyncio.sleep(i * 1.5)  # Adjust sleep factor
    raise DownloadError(
        "Failed to download file from IPFS after multiple attempts"
    )
class AsyncItemCounter:
    """Persisted per-key event counter over a 24-hour sliding window.

    Every key holds a deque of occurrence timestamps; reads prune entries
    older than 24 hours, writes persist the whole structure to disk.
    """

    def __init__(self):
        # Restore previous counts from STATS_FILE_PATH.
        self.data: Dict[str, deque] = load(
            STATS_FILE_PATH, ItemCounterObjectHook
        )

    async def increment(self, key: str) -> None:
        """Append a now-timestamped occurrence for `key` and persist."""
        occurrences = self.data.get(key, deque())
        occurrences.append(datetime.now())
        self.data[key] = occurrences
        await persist(
            self.data, STATS_FILE_PATH, custom_serializer=ItemCounterSerializer
        )

    async def count_last_n_items(self, n_items: int) -> Dict[str, int]:
        """Per key, size of its trailing `n_items` slice of occurrences."""
        result = {}
        for key in self.data:
            occurrences = self.data.get(key, deque())
            # Convert to list and take the last n_items
            result[key] = len(list(occurrences)[-n_items:])
        return result

    async def count_occurrences(
        self, key: str, time_period: timedelta = timedelta(hours=24)
    ) -> int:
        """Number of occurrences of `key` within `time_period` (entries
        beyond 24h are pruned here regardless of the requested period)."""
        now = datetime.now()
        # Cleaning up is always enforced on static 24h
        valid_time_cleanup = now - timedelta(hours=24)
        occurrences = self.data.get(key, deque())
        # Remove dates older than 24 hours
        while occurrences and occurrences[0] < valid_time_cleanup:
            occurrences.popleft()
        # Count occurrences within the specified time period
        valid_time_count = now - time_period
        count = sum(1 for occ in occurrences if occ >= valid_time_count)
        return count
def create_error_identifier(traceback_list: list[str]) -> str:
    """Derive a stable hexadecimal identifier from traceback lines.

    The lines are joined with newlines and hashed with SHA-256, so
    identical errors always map to the same identifier and can be grouped.
    """
    joined = "\n".join(traceback_list)
    return hashlib.sha256(joined.encode("utf-8")).hexdigest()
async def spotting(
    live_configuration: LiveConfiguration,
    static_configuration: StaticConfiguration,
    command_line_arguments: argparse.Namespace,
    counter: AsyncItemCounter,
    websocket_send: Callable,
) -> None:
    """Run one full spotting cycle: gather a batch, analyze it, upload it
    to IPFS, submit the SpotData transaction and wait for its receipt.

    Every step reports progress/errors to the monitoring websocket under a
    fresh job identifier; any failing step logs and returns early.
    """
    spotting_identifier: str = str(uuid4())
    await websocket_send(
        {"jobs": {spotting_identifier: {"start": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}}}
    )
    # Step 1: collect and pre-process items.
    batch: list[tuple[int, Processed]] = await prepare_batch(
        static_configuration,
        live_configuration,
        command_line_arguments,
        counter,
        websocket_send,
        spotting_identifier,
    )
    await websocket_send(
        {"jobs": {spotting_identifier: {"steps": {"process_batch": {"start": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}}}}}
    )
    logging.info("Processing batch")
    # Step 2: NLP analysis of the whole batch.
    try:
        processed_batch: Batch = await process_batch(
            batch, static_configuration
        )
        logging.info("Successfully processed batch")
        ###############################################
        ### SETTING HUGGINFACE HUB TO OFFLINE MODE
        ##### NOW THAT ALL MODELS ARE PROVEN OK
        # Check if the TRANSFORMERS_OFFLINE environment variable is set and not equal to '1'
        if os.environ.get("TRANSFORMERS_OFFLINE") != "1":
            # Set the TRANSFORMERS_OFFLINE environment variable to '1'
            os.environ["TRANSFORMERS_OFFLINE"] = "1"
            logging.info("TRANSFORMERS_OFFLINE environment variable was set to 1.")
        else:
            # If the variable is already set to '1', inform the user
            logging.info("[HUGGING FACE MODE] OFFLINE")
        ###############################################
        await websocket_send(
            {"jobs": {spotting_identifier: {"steps": {"process_batch": {"end": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}}}}}
        )
    except Exception as e:
        traceback_list = traceback.format_exception(
            type(e), e, e.__traceback__
        )
        error_id = create_error_identifier(traceback_list)
        await websocket_send(
            {
                "jobs": {spotting_identifier: {"steps": {"process_batch": {"end": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "failed": "will not retry"}}}},
                "errors": {error_id: {"traceback": traceback_list, "module": "process", "intents": {spotting_identifier: {datetime.now().strftime("%Y-%m-%d %H:%M:%S"): {}}}}},
            }
        )
        logging.exception("An error occured during batch processing")
        return
    # Step 3: upload the processed batch to IPFS and count accepted items.
    try:
        await websocket_send(
            {"jobs": {spotting_identifier: {"steps": {"ipfs_upload": {"start": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}}}}}
        )
        cid: Union[str, None] = await upload_to_ipfs(
            processed_batch, str(spotting_identifier), websocket_send
        )
        if cid != None:
            logging.info("Successfully uploaded file to ipfs")
            # Re-download the uploaded file: its "items" list is what REP
            # accounting is based on (see the skip log below for context).
            post_upload_file: dict = await download_ipfs_file(cid)
            await count_rep_for_each_domain(counter, post_upload_file)
            item_count = len(post_upload_file["items"])
            await websocket_send(
                {"jobs": {spotting_identifier: {"steps": {"ipfs_upload": {"end": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "cid": cid, "count": item_count}}}}}
            )
        else:
            item_count = 0
    except:
        logging.exception("An error occured during IPFS uploading")
        return
    if item_count == 0:
        await websocket_send(
            {"jobs": {spotting_identifier: {"new_items_collected": item_count}}}
        )
        logging.error(
            "All items of previous batch are already discovered, skipped."
        )
        return
    await websocket_send(
        {"jobs": {spotting_identifier: {"steps": {"filter": {"end": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}}}}}
    )
    # Step 4: submit the SpotData transaction.
    try:
        logging.info(f"Building a spot-data transaction ({item_count} items)")
        transaction_hash, previous_nonce = await spot_data(
            cid,
            item_count,
            static_configuration["worker_account"],
            live_configuration,
            static_configuration["gas_cache"],
            static_configuration["contracts"],
            static_configuration["read_web3"],
            static_configuration["write_web3"],
            static_configuration,
        )
        await websocket_send(
            {"jobs": {spotting_identifier: {"steps": {"send_spot": "ok"}}}}
        )
    except:
        logging.exception("An error occured during transaction building")
        return
    # Step 5: wait for on-chain confirmation.
    try:
        await websocket_send(
            {"jobs": {spotting_identifier: {"steps": {"receipt": {"start": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}}}}}
        )
        logging.info("Looking for transaction receipt")
        receipt = await get_transaction_receipt(
            transaction_hash, previous_nonce, static_configuration
        )
        await websocket_send(
            {
                "jobs": {spotting_identifier: {"steps": {"receipt": {"value": str(receipt.blockNumber), "end": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}}}},
                "receipt": {str(spotting_identifier): {"value": str(receipt.blockNumber), "end": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}},
            }
        )
    except Exception as e:
        traceback_list = traceback.format_exception(
            type(e), e, e.__traceback__
        )
        error_identifier = create_error_identifier(traceback_list)
        await websocket_send(
            {
                "jobs": {spotting_identifier: {"steps": {"receipt": {"failed": error_identifier}}}},
                "errors": {error_identifier: {"traceback": traceback_list, "module": "upload_to_ipfs", "intents": {spotting_identifier: {datetime.now().strftime("%Y-%m-%d %H:%M:%S"): {}}}}},
            }
        )
        logging.exception("An error occured during transaction validation")
        return
    logging.info("+ A receipt for previous transaction has been confirmed")
160,473 | from exorde.get_contracts import get_contracts
from exorde.read_web3 import read_web3 as _read_web3
from exorde.write_web3 import write_web3 as _write_web3
from exorde.get_worker_account import get_worker_account
from exorde.get_protocol_configuration import get_protocol_configuration
from exorde.get_contracts_and_abi_cnf import get_contracts_and_abi_cnf
from exorde.get_network_configuration import get_network_configuration
from exorde.models import StaticConfiguration
from argparse import Namespace
from exorde.lab_initialization import lab_initialization
import logging, os
async def do_get_static_configuration(
    command_line_arguments: Namespace, live_configuration
) -> StaticConfiguration:
    """Assemble every long-lived dependency the worker needs into one mapping.

    Resolves protocol/network configuration, web3 clients, contracts and the
    lab configuration, then bundles them in a StaticConfiguration.
    """
    main_address: str = command_line_arguments.main_address
    protocol_cfg: dict = get_protocol_configuration()
    network_cfg: dict = await get_network_configuration()
    abis = await get_contracts_and_abi_cnf(protocol_cfg, live_configuration)
    reader = _read_web3(protocol_cfg, network_cfg, live_configuration)
    contracts = get_contracts(reader, abis, protocol_cfg, live_configuration)
    worker_account = get_worker_account("some-worker-name")
    writer = _write_web3(protocol_cfg, network_cfg, live_configuration)
    lab_cfg = lab_initialization()
    return StaticConfiguration(
        main_address=main_address,
        worker_account=worker_account,
        protocol_configuration=protocol_cfg,
        network_configuration=network_cfg,
        contracts=contracts,
        contracts_and_abi=abis,
        read_web3=reader,
        write_web3=writer,
        lab_configuration=lab_cfg,
        gas_cache={},
    )
class StaticConfiguration(dict):
    """Long-lived worker dependencies resolved once at start-up.

    NOTE(review): these are class-level annotations on a dict subclass —
    values are stored as dict *entries* by do_get_static_configuration, so
    attribute access is presumably never used; verify against callers.
    """
    main_address: str
    worker_account: LocalAccount
    protocol_configuration: dict
    network_configuration: dict
    contracts_and_abi: dict
    contracts: dict
    read_web3: AsyncWeb3
    write_web3: AsyncWeb3
    lab_configuration: dict
    gas_cache: dict
async def get_static_configuration(
    command_line_arguments: Namespace, live_configuration
) -> StaticConfiguration:
    """Build the StaticConfiguration, terminating the process on failure.

    The static configuration is mandatory for the worker to operate, so any
    unrecoverable error here logs the traceback and exits with code 1.
    """
    try:
        return await do_get_static_configuration(
            command_line_arguments, live_configuration
        )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed before the explicit exit below.
        logging.exception(
            "An error occured retrieving static configuration, exiting"
        )
        os._exit(1)
160,474 | import os
import subprocess
from importlib import metadata
from packaging import version
import logging
from exorde.get_latest_tag import get_latest_tag
def normalize_version(version_string):
async def get_latest_tag():
async def self_update():
    """Self-update the exorde client from GitHub when a newer tag exists.

    Best-effort: every failure path logs and returns instead of raising.
    On a successful update the process exits with code 42, presumably so a
    supervisor restarts it on the new version — TODO confirm against caller.
    """
    try:
        logging.info("[SELF CLIENT UPDATE] Checking...")
        # Try to get latest tag, if not possible, log and return
        try:
            latest_tag = await get_latest_tag()
        except Exception as e:
            logging.info(
                "[SELF CLIENT UPDATE] Unable to retrieve latest tag from GitHub: %s",
                e,
            )
            return
        # Try to get local version, if not found set default version
        try:
            local_version = metadata.version("exorde")
        except metadata.PackageNotFoundError:
            logging.info(
                "Package 'exorde' not found in the system. Setting default version"
            )
            local_version = "0.0.1"
        # Normalize the versions
        latest_tag = normalize_version(latest_tag)
        local_version = normalize_version(local_version)
        logging.info(
            f"[CLIENT VERSION] Online latest version of the exorde-client: {latest_tag}, local version: {local_version}"
        )
        try:
            if version.parse(latest_tag) > version.parse(local_version):
                logging.info(
                    f"[SELF CLIENT UPDATE] Updating from {local_version} to version {latest_tag}"
                )
                exorde_repository_path = (
                    "git+https://github.com/exorde-labs/exorde-client.git"
                )
                data_repository_path = (
                    "git+https://github.com/exorde-labs/exorde_data.git"
                )
                try:
                    subprocess.check_call(
                        ["pip", "install", "--user", exorde_repository_path]
                    )
                    subprocess.check_call(
                        ["pip", "install", "--user", data_repository_path]
                    )
                except subprocess.CalledProcessError as e:
                    logging.info(
                        "[SELF CLIENT UPDATE] Update failed, pip install returned non-zero exit status: %s",
                        e,
                    )
                    return
                # Hard-exit with a distinctive code once both installs succeed.
                os._exit(42)
        except version.InvalidVersion:
            logging.info("Error parsing version string")
    except Exception as e:
        logging.info("[SELF CLIENT UPDATE] Error during self update: %s", e)
160,475 | import json
from .models import Item
from madtypes import json_schema
def print_schema():
    """Print the JSON schema of ``Item`` to stdout.

    Falls back to printing the raw schema object if it is not
    JSON-serializable.
    """
    # Bug fix: `metadata` was referenced in the $id URL but never imported
    # in this module, causing a NameError at call time.
    from importlib import metadata

    schem = json_schema(
        Item,
        **{
            "$schema": "http://json-schema.org/draft-07/schema#",
            "$id": f'https://github.com/exorde-labs/exorde-client/repo/tree/v{metadata.version("exorde")}/exorde/schema/schema.json',
        },
    )
    try:
        print(
            json.dumps(
                schem,
                indent=4,
            )
        )
    except Exception as err:
        print(err)
        print(schem)
160,476 | from aiohttp import web
import logging
import asyncio
import os
from typing import Callable
from exorde.persist import PersistedDict
import ssl
import os
def websocket_handler_factory():
async def index_handler(request):
import argparse
async def setup_web(command_line_arguments: argparse.Namespace) -> Callable:
    """Start the optional web UI and return a websocket "push" coroutine.

    When `--web` is not given, returns a no-op coroutine so callers can
    push state unconditionally.
    """
    if command_line_arguments.web:
        # Create an aiohttp application
        app = web.Application()
        # Get both the WebSocket handler and the push function from the factory
        websocket_handler, ws_push = websocket_handler_factory()
        # Add a WebSocket route, using the handler from the factory
        app.router.add_get("/ws", websocket_handler)
        dist_folder = os.path.abspath("./ui/dist")
        logging.info(f"serving static from {dist_folder}")
        app.router.add_get("/", index_handler)
        app.router.add_static("/", dist_folder)
        # Load SSL/TLS context with the generated certificate and private key
        CERT_PATH = os.getenv("CERT_PATH")
        if CERT_PATH:
            ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_context.load_cert_chain(
                CERT_PATH, keyfile=os.getenv("CERT_KEYFILE")
            )
            PORT = 443
        else:
            # No certificate configured: serve plain HTTP on 8080.
            ssl_context = None
            PORT = 8080
        # Combine the WebSocket app with the existing app
        runner = web.AppRunner(app)
        await runner.setup()
        # Start the server
        site = web.TCPSite(runner, "0.0.0.0", PORT, ssl_context=ssl_context)
        await site.start()
        logging.info("")
        logging.info("")
        logging.info("")
        logging.info(f"serving on {PORT} (ssl_context={ssl_context})")
        logging.info("")
        logging.info("")
        logging.info("")
        # Return the ws_push function
        return ws_push
    async def do_nothing(message):
        # Fallback push target used when the web UI is disabled.
        pass
    return do_nothing
160,477 | from wtpsplit import WtP
print("[PRE-INSTALL] Installing WtP")
from transformers import AutoModel, AutoTokenizer
import os
from argostranslate import package
from typing import cast
import logging
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, pipeline
from huggingface_hub import hf_hub_download
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from ftlangdetect import detect as ft_test_detect
print("init ftlangdetect")
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
print("install emoji_lexicon")
print(f"emoji lexicon downloaded : {emoji_lexicon}")
print("install loughran_dict")
print(f"loughran downloaded : {loughran_dict}")
print("Contents of the cache folder:", content_list)
def install_hugging_face_models(models):
    """Download (and cache) tokenizer + weights for each Hugging Face model id.

    Args:
        models: iterable of Hugging Face model identifiers.
    """
    # Bug fix: the original reused `model` both as the loop variable and as
    # the target of AutoModel.from_pretrained (also shadowing the
    # module-level `model`), which was confusing and error-prone.
    for model_name in models:
        print(f"installing model {model_name}...")
        # Instantiating is enough to populate the local HF cache; the
        # returned objects are intentionally discarded.
        AutoTokenizer.from_pretrained(model_name)
        AutoModel.from_pretrained(model_name)
160,478 | from wtpsplit import WtP
from transformers import AutoModel, AutoTokenizer
import os
from argostranslate import package
from typing import cast
import logging
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, pipeline
from huggingface_hub import hf_hub_download
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from ftlangdetect import detect as ft_test_detect
def is_english_target(s):
    """Return True when the translation-package description targets English."""
    return s.find('→ English') != -1
160,479 | from wtpsplit import WtP
from transformers import AutoModel, AutoTokenizer
import os
from argostranslate import package
from typing import cast
import logging
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, pipeline
from huggingface_hub import hf_hub_download
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from ftlangdetect import detect as ft_test_detect
langs_to_exclude_from_preinstall = ["Catalan", "Esperanto"]

def is_to_exclude(s):
    """Return True if *s* mentions any language excluded from pre-install."""
    return any(lang in s for lang in langs_to_exclude_from_preinstall)
160,480 | import logging
import argparse
from exorde.get_current_rep import get_current_rep
async def get_current_rep(main_address):
    """Fetch the public leaderboard and return this address's reputation, rounded to 4 decimals."""
    leaderboard_url = "https://raw.githubusercontent.com/exorde-labs/TestnetProtocol/main/Stats/leaderboard.json"
    async with aiohttp.ClientSession() as session:
        async with session.get(leaderboard_url) as response:
            body = await response.text()
    scores = json.loads(body)
    return round(scores.get(main_address, 0), 4)
async def log_user_rep(command_line_arguments: argparse.Namespace):
    """Log the main address's current reputation; never raises.

    Network/JSON failures are logged with their traceback and swallowed so
    a reputation lookup can never take down the worker loop.
    """
    try:
        current_reputation = await get_current_rep(
            command_line_arguments.main_address
        )
        logging.info(
            f"\n*********\n[REPUTATION] Current Main Address REP = {current_reputation}\n*********\n"
        )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate while ordinary errors are only logged.
        logging.exception(
            "An error occured while logging the current reputation"
        )
160,481 | import logging
import aiofiles
import json
import asyncio
import os
from datetime import datetime
from collections import deque
from pathlib import Path
from typing import Callable, Union
import os
The provided code snippet includes necessary dependencies for implementing the `_persist` function. Write a Python function `async def _persist(data: dict, file_path: str) -> None` to solve the following problem:
Write data as JSON to the specified file path asynchronously. If the file already exists, create a backup with a .backup extension. Args: data: The data to be written as JSON. file_path (str): The path to the file where data will be written.
Here is the function:
async def _persist(data: dict, file_path: str) -> None:
    """
    Asynchronously serialize `data` as JSON into `file_path`.

    An existing file at `file_path` is first renamed to
    `file_path + ".backup"` so the previous state survives a failed write.

    Args:
        data: The data to be written as JSON.
        file_path (str): The path to the file where data will be written.
    """
    if Path(file_path).is_file():
        os.rename(file_path, Path(file_path + ".backup"))
    async with aiofiles.open(file_path, "w") as file:
        try:
            await file.write(json.dumps(data, indent=4))
        except Exception as err:
            logging.error(err)
            logging.error(data)
160,482 | import logging
import aiofiles
import json
import asyncio
import os
from datetime import datetime
from collections import deque
from pathlib import Path
from typing import Callable, Union
persist = make_persist_function()
import os
def make_persist_function():
    """Build a `persist` coroutine that debounces concurrent writes.

    Only the most recent call survives: each new call cancels the
    still-running previous write task, so the file converges on the latest
    state instead of interleaving partial writes.
    """
    current_task = None
    async def persist(
        data: dict, file_path: str, custom_serializer=None
    ) -> None:
        """Writes the content of `data` into a file specified at `file_path`"""
        nonlocal current_task
        # Cancel the previous task if exists
        if current_task:
            current_task.cancel()
        # Define a new task
        async def write_task():
            try:
                backup_extension = ".backup"
                backup_path = Path(file_path + backup_extension)
                # Check if the directory exists, create it if it doesn't
                parent_dir = Path(file_path).parent
                parent_dir.mkdir(parents=True, exist_ok=True)
                if Path(file_path).is_file():
                    # Keep the previous state as a .backup before rewriting.
                    os.rename(file_path, backup_path)
                # Use the custom serializer if provided, otherwise use the default
                serializer = custom_serializer if custom_serializer else None
                async with aiofiles.open(file_path, "w") as file:
                    try:
                        json_data = json.dumps(
                            data, indent=4, default=serializer
                        )
                        await file.write(json_data)
                    except asyncio.exceptions.CancelledError:
                        pass  # Ignore the CancelledError exception
            except Exception:
                logging.exception("An error occured in persist")
        # Set the current task to the new task
        current_task = asyncio.create_task(write_task())
        try:
            # Awaiting may raise CancelledError if a newer call superseded us.
            await current_task
        except:
            pass
    return persist
160,483 | import logging
import aiofiles
import json
import asyncio
import os
from datetime import datetime
from collections import deque
from pathlib import Path
from typing import Callable, Union
import os
async def test_load_from_backup_on_corrupted():
    """load() must fall back to the .backup file when the primary is corrupt."""
    file_to_load = "test_data.json"
    backup_file = "test_data.json.backup"
    with open(file_to_load, "w") as file:
        file.write("Invalid JSON Content")
    data_to_persist = {"name": "Jane Smith", "age": 25, "city": "Testington"}
    # Bug fix: the original reused `backup_file` as the `with` target,
    # clobbering the path string with a (then closed) file object.
    with open(backup_file, "w") as handle:
        json.dump(data_to_persist, handle, indent=4)
    # Attempt to load from backup
    loaded_data = load(file_to_load)
    expected_data = {"name": "Jane Smith", "age": 25, "city": "Testington"}
    assert (
        loaded_data == expected_data
    ), f"Expected {expected_data}, got {loaded_data}"
async def test_load_from_backup_on_both_corrupted():
    """load() must return {} when both the primary and the backup are corrupt."""
    file_to_load = "test_data.json"
    backup_file = "test_data.json.backup"
    with open(file_to_load, "w") as file:
        file.write("Invalid JSON Content")
    # Bug fix: the original bound the `with` target to `backup_file`,
    # overwriting the path string with a file object.
    with open(backup_file, "w") as handle:
        handle.write("Invalid JSON Content")
    # Attempt to load from backup
    loaded_data = load(file_to_load)
    assert loaded_data == {}, f"Expected empty dictionary, got {loaded_data}"
async def test_many_concurrent_writes():
    """
    Stress the debounced persist(): 1000 concurrent writes should leave the
    file holding a single surviving payload, not interleaved fragments.

    if persist = _persist this test could use task 958 as last task (for example)
    """
    file_to_persist = "test_data.json"
    # Generate a list of data with unique ids
    data_list = [
        {
            "name": f"Person {i}",
            "age": i,
            "city": f"City {i}",
            "id": f"task_{i}",
        }
        for i in range(1000)
    ]
    # Trigger concurrent writes for each data
    tasks = [
        asyncio.create_task(persist(data, file_to_persist))
        for data in data_list
    ]
    try:
        await asyncio.gather(*tasks)
    except asyncio.CancelledError:
        pass  # Ignore the CancelledError exception
    # Load data from the file
    async with aiofiles.open(file_to_persist, "r") as file:
        loaded_data = await file.read()
    print("Loaded data from file after concurrent writes:", loaded_data)
async def test_backup_behavior_on_interrupt():
    """Cancel a persist() mid-write and inspect what the .backup holds.

    NOTE(review): assumes the backup file already exists from earlier tests
    in run_tests(); run in isolation this would raise FileNotFoundError.
    """
    file_to_persist = "test_data.json"
    data_to_persist = {"name": "John Doe", "age": 30, "city": "City A"}
    # Trigger a persist task and then immediately interrupt it
    task = asyncio.create_task(persist(data_to_persist, file_to_persist))
    await asyncio.sleep(0.5)  # Wait for half a second
    task.cancel()
    # Load data from the backup file
    backup_file = "test_data.json.backup"
    async with aiofiles.open(backup_file, "r") as file:
        loaded_data = await file.read()
    print("Loaded data from backup after interrupted write:", loaded_data)
async def test_custom_serializer():
    """Round-trip datetime and deque values through a custom JSON codec."""
    def custom_serializer(obj):
        # Tag the non-JSON types so the object hook can restore them.
        if isinstance(obj, datetime):
            return {"__datetime__": True, "value": obj.timestamp()}
        if isinstance(obj, deque):
            return {"__deque__": True, "value": list(obj)}
        return obj
    def custom_object_hook(obj):
        # Inverse of custom_serializer, applied to every decoded dict.
        if "__datetime__" in obj:
            return datetime.fromtimestamp(obj["value"])
        if "__deque__" in obj:
            return deque(obj["value"])
        return obj
    file_to_persist = "test_data.json"
    data_to_persist = {
        "name": "John Doe",
        "age": 30,
        "city": "City A",
        "timestamp": datetime.now(),
        "events": deque([1, 2, 3, 4, 5]),
    }
    # Write data using custom serializer
    await persist(data_to_persist, file_to_persist, custom_serializer)
    # Load data using custom serializer
    loaded_data = load(file_to_persist, custom_object_hook)
    assert (
        loaded_data == data_to_persist
    ), f"Expected {data_to_persist}, got {loaded_data}"
async def test_persisted_dict():
    """Exercise PersistedDict set/delete persistence across reloads.

    NOTE(review): relies on the 1s sleeps outrunning the async write task;
    could be flaky on a very slow filesystem.
    """
    file_path = "test_data_b.json"
    persisted_dict = PersistedDict(file_path)
    # Test setting and persisting data
    persisted_dict["name"] = "John"
    persisted_dict["age"] = 30
    await asyncio.sleep(1)  # Allow time for the data to persist
    # Test loading persisted data
    loaded_dict = PersistedDict(file_path)
    assert loaded_dict["name"] == "John"
    assert loaded_dict["age"] == 30
    # Test deleting and persisting data
    del persisted_dict["age"]
    await asyncio.sleep(1)  # Allow time for the data to persist
    # Test loading after deletion
    loaded_dict = PersistedDict(file_path)
    assert "age" not in loaded_dict
async def run_tests():
    """Run the persistence test suite sequentially, reporting each pass."""
    suite = [
        (test_load_from_backup_on_corrupted, "test_load_from_backup_on_corrupted"),
        (test_load_from_backup_on_both_corrupted, "test_load_from_backup_on_both_corrupted"),
        (test_many_concurrent_writes, "test_many_concurrent_writes"),
        (test_backup_behavior_on_interrupt, "test_backup_behavior_on_interrupt"),
        (test_custom_serializer, "test_custom_serializer"),
        (test_persisted_dict, "test_persisted_dict"),
    ]
    for test_coro, label in suite:
        await test_coro()
        print(f"{label} - ok")
160,484 | import os
import logging
import argparse
from typing import Callable
from exorde.persist import PersistedDict
from exorde.notification import send_notification
from exorde.models import LiveConfiguration
docker_version_notifier = build_docker_version_notifier()
class PersistedDict:
    """A dict-like mapping transparently persisted to a JSON file.

    Mutations schedule an async write via `persist`; reads come from the
    in-memory `self.data` loaded once at construction.
    NOTE(review): __setitem__/__delitem__ call asyncio.create_task, so a
    running event loop is required when mutating — confirm at call sites.
    """
    def __init__(
        self,
        file_path: str,
        serializer: Union[Callable, None] = None,
        custom_object_hook: Union[Callable, None] = None,
    ):
        self.file_path = file_path
        self.serializer = serializer
        self.custom_object_hook = custom_object_hook
        self.data = self._load()
        # When True, mutations do not trigger individual writes (see deep_merge).
        self.hold_persist: bool = False
    async def _persist(self):
        # Delegate serialization + backup handling to the module-level persist().
        await persist(
            self.data, self.file_path, custom_serializer=self.serializer
        )
    def _load(self):
        return load(self.file_path, custom_object_hook=self.custom_object_hook)
    def __getitem__(self, key):
        # Bug fix: replaced a bare `try/except: return None` with dict.get,
        # which keeps the same "missing key -> None" behavior without
        # swallowing unrelated exceptions (KeyboardInterrupt, real bugs...).
        return self.data.get(key)
    def __setitem__(self, key, value):
        self.data[key] = value
        if not self.hold_persist:
            asyncio.create_task(self._persist())
    def __delitem__(self, key):
        del self.data[key]
        if not self.hold_persist:
            asyncio.create_task(self._persist())
    async def deep_merge(self, update_context: dict) -> None:
        """
        Merge the update_context dictionary into the PersistedDict object deeply.
        Args:
            update_context (dict): The dictionary to merge into the PersistedDict object.
        """
        # Suspend per-key persistence during the merge, then write once.
        self.hold_persist = True
        self.data = self._deep_merge_dicts(self.data, update_context)
        self.hold_persist = False
        await self._persist()
        await asyncio.sleep(0.01)
    def _deep_merge_dicts(self, target, source):
        # Recursively merge `source` into `target`; non-dict values overwrite.
        for key, value in source.items():
            if (
                key in target
                and isinstance(target[key], dict)
                and isinstance(value, dict)
            ):
                target[key] = self._deep_merge_dicts(target[key], value)
            else:
                target[key] = value
        return target
    def __len__(self):
        return len(self.data)
    def __iter__(self):
        return iter(self.data)
    def keys(self):
        return self.data.keys()
    def values(self):
        return self.data.values()
    def items(self):
        return self.data.items()
    def __str__(self):
        return str(self.data)
    def __repr__(self):
        return repr(self.data)
async def send_notification(
    command_line_arguments: argparse.Namespace, data: str
):
    """Post `data` to the ntfy.sh topic given by the `ntfy` CLI argument.

    Returns the raw response body from ntfy.sh.
    """
    logging.info(
        "If you like ntfy, please consider sponsoring me via GitHub Sponsors or Liberapay , or subscribing to ntfy Pro."
    )
    topic_url = f"https://ntfy.sh/{command_line_arguments.ntfy}"
    encoded_payload = data.encode(encoding="utf-8")
    async with aiohttp.ClientSession() as session:
        async with session.post(topic_url, data=encoded_payload) as response:
            return await response.text()
class LiveConfiguration(dict):
    """
    Remote runtime configuration.

    Configuration is not a MadType because we do not want to break the
    configuration instantiation if a key is not defined in the Python code.
    It therefore requires manual checking; open question: what happens when
    the user is unable to reach the configuration but the protocol is still
    online?
    """
    # NOTE(review): annotations only — values live in the dict entries passed
    # to the constructor; attribute access is presumably never used.
    remote_kill: bool
    online: bool
    batch_size: int
    last_info: Optional[str]
    worker_version: Optional[str]
    protocol_version: Optional[str]
    expiration_delta: Optional[int]  # data freshness
    target: Optional[str]
    default_gas_price: Optional[int]
    default_gas_amount: Optional[int]
    gas_cap_min: Optional[int]
    inter_spot_delay_seconds: int
    last_notification: str
def build_docker_version_notifier() -> Callable:
    """Build a notifier that pings the user when a newer docker image exists.

    The last version the user was notified about is persisted so restarts
    do not re-notify for the same image.
    """
    last_notified_version = PersistedDict(
        "/tmp/exorde/docker_version_notification.json"
    )

    async def docker_version_notifier(
        live_configuration: LiveConfiguration,
        command_line_arguments: argparse.Namespace,
    ) -> None:
        # EXORDE_DOCKER_IMG_VERSION is set at image build time.
        current_img_version = os.environ.get("EXORDE_DOCKER_IMG_VERSION", None)
        if not current_img_version:
            # Not running from a docker image: nothing to compare against.
            return
        nonlocal last_notified_version
        # The docker version expected by the network.
        live_version = live_configuration.get("docker_version", None)
        if not live_version:
            logging.warning("no docker version specified in LiveConfiguration")
            return
        if live_version != current_img_version:
            # Bug fix: the original compared the PersistedDict object itself
            # to `live_version` (`last_notified_version != live_version`),
            # which is never equal, so the notification fired on every call.
            # Compare the stored value instead; also use `is None`.
            if (
                last_notified_version["last_notification"] is None
                or last_notified_version["last_notification"] != live_version
            ):
                await send_notification(
                    command_line_arguments, "A new exorde image is available"
                )
                last_notified_version["last_notification"] = live_version

    return docker_version_notifier
160,485 | import json
from typing import Callable, Coroutine
import logging
from functools import wraps
import aiohttp
from exorde.models import LiveConfiguration
class LiveConfiguration(dict):
    """
    Remote runtime configuration.

    Configuration is not a MadType because we do not want to break the
    configuration instantiation if a key is not defined in the Python code.
    It therefore requires manual checking; open question: what happens when
    the user is unable to reach the configuration but the protocol is still
    online?
    """
    # NOTE(review): annotations only — values live in the dict entries passed
    # to the constructor; attribute access is presumably never used.
    remote_kill: bool
    online: bool
    batch_size: int
    last_info: Optional[str]
    worker_version: Optional[str]
    protocol_version: Optional[str]
    expiration_delta: Optional[int]  # data freshness
    target: Optional[str]
    default_gas_price: Optional[int]
    default_gas_amount: Optional[int]
    gas_cap_min: Optional[int]
    inter_spot_delay_seconds: int
    last_notification: str
def logic(implementation: Callable) -> Callable:
    """Wrap a configuration fetcher with a last-known-good cache.

    On fetch failure, the previous successful result is returned; if none
    exists yet, a safe offline default is returned instead of raising.
    """
    cache = None

    @wraps(implementation)
    async def call() -> LiveConfiguration:
        nonlocal cache
        try:
            result = await implementation()
        except Exception:
            # Narrowed from a bare `except:` so cancellation and
            # KeyboardInterrupt are not converted into a fake configuration.
            logging.exception("An error occured retrieving the configuration.")
            if cache:
                result = cache
            else:
                return LiveConfiguration(
                    online=False, batch_size=0, inter_spot_delay_seconds=60
                )
        cache = result
        return result

    return call
160,486 | import json
from typing import Callable, Coroutine
import logging
from functools import wraps
import aiohttp
from exorde.models import LiveConfiguration
class LiveConfiguration(dict):
    """
    Remote runtime configuration.

    Configuration is not a MadType because we do not want to break the
    configuration instantiation if a key is not defined in the Python code.
    It therefore requires manual checking; open question: what happens when
    the user is unable to reach the configuration but the protocol is still
    online?
    """
    # NOTE(review): annotations only — values live in the dict entries passed
    # to the constructor; attribute access is presumably never used.
    remote_kill: bool
    online: bool
    batch_size: int
    last_info: Optional[str]
    worker_version: Optional[str]
    protocol_version: Optional[str]
    expiration_delta: Optional[int]  # data freshness
    target: Optional[str]
    default_gas_price: Optional[int]
    default_gas_amount: Optional[int]
    gas_cap_min: Optional[int]
    inter_spot_delay_seconds: int
    last_notification: str
The provided code snippet includes necessary dependencies for implementing the `implementation` function. Write a Python function `async def implementation() -> LiveConfiguration` to solve the following problem:
Retrieve the live configuration data from GitHub. TODO: retrieve it from the network instead.
Here is the function:
async def implementation() -> LiveConfiguration:
    """Retrieve the live configuration JSON published on GitHub.

    TODO: retrieve it from the network instead.
    """
    runtime_url = "https://raw.githubusercontent.com/exorde-labs/TestnetProtocol/main/targets/runtime.json"
    async with aiohttp.ClientSession() as session:
        async with session.get(runtime_url) as response:
            body = await response.text()
    return LiveConfiguration(**json.loads(body))
160,487 | from functools import wraps
from flask import request
from flask import current_app as app
from flask_restful import Resource, HTTPException
from opencve.extensions import limiter
from opencve.models.users import User
class User(BaseModel, UserMixin):
    """Application user account (SQLAlchemy model + Flask-User mixin)."""
    __tablename__ = "users"
    __hash__ = UserMixin.__hash__
    # User authentication information
    username = db.Column(db.String(50), nullable=False, unique=True)
    password = db.Column(db.String(255), nullable=False, server_default="")
    reset_password_token = db.Column(db.String(100), nullable=False, server_default="")
    # User email information
    email = db.Column(db.String(255), nullable=False, unique=True)
    email_confirmed_at = db.Column(db.DateTime(timezone=True))
    # Notification parameters
    enable_notifications = db.Column(
        db.Boolean(), nullable=False, server_default=expression.true()
    )
    filters_notifications = db.Column(JSONType, default=get_default_filters)
    settings = db.Column(JSONType, default=get_default_settings, nullable=False)
    frequency_notifications = db.Column(ChoiceType(FREQUENCIES_TYPES), default="always")
    # User information
    active = db.Column(
        "is_active", db.Boolean(), nullable=False, server_default=expression.false()
    )
    first_name = db.Column(db.String(100), nullable=False, server_default="")
    last_name = db.Column(db.String(100), nullable=False, server_default="")
    admin = db.Column(db.Boolean, unique=False, server_default=expression.false())
    # Relationships
    vendors = db.relationship("Vendor", secondary=users_vendors)
    products = db.relationship("Product", secondary=users_products)
    alerts = db.relationship("Alert", back_populates="user")
    reports = db.relationship("Report", back_populates="user")
    tags = db.relationship("UserTag", back_populates="user")
    cve_tags = db.relationship("CveTag", back_populates="user")
    def is_confirmed(self):
        """Return True once the user's email address has been confirmed."""
        return bool(self.email_confirmed_at)
    def __repr__(self):
        return "<User {}>".format(self.username)
    def __eq__(self, user):
        # NOTE(review): equality by primary key; None compares as False.
        return self.id == user.id if user else False
    def set_password(self, password):
        """Hash `password` with the Flask-User hasher and store it."""
        self.password = app.user_manager.hash_password(password)
def auth_required(func):
    """Decorator enforcing HTTP Basic authentication on an API endpoint."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        creds = request.authorization
        # 401 response returned for every failure mode below.
        unauthorized = (
            {"message": "Authentication required."},
            401,
            {"WWW-Authenticate": 'Basic realm="Authentication Required"'},
        )
        # Auth not provided
        if not creds:
            return unauthorized
        # User not found
        account = User.query.filter_by(username=creds.username).first()
        if not account:
            return unauthorized
        # Bad credentials
        if not app.user_manager.verify_password(creds.password, account.password):
            return unauthorized
        return func(*args, **kwargs)
    return wrapper
160,488 | import os
import pathlib
import shutil
import click
from flask.cli import with_appcontext
from opencve.configuration import (
DEFAULT_CONFIG,
DEFAULT_WELCOME_FILES,
OPENCVE_CONFIG,
OPENCVE_HOME,
OPENCVE_WELCOME_FILES,
)
from opencve.commands import info, error
def create_config():
    """Create the default OpenCVE configuration file if it does not exist.

    Returns:
        (path, created): the configuration path and whether it was newly written.
    """
    if pathlib.Path(OPENCVE_CONFIG).exists():
        return OPENCVE_CONFIG, False
    # Do not create the home if user directly specifies the config path
    if not os.environ.get("OPENCVE_CONFIG"):
        pathlib.Path(OPENCVE_HOME).mkdir(parents=True, exist_ok=True)
    with open(DEFAULT_CONFIG) as f:
        template = f.read()
    # Generate a unique secret key
    template = template.replace("{SECRET_KEY}", os.urandom(32).hex())
    with open(OPENCVE_CONFIG, "w") as f:
        f.write(template)
    # Copy the welcome files
    shutil.copytree(DEFAULT_WELCOME_FILES, OPENCVE_WELCOME_FILES)
    return OPENCVE_CONFIG, True
def info(msg, nl=True):
    """Echo `msg` to the console with the standard `[*]` prefix."""
    click.echo(f"[*] {msg}", nl=nl)
def error(msg, nl=True):
    """Echo `msg` to the console with the `[error]` prefix."""
    click.echo(f"[error] {msg}", nl=nl)
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize the configuration file.
Here is the function:
def init():
    """Initialize the configuration file."""
    path, created = create_config()
    if not created:
        error(f"Configuration already exists ({path})")
        return
    info(f"Configuration created in {path}")
160,489 | import os
import click
from flask.cli import with_appcontext
from opencve.commands import ensure_config
The provided code snippet includes necessary dependencies for implementing the `webserver` function. Write a Python function `def webserver(args)` to solve the following problem:
Run the webserver.
Here is the function:
def webserver(args):
    """Run the webserver by replacing this process with gunicorn."""
    command = ["gunicorn", *args, "opencve.app:app"]
    os.execvp(command[0], command)
160,490 | from pathlib import Path
import click
from flask.cli import with_appcontext
from flask_migrate import upgrade
from opencve.commands import ensure_config
The provided code snippet includes necessary dependencies for implementing the `upgrade_db` function. Write a Python function `def upgrade_db()` to solve the following problem:
Create or upgrade the database.
Here is the function:
def upgrade_db():
    """Create or upgrade the database using the bundled Alembic migrations."""
    migrations_dir = Path(__file__).parent.parent.resolve() / "migrations"
    upgrade(directory=str(migrations_dir))
160,491 | import os
import click
from flask.cli import with_appcontext
from opencve.commands import ensure_config
The provided code snippet includes necessary dependencies for implementing the `celery` function. Write a Python function `def celery()` to solve the following problem:
Run Celery commands.
Here is the function:
def celery():
    """Run Celery commands."""
    # NOTE(review): intentionally empty — presumably a click command group
    # whose subcommands (worker, beat) are attached via decorators elsewhere;
    # confirm against the CLI registration code.
160,492 | import os
import click
from flask.cli import with_appcontext
from opencve.commands import ensure_config
The provided code snippet includes necessary dependencies for implementing the `worker` function. Write a Python function `def worker(args)` to solve the following problem:
Run a Celery worker.
Here is the function:
def worker(args):
    """Run a Celery worker by replacing this process with the celery CLI."""
    command = ["celery", "worker", "-A", "opencve.app:cel", *args]
    os.execvp(command[0], command)
160,493 | import os
import click
from flask.cli import with_appcontext
from opencve.commands import ensure_config
The provided code snippet includes necessary dependencies for implementing the `beat` function. Write a Python function `def beat(args)` to solve the following problem:
Start the Celery beat.
Here is the function:
def beat(args):
    """Start the Celery beat scheduler by replacing this process."""
    command = ["celery", "beat", "-A", "opencve.app:cel", *args]
    os.execvp(command[0], command)
160,494 | import datetime
import click
from flask import current_app as app
from flask.cli import with_appcontext
from sqlalchemy.exc import IntegrityError
from opencve.commands import ensure_config, error, info
from opencve.extensions import db
from opencve.models.users import User
def info(msg, nl=True):
    """Echo `msg` to the console with the standard `[*]` prefix."""
    click.echo(f"[*] {msg}", nl=nl)
def error(msg, nl=True):
    """Echo `msg` to the console with the `[error]` prefix."""
    click.echo(f"[error] {msg}", nl=nl)
db = SQLAlchemy(session_options={"autoflush": False})
class User(BaseModel, UserMixin):
    """Application user account (SQLAlchemy model + Flask-User mixin)."""
    __tablename__ = "users"
    __hash__ = UserMixin.__hash__
    # User authentication information
    username = db.Column(db.String(50), nullable=False, unique=True)
    password = db.Column(db.String(255), nullable=False, server_default="")
    reset_password_token = db.Column(db.String(100), nullable=False, server_default="")
    # User email information
    email = db.Column(db.String(255), nullable=False, unique=True)
    email_confirmed_at = db.Column(db.DateTime(timezone=True))
    # Notification parameters
    enable_notifications = db.Column(
        db.Boolean(), nullable=False, server_default=expression.true()
    )
    filters_notifications = db.Column(JSONType, default=get_default_filters)
    settings = db.Column(JSONType, default=get_default_settings, nullable=False)
    frequency_notifications = db.Column(ChoiceType(FREQUENCIES_TYPES), default="always")
    # User information
    active = db.Column(
        "is_active", db.Boolean(), nullable=False, server_default=expression.false()
    )
    first_name = db.Column(db.String(100), nullable=False, server_default="")
    last_name = db.Column(db.String(100), nullable=False, server_default="")
    admin = db.Column(db.Boolean, unique=False, server_default=expression.false())
    # Relationships
    vendors = db.relationship("Vendor", secondary=users_vendors)
    products = db.relationship("Product", secondary=users_products)
    alerts = db.relationship("Alert", back_populates="user")
    reports = db.relationship("Report", back_populates="user")
    tags = db.relationship("UserTag", back_populates="user")
    cve_tags = db.relationship("CveTag", back_populates="user")
    def is_confirmed(self):
        """Return True once the user's email address has been confirmed."""
        return bool(self.email_confirmed_at)
    def __repr__(self):
        return "<User {}>".format(self.username)
    def __eq__(self, user):
        # NOTE(review): equality by primary key; None compares as False.
        return self.id == user.id if user else False
    def set_password(self, password):
        """Hash `password` with the Flask-User hasher and store it."""
        self.password = app.user_manager.hash_password(password)
The provided code snippet includes necessary dependencies for implementing the `create_user` function. Write a Python function `def create_user(username, email, password, admin)` to solve the following problem:
Create a user or admin.
Here is the function:
def create_user(username, email, password, admin):
    """Create a user or admin.

    The account is created active, with its email pre-confirmed.

    Raises:
        click.BadParameter: when the username or the email is already taken.
    """
    if User.query.filter_by(username=username).first():
        raise click.BadParameter(f"{username} already exists.", param_hint="username")
    if User.query.filter_by(email=email).first():
        raise click.BadParameter(f"{email} already exists.", param_hint="email")

    user = User(
        username=username,
        email=email,
        active=True,
        admin=admin,
        email_confirmed_at=datetime.datetime.utcnow(),
        password=app.user_manager.hash_password(password),
    )
    db.session.add(user)

    try:
        db.session.commit()
    except IntegrityError as e:
        # Roll back so the session stays usable after a failed commit
        # (e.g. a concurrent insert that slipped past the checks above).
        db.session.rollback()
        error(e)
    else:
        info("User {} created.".format(username))
import click
import requests
import time
from flask.cli import with_appcontext
from opencve.commands import ensure_config, error, info
from opencve.models.cve import Cve
from opencve.extensions import db
NVD_API_URL = "https://services.nvd.nist.gov/rest/json/cves/2.0"
def info(msg, nl=True):
    """Echo an informational message prefixed with "[*]" to the console."""
    click.echo("[*] {}".format(msg), nl=nl)
class Cve(BaseModel):
    """A CVE record: raw NVD JSON plus denormalized fields for searching.

    The accessors below support both the legacy NVD 4.0 JSON (detected by a
    top-level "cve" key, metrics under "impact") and the 2.0 API format
    (metrics under "metrics").

    NOTE(review): the accessor methods read like @property accessors (see
    `self.raw_tags` used without a call in tags()); decorators may have been
    stripped in this extract -- confirm against upstream.
    """

    __tablename__ = "cves"

    # CVE are sorted by last modified date, we need to index it.
    updated_at = db.Column(
        db.DateTime(timezone=True),
        default=db.func.now(),
        onupdate=db.func.now(),
        nullable=False,
        index=True,
    )
    cve_id = db.Column(db.String(), nullable=False)
    json = db.Column(JSONB)

    # We used initially secondary relationships to fetch the list of
    # associated vendors, products and cwes. But it was complicated
    # to maintain, and the performance were poor. So we now use the
    # JSONB data type associated to the GIN index type.
    vendors = db.Column(JSONB)
    cwes = db.Column(JSONB)

    # Keep the summary separated when searching keywords
    summary = db.Column(db.String(), nullable=False)

    # Keep CVSS separated when searching a particular score
    cvss2 = db.Column(db.Float())
    cvss3 = db.Column(db.Float())

    # Relationships
    events = db.relationship("Event", back_populates="cve")
    changes = db.relationship("Change", back_populates="cve")
    alerts = db.relationship("Alert", back_populates="cve")

    # Index
    __table_args__ = (
        db.Index("ix_cves_vendors", vendors, postgresql_using="gin"),
        db.Index("ix_cves_cwes", cwes, postgresql_using="gin"),
        db.Index(
            "ix_cves_summary",
            summary,
            postgresql_using="gin",
            postgresql_ops={
                "summary": "gin_trgm_ops",
            },
        ),
        db.Index(
            "ix_cves_cve_id",
            cve_id,
            postgresql_using="gin",
            postgresql_ops={
                "cve_id": "gin_trgm_ops",
            },
        ),
    )

    def __repr__(self):
        return "<Cve {}>".format(self.cve_id)

    def raw_tags(self):
        """Return the current user's tag names for this CVE ([] when anonymous)."""
        if not current_user.is_authenticated:
            return []
        cve_tag = CveTag.query.filter_by(
            user_id=current_user.id, cve_id=self.id
        ).first()
        if not cve_tag:
            return []
        return cve_tag.tags

    def tags(self):
        """Resolve the raw tag names into UserTag objects for the current user."""
        if not current_user.is_authenticated:
            return []
        return [
            UserTag.query.filter_by(user_id=current_user.id, name=t).first()
            for t in self.raw_tags
        ]

    def cvss_weight(self):
        """Only used to sort several CVE by their CVSS"""
        w = 0
        if self.cvss2:
            w += self.cvss2
        if self.cvss3:
            w += self.cvss3
        return w

    def cvss2_score(self):
        """CVSSv2 base score, or None when no v2 metric exists."""
        if "cve" in self.json.keys():
            if "baseMetricV2" in self.json["impact"]:
                return self.json["impact"]["baseMetricV2"]["cvssV2"]["baseScore"]
        else:
            if "cvssMetricV2" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV2"][0]["cvssData"]["baseScore"]
        return None

    def cvss3_score(self):
        """CVSSv3 base score (v3.1 preferred over v3.0), or None."""
        if "cve" in self.json.keys():
            if "baseMetricV3" in self.json["impact"]:
                return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseScore"]
        else:
            if "cvssMetricV31" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV31"][0]["cvssData"]["baseScore"]
            elif "cvssMetricV30" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV30"][0]["cvssData"]["baseScore"]
        return None

    def cvss2_severity(self):
        """CVSSv2 severity string, or None when no v2 metric exists."""
        if "cve" in self.json.keys():
            if "baseMetricV2" in self.json["impact"]:
                return self.json["impact"]["baseMetricV2"]["severity"]
        else:
            if "cvssMetricV2" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV2"][0]["baseSeverity"]
        return None

    def cvss3_severity(self):
        """CVSSv3 severity string (v3.1 preferred over v3.0), or None."""
        if "cve" in self.json.keys():
            if "baseMetricV3" in self.json["impact"]:
                return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseSeverity"]
        else:
            if "cvssMetricV31" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV31"][0]["cvssData"][
                    "baseSeverity"
                ]
            elif "cvssMetricV30" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV30"][0]["cvssData"][
                    "baseSeverity"
                ]
        return None

    def references(self):
        """Reference list, wherever the active JSON format stores it."""
        if "cve" in self.json.keys():
            return self.json["cve"]["references"]["reference_data"]
        return self.json["references"]
db = SQLAlchemy(session_options={"autoflush": False})
The provided code snippet includes necessary dependencies for implementing the `migrate_nvd` function. Write a Python function `def migrate_nvd()` to solve the following problem:
Migrate NVD data from JSON 4.0 to 5.0
Here is the function:
def migrate_nvd():
    """Migrate NVD data from JSON 4.0 to 5.0.

    Walks the whole NVD 2.0 API page by page and replaces the stored JSON of
    every CVE already present in database with the new-format payload.
    """
    msg = (
        "This command will migrate all existing CVEs into the new NVD format. "
        "Do you want to continue ?"
    )
    if not click.confirm(msg):
        info("Bye.")
        return

    url_template = NVD_API_URL + "?startIndex={idx}"
    start_index = 0
    total_results = 0

    while start_index <= total_results:
        url = url_template.format(idx=start_index)
        info(f"Fetching {url}")
        resp = requests.get(url)

        # Retry the same page on API errors (rate limiting, 5xx, ...).
        if not resp.ok:
            info(f"Bad response: {resp.status_code}, sleeping before retrying...")
            time.sleep(10)
            continue

        data = resp.json()
        # Default to 0 so a malformed payload ends the loop instead of
        # raising a TypeError on the `<=` comparison above.
        total_results = data.get("totalResults", 0)

        for vulnerability in data.get("vulnerabilities", []):
            cve_data = vulnerability.get("cve")
            cve_id = cve_data.get("id")
            cve_obj = Cve.query.filter_by(cve_id=cve_id).first()
            if cve_obj:
                cve_obj.json = cve_data

        # NVD requirement is 2000 CVE per page
        # and 6 seconds between requests.
        start_index += 2000
        time.sleep(6)

        # Commit every 5 pages (commit flushes, so no explicit flush needed).
        if (start_index % 10_000 == 0) or (start_index >= total_results):
            db.session.commit()
import time
import arrow
import requests
from opencve.commands import info, timed_operation
from opencve.extensions import db
from opencve.utils import convert_cpes, flatten_vendors, weaknesses_to_flat
from opencve.models import get_uuid
from opencve.models.changes import Change
from opencve.models.cve import Cve
from opencve.models.tasks import Task
from opencve.models.products import Product
from opencve.models.vendors import Vendor
from opencve.models.metas import Meta
NVD_API_URL = "https://services.nvd.nist.gov/rest/json/cves/2.0"
def get_slug(vendor, product=None):
    """Return the mapping key for a vendor, suffixed with "-<product>" when given."""
    if not product:
        return vendor
    return "{}-{}".format(vendor, product)
def info(msg, nl=True):
    """Echo an informational message prefixed with "[*]" to the console."""
    click.echo("[*] {}".format(msg), nl=nl)
@contextmanager
def timed_operation(msg, nl=False):
    """Context manager printing *msg*, then the elapsed time on exit.

    The @contextmanager decorator is required: this function is used as
    ``with timed_operation(...):`` and a bare generator is not a context
    manager.
    """
    start = time.time()
    info(msg, nl=nl)
    yield
    click.echo(" (done in {}s).".format(round(time.time() - start, 3)))
db = SQLAlchemy(session_options={"autoflush": False})
def convert_cpes(conf):
    """
    This function takes an object, extracts its CPE uris and transforms them into
    a dictionnary representing the vendors with their associated products.
    """
    uris = nested_lookup("criteria", conf)

    # Try old NVD CVE format if no criteria found
    if not uris:
        uris = nested_lookup("cpe23Uri", conf)

    # Deduplicated (vendor, product) pairs taken from each CPE uri.
    pairs = {tuple(uri.split(":")[3:5]) for uri in uris}

    # Group the products under their vendor.
    cpes = {}
    for vendor, product in pairs:
        cpes.setdefault(vendor, []).append(product)
    return cpes
def flatten_vendors(vendors):
    """
    Takes a list of nested vendors and products and flat them.
    """
    flat = []
    for vendor, products in vendors.items():
        flat.append(vendor)
        flat.extend(f"{vendor}{PRODUCT_SEPARATOR}{product}" for product in products)
    return flat
def weaknesses_to_flat(weaknesses=None):
    """Collect every nested "value" field (the CWE ids) from a weaknesses object."""
    return nested_lookup("value", weaknesses)
def get_uuid():
    """Return a new random UUID4 rendered as a string."""
    new_id = uuid.uuid4()
    return str(new_id)
class Change(BaseModel):
    """A detected modification of a CVE, recorded during a task run."""

    __tablename__ = "changes"

    # Snapshot of the CVE JSON at the time of the change.
    json = db.Column(JSONType)

    # Relationships
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"))
    cve = db.relationship("Cve", back_populates="changes")
    task_id = db.Column(UUIDType(binary=False), db.ForeignKey("tasks.id"))
    task = db.relationship("Task", back_populates="changes")
    events = db.relationship("Event", back_populates="change")
class Cve(BaseModel):
__tablename__ = "cves"
# CVE are sorted by last modified date, we need to index it.
updated_at = db.Column(
db.DateTime(timezone=True),
default=db.func.now(),
onupdate=db.func.now(),
nullable=False,
index=True,
)
cve_id = db.Column(db.String(), nullable=False)
json = db.Column(JSONB)
# We used initially secondary relationships to fetch the list of
# associated vendors, products and cwes. But it was complicated
# to maintain, and the performance were poor. So we now use the
# JSONB data type associated to the GIN index type.
vendors = db.Column(JSONB)
cwes = db.Column(JSONB)
# Keep the summary separated when searching keywords
summary = db.Column(db.String(), nullable=False)
# Keep CVSS separated when searching a particupal score
cvss2 = db.Column(db.Float())
cvss3 = db.Column(db.Float())
# Relationships
events = db.relationship("Event", back_populates="cve")
changes = db.relationship("Change", back_populates="cve")
alerts = db.relationship("Alert", back_populates="cve")
# Index
__table_args__ = (
db.Index("ix_cves_vendors", vendors, postgresql_using="gin"),
db.Index("ix_cves_cwes", cwes, postgresql_using="gin"),
db.Index(
"ix_cves_summary",
summary,
postgresql_using="gin",
postgresql_ops={
"summary": "gin_trgm_ops",
},
),
db.Index(
"ix_cves_cve_id",
cve_id,
postgresql_using="gin",
postgresql_ops={
"cve_id": "gin_trgm_ops",
},
),
)
def __repr__(self):
return "<Cve {}>".format(self.cve_id)
def raw_tags(self):
if not current_user.is_authenticated:
return []
cve_tag = CveTag.query.filter_by(
user_id=current_user.id, cve_id=self.id
).first()
if not cve_tag:
return []
return cve_tag.tags
def tags(self):
if not current_user.is_authenticated:
return []
return [
UserTag.query.filter_by(user_id=current_user.id, name=t).first()
for t in self.raw_tags
]
def cvss_weight(self):
"""Only used to sort several CVE by their CVSS"""
w = 0
if self.cvss2:
w += self.cvss2
if self.cvss3:
w += self.cvss3
return w
def cvss2_score(self):
if "cve" in self.json.keys():
if "baseMetricV2" in self.json["impact"]:
return self.json["impact"]["baseMetricV2"]["cvssV2"]["baseScore"]
else:
if "cvssMetricV2" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV2"][0]["cvssData"]["baseScore"]
return None
def cvss3_score(self):
if "cve" in self.json.keys():
if "baseMetricV3" in self.json["impact"]:
return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseScore"]
else:
if "cvssMetricV31" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV31"][0]["cvssData"]["baseScore"]
elif "cvssMetricV30" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV30"][0]["cvssData"]["baseScore"]
return None
def cvss2_severity(self):
if "cve" in self.json.keys():
if "baseMetricV2" in self.json["impact"]:
return self.json["impact"]["baseMetricV2"]["severity"]
else:
if "cvssMetricV2" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV2"][0]["baseSeverity"]
return None
def cvss3_severity(self):
if "cve" in self.json.keys():
if "baseMetricV3" in self.json["impact"]:
return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseSeverity"]
else:
if "cvssMetricV31" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV31"][0]["cvssData"][
"baseSeverity"
]
elif "cvssMetricV30" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV30"][0]["cvssData"][
"baseSeverity"
]
return None
def references(self):
if "cve" in self.json.keys():
return self.json["cve"]["references"]["reference_data"]
return self.json["references"]
class Task(BaseModel):
    """A single import/update run; CVE changes are attached to it."""

    __tablename__ = "tasks"

    # Relationships
    changes = db.relationship("Change", back_populates="task")

    def __repr__(self):
        return "<Task {}>".format(self.created_at)
class Product(BaseModel):
    """A product belonging to a vendor; users can subscribe to it."""

    __tablename__ = "products"

    name = db.Column(db.String(), nullable=False, index=True)

    # Relationships
    vendor_id = db.Column(UUIDType(binary=False), db.ForeignKey("vendors.id"))
    vendor = db.relationship("Vendor", back_populates="products")
    users = db.relationship("User", secondary=users_products)

    def human_name(self):
        """Human-readable form of the raw CPE-derived name."""
        return _humanize_filter(self.name)

    def __repr__(self):
        return "<Product {}>".format(self.name)
class Vendor(BaseModel):
    """A vendor extracted from the CPE data; users can subscribe to it."""

    __tablename__ = "vendors"

    name = db.Column(db.String(), nullable=False, unique=True)

    # Relationships
    products = db.relationship("Product", back_populates="vendor")
    users = db.relationship("User", secondary=users_vendors)

    def human_name(self):
        """Human-readable form of the raw CPE-derived name."""
        return _humanize_filter(self.name)

    def __repr__(self):
        return "<Vendor {}>".format(self.name)
class Meta(BaseModel):
    """Simple name/value store for application state (e.g. NVD sync markers)."""

    __tablename__ = "metas"

    name = db.Column(db.String(), nullable=False)
    value = db.Column(db.String(), nullable=False)

    def __repr__(self):
        return "<Meta {}>".format(self.name)
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
Import the CVE list. Important notice: This product uses data from the NVD API but is not endorsed or certified by the NVD.
Here is the function:
def run():
    """
    Import the CVE list.

    Important notice:
    This product uses data from the NVD API but is not endorsed or certified by the NVD.
    """
    # One Task groups all the Change rows created by this run.
    task = Task()
    db.session.add(task)
    db.session.commit()
    task_id = task.id

    mappings = {"vendors": {}, "products": {}, "cves": [], "changes": []}

    url_template = NVD_API_URL + "?startIndex={idx}"
    start_index = 0
    total_results = 0

    while start_index <= total_results:
        url = url_template.format(idx=start_index)
        with timed_operation(f"Downloading {url}"):
            resp = requests.get(url)

        # Retry the same page on API errors (rate limiting, 5xx, ...).
        if not resp.ok:
            info(f"Bad response: {resp.status_code}, sleeping before retrying")
            time.sleep(10)
            continue

        with timed_operation("Creating model objects"):
            data = resp.json()
            total_results = data.get("totalResults", 0)

            for vulnerability in data.get("vulnerabilities", []):
                cve_db_id = get_uuid()
                cve_data = vulnerability.get("cve")
                cve_id = cve_data["id"]

                # Take the CVSS scores; `metrics` can be absent entirely,
                # so always go through a dict default (the original mixed
                # cve_data["metrics"] and cve_data.get("metrics")).
                metrics = cve_data.get("metrics", {})
                if "cvssMetricV31" in metrics:
                    cvss3 = metrics["cvssMetricV31"][0]["cvssData"]["baseScore"]
                elif "cvssMetricV30" in metrics:
                    cvss3 = metrics["cvssMetricV30"][0]["cvssData"]["baseScore"]
                else:
                    cvss3 = None

                if "cvssMetricV2" in metrics:
                    cvss2 = metrics["cvssMetricV2"][0]["cvssData"]["baseScore"]
                else:
                    cvss2 = None

                # Construct CWE and CPE lists
                cwes = weaknesses_to_flat(cve_data.get("weaknesses"))
                vendors_products = convert_cpes(cve_data.get("configurations", {}))
                vendors_flatten = flatten_vendors(vendors_products)

                # In case of multiple languages, keep the EN one
                descriptions = cve_data["descriptions"]
                if len(descriptions) > 1:
                    descriptions = [
                        d for d in descriptions if d["lang"] in ("en", "en-US")
                    ]
                summary = descriptions[0]["value"]

                # Create the CVEs mappings
                mappings["cves"].append(
                    dict(
                        id=cve_db_id,
                        cve_id=cve_id,
                        summary=summary,
                        json=cve_data,
                        vendors=vendors_flatten,
                        cwes=cwes,
                        cvss2=cvss2,
                        cvss3=cvss3,
                        created_at=arrow.get(cve_data["published"]).datetime,
                        updated_at=arrow.get(cve_data["lastModified"]).datetime,
                    )
                )

                # Create the vendors and their products
                for vendor, products in vendors_products.items():
                    if vendor not in mappings["vendors"]:
                        mappings["vendors"][vendor] = dict(id=get_uuid(), name=vendor)
                    for product in products:
                        slug = get_slug(vendor, product)
                        if slug not in mappings["products"]:
                            mappings["products"][slug] = dict(
                                id=get_uuid(),
                                name=product,
                                vendor_id=mappings["vendors"][vendor]["id"],
                            )

        # NVD requirement is 2000 CVE per page
        start_index += 2000

        # Insert the objects in database every 10 pages (and at the end)
        if (start_index % 20_000 == 0) or (start_index >= total_results):
            with timed_operation("Inserting CVE"):
                db.session.bulk_insert_mappings(Cve, mappings["cves"])
                db.session.commit()

                # Create the changes based on CVEs data
                for cve in mappings["cves"]:
                    mappings["changes"].append(
                        dict(
                            id=get_uuid(),
                            created_at=cve["created_at"],
                            updated_at=cve["updated_at"],
                            json=cve["json"],
                            cve_id=cve["id"],
                            task_id=task_id,
                        )
                    )
                db.session.bulk_insert_mappings(Change, mappings["changes"])
                db.session.commit()
                info("{} CVE imported.".format(len(mappings["cves"])))

            # Free the memory after each inserted batch
            mappings["cves"] = []
            mappings["changes"] = []

        # NVD requirement is 6s between requests
        if start_index <= total_results:
            info("Waiting 6 seconds")
            time.sleep(6)

    # Save the last CVE in database (reused by the handle_events task)
    with timed_operation("Saving last CVE information"):
        last_cve = Cve.query.order_by(Cve.updated_at.desc()).first()
        db.session.add(Meta(name="nvd_last_cve_id", value=str(last_cve.cve_id)))
        db.session.add(
            Meta(name="nvd_last_cve_updated_at", value=str(last_cve.updated_at))
        )
        db.session.commit()

    # Insert the objects in database
    with timed_operation("Inserting Vendors and Products"):
        db.session.bulk_insert_mappings(Vendor, mappings["vendors"].values())
        db.session.bulk_insert_mappings(Product, mappings["products"].values())
        db.session.commit()
from io import BytesIO
from zipfile import ZipFile
import requests
import untangle
from opencve.commands import header, info, timed_operation
from opencve.extensions import db
from opencve.models import get_uuid
from opencve.models.cwe import Cwe
MITRE_CWE_URL = "https://cwe.mitre.org/data/xml/cwec_latest.xml.zip"
def info(msg, nl=True):
    """Echo an informational message prefixed with "[*]" to the console."""
    click.echo("[*] {}".format(msg), nl=nl)
def header(msg):
    """Print *msg* framed by two lines of '#' the same width as the message."""
    click.echo("#" * len(msg))
    click.echo(msg)
    click.echo("#" * len(msg))
@contextmanager
def timed_operation(msg, nl=False):
    """Context manager printing *msg*, then the elapsed time on exit.

    The @contextmanager decorator is required: this function is used as
    ``with timed_operation(...):`` and a bare generator is not a context
    manager.
    """
    start = time.time()
    info(msg, nl=nl)
    yield
    click.echo(" (done in {}s).".format(round(time.time() - start, 3)))
db = SQLAlchemy(session_options={"autoflush": False})
def get_uuid():
    """Return a new random UUID4 rendered as a string."""
    new_id = uuid.uuid4()
    return str(new_id)
class Cwe(BaseModel):
    """A Common Weakness Enumeration entry (id, name, description)."""

    __tablename__ = "cwes"

    cwe_id = db.Column(db.String(), nullable=False, index=True)
    name = db.Column(db.String())
    description = db.Column(db.String())

    def short_id(self):
        """Return the numeric part of the id ("CWE-79" -> "79"), or None."""
        if not self.cwe_id.startswith("CWE-"):
            return None
        return self.cwe_id.split("CWE-")[1]

    def __repr__(self):
        return "<Cwe {}>".format(self.cwe_id)
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
Import the CWE list.
Here is the function:
def run():
    """
    Import the CWE list.

    Downloads the latest CWE archive from MITRE, parses its weaknesses and
    categories, and bulk-inserts them into the database.
    """
    header("Importing CWE list...")

    # Download the file
    with timed_operation("Downloading {}...".format(MITRE_CWE_URL)):
        resp = requests.get(MITRE_CWE_URL)
        # Fail fast on a bad download instead of crashing later in ZipFile.
        resp.raise_for_status()
        content = resp.content

    # Parse weaknesses
    with timed_operation("Parsing cwes..."):
        z = ZipFile(BytesIO(content))
        raw = z.open(z.namelist()[0]).read()
        obj = untangle.parse(raw.decode("utf-8"))
        weaknesses = obj.Weakness_Catalog.Weaknesses.Weakness
        categories = obj.Weakness_Catalog.Categories.Category

    # Create the objects
    cwes = {}
    with timed_operation("Creating mappings..."):
        for c in weaknesses + categories:
            cwes[c["ID"]] = dict(
                id=get_uuid(),
                cwe_id=f"CWE-{c['ID']}",
                name=c["Name"],
                # Weaknesses carry a Description, categories a Summary.
                description=c.Description.cdata
                if hasattr(c, "Description")
                else c.Summary.cdata,
            )

    # Insert the objects in database
    with timed_operation("Inserting CWE..."):
        db.session.bulk_insert_mappings(Cwe, cwes.values())
        db.session.commit()

    info("{} CWE imported.".format(len(cwes)))
    del cwes
import os
import click
from opencve import __version__
from opencve.commands.celery import celery
from opencve.commands.create_user import create_user
from opencve.commands.init import init
from opencve.commands.upgrade_db import upgrade_db
from opencve.commands.imports import import_data
from opencve.commands.webserver import webserver
from opencve.commands.migrate_nvd import migrate_nvd
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli()` to solve the following problem:
CVE Alerting Platform
Here is the function:
def cli():
    """CVE Alerting Platform"""
    # Point the Flask CLI at the OpenCVE application object.
    os.environ.update({"FLASK_APP": "opencve.app:app"})
from nested_lookup import nested_lookup
from difflib import HtmlDiff
from opencve.constants import PRODUCT_SEPARATOR
from opencve.models.cwe import Cwe
The provided code snippet includes necessary dependencies for implementing the `get_cwes` function. Write a Python function `def get_cwes(problems)` to solve the following problem:
Takes a list of problems and return the CWEs ID.
Here is the function:
def get_cwes(problems):
    # TODO: change references to it
    """
    Takes a list of problems and return the CWEs ID.

    Deduplicates while preserving first-seen order: the original
    ``list(set(...))`` made the result order non-deterministic between runs.
    """
    return list(dict.fromkeys(p["value"] for p in problems))
from nested_lookup import nested_lookup
from difflib import HtmlDiff
from opencve.constants import PRODUCT_SEPARATOR
from opencve.models.cwe import Cwe
def vendors_conf_to_dict(conf):
    """
    This function takes an object, extracts its CPE uris and transforms them into
    a dictionary representing the vendors with their associated products.
    """
    uris = nested_lookup("criteria", conf)

    # Deduplicated (vendor, product) pairs taken from each CPE uri.
    pairs = {tuple(uri.split(":")[3:5]) for uri in uris}

    # Group the products under their vendor.
    vendors = {}
    for vendor, product in pairs:
        vendors.setdefault(vendor, []).append(product)
    return vendors
def vendors_dict_to_flat(vendors):
    """
    Takes a list of nested vendors and products and flat them.
    """
    flat = []
    for vendor, products in vendors.items():
        flat.append(vendor)
        flat.extend(f"{vendor}{PRODUCT_SEPARATOR}{product}" for product in products)
    return flat
The provided code snippet includes necessary dependencies for implementing the `vendors_conf_to_flat` function. Write a Python function `def vendors_conf_to_flat(conf=None)` to solve the following problem:
Takes a list of CPEs configuration and returns it in a flat array with a vendor/product separator in each item.
Here is the function:
def vendors_conf_to_flat(conf=None):
    """
    Takes a list of CPEs configuration and returns it in a flat
    array with a vendor/product separator in each item.
    """
    return vendors_dict_to_flat(vendors_conf_to_dict(conf)) if conf else []
from flask import current_app as app
from flask import flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required
from opencve.controllers.main import main
from opencve.controllers.tags import UserTagController
from opencve.models.cve import Cve
from opencve.models.tags import CveTag, UserTag
from opencve.extensions import db
from opencve.forms import (
ChangeEmailForm,
ChangePasswordForm,
FiltersNotificationForm,
MailNotificationsForm,
TagForm,
)
def subscriptions():
    """Render the subscriptions page of the user profile."""
    template_name = "profiles/subscriptions.html"
    return render_template(template_name)
from flask import current_app as app
from flask import flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required
from opencve.controllers.main import main
from opencve.controllers.tags import UserTagController
from opencve.models.cve import Cve
from opencve.models.tags import CveTag, UserTag
from opencve.extensions import db
from opencve.forms import (
ChangeEmailForm,
ChangePasswordForm,
FiltersNotificationForm,
MailNotificationsForm,
TagForm,
)
db = SQLAlchemy(session_options={"autoflush": False})
class MailNotificationsForm(FlaskForm):
    """Form toggling email notifications and selecting their frequency."""

    enable = RadioField(
        "Enable email notifications", choices=[("yes", "Yes"), ("no", "No")]
    )
    frequency = SelectField("Email frequency", choices=FREQUENCIES_TYPES)
    submit = SubmitField("Save changes")
class FiltersNotificationForm(FlaskForm):
    """Form selecting the CVE event types (and minimum CVSS) kept in the user's filters."""

    new_cve = BooleanField("New CVE")
    first_time = BooleanField("Subscription appeared for the first time")
    references = BooleanField("Reference changed")
    cvss = BooleanField("CVSS changed")
    cpes = BooleanField("CPE changed")
    summary = BooleanField("Summary changed")
    cwes = BooleanField("CWE changed")
    cvss_score = SelectField("CVSS score", coerce=int, choices=CVSS_SCORES)
    submit = SubmitField("Save changes")
def notifications():
    """Display and update the user's email and filters notification settings."""
    mail_form = MailNotificationsForm(
        obj=current_user,
        enable="yes" if current_user.enable_notifications else "no",
        frequency=current_user.frequency_notifications.code,
    )

    filters = current_user.filters_notifications or {"event_types": [], "cvss": 0}
    event_types = filters["event_types"]
    filters_form = FiltersNotificationForm(
        obj=current_user,
        new_cve="new_cve" in event_types,
        first_time="first_time" in event_types,
        references="references" in event_types,
        cvss="cvss" in event_types,
        cpes="cpes" in event_types,
        summary="summary" in event_types,
        cwes="cwes" in event_types,
        cvss_score=filters["cvss"],
    )

    if request.method == "POST":
        # The page hosts two forms; a hidden field tells them apart.
        form_name = request.form["form-name"]

        if form_name == "mail_notifications_form" and mail_form.validate():
            current_user.enable_notifications = mail_form.enable.data == "yes"
            current_user.frequency_notifications = mail_form.frequency.data
            db.session.commit()
            flash(
                "Your notifications setting has been changed successfully.", "success"
            )
            return redirect(url_for("main.notifications"))

        if form_name == "filters_notifications_form" and filters_form.validate():
            selected = {
                "event_types": [],
                "cvss": filters_form.cvss_score.data,
            }
            for event_type in (
                "new_cve",
                "first_time",
                "references",
                "cvss",
                "cpes",
                "cwes",
                "summary",
            ):
                if getattr(filters_form, event_type).data:
                    selected["event_types"].append(event_type)
            current_user.filters_notifications = selected
            db.session.commit()
            flash(
                "Your notifications setting has been changed successfully.", "success"
            )
            return redirect(url_for("main.notifications"))

    return render_template(
        "profiles/notifications.html",
        mail_notifications_form=mail_form,
        filters_notifications_form=filters_form,
    )
from flask import current_app as app
from flask import flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required
from opencve.controllers.main import main
from opencve.controllers.tags import UserTagController
from opencve.models.cve import Cve
from opencve.models.tags import CveTag, UserTag
from opencve.extensions import db
from opencve.forms import (
ChangeEmailForm,
ChangePasswordForm,
FiltersNotificationForm,
MailNotificationsForm,
TagForm,
)
def tags():
    """List the current user's tags and handle creation of a new one."""
    tags, _, pagination = UserTagController.list(
        {**request.args, "user_id": current_user.id}
    )

    tag_form = TagForm()

    # Form has been submitted
    if request.method == "POST" and tag_form.validate():
        # Check if the tag doesn't already exist
        if UserTag.query.filter_by(
            user_id=current_user.id, name=tag_form.name.data
        ).first():
            flash("This tag already exists.", "error")

        # Create the new tag
        else:
            tag = UserTag(
                user=current_user,
                name=tag_form.name.data,
                description=tag_form.description.data,
                color=tag_form.color.data,
            )
            db.session.add(tag)
            db.session.commit()
            flash(f"The tag {tag.name} has been successfully added.", "success")
            return redirect(
                url_for("main.edit_tag", tag=tag.name, page=request.args.get("page"))
            )

    return render_template(
        "profiles/tags.html",
        tags=tags,
        form=tag_form,
        pagination=pagination,
        mode="create",
    )
class UserTagController(BaseController):
    """Paginated listing of a user's tags, ordered by name."""

    model = UserTag
    order = [UserTag.name.asc()]
    per_page_param = "TAGS_PER_PAGE"
    schema = {
        "user_id": {"type": str},
        "name": {"type": str},
    }

    # NOTE(review): presumably a @classmethod in the original source -- confirm.
    def build_query(cls, args):
        """Restrict the base query to tags owned by `user_id`."""
        return cls.model.query.filter_by(user_id=args.get("user_id")), {}
db = SQLAlchemy(session_options={"autoflush": False})
class TagForm(FlaskForm):
    """Form to create or update a user tag (name, description, color)."""

    # Tag names end up in URLs (see edit_tag), hence the restricted charset.
    name = StringField(
        "Name",
        validators=[
            validators.DataRequired("Name is required"),
            validators.Regexp(
                "^[a-zA-Z0-9\-_]+$",
                message="Only alphanumeric, dash and underscore characters are accepted",
            ),
        ],
    )
    description = StringField("Description")
    color = StringField(
        "Color",
        validators=[
            validators.DataRequired("Color is required"),
            validators.Regexp(
                "^#[0-9a-fA-F]{6}$", message="Color must be in hexadecimal format"
            ),
        ],
        default="#000000",
    )
    submit = SubmitField("Save")
def edit_tag(tag):
    """Update an existing user tag (the name itself cannot be changed)."""
    tag = UserTagController.get({"user_id": current_user.id, "name": tag})
    if not tag:
        return redirect(url_for("main.tags"))

    form = TagForm(obj=tag, color=tag.color)

    if request.method == "POST" and form.validate():
        # Renaming is not allowed: bail out to the tags list.
        if form.name.data != tag.name:
            return redirect(url_for("main.tags"))

        # Apply the submitted values and persist them.
        form.populate_obj(tag)
        tag.color = form.color.data
        db.session.commit()

        flash(f"The tag {tag.name} has been successfully updated.", "success")
        return redirect(
            url_for("main.edit_tag", tag=tag.name, page=request.args.get("page"))
        )

    user_tags, _, pagination = UserTagController.list(
        {**request.args, "user_id": current_user.id}
    )
    return render_template(
        "profiles/tags.html",
        tags=user_tags,
        form=form,
        pagination=pagination,
        mode="update",
    )
from flask import current_app as app
from flask import flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required
from opencve.controllers.main import main
from opencve.controllers.tags import UserTagController
from opencve.models.cve import Cve
from opencve.models.tags import CveTag, UserTag
from opencve.extensions import db
from opencve.forms import (
ChangeEmailForm,
ChangePasswordForm,
FiltersNotificationForm,
MailNotificationsForm,
TagForm,
)
def tags():
tags, _, pagination = UserTagController.list(
{**request.args, "user_id": current_user.id}
)
tag_form = TagForm()
# Form has been submitted
if request.method == "POST" and tag_form.validate():
# Check if the tag doesn't already exist
if UserTag.query.filter_by(
user_id=current_user.id, name=tag_form.name.data
).first():
flash("This tag already exists.", "error")
# Create the new tag
else:
tag = UserTag(
user=current_user,
name=tag_form.name.data,
description=tag_form.description.data,
color=tag_form.color.data,
)
db.session.add(tag)
db.session.commit()
flash(f"The tag {tag.name} has been successfully added.", "success")
return redirect(
url_for("main.edit_tag", tag=tag.name, page=request.args.get("page"))
)
return render_template(
"profiles/tags.html",
tags=tags,
form=tag_form,
pagination=pagination,
mode="create",
)
class UserTagController(BaseController):
model = UserTag
order = [UserTag.name.asc()]
per_page_param = "TAGS_PER_PAGE"
schema = {
"user_id": {"type": str},
"name": {"type": str},
}
def build_query(cls, args):
return cls.model.query.filter_by(user_id=args.get("user_id")), {}
class Cve(BaseModel):
    """CVE record: the raw NVD JSON plus denormalized columns for search.

    NOTE(review): the accessors below (`raw_tags`, `tags`, the cvss_*
    helpers, `references`) are read elsewhere as plain attributes
    (e.g. `cve.tags`), so they are presumably @property-decorated in the
    full source — the decorators are not visible in this chunk.
    """
    __tablename__ = "cves"
    # CVE are sorted by last modified date, we need to index it.
    updated_at = db.Column(
        db.DateTime(timezone=True),
        default=db.func.now(),
        onupdate=db.func.now(),
        nullable=False,
        index=True,
    )
    cve_id = db.Column(db.String(), nullable=False)
    json = db.Column(JSONB)
    # We initially used secondary relationships to fetch the list of
    # associated vendors, products and cwes. But it was complicated
    # to maintain, and the performance was poor. So we now use the
    # JSONB data type associated to the GIN index type.
    vendors = db.Column(JSONB)
    cwes = db.Column(JSONB)
    # Keep the summary separated when searching keywords
    summary = db.Column(db.String(), nullable=False)
    # Keep CVSS separated when searching a particular score
    cvss2 = db.Column(db.Float())
    cvss3 = db.Column(db.Float())
    # Relationships
    events = db.relationship("Event", back_populates="cve")
    changes = db.relationship("Change", back_populates="cve")
    alerts = db.relationship("Alert", back_populates="cve")
    # Index
    __table_args__ = (
        db.Index("ix_cves_vendors", vendors, postgresql_using="gin"),
        db.Index("ix_cves_cwes", cwes, postgresql_using="gin"),
        db.Index(
            "ix_cves_summary",
            summary,
            postgresql_using="gin",
            postgresql_ops={
                "summary": "gin_trgm_ops",
            },
        ),
        db.Index(
            "ix_cves_cve_id",
            cve_id,
            postgresql_using="gin",
            postgresql_ops={
                "cve_id": "gin_trgm_ops",
            },
        ),
    )
    def __repr__(self):
        return "<Cve {}>".format(self.cve_id)
    def raw_tags(self):
        # List of tag names (strings) the current user attached to this CVE;
        # empty for anonymous visitors or untagged CVEs.
        if not current_user.is_authenticated:
            return []
        cve_tag = CveTag.query.filter_by(
            user_id=current_user.id, cve_id=self.id
        ).first()
        if not cve_tag:
            return []
        return cve_tag.tags
    def tags(self):
        # Resolve the raw tag names into the user's UserTag objects.
        if not current_user.is_authenticated:
            return []
        return [
            UserTag.query.filter_by(user_id=current_user.id, name=t).first()
            for t in self.raw_tags
        ]
    def cvss_weight(self):
        """Only used to sort several CVE by their CVSS"""
        w = 0
        if self.cvss2:
            w += self.cvss2
        if self.cvss3:
            w += self.cvss3
        return w
    def cvss2_score(self):
        # A top-level "cve" key means the older JSON layout (scores under
        # "impact"); otherwise scores live under "metrics".
        if "cve" in self.json.keys():
            if "baseMetricV2" in self.json["impact"]:
                return self.json["impact"]["baseMetricV2"]["cvssV2"]["baseScore"]
        else:
            if "cvssMetricV2" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV2"][0]["cvssData"]["baseScore"]
        return None
    def cvss3_score(self):
        # Same layout dance as cvss2_score; prefers CVSS v3.1 over v3.0.
        if "cve" in self.json.keys():
            if "baseMetricV3" in self.json["impact"]:
                return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseScore"]
        else:
            if "cvssMetricV31" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV31"][0]["cvssData"]["baseScore"]
            elif "cvssMetricV30" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV30"][0]["cvssData"]["baseScore"]
        return None
    def cvss2_severity(self):
        # Severity label ("LOW"/"MEDIUM"/...) for CVSS v2, or None.
        if "cve" in self.json.keys():
            if "baseMetricV2" in self.json["impact"]:
                return self.json["impact"]["baseMetricV2"]["severity"]
        else:
            if "cvssMetricV2" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV2"][0]["baseSeverity"]
        return None
    def cvss3_severity(self):
        # Severity label for CVSS v3 (v3.1 preferred over v3.0), or None.
        if "cve" in self.json.keys():
            if "baseMetricV3" in self.json["impact"]:
                return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseSeverity"]
        else:
            if "cvssMetricV31" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV31"][0]["cvssData"][
                    "baseSeverity"
                ]
            elif "cvssMetricV30" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV30"][0]["cvssData"][
                    "baseSeverity"
                ]
        return None
    def references(self):
        # Reference list location also depends on the JSON layout.
        if "cve" in self.json.keys():
            return self.json["cve"]["references"]["reference_data"]
        return self.json["references"]
class CveTag(BaseModel):
    """Association row carrying one user's tag names for one CVE."""
    __tablename__ = "cves_tags"
    # List of tag names (strings) attached by the user, stored as JSONB.
    tags = db.Column(JSONB)
    # Relationships
    user_id = db.Column(UUIDType(binary=False), db.ForeignKey("users.id"))
    user = db.relationship("User", back_populates="cve_tags")
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"))
    # Index (GIN so JSONB containment lookups on tags are indexed)
    __table_args__ = (db.Index("ix_cves_tags", tags, postgresql_using="gin"),)
    def __repr__(self):
        return "<CveTag {}>".format(self.id)
# Shared SQLAlchemy handle; autoflush is disabled so queries never flush
# half-built pending objects implicitly.
db = SQLAlchemy(session_options={"autoflush": False})
def delete_tag(tag):
    """Flask view deleting one of the current user's tags.

    GET shows a confirmation page; any other method performs the deletion.
    A tag still attached to at least one CVE cannot be removed.
    """
    tag = UserTagController.get({"user_id": current_user.id, "name": tag})
    if not tag:
        return redirect(url_for("main.tags"))
    # Number of CVEs this user tagged with that tag name.
    count = (
        db.session.query(Cve.id)
        .join(CveTag)
        .filter(CveTag.user_id == current_user.id)
        .filter(CveTag.tags.contains([tag.name]))
        .count()
    )
    if count > 0:
        flash(
            f"The tag {tag.name} is still associated to {count} CVE(s), detach them before removing the tag.",
            "error",
        )
        return redirect(url_for("main.tags"))
    # Confirmation page
    if request.method == "GET":
        return render_template("profiles/delete_tag.html", tag=tag, count=count)
    # Delete the tag
    else:
        db.session.delete(tag)
        db.session.commit()
        flash(f"The tag {tag.name} has been deleted.", "success")
        return redirect(url_for("main.tags"))
160,505 | from flask import request, render_template
from opencve.controllers.main import main
from opencve.controllers.cwes import CweController
class CweController(BaseController):
    """Controller listing CWEs, optionally filtered by numeric id or name.

    NOTE(review): `build_query` is presumably @classmethod-decorated in the
    full source; the decorator is not visible in this chunk.
    """
    model = Cwe
    order = [Cwe.cwe_id.desc()]
    per_page_param = "CWES_PER_PAGE"
    schema = {
        "search": {"type": str},
    }
    def build_query(cls, args):
        query = cls.model.query
        # Filter the list of CWE
        if args.get("search"):
            search = args.get("search").strip().lower()
            # By ID or by string: accept "cwe-79", "79" or a name substring
            search = search[4:] if search.startswith("cwe-") else search
            try:
                # Numeric search matches the exact "CWE-<n>" identifier...
                search = int(search)
                query = query.filter_by(cwe_id=f"CWE-{search}")
            except ValueError:
                # ...anything else does a case-insensitive name match.
                query = query.filter(Cwe.name.ilike("%{}%".format(search)))
        return query, {}
def cwes():
    """Render the paginated CWE list, honouring the request filters."""
    cwe_list, _, pagination = CweController.list(request.args)
    return render_template(
        "cwes.html",
        cwes=cwe_list,
        pagination=pagination,
    )
160,506 | import itertools
import json
import operator
from flask import abort, flash, redirect, request, render_template, url_for
from flask_user import current_user, login_required
from opencve.controllers.cves import CveController
from opencve.controllers.main import main
from opencve.controllers.tags import UserTagController
from opencve.extensions import db
from opencve.models import is_valid_uuid
from opencve.models.changes import Change
from opencve.models.events import Event
from opencve.models.tags import CveTag
from opencve.utils import convert_cpes, get_cwes_details, CustomHtmlHTML
class CveController(BaseController):
    """Controller listing CVEs with keyword, CWE, CVSS, vendor/product and
    tag filters.

    `build_query` returns the filtered query plus a metadata dict exposing
    the matched vendor/product/tag objects for the templates.
    NOTE(review): `build_query` is presumably @classmethod-decorated in the
    full source; the decorator is not visible in this chunk.
    """
    model = Cve
    # Most recently modified first; id breaks ties for stable pagination.
    order = [Cve.updated_at.desc(), Cve.id.desc()]
    per_page_param = "CVES_PER_PAGE"
    # Accepted query-string filters.
    schema = {
        "search": {"type": str},
        "vendor": {"type": str},
        "product": {"type": str},
        "cvss": {"type": str},
        "cwe": {"type": str},
        "tag": {"type": str},
        "user_id": {"type": str},
    }
    def build_query(cls, args):
        vendor = None
        product = None
        tag = None
        query = Cve.query
        # Normalize names: vendors drop spaces, products map spaces to "_".
        vendor_query = args.get("vendor")
        product_query = args.get("product")
        if vendor_query:
            vendor_query = vendor_query.replace(" ", "").lower()
        if product_query:
            product_query = product_query.replace(" ", "_").lower()
        # Filter by keyword
        if args.get("search"):
            possible_vendor = args.get("search").replace(" ", "").lower()
            possible_product = args.get("search").replace(" ", "_").lower()
            vendor = Vendor.query.filter_by(name=possible_vendor).first()
            if vendor:
                product = Product.query.filter_by(
                    name=possible_product, vendor_id=vendor.id
                ).first()
            else:
                product = Product.query.filter_by(name=possible_product).first()
            # NOTE(review): when no vendor/product matches the keyword, None
            # is passed as an or_() clause — confirm SQLAlchemy renders it
            # as a harmless NULL predicate.
            query = query.filter(
                or_(
                    Cve.cve_id.contains(args.get("search")),
                    Cve.summary.ilike(f"%{args.get('search')}%"),
                    Cve.vendors.contains([vendor.name]) if vendor else None,
                    Cve.vendors.contains([product.name]) if product else None,
                )
            )
        # Filter by CWE
        if args.get("cwe"):
            query = query.filter(Cve.cwes.contains([args.get("cwe")]))
        # Filter by CVSS score (CVSSv3 severity buckets)
        if args.get("cvss") and args.get("cvss").lower() in [
            "none",
            "low",
            "medium",
            "high",
            "critical",
        ]:
            if args.get("cvss").lower() == "none":
                query = query.filter(Cve.cvss3 == None)
            if args.get("cvss").lower() == "low":
                query = query.filter(and_(Cve.cvss3 >= 0.1, Cve.cvss3 <= 3.9))
            if args.get("cvss").lower() == "medium":
                query = query.filter(and_(Cve.cvss3 >= 4.0, Cve.cvss3 <= 6.9))
            if args.get("cvss").lower() == "high":
                query = query.filter(and_(Cve.cvss3 >= 7.0, Cve.cvss3 <= 8.9))
            if args.get("cvss").lower() == "critical":
                query = query.filter(and_(Cve.cvss3 >= 9.0, Cve.cvss3 <= 10.0))
        # Filter by vendor and product
        if vendor_query and product_query:
            vendor = Vendor.query.filter_by(name=vendor_query).first()
            if not vendor:
                abort(404, "Not found.")
            product = Product.query.filter_by(
                name=product_query, vendor_id=vendor.id
            ).first()
            if not product:
                abort(404, "Not found.")
            query = query.filter(
                Cve.vendors.contains(
                    [f"{vendor.name}{PRODUCT_SEPARATOR}{product.name}"]
                )
            )
        # Filter by vendor
        elif vendor_query:
            vendor = Vendor.query.filter_by(name=vendor_query).first()
            if not vendor:
                abort(404, "Not found.")
            query = query.filter(Cve.vendors.contains([vendor.name]))
        # Filter by product only
        elif product_query:
            product = Product.query.filter_by(name=product_query).first()
            if not product:
                abort(404, "Not found.")
            query = query.filter(Cve.vendors.contains([product.name]))
        # Filter by tag (restricted to the requesting user's tags)
        if args.get("tag"):
            tag = UserTagController.get(
                {"user_id": args.get("user_id"), "name": args.get("tag")}
            )
            if not tag:
                abort(404, "Not found.")
            query = (
                query.join(CveTag)
                .filter(CveTag.user_id == args.get("user_id"))
                .filter(CveTag.tags.contains([args.get("tag")]))
            )
        return query, {"vendor": vendor, "product": product, "tag": tag}
class UserTagController(BaseController):
    """Controller listing the tags owned by a single user.

    NOTE(review): `build_query` is invoked through the class-level
    BaseController API — presumably @classmethod-decorated in the full
    source; the decorator is not visible in this chunk.
    """
    model = UserTag
    # Tags are listed alphabetically.
    order = [UserTag.name.asc()]
    per_page_param = "TAGS_PER_PAGE"
    # Accepted filter arguments.
    schema = {
        "user_id": {"type": str},
        "name": {"type": str},
    }
    def build_query(cls, args):
        # Only the tags belonging to the given user; no extra metadata.
        return cls.model.query.filter_by(user_id=args.get("user_id")), {}
def cves():
    """Flask view listing CVEs, including the user's tags when logged in."""
    args = request.args
    user_tags = []
    if current_user.is_authenticated:
        # Inject the user id so the controller can apply tag filtering.
        args = {**request.args, "user_id": current_user.id}
        user_tags = UserTagController.list_items({"user_id": current_user.id})
    objects, metas, pagination = CveController.list(args)
    return render_template(
        "cves.html",
        cves=objects,
        vendor=metas.get("vendor"),
        product=metas.get("product"),
        tag=metas.get("tag"),
        user_tags=user_tags,
        pagination=pagination,
    )
160,507 | import itertools
import json
import operator
from flask import abort, flash, redirect, request, render_template, url_for
from flask_user import current_user, login_required
from opencve.controllers.cves import CveController
from opencve.controllers.main import main
from opencve.controllers.tags import UserTagController
from opencve.extensions import db
from opencve.models import is_valid_uuid
from opencve.models.changes import Change
from opencve.models.events import Event
from opencve.models.tags import CveTag
from opencve.utils import convert_cpes, get_cwes_details, CustomHtmlHTML
def cve(cve_id):
    """Flask view displaying one CVE: configurations, CWEs, tags, history."""
    cve = CveController.get({"cve_id": cve_id})
    vendors = convert_cpes(cve.json.get("configurations", {}))
    cwes = get_cwes_details(cve.json)
    # Get the user tags
    user_tags = []
    if current_user.is_authenticated:
        user_tags = UserTagController.list_items({"user_id": current_user.id})
    # We have to pass an encoded list of tags for the modal box
    cve_tags_encoded = json.dumps([t.name for t in cve.tags])
    # Events grouped by creation time (newest first) for the timeline.
    events = Event.query.filter_by(cve_id=cve.id).order_by(Event.created_at.desc())
    events_by_time = [
        (time, list(evs))
        for time, evs in (itertools.groupby(events, operator.attrgetter("created_at")))
    ]
    return render_template(
        "cve.html",
        cve=cve,
        cve_dumped=json.dumps(cve.json),
        vendors=vendors,
        cwes=cwes,
        user_tags=user_tags,
        cve_tags_encoded=cve_tags_encoded,
        events_by_time=events_by_time,
    )
class CveController(BaseController):
    """Controller listing CVEs with keyword, CWE, CVSS, vendor/product and
    tag filters.

    `build_query` returns the filtered query plus a metadata dict exposing
    the matched vendor/product/tag objects for the templates.
    NOTE(review): `build_query` is presumably @classmethod-decorated in the
    full source; the decorator is not visible in this chunk.
    """
    model = Cve
    # Most recently modified first; id breaks ties for stable pagination.
    order = [Cve.updated_at.desc(), Cve.id.desc()]
    per_page_param = "CVES_PER_PAGE"
    # Accepted query-string filters.
    schema = {
        "search": {"type": str},
        "vendor": {"type": str},
        "product": {"type": str},
        "cvss": {"type": str},
        "cwe": {"type": str},
        "tag": {"type": str},
        "user_id": {"type": str},
    }
    def build_query(cls, args):
        vendor = None
        product = None
        tag = None
        query = Cve.query
        # Normalize names: vendors drop spaces, products map spaces to "_".
        vendor_query = args.get("vendor")
        product_query = args.get("product")
        if vendor_query:
            vendor_query = vendor_query.replace(" ", "").lower()
        if product_query:
            product_query = product_query.replace(" ", "_").lower()
        # Filter by keyword
        if args.get("search"):
            possible_vendor = args.get("search").replace(" ", "").lower()
            possible_product = args.get("search").replace(" ", "_").lower()
            vendor = Vendor.query.filter_by(name=possible_vendor).first()
            if vendor:
                product = Product.query.filter_by(
                    name=possible_product, vendor_id=vendor.id
                ).first()
            else:
                product = Product.query.filter_by(name=possible_product).first()
            # NOTE(review): when no vendor/product matches the keyword, None
            # is passed as an or_() clause — confirm SQLAlchemy renders it
            # as a harmless NULL predicate.
            query = query.filter(
                or_(
                    Cve.cve_id.contains(args.get("search")),
                    Cve.summary.ilike(f"%{args.get('search')}%"),
                    Cve.vendors.contains([vendor.name]) if vendor else None,
                    Cve.vendors.contains([product.name]) if product else None,
                )
            )
        # Filter by CWE
        if args.get("cwe"):
            query = query.filter(Cve.cwes.contains([args.get("cwe")]))
        # Filter by CVSS score (CVSSv3 severity buckets)
        if args.get("cvss") and args.get("cvss").lower() in [
            "none",
            "low",
            "medium",
            "high",
            "critical",
        ]:
            if args.get("cvss").lower() == "none":
                query = query.filter(Cve.cvss3 == None)
            if args.get("cvss").lower() == "low":
                query = query.filter(and_(Cve.cvss3 >= 0.1, Cve.cvss3 <= 3.9))
            if args.get("cvss").lower() == "medium":
                query = query.filter(and_(Cve.cvss3 >= 4.0, Cve.cvss3 <= 6.9))
            if args.get("cvss").lower() == "high":
                query = query.filter(and_(Cve.cvss3 >= 7.0, Cve.cvss3 <= 8.9))
            if args.get("cvss").lower() == "critical":
                query = query.filter(and_(Cve.cvss3 >= 9.0, Cve.cvss3 <= 10.0))
        # Filter by vendor and product
        if vendor_query and product_query:
            vendor = Vendor.query.filter_by(name=vendor_query).first()
            if not vendor:
                abort(404, "Not found.")
            product = Product.query.filter_by(
                name=product_query, vendor_id=vendor.id
            ).first()
            if not product:
                abort(404, "Not found.")
            query = query.filter(
                Cve.vendors.contains(
                    [f"{vendor.name}{PRODUCT_SEPARATOR}{product.name}"]
                )
            )
        # Filter by vendor
        elif vendor_query:
            vendor = Vendor.query.filter_by(name=vendor_query).first()
            if not vendor:
                abort(404, "Not found.")
            query = query.filter(Cve.vendors.contains([vendor.name]))
        # Filter by product only
        elif product_query:
            product = Product.query.filter_by(name=product_query).first()
            if not product:
                abort(404, "Not found.")
            query = query.filter(Cve.vendors.contains([product.name]))
        # Filter by tag (restricted to the requesting user's tags)
        if args.get("tag"):
            tag = UserTagController.get(
                {"user_id": args.get("user_id"), "name": args.get("tag")}
            )
            if not tag:
                abort(404, "Not found.")
            query = (
                query.join(CveTag)
                .filter(CveTag.user_id == args.get("user_id"))
                .filter(CveTag.tags.contains([args.get("tag")]))
            )
        return query, {"vendor": vendor, "product": product, "tag": tag}
class UserTagController(BaseController):
    """Controller listing the tags owned by a single user.

    NOTE(review): `build_query` is invoked through the class-level
    BaseController API — presumably @classmethod-decorated in the full
    source; the decorator is not visible in this chunk.
    """
    model = UserTag
    # Tags are listed alphabetically.
    order = [UserTag.name.asc()]
    per_page_param = "TAGS_PER_PAGE"
    # Accepted filter arguments.
    schema = {
        "user_id": {"type": str},
        "name": {"type": str},
    }
    def build_query(cls, args):
        # Only the tags belonging to the given user; no extra metadata.
        return cls.model.query.filter_by(user_id=args.get("user_id")), {}
# Shared SQLAlchemy handle; autoflush is disabled so queries never flush
# half-built pending objects implicitly.
db = SQLAlchemy(session_options={"autoflush": False})
class CveTag(BaseModel):
    """Association row carrying one user's tag names for one CVE."""
    __tablename__ = "cves_tags"
    # List of tag names (strings) attached by the user, stored as JSONB.
    tags = db.Column(JSONB)
    # Relationships
    user_id = db.Column(UUIDType(binary=False), db.ForeignKey("users.id"))
    user = db.relationship("User", back_populates="cve_tags")
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"))
    # Index (GIN so JSONB containment lookups on tags are indexed)
    __table_args__ = (db.Index("ix_cves_tags", tags, postgresql_using="gin"),)
    def __repr__(self):
        return "<CveTag {}>".format(self.id)
def cve_associate_tags(cve_id):
    """Flask view (POST) replacing the current user's tags on a CVE.

    Aborts with 404 when a submitted tag is not one of the user's own tags.
    """
    cve = CveController.get({"cve_id": cve_id})
    new_tags = request.form.getlist("tags")
    # Check if all tags are declared by the user
    user_tags = [
        t.name for t in UserTagController.list_items({"user_id": current_user.id})
    ]
    for new_tag in new_tags:
        if new_tag not in user_tags:
            abort(404)
    # Update the CVE tags (full replacement, not a merge)
    cve_tag = CveTag.query.filter_by(user_id=current_user.id, cve_id=cve.id).first()
    if not cve_tag:
        # First time this user tags this CVE: create the association row.
        cve_tag = CveTag(user_id=current_user.id, cve_id=cve.id)
    cve_tag.tags = new_tags
    db.session.add(cve_tag)
    db.session.commit()
    flash("The CVE tags have been updated.", "success")
    return redirect(url_for("main.cve", cve_id=cve_id))
160,508 | import itertools
import json
import operator
from flask import abort, flash, redirect, request, render_template, url_for
from flask_user import current_user, login_required
from opencve.controllers.cves import CveController
from opencve.controllers.main import main
from opencve.controllers.tags import UserTagController
from opencve.extensions import db
from opencve.models import is_valid_uuid
from opencve.models.changes import Change
from opencve.models.events import Event
from opencve.models.tags import CveTag
from opencve.utils import convert_cpes, get_cwes_details, CustomHtmlHTML
def cve(cve_id):
    """Flask view displaying one CVE: configurations, CWEs, tags, history."""
    cve = CveController.get({"cve_id": cve_id})
    vendors = convert_cpes(cve.json.get("configurations", {}))
    cwes = get_cwes_details(cve.json)
    # Get the user tags
    user_tags = []
    if current_user.is_authenticated:
        user_tags = UserTagController.list_items({"user_id": current_user.id})
    # We have to pass an encoded list of tags for the modal box
    cve_tags_encoded = json.dumps([t.name for t in cve.tags])
    # Events grouped by creation time (newest first) for the timeline.
    events = Event.query.filter_by(cve_id=cve.id).order_by(Event.created_at.desc())
    events_by_time = [
        (time, list(evs))
        for time, evs in (itertools.groupby(events, operator.attrgetter("created_at")))
    ]
    return render_template(
        "cve.html",
        cve=cve,
        cve_dumped=json.dumps(cve.json),
        vendors=vendors,
        cwes=cwes,
        user_tags=user_tags,
        cve_tags_encoded=cve_tags_encoded,
        events_by_time=events_by_time,
    )
class CveController(BaseController):
    """Controller listing CVEs with keyword, CWE, CVSS, vendor/product and
    tag filters.

    `build_query` returns the filtered query plus a metadata dict exposing
    the matched vendor/product/tag objects for the templates.
    NOTE(review): `build_query` is presumably @classmethod-decorated in the
    full source; the decorator is not visible in this chunk.
    """
    model = Cve
    # Most recently modified first; id breaks ties for stable pagination.
    order = [Cve.updated_at.desc(), Cve.id.desc()]
    per_page_param = "CVES_PER_PAGE"
    # Accepted query-string filters.
    schema = {
        "search": {"type": str},
        "vendor": {"type": str},
        "product": {"type": str},
        "cvss": {"type": str},
        "cwe": {"type": str},
        "tag": {"type": str},
        "user_id": {"type": str},
    }
    def build_query(cls, args):
        vendor = None
        product = None
        tag = None
        query = Cve.query
        # Normalize names: vendors drop spaces, products map spaces to "_".
        vendor_query = args.get("vendor")
        product_query = args.get("product")
        if vendor_query:
            vendor_query = vendor_query.replace(" ", "").lower()
        if product_query:
            product_query = product_query.replace(" ", "_").lower()
        # Filter by keyword
        if args.get("search"):
            possible_vendor = args.get("search").replace(" ", "").lower()
            possible_product = args.get("search").replace(" ", "_").lower()
            vendor = Vendor.query.filter_by(name=possible_vendor).first()
            if vendor:
                product = Product.query.filter_by(
                    name=possible_product, vendor_id=vendor.id
                ).first()
            else:
                product = Product.query.filter_by(name=possible_product).first()
            # NOTE(review): when no vendor/product matches the keyword, None
            # is passed as an or_() clause — confirm SQLAlchemy renders it
            # as a harmless NULL predicate.
            query = query.filter(
                or_(
                    Cve.cve_id.contains(args.get("search")),
                    Cve.summary.ilike(f"%{args.get('search')}%"),
                    Cve.vendors.contains([vendor.name]) if vendor else None,
                    Cve.vendors.contains([product.name]) if product else None,
                )
            )
        # Filter by CWE
        if args.get("cwe"):
            query = query.filter(Cve.cwes.contains([args.get("cwe")]))
        # Filter by CVSS score (CVSSv3 severity buckets)
        if args.get("cvss") and args.get("cvss").lower() in [
            "none",
            "low",
            "medium",
            "high",
            "critical",
        ]:
            if args.get("cvss").lower() == "none":
                query = query.filter(Cve.cvss3 == None)
            if args.get("cvss").lower() == "low":
                query = query.filter(and_(Cve.cvss3 >= 0.1, Cve.cvss3 <= 3.9))
            if args.get("cvss").lower() == "medium":
                query = query.filter(and_(Cve.cvss3 >= 4.0, Cve.cvss3 <= 6.9))
            if args.get("cvss").lower() == "high":
                query = query.filter(and_(Cve.cvss3 >= 7.0, Cve.cvss3 <= 8.9))
            if args.get("cvss").lower() == "critical":
                query = query.filter(and_(Cve.cvss3 >= 9.0, Cve.cvss3 <= 10.0))
        # Filter by vendor and product
        if vendor_query and product_query:
            vendor = Vendor.query.filter_by(name=vendor_query).first()
            if not vendor:
                abort(404, "Not found.")
            product = Product.query.filter_by(
                name=product_query, vendor_id=vendor.id
            ).first()
            if not product:
                abort(404, "Not found.")
            query = query.filter(
                Cve.vendors.contains(
                    [f"{vendor.name}{PRODUCT_SEPARATOR}{product.name}"]
                )
            )
        # Filter by vendor
        elif vendor_query:
            vendor = Vendor.query.filter_by(name=vendor_query).first()
            if not vendor:
                abort(404, "Not found.")
            query = query.filter(Cve.vendors.contains([vendor.name]))
        # Filter by product only
        elif product_query:
            product = Product.query.filter_by(name=product_query).first()
            if not product:
                abort(404, "Not found.")
            query = query.filter(Cve.vendors.contains([product.name]))
        # Filter by tag (restricted to the requesting user's tags)
        if args.get("tag"):
            tag = UserTagController.get(
                {"user_id": args.get("user_id"), "name": args.get("tag")}
            )
            if not tag:
                abort(404, "Not found.")
            query = (
                query.join(CveTag)
                .filter(CveTag.user_id == args.get("user_id"))
                .filter(CveTag.tags.contains([args.get("tag")]))
            )
        return query, {"vendor": vendor, "product": product, "tag": tag}
def is_valid_uuid(val):
    """Return True when str(val) parses as a UUID, False otherwise."""
    try:
        uuid.UUID(str(val))
        return True
    except ValueError:
        return False
class Change(BaseModel):
    """Snapshot of a CVE's JSON, recorded with the task that captured it."""
    __tablename__ = "changes"
    # The CVE JSON as it stood when this change was recorded.
    json = db.Column(JSONType)
    # Relationships
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"))
    cve = db.relationship("Cve", back_populates="changes")
    task_id = db.Column(UUIDType(binary=False), db.ForeignKey("tasks.id"))
    task = db.relationship("Task", back_populates="changes")
    events = db.relationship("Event", back_populates="change")
class CustomHtmlHTML(HtmlDiff):
    """HtmlDiff with a Bootstrap-styled table and custom diff cells.

    Used to render the side-by-side JSON diff between two CVE snapshots.
    """

    def __init__(self, *args, **kwargs):
        # Replace difflib's default table skeleton with a Bootstrap one.
        self._table_template = """
        <table class="table table-diff table-condensed">
            <thead>
                <tr>
                    <th colspan="2">Old JSON</th>
                    <th colspan="2">New JSON</th>
                </tr>
            </thead>
            <tbody>%(data_rows)s</tbody>
        </table>"""
        super().__init__(*args, **kwargs)

    def _format_line(self, side, flag, linenum, text):
        # BUG FIX: the previous code replaced each metacharacter with
        # itself (e.g. "&" -> "&"), leaving the diffed text unescaped in
        # the generated HTML. Escape "&" first so the entities produced
        # for "<"/">" are not double-escaped, mirroring difflib's own
        # HtmlDiff._format_line.
        text = text.replace("&", "&amp;").replace(">", "&gt;").replace("<", "&lt;")
        # Preserve spacing with non-breaking spaces, as difflib does.
        text = text.replace(" ", "&nbsp;").rstrip()
        return '<td class="diff_header">%s</td><td class="break">%s</td>' % (
            linenum,
            text,
        )
def cve_change(cve_id, change_id):
    """Flask view rendering the JSON diff introduced by one CVE change.

    The diff compares this change's JSON snapshot with the closest
    previous change of the same CVE (an empty dict when there is none).
    """
    cve = CveController.get({"cve_id": cve_id})
    # Reject malformed ids before hitting the database.
    if not is_valid_uuid(change_id):
        abort(404)
    change = Change.query.filter_by(cve_id=cve.id, id=change_id).first()
    if not change:
        abort(404)
    previous = (
        Change.query.filter(Change.created_at < change.created_at)
        .filter(Change.cve == change.cve)
        .order_by(Change.created_at.desc())
        .first()
    )
    previous_json = {}
    if previous:
        previous_json = previous.json
    differ = CustomHtmlHTML()
    # sort_keys keeps both sides aligned so the diff shows real changes only.
    diff = differ.make_table(
        fromlines=json.dumps(previous_json, sort_keys=True, indent=2).split("\n"),
        tolines=json.dumps(change.json, sort_keys=True, indent=2).split("\n"),
        context=True,
    )
    return render_template("change.html", change=change, diff=diff)
160,509 | from flask import request, render_template
from opencve.controllers.main import main
from opencve.controllers.products import ProductController
from opencve.controllers.vendors import VendorController
class ProductController(BaseController):
    """Controller listing products, optionally scoped to one vendor.

    NOTE(review): `build_query`, `get_pagination` and `get` are invoked
    through the class — presumably @classmethod-decorated in the full
    source; the decorators are not visible in this chunk.
    """
    model = Product
    order = [Product.name.asc()]
    per_page_param = "PRODUCTS_PER_PAGE"
    # Dedicated page parameter so product paging is independent from the
    # vendor list shown on the same page.
    page_parameter = "product_page"
    schema = {
        "vendor": {"type": str},
        "search": {"type": str},
    }
    def build_query(cls, args):
        if "vendor" in args:
            vendor = VendorController.get({"name": args.get("vendor")})
            query = cls.model.query.filter_by(vendor=vendor)
        else:
            query = cls.model.query
        # Search by term: strip SQL wildcards, then spaces map to "_"
        # to match the normalized product naming.
        if args.get("search"):
            search = (
                args.get("search")
                .lower()
                .replace("%", "")
                .replace("_", "")
                .replace(" ", "_")
            )
            query = query.filter(Product.name.like("%{}%".format(search)))
        return query, {}
    def get_pagination(cls, args, objects):
        # Custom pagination keyed on "product_page" instead of "page".
        return Pagination(
            product_page=args.get(cls.page_parameter),
            total=objects.total,
            per_page=app.config[cls.per_page_param],
            page_parameter=cls.page_parameter,
            record_name="objects",
            css_framework="bootstrap3",
        )
    def get(cls, filters):
        # Resolve the vendor first: product names are only unique per vendor.
        vendor = VendorController.get({"name": filters.get("vendor")})
        return super(ProductController, cls).get(
            {"vendor_id": vendor.id, "name": filters.get("product")}
        )
class VendorController(BaseController):
    """Controller listing vendors, optionally filtered by a search term.

    NOTE(review): `build_query` is presumably @classmethod-decorated in the
    full source; the decorator is not visible in this chunk.
    """
    model = Vendor
    order = [Vendor.name.asc()]
    per_page_param = "VENDORS_PER_PAGE"
    schema = {
        "search": {"type": str},
    }
    def build_query(cls, args):
        query = cls.model.query
        # Search by term: strip SQL wildcards, then spaces map to "_"
        # to match the normalized vendor naming.
        if args.get("search"):
            search = (
                args.get("search")
                .lower()
                .replace("%", "")
                .replace("_", "")
                .replace(" ", "_")
            )
            query = query.filter(cls.model.name.like("%{}%".format(search)))
        return query, {}
def vendors():
    """Flask view listing vendors and products side by side.

    Both lists share the same request arguments but paginate independently
    (the product controller uses its own `product_page` parameter).
    """
    vendors, _, pagination_v = VendorController.list(request.args)
    products, _, pagination_p = ProductController.list(request.args)
    return render_template(
        "vendors.html",
        vendors=vendors,
        products=products,
        pagination_v=pagination_v,
        pagination_p=pagination_p,
    )
160,510 | from collections import OrderedDict
from flask import request, render_template
from flask_login import login_required
from flask_user import current_user
from opencve.context import _humanize_filter
from opencve.controllers.main import main
from opencve.controllers.alerts import AlertController
from opencve.controllers.reports import ReportController
class ReportController(BaseController):
    """Controller for a user's notification reports.

    NOTE(review): `build_query` and `get` are invoked through the class —
    presumably @classmethod-decorated in the full source; the decorators
    are not visible in this chunk.
    """
    model = Report
    # Newest reports first.
    order = [Report.created_at.desc()]
    per_page_param = "REPORTS_PER_PAGE"
    schema = {
        "user_id": {"type": str},
    }
    def build_query(cls, args):
        query = Report.query.filter_by(user_id=args.get("user_id"))
        return query, {}
    def get(cls, filters):
        report = super(ReportController, cls).get(filters)
        # Fetching a report marks it as seen (side effect + commit).
        report.seen = True
        db.session.commit()
        return report
def reports():
    """Flask view listing the current user's notification reports."""
    reports, _, pagination = ReportController.list(
        {**request.args, "user_id": current_user.id}
    )
    return render_template("reports.html", reports=reports, pagination=pagination)
160,511 | from collections import OrderedDict
from flask import request, render_template
from flask_login import login_required
from flask_user import current_user
from opencve.context import _humanize_filter
from opencve.controllers.main import main
from opencve.controllers.alerts import AlertController
from opencve.controllers.reports import ReportController
def _humanize_filter(s):
return " ".join(map(lambda x: x.capitalize(), s.split("_")))
class AlertController(BaseController):
    """Controller listing the alerts attached to one report.

    NOTE(review): `build_query` is presumably @classmethod-decorated in the
    full source; the decorator is not visible in this chunk.
    """
    model = Alert
    # Newest alerts first.
    order = [Alert.created_at.desc()]
    per_page_param = "ALERTS_PER_PAGE"
    schema = {
        "user_id": {"type": str},
        "report_id": {"type": str},
    }
    def build_query(cls, args):
        # Eager-load each alert's CVE to avoid N+1 queries when rendering.
        query = cls.model.query.options(joinedload("cve")).filter_by(
            report_id=args.get("report_id")
        )
        return query, {}
class ReportController(BaseController):
    """Controller for a user's notification reports.

    NOTE(review): `build_query` and `get` are invoked through the class —
    presumably @classmethod-decorated in the full source; the decorators
    are not visible in this chunk.
    """
    model = Report
    # Newest reports first.
    order = [Report.created_at.desc()]
    per_page_param = "REPORTS_PER_PAGE"
    schema = {
        "user_id": {"type": str},
    }
    def build_query(cls, args):
        query = Report.query.filter_by(user_id=args.get("user_id"))
        return query, {}
    def get(cls, filters):
        report = super(ReportController, cls).get(filters)
        # Fetching a report marks it as seen (side effect + commit).
        report.seen = True
        db.session.commit()
        return report
def report(link):
    """Flask view rendering a notification report found by its public link.

    Alerts are grouped per subscribed vendor/product; each group carries
    the maximum CVSSv3 score of its alerts so the worst groups come first.
    """
    report = ReportController.get({"public_link": link})
    alerts = AlertController.list_items({"report_id": report.id})
    # List of vendors/products per alert
    alerts_sorted = {}
    for alert in alerts:
        for vendor in alert.details["vendors"]:
            if vendor not in alerts_sorted:
                alerts_sorted[vendor] = {
                    "name": _humanize_filter(vendor),
                    "alerts": [],
                    "max": 0,
                }
            alerts_sorted[vendor]["alerts"].append(alert)
        for product in alert.details["products"]:
            if product not in alerts_sorted:
                alerts_sorted[product] = {
                    "name": _humanize_filter(product),
                    "alerts": [],
                    "max": 0,
                }
            alerts_sorted[product]["alerts"].append(alert)
    # For each vendor/product, keep the max score (alerts whose CVE has no
    # CVSSv3 score are ignored; the group then keeps its default of 0).
    for k, als in alerts_sorted.items():
        cvss = [al.cve.cvss3 for al in als["alerts"] if al.cve.cvss3]
        if cvss:
            alerts_sorted[k]["max"] = max(cvss)
    # Worst groups first.
    alerts_sorted = OrderedDict(
        sorted(alerts_sorted.items(), key=lambda i: i[1]["max"], reverse=True)
    )
    # Some stats
    total_alerts = len(alerts)
    # BUG FIX: max() on an empty sequence raises ValueError — a report with
    # no alerts previously crashed this view. default=0 keeps it rendering.
    maximum_score = max((v["max"] for v in alerts_sorted.values()), default=0)
    return render_template(
        "report.html",
        alerts_sorted=alerts_sorted,
        total_alerts=total_alerts,
        total_vendors_products=len(alerts_sorted.keys()),
        maximum_score=maximum_score,
        report=report,
    )
160,512 | from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils import JSONType
def upgrade():
    """Add a non-null `users.settings` JSON column with a default payload."""
    op.add_column("users", sa.Column("settings", JSONType(), nullable=True))
    # Backfill existing rows before tightening the column to NOT NULL.
    op.execute('UPDATE users SET settings = \'{"activities_view": "subscriptions"}\';')
    op.alter_column("users", "settings", existing_type=JSONType(), nullable=False)
160,513 | from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils import JSONType
def downgrade():
    """Drop the `users.settings` column added by the upgrade."""
    op.drop_column("users", "settings")
160,514 | from alembic import op
import sqlalchemy as sa
The provided code snippet includes necessary dependencies for implementing the `upgrade` function. Write a Python function `def upgrade()` to solve the following problem:
Make Cwe.cwe_id field not nullable and add an index on it.
Here is the function:
def upgrade():
    """Make Cwe.cwe_id field not nullable and add an index on it."""
    # Tighten the column first, then index it for faster lookups.
    op.alter_column("cwes", "cwe_id", existing_type=sa.VARCHAR(), nullable=False)
    op.create_index(op.f("ix_cwes_cwe_id"), "cwes", ["cwe_id"], unique=False)
160,515 | from alembic import op
import sqlalchemy as sa
The provided code snippet includes necessary dependencies for implementing the `downgrade` function. Write a Python function `def downgrade()` to solve the following problem:
Make Cwe.cwe_id field nullable and remove the index on it.
Here is the function:
def downgrade():
    """Make Cwe.cwe_id field nullable and remove the index on it."""
    # Reverse order of the upgrade: drop the index, then relax the column.
    op.drop_index(op.f("ix_cwes_cwe_id"), table_name="cwes")
    op.alter_column("cwes", "cwe_id", existing_type=sa.VARCHAR(), nullable=True)
160,516 | from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils import UUIDType
def upgrade():
    """Create the `cves_tags` and `users_tags` tables for the tagging feature."""
    # Per-user tag assignments on CVEs; `tags` is a JSONB list of tag names.
    op.create_table(
        "cves_tags",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("tags", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column("user_id", UUIDType(binary=False), nullable=True),
        sa.Column("cve_id", UUIDType(binary=False), nullable=True),
        sa.ForeignKeyConstraint(
            ["cve_id"],
            ["cves.id"],
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["users.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # GIN index so JSONB containment queries on tag names are indexed.
    op.create_index(
        "ix_cves_tags", "cves_tags", ["tags"], unique=False, postgresql_using="gin"
    )
    op.create_index(
        op.f("ix_cves_tags_created_at"), "cves_tags", ["created_at"], unique=False
    )
    # Tag definitions (name/description/color), unique per user.
    op.create_table(
        "users_tags",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("description", sa.String(), nullable=True),
        sa.Column("color", sa.String(), nullable=False),
        sa.Column(
            "user_id",
            UUIDType(binary=False),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["users.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name", "user_id", name="ix_userstags_name_userid"),
    )
    op.create_index(
        op.f("ix_users_tags_created_at"), "users_tags", ["created_at"], unique=False
    )
160,517 | from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils import UUIDType
def downgrade():
    """Drop the tagging tables and their indexes/constraints."""
    op.drop_index(op.f("ix_users_tags_created_at"), table_name="users_tags")
    op.drop_constraint("ix_userstags_name_userid", "users_tags")
    op.drop_table("users_tags")
    op.drop_index(op.f("ix_cves_tags_created_at"), table_name="cves_tags")
    op.drop_index("ix_cves_tags", table_name="cves_tags", postgresql_using="gin")
    op.drop_table("cves_tags")
160,518 | from alembic import op
def upgrade():
    """Append the 'first_time' value to the event_types PostgreSQL enum."""
    # ALTER TYPE ... ADD VALUE cannot run inside a transaction block,
    # hence the autocommit context.
    with op.get_context().autocommit_block():
        op.execute("ALTER TYPE event_types ADD VALUE 'first_time';")
160,519 | from alembic import op
def downgrade():
    """Remove 'first_time' by rebuilding the event_types enum.

    PostgreSQL cannot drop a single value from an enum, so a new type is
    created without it, the column is converted, and the types are swapped.
    """
    with op.get_context().autocommit_block():
        op.execute(
            "CREATE TYPE event_types_new AS ENUM ('new_cve', 'references', 'cpes', 'cvss', 'summary', 'cwes');"
        )
        op.execute(
            "ALTER TABLE events ALTER COLUMN type TYPE event_types_new USING (type::text::event_types_new);"
        )
        op.execute("DROP TYPE event_types;")
        op.execute("ALTER TYPE event_types_new RENAME TO event_types;")
160,520 | from alembic import op
import sqlalchemy as sa
The provided code snippet includes necessary dependencies for implementing the `upgrade` function. Write a Python function `def upgrade()` to solve the following problem:
This migration introduces a requirement: the ability to enable the pg_trgm extension (https://www.postgresql.org/docs/current/pgtrgm.html). From PostgreSQL 13 this module is considered as trusted, meaning it can be installed by non-superusers with the CREATE privilege.
Here is the function:
def upgrade():
    """
    This migration introduces a requirement: the ability to enable the pg_trgm
    extension (https://www.postgresql.org/docs/current/pgtrgm.html).
    From PostgreSQL 13 this module is considered as trusted, meaning it can be
    installed by non-superusers with the CREATE privilege.
    """

    def _trgm_index(index_name, column):
        # GIN index with trigram ops to speed up substring searches.
        op.create_index(
            index_name,
            "cves",
            [column],
            unique=False,
            postgresql_using="gin",
            postgresql_ops={column: "gin_trgm_ops"},
        )

    op.execute(sa.text("CREATE EXTENSION IF NOT EXISTS pg_trgm;"))
    _trgm_index("ix_cves_summary", "summary")

    # Replace the plain b-tree index on cve_id with a trigram-backed one.
    op.drop_index("ix_cves_cve_id", table_name="cves")
    _trgm_index("ix_cves_cve_id", "cve_id")
160,521 | from alembic import op
import sqlalchemy as sa
def downgrade():
    """Drop the trigram indexes, restore the b-tree one, remove pg_trgm."""
    for index_name in ("ix_cves_summary", "ix_cves_cve_id"):
        op.drop_index(index_name, table_name="cves")
    op.create_index("ix_cves_cve_id", "cves", ["cve_id"], unique=False)
    op.execute(sa.text("DROP EXTENSION IF EXISTS pg_trgm;"))
160,522 | import arrow
from alembic import op
import sqlalchemy as sa
from opencve.models.cve import Cve
from opencve.models.metas import Meta
class Cve(BaseModel):
    """ORM model for a CVE record, backed by the NVD JSON payload.

    NOTE(review): in upstream OpenCVE the accessors below (raw_tags, tags,
    cvss_weight, cvss*_score, cvss*_severity, references) are decorated with
    @property; the decorators appear to have been lost in this extraction —
    confirm against the original source.
    """

    __tablename__ = "cves"

    # CVE are sorted by last modified date, we need to index it.
    updated_at = db.Column(
        db.DateTime(timezone=True),
        default=db.func.now(),
        onupdate=db.func.now(),
        nullable=False,
        index=True,
    )
    cve_id = db.Column(db.String(), nullable=False)
    # Raw NVD document; both the legacy ("cve"/"impact") and the newer
    # ("metrics") schemas are handled by the accessors below.
    json = db.Column(JSONB)

    # We used initially secondary relationships to fetch the list of
    # associated vendors, products and cwes. But it was complicated
    # to maintain, and the performance were poor. So we now use the
    # JSONB data type associated to the GIN index type.
    vendors = db.Column(JSONB)
    cwes = db.Column(JSONB)

    # Keep the summary separated when searching keywords
    summary = db.Column(db.String(), nullable=False)

    # Keep CVSS separated when searching a particular score
    cvss2 = db.Column(db.Float())
    cvss3 = db.Column(db.Float())

    # Relationships
    events = db.relationship("Event", back_populates="cve")
    changes = db.relationship("Change", back_populates="cve")
    alerts = db.relationship("Alert", back_populates="cve")

    # Index
    __table_args__ = (
        db.Index("ix_cves_vendors", vendors, postgresql_using="gin"),
        db.Index("ix_cves_cwes", cwes, postgresql_using="gin"),
        db.Index(
            "ix_cves_summary",
            summary,
            postgresql_using="gin",
            postgresql_ops={
                "summary": "gin_trgm_ops",
            },
        ),
        db.Index(
            "ix_cves_cve_id",
            cve_id,
            postgresql_using="gin",
            postgresql_ops={
                "cve_id": "gin_trgm_ops",
            },
        ),
    )

    def __repr__(self):
        return "<Cve {}>".format(self.cve_id)

    def raw_tags(self):
        """Return the current user's tag names for this CVE ([] if anonymous)."""
        if not current_user.is_authenticated:
            return []
        cve_tag = CveTag.query.filter_by(
            user_id=current_user.id, cve_id=self.id
        ).first()
        if not cve_tag:
            return []
        return cve_tag.tags

    def tags(self):
        """Resolve raw tag names into UserTag objects for the current user."""
        if not current_user.is_authenticated:
            return []
        return [
            UserTag.query.filter_by(user_id=current_user.id, name=t).first()
            for t in self.raw_tags
        ]

    def cvss_weight(self):
        """Only used to sort several CVE by their CVSS"""
        w = 0
        if self.cvss2:
            w += self.cvss2
        if self.cvss3:
            w += self.cvss3
        return w

    def cvss2_score(self):
        """Return the CVSS v2 base score from the JSON payload, or None."""
        # Legacy NVD schema nests scores under "impact"; the newer one
        # uses the "metrics" key.
        if "cve" in self.json.keys():
            if "baseMetricV2" in self.json["impact"]:
                return self.json["impact"]["baseMetricV2"]["cvssV2"]["baseScore"]
        else:
            if "cvssMetricV2" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV2"][0]["cvssData"]["baseScore"]
        return None

    def cvss3_score(self):
        """Return the CVSS v3.x base score from the JSON payload, or None."""
        if "cve" in self.json.keys():
            if "baseMetricV3" in self.json["impact"]:
                return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseScore"]
        else:
            # Prefer v3.1 over v3.0 when both are present.
            if "cvssMetricV31" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV31"][0]["cvssData"]["baseScore"]
            elif "cvssMetricV30" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV30"][0]["cvssData"]["baseScore"]
        return None

    def cvss2_severity(self):
        """Return the CVSS v2 severity label, or None."""
        if "cve" in self.json.keys():
            if "baseMetricV2" in self.json["impact"]:
                return self.json["impact"]["baseMetricV2"]["severity"]
        else:
            if "cvssMetricV2" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV2"][0]["baseSeverity"]
        return None

    def cvss3_severity(self):
        """Return the CVSS v3.x severity label, or None."""
        if "cve" in self.json.keys():
            if "baseMetricV3" in self.json["impact"]:
                return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseSeverity"]
        else:
            if "cvssMetricV31" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV31"][0]["cvssData"][
                    "baseSeverity"
                ]
            elif "cvssMetricV30" in self.json["metrics"]:
                return self.json["metrics"]["cvssMetricV30"][0]["cvssData"][
                    "baseSeverity"
                ]
        return None

    def references(self):
        """Return the list of reference entries from either NVD schema."""
        if "cve" in self.json.keys():
            return self.json["cve"]["references"]["reference_data"]
        return self.json["references"]
class Meta(BaseModel):
    """Simple key/value store used to persist crawler state.

    NOTE(review): values are stored as strings — callers must parse
    dates or ids themselves.
    """

    __tablename__ = "metas"

    name = db.Column(db.String(), nullable=False)
    value = db.Column(db.String(), nullable=False)

    def __repr__(self):
        return "<Meta {}>".format(self.name)
The provided code snippet includes necessary dependencies for implementing the `upgrade` function. Write a Python function `def upgrade()` to solve the following problem:
This migration prepares the Meta table to handle the last CVE ID and its associated updatedAt date.
Here is the function:
def upgrade():
    """This migration prepares the Meta table to handle the
    last CVE ID and its associated updatedAt date."""
    session = sa.orm.sessionmaker()(bind=op.get_bind())

    # Seed the metas from the most recently modified CVE, if any.
    latest = session.query(Cve).order_by(Cve.updated_at.desc()).first()
    if latest is None:
        return

    session.add_all(
        [
            Meta(name="nvd_last_cve_id", value=latest.cve_id),
            Meta(name="nvd_last_cve_updated_at", value=latest.updated_at),
        ]
    )
    session.commit()
160,523 | import arrow
from alembic import op
import sqlalchemy as sa
from opencve.models.cve import Cve
from opencve.models.metas import Meta
class Meta(BaseModel):
    """Simple key/value store used to persist crawler state."""

    __tablename__ = "metas"

    name = db.Column(db.String(), nullable=False)
    value = db.Column(db.String(), nullable=False)

    def __repr__(self):
        return "<Meta {}>".format(self.name)
def downgrade():
    """Remove the metas rows created by the upgrade."""
    session = sa.orm.sessionmaker()(bind=op.get_bind())
    for meta_name in ("nvd_last_cve_id", "nvd_last_cve_updated_at"):
        session.query(Meta).filter_by(name=meta_name).delete()
160,524 | import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from opencve.models.cve import Cve
from opencve.models.events import Event
class Event(BaseModel):
    """A single atomic change detected on a CVE (new reference, CVSS
    update, new CPE, ...), grouped under a Change."""

    __tablename__ = "events"

    type = db.Column(ChoiceType(EVENT_TYPES))
    # Type-specific payload, e.g. {"old": ..., "new": ...} diffs.
    details = db.Column(JSONType)
    review = db.Column(db.Boolean, default=False)

    # Relationships
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"))
    cve = db.relationship("Cve", back_populates="events")
    change_id = db.Column(UUIDType(binary=False), db.ForeignKey("changes.id"))
    change = db.relationship("Change", back_populates="events")
    alerts = db.relationship("Alert", secondary=alerts_events, passive_deletes=True)

    def __repr__(self):
        return "<Event {}>".format(self.type)
The provided code snippet includes necessary dependencies for implementing the `upgrade` function. Write a Python function `def upgrade()` to solve the following problem:
This migration is only useful for users who are upgrading from a previous version of OpenCVE. It fixes a bug when CVSS fields were not well filled.
Here is the function:
def upgrade():
    """
    This migration is only useful for users who are upgrading
    from a previous version of OpenCVE.
    It fixes a bug when CVSS fields were not well filled.
    """
    connection = op.get_bind()
    sessionmaker = sa.orm.sessionmaker()
    session = sessionmaker(bind=connection)

    # Fetch the CVEs linked to CVSS events. Only the
    # events with an empty 'new' field are kept
    cves = [
        e.cve
        for e in session.query(Event).filter(Event.type == "cvss").all()
        if not e.details["new"]
    ]
    if not cves:
        return

    # Clean their cvss2 and cvss3 fields
    for cve in cves:
        cve.cvss2 = None
        cve.cvss3 = None

        # Small tip to keep the same updated_at attribute
        # NOTE(review): bumping updated_at by +1s, committing, then
        # restoring it forces a flush of the NULLed CVSS columns while
        # ending at the original timestamp — confirm the column's
        # onupdate default does not overwrite it.
        cve.updated_at = cve.updated_at + datetime.timedelta(seconds=1)
        session.commit()
        cve.updated_at = cve.updated_at + datetime.timedelta(seconds=-1)
        session.commit()
160,525 | import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from opencve.models.cve import Cve
from opencve.models.events import Event
def downgrade():
    """Data-only migration — nothing to revert."""
160,526 | from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils import UUIDType, JSONType
from sqlalchemy.dialects import postgresql
def upgrade():
    """Initial schema: create every table, index and association table.

    Creation order respects foreign-key dependencies: standalone tables
    (cves, cwes, metas, tasks, users, vendors) first, then dependent
    tables, then pure association tables.
    """
    # --- cves: CVE records with their raw NVD JSON payload ---
    op.create_table(
        "cves",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("cve_id", sa.String(), nullable=False),
        sa.Column("json", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column("vendors", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column("cwes", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column("summary", sa.String(), nullable=False),
        sa.Column("cvss2", sa.Float(), nullable=True),
        sa.Column("cvss3", sa.Float(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_cves_created_at"), "cves", ["created_at"], unique=False)
    op.create_index(op.f("ix_cves_cve_id"), "cves", ["cve_id"], unique=False)
    op.create_index(op.f("ix_cves_updated_at"), "cves", ["updated_at"], unique=False)
    # GIN indexes to query inside the JSONB vendors/cwes columns.
    op.create_index(
        "ix_cves_vendors",
        "cves",
        ["vendors"],
        unique=False,
        postgresql_using="gin",
    )
    op.create_index(
        "ix_cves_cwes", "cves", ["cwes"], unique=False, postgresql_using="gin"
    )
    # --- cwes: CWE dictionary ---
    op.create_table(
        "cwes",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("cwe_id", sa.String(), nullable=True),
        sa.Column("name", sa.String(), nullable=True),
        sa.Column("description", sa.String(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_cwes_created_at"), "cwes", ["created_at"], unique=False)
    # --- metas: key/value crawler state ---
    op.create_table(
        "metas",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("value", sa.String(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_metas_created_at"), "metas", ["created_at"], unique=False)
    # --- tasks: crawler runs ---
    op.create_table(
        "tasks",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_tasks_created_at"), "tasks", ["created_at"], unique=False)
    # --- users: accounts and notification preferences ---
    op.create_table(
        "users",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("username", sa.String(length=50), nullable=False),
        sa.Column("password", sa.String(length=255), server_default="", nullable=False),
        sa.Column(
            "reset_password_token",
            sa.String(length=100),
            server_default="",
            nullable=False,
        ),
        sa.Column("email", sa.String(length=255), nullable=False),
        sa.Column("email_confirmed_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column(
            "enable_notifications",
            sa.Boolean(),
            server_default=sa.text("true"),
            nullable=False,
        ),
        sa.Column("filters_notifications", JSONType(), nullable=True),
        sa.Column(
            "frequency_notifications",
            sa.Enum("once", "always", name="notification_frequencies"),
            nullable=True,
        ),
        sa.Column(
            "is_active", sa.Boolean(), server_default=sa.text("false"), nullable=False
        ),
        sa.Column(
            "first_name", sa.String(length=100), server_default="", nullable=False
        ),
        sa.Column(
            "last_name", sa.String(length=100), server_default="", nullable=False
        ),
        sa.Column(
            "admin", sa.Boolean(), server_default=sa.text("false"), nullable=True
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("email"),
        sa.UniqueConstraint("username"),
    )
    op.create_index(op.f("ix_users_created_at"), "users", ["created_at"], unique=False)
    # --- vendors ---
    op.create_table(
        "vendors",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name"),
    )
    op.create_index(
        op.f("ix_vendors_created_at"), "vendors", ["created_at"], unique=False
    )
    # --- changes: one crawler-detected modification of a CVE (FKs: cves, tasks) ---
    op.create_table(
        "changes",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("json", JSONType(), nullable=True),
        sa.Column("cve_id", UUIDType(binary=False), nullable=True),
        sa.Column("task_id", UUIDType(binary=False), nullable=True),
        sa.ForeignKeyConstraint(
            ["cve_id"],
            ["cves.id"],
        ),
        sa.ForeignKeyConstraint(
            ["task_id"],
            ["tasks.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_changes_created_at"), "changes", ["created_at"], unique=False
    )
    # --- products (FK: vendors) ---
    op.create_table(
        "products",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("vendor_id", UUIDType(binary=False), nullable=True),
        sa.ForeignKeyConstraint(
            ["vendor_id"],
            ["vendors.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_products_created_at"), "products", ["created_at"], unique=False
    )
    op.create_index(op.f("ix_products_name"), "products", ["name"], unique=False)
    # --- reports: periodic user notifications (FK: users) ---
    op.create_table(
        "reports",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("public_link", sa.String(), nullable=True),
        sa.Column("seen", sa.Boolean(), nullable=True),
        sa.Column("details", JSONType(), nullable=True),
        sa.Column("user_id", UUIDType(binary=False), nullable=True),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["users.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_reports_created_at"), "reports", ["created_at"], unique=False
    )
    # --- users_vendors: subscription association table ---
    op.create_table(
        "users_vendors",
        sa.Column("user_id", UUIDType(binary=False), nullable=False),
        sa.Column("vendor_id", UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["users.id"],
        ),
        sa.ForeignKeyConstraint(
            ["vendor_id"],
            ["vendors.id"],
        ),
        sa.PrimaryKeyConstraint("user_id", "vendor_id"),
    )
    # --- alerts (FKs: cves, reports, users) ---
    op.create_table(
        "alerts",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("details", JSONType(), nullable=True),
        sa.Column("notify", sa.Boolean(), nullable=True),
        sa.Column("user_id", UUIDType(binary=False), nullable=True),
        sa.Column("cve_id", UUIDType(binary=False), nullable=True),
        sa.Column("report_id", UUIDType(binary=False), nullable=True),
        sa.ForeignKeyConstraint(
            ["cve_id"],
            ["cves.id"],
        ),
        sa.ForeignKeyConstraint(
            ["report_id"],
            ["reports.id"],
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["users.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_alerts_created_at"), "alerts", ["created_at"], unique=False
    )
    # --- events: typed atomic CVE changes (FKs: changes, cves) ---
    op.create_table(
        "events",
        sa.Column("id", UUIDType(binary=False), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column(
            "type",
            sa.Enum(
                "new_cve",
                "references",
                "cpes",
                "cvss",
                "summary",
                "cwes",
                name="event_types",
            ),
            nullable=True,
        ),
        sa.Column("details", JSONType(), nullable=True),
        sa.Column("review", sa.Boolean(), nullable=True),
        sa.Column("cve_id", UUIDType(binary=False), nullable=True),
        sa.Column("change_id", UUIDType(binary=False), nullable=True),
        sa.ForeignKeyConstraint(
            ["change_id"],
            ["changes.id"],
        ),
        sa.ForeignKeyConstraint(
            ["cve_id"],
            ["cves.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_events_created_at"), "events", ["created_at"], unique=False
    )
    # --- users_products: subscription association table ---
    op.create_table(
        "users_products",
        sa.Column("user_id", UUIDType(binary=False), nullable=False),
        sa.Column("product_id", UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(
            ["product_id"],
            ["products.id"],
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["users.id"],
        ),
        sa.PrimaryKeyConstraint("user_id", "product_id"),
    )
    # --- alerts_events association table ---
    op.create_table(
        "alerts_events",
        sa.Column("alert_id", UUIDType(binary=False), nullable=False),
        sa.Column("event_id", UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(
            ["alert_id"],
            ["alerts.id"],
        ),
        sa.ForeignKeyConstraint(
            ["event_id"],
            ["events.id"],
        ),
        sa.PrimaryKeyConstraint("alert_id", "event_id"),
    )
160,527 | from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils import UUIDType, JSONType
from sqlalchemy.dialects import postgresql
def downgrade():
    """Drop every table of the initial schema, in reverse dependency
    order, and remove the enum types left behind by the tables."""
    op.drop_table("alerts_events")
    op.drop_table("users_products")
    op.drop_index(op.f("ix_events_created_at"), table_name="events")
    op.drop_table("events")
    # Dropping the table does not drop the enum type it used.
    op.execute("DROP TYPE event_types")
    op.drop_index(op.f("ix_alerts_created_at"), table_name="alerts")
    op.drop_table("alerts")
    op.drop_table("users_vendors")
    op.drop_index(op.f("ix_reports_created_at"), table_name="reports")
    op.drop_table("reports")
    op.drop_index(op.f("ix_products_name"), table_name="products")
    op.drop_index(op.f("ix_products_created_at"), table_name="products")
    op.drop_table("products")
    op.drop_index(op.f("ix_changes_created_at"), table_name="changes")
    op.drop_table("changes")
    op.drop_index(op.f("ix_vendors_created_at"), table_name="vendors")
    op.drop_table("vendors")
    op.drop_index(op.f("ix_users_created_at"), table_name="users")
    op.drop_table("users")
    op.execute("DROP TYPE notification_frequencies")
    op.drop_index(op.f("ix_tasks_created_at"), table_name="tasks")
    op.drop_table("tasks")
    op.drop_index(op.f("ix_metas_created_at"), table_name="metas")
    op.drop_table("metas")
    op.drop_index(op.f("ix_cwes_created_at"), table_name="cwes")
    op.drop_table("cwes")
    op.drop_index(op.f("ix_cves_updated_at"), table_name="cves")
    op.drop_index(op.f("ix_cves_cve_id"), table_name="cves")
    op.drop_index(op.f("ix_cves_created_at"), table_name="cves")
    op.drop_index(op.f("ix_cves_vendors"), table_name="cves")
    op.drop_index(op.f("ix_cves_cwes"), table_name="cves")
    op.drop_table("cves")
160,528 | from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate the alerts foreign keys with ON DELETE CASCADE and add
    lookup indexes on the alert/report relationship columns.

    Each FK is dropped and recreated under the same name so the schema
    stays addressable by the original constraint names.
    """
    op.drop_constraint("alerts_report_id_fkey", "alerts", type_="foreignkey")
    op.create_foreign_key(
        "alerts_report_id_fkey",
        "alerts",
        "reports",
        ["report_id"],
        ["id"],
        ondelete="CASCADE",
    )
    op.drop_constraint(
        "alerts_events_alert_id_fkey", "alerts_events", type_="foreignkey"
    )
    op.create_foreign_key(
        "alerts_events_alert_id_fkey",
        "alerts_events",
        "alerts",
        ["alert_id"],
        ["id"],
        ondelete="CASCADE",
    )
    op.create_index(op.f("ix_alerts_cve_id"), "alerts", ["cve_id"], unique=False)
    op.create_index(op.f("ix_alerts_user_id"), "alerts", ["user_id"], unique=False)
    op.create_index(op.f("ix_alerts_report_id"), "alerts", ["report_id"], unique=False)
    op.create_index(
        op.f("ix_alerts_events_alert_id"), "alerts_events", ["alert_id"], unique=False
    )
160,529 | from alembic import op
import sqlalchemy as sa
def downgrade():
    """Restore the original (non-cascading) foreign keys and drop the
    indexes added by the upgrade."""
    op.drop_constraint(
        "alerts_events_alert_id_fkey", "alerts_events", type_="foreignkey"
    )
    op.create_foreign_key(
        "alerts_events_alert_id_fkey", "alerts_events", "alerts", ["alert_id"], ["id"]
    )
    op.drop_constraint("alerts_report_id_fkey", "alerts", type_="foreignkey")
    op.create_foreign_key(
        "alerts_report_id_fkey", "alerts", "reports", ["report_id"], ["id"]
    )
    op.drop_index(op.f("ix_alerts_cve_id"), table_name="alerts")
    op.drop_index(op.f("ix_alerts_user_id"), table_name="alerts")
    op.drop_index(op.f("ix_alerts_report_id"), table_name="alerts")
    op.drop_index(op.f("ix_alerts_events_alert_id"), table_name="alerts_events")
160,530 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# Alembic env.py module-level setup: bind Alembic to the Flask app's DB URL.
config = context.config

from flask import current_app

# Point Alembic at the database configured in the Flask application.
config.set_main_option(
    "sqlalchemy.url", current_app.config.get("SQLALCHEMY_DATABASE_URI")
)

# NOTE(review): in the real env.py this dispatch comes after the function
# definitions; as extracted here it references names defined later.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
The provided code snippet includes necessary dependencies for implementing the `run_migrations_offline` function. Write a Python function `def run_migrations_offline()` to solve the following problem:
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
Here is the function:
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    context.configure(url=config.get_main_option("sqlalchemy.url"))
    with context.begin_transaction():
        context.run_migrations()
160,531 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# Alembic env.py module-level setup (duplicate row context of the same file).
config = context.config
logger = logging.getLogger("alembic.env")

from flask import current_app

config.set_main_option(
    "sqlalchemy.url", current_app.config.get("SQLALCHEMY_DATABASE_URI")
)
# Metadata used for autogenerate comparisons.
target_metadata = current_app.extensions["migrate"].db.metadata

# NOTE(review): dispatch references the run_migrations_* functions which, in
# this extraction, are defined after this point.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
The provided code snippet includes necessary dependencies for implementing the `run_migrations_online` function. Write a Python function `def run_migrations_online()` to solve the following problem:
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
Here is the function:
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, "autogenerate", False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                # Clearing the directives list aborts revision generation.
                directives[:] = []
                logger.info("No changes in schema detected.")

    # NullPool: one short-lived connection, no pooling needed for migrations.
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        process_revision_directives=process_revision_directives,
        **current_app.extensions["migrate"].configure_args
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even if a migration fails.
        connection.close()
160,532 | from flask import current_app as app
from flask import abort, flash, redirect, render_template, request, url_for
from flask_user import current_user
from sqlalchemy import and_
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.orm import joinedload, aliased
from opencve.constants import PRODUCT_SEPARATOR
from opencve.controllers.main import main, welcome
from opencve.controllers.reports import ReportController
from opencve.extensions import db
from opencve.forms import ActivitiesViewForm
from opencve.models.changes import Change
from opencve.models.cve import Cve
from opencve.models.events import Event
def index():
    """Render the public welcome page; 404 when DISPLAY_WELCOME is off."""
    if app.config.get("DISPLAY_WELCOME", False):
        return render_template("index.html")
    abort(404)
160,533 | from flask import current_app as app
from flask import abort, flash, redirect, render_template, request, url_for
from flask_user import current_user
from sqlalchemy import and_
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.orm import joinedload, aliased
from opencve.constants import PRODUCT_SEPARATOR
from opencve.controllers.main import main, welcome
from opencve.controllers.reports import ReportController
from opencve.extensions import db
from opencve.forms import ActivitiesViewForm
from opencve.models.changes import Change
from opencve.models.cve import Cve
from opencve.models.events import Event
def terms():
    """Render the terms page; 404 when DISPLAY_TERMS is off."""
    if app.config.get("DISPLAY_TERMS", False):
        return render_template("terms.html")
    abort(404)
160,534 | from flask import current_app as app
from flask import abort, flash, redirect, render_template, request, url_for
from flask_user import current_user
from sqlalchemy import and_
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.orm import joinedload, aliased
from opencve.constants import PRODUCT_SEPARATOR
from opencve.controllers.main import main, welcome
from opencve.controllers.reports import ReportController
from opencve.extensions import db
from opencve.forms import ActivitiesViewForm
from opencve.models.changes import Change
from opencve.models.cve import Cve
from opencve.models.events import Event
PRODUCT_SEPARATOR = "$PRODUCT$"
class ReportController(BaseController):
def build_query(cls, args):
def get(cls, filters):
db = SQLAlchemy(session_options={"autoflush": False})
class ActivitiesViewForm(FlaskForm):
class Change(BaseModel):
class Cve(BaseModel):
def __repr__(self):
def raw_tags(self):
def tags(self):
def cvss_weight(self):
def cvss2_score(self):
def cvss3_score(self):
def cvss2_severity(self):
def cvss3_severity(self):
def references(self):
def home():
    """Render the authenticated dashboard: recent activity + last reports.

    Anonymous visitors are redirected to the welcome page or to the CVE
    list. POSTs from the activities-view form update the user's settings.
    """
    # Allow customization of the homepage
    if not current_user.is_authenticated:
        if app.config.get("DISPLAY_WELCOME", False):
            return redirect(url_for("welcome.index"))
        return redirect(url_for("main.cves"))

    # Form used to customize the activities view
    activities_view_form = ActivitiesViewForm(
        obj=current_user,
        view=current_user.settings["activities_view"],
    )

    if request.method == "POST":
        form_name = request.form["form-name"]

        if form_name == "activities_view_form" and activities_view_form.validate():
            # Merge rather than replace so other settings keys survive.
            new_settings = {
                **current_user.settings,
                "activities_view": activities_view_form.view.data,
            }
            current_user.settings = new_settings
            db.session.commit()
            flash("Your settings has been updated.", "success")
            return redirect(url_for("main.home"))

    # Handle the page parameter
    page = request.args.get("page", type=int, default=1)
    page = 1 if page < 1 else page
    per_page = app.config["ACTIVITIES_PER_PAGE"]

    # Only display the 5 last reports
    reports = ReportController.list_items({"user_id": current_user.id})[:5]

    # Build the query to fetch the last changes
    query = (
        Change.query.options(joinedload("cve"))
        .options(joinedload("events"))
        .filter(Change.cve_id == Cve.id)
        .filter(Change.events.any())
    )

    # Filter by subscriptions
    if current_user.settings["activities_view"] == "subscriptions":
        vendors = [v.name for v in current_user.vendors]
        vendors.extend(
            [
                f"{p.vendor.name}{PRODUCT_SEPARATOR}{p.name}"
                for p in current_user.products
            ]
        )
        # has_any() needs a non-empty array; [None] matches nothing.
        if not vendors:
            vendors = [None]
        query = query.filter(Cve.vendors.has_any(array(vendors)))

    # List the paginated changes
    changes = (
        query.order_by(Change.created_at.desc())
        .limit(per_page)
        .offset((page - 1) * per_page)
        .all()
    )

    return render_template(
        "home.html",
        changes=changes,
        reports=reports,
        page=page,
        activities_view_form=activities_view_form,
    )
160,535 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _cvss_percent(score):
def cvss_percent():
    """Expose the CVSS-score-to-percentage helper to Jinja templates."""
    return dict(cvss_score_percent=_cvss_percent)
160,536 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _cvss_bg(score):
def cvss_bg():
    """Expose the CVSS-score-to-background-class helper to Jinja templates."""
    return dict(cvss_bg=_cvss_bg)
160,537 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _metric_bg(version, type, value):
metrics_v2 = {
"AV": {
"local": "label-default",
"adjacent network": "label-warning",
"network": "label-danger",
},
"AC": {
"high": "label-default",
"medium": "label-warning",
"low": "label-danger",
},
"AU": {
"multiple": "label-default",
"single": "label-warning",
"none": "label-danger",
},
"C": {
"none": "label-default",
"partial": "label-warning",
"complete": "label-danger",
},
"I": {
"none": "label-default",
"partial": "label-warning",
"complete": "label-danger",
},
"A": {
"none": "label-default",
"partial": "label-warning",
"complete": "label-danger",
},
}
metrics_v3 = {
"AV": {
"network": "label-danger",
"adjacent": "label-warning",
"local": "label-warning",
"physical": "label-default",
},
"AC": {"low": "label-danger", "high": "label-warning"},
"PR": {"none": "label-danger", "low": "label-warning", "high": "label-default"},
"UI": {"none": "label-danger", "required": "label-warning"},
"S": {"unchanged": "label-default", "changed": "label-danger"},
"C": {"high": "label-danger", "low": "label-warning", "none": "label-default"},
"I": {"high": "label-danger", "low": "label-warning", "none": "label-default"},
"A": {"high": "label-danger", "low": "label-warning", "none": "label-default"},
}
versions = {"v2": metrics_v2, "v3": metrics_v3}
try:
value = versions[version][type][value.lower()]
except KeyError:
return ("label-default", "No description")
return value
def metric_bg():
    """Context processor exposing the ``metric_bg`` helper to templates."""
    context = {"metric_bg": _metric_bg}
    return context
160,538 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def url_for_asset(filename):
    """Return the static URL of the given asset *filename*."""
    asset_url = url_for("static", filename=filename)
    return asset_url
def get_url_for_asset():
    """Context processor exposing ``url_for_asset`` to templates."""
    return dict(url_for_asset=url_for_asset)
160,539 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _humanize_filter(s):
return " ".join(map(lambda x: x.capitalize(), s.split("_")))
def humanize_filter(s):
    """Jinja filter wrapper around :func:`_humanize_filter`."""
    humanized = _humanize_filter(s)
    return humanized
160,540 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _excerpt(objects, _type):
    """
    This function takes a flat list of vendors and products and returns
    the HTML code used in the CVEs list page.

    :param objects: flat list of names; a product is encoded as
        ``<vendor><PRODUCT_SEPARATOR><product>``, vendors and tags are
        plain names.
    :param _type: "products", "vendors" or "tags" -- selects which items
        of `objects` are kept and how each link is rendered.
    :return: a raw HTML string (count badge, linked names, and an
        "and N more" suffix when the list was truncated).
    """
    output = ""
    if not objects:
        return output
    # Keep the objects of the requested type: the separator marks products
    if _type == "products":
        objects = [o for o in objects if PRODUCT_SEPARATOR in o]
    else:
        objects = [o for o in objects if not PRODUCT_SEPARATOR in o]
    objects = sorted(objects)
    output += '<span class="badge badge-primary">{}</span> '.format(len(objects))
    # Keep the remains size and reduce the list to COUNT_EXCERPT items
    remains = len(objects[app.config["COUNT_EXCERPT"] :])
    if len(objects) > app.config["COUNT_EXCERPT"]:
        objects = objects[: app.config["COUNT_EXCERPT"]]
    # Construct the HTML
    for idx, obj in enumerate(objects):
        if _type == "products":
            vendor, product = obj.split(PRODUCT_SEPARATOR)
            url = url_for("main.cves", vendor=vendor, product=product)
            output += f"<a href='{url}'>{_humanize_filter(product)}</a>"
        elif _type == "vendors":
            url = url_for("main.cves", vendor=obj)
            output += f"<a href='{url}'>{_humanize_filter(obj)}</a>"
        else:
            # Tags carry the user's custom color; requires an authenticated user.
            url = url_for("main.cves", tag=obj)
            tag = UserTag.query.filter_by(user_id=current_user.id, name=obj).first()
            output += f"<a href='{url}'><span class='label label-tag' style='background-color: {tag.color};'>{obj}</span></a>"
        output += ", " if idx + 1 != len(objects) and _type != "tags" else " "
    if remains:
        output += "<i>and {} more</i>".format(remains)
    return output
def vendors_excerpt(s):
    """Render the vendors excerpt HTML for the CVEs list page."""
    html = _excerpt(s, "vendors")
    return html
160,541 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _excerpt(objects, _type):
    """
    This function takes a flat list of vendors and products and returns
    the HTML code used in the CVEs list page.

    :param objects: flat list of names; a product is encoded as
        ``<vendor><PRODUCT_SEPARATOR><product>``, vendors and tags are
        plain names.
    :param _type: "products", "vendors" or "tags" -- selects which items
        of `objects` are kept and how each link is rendered.
    :return: a raw HTML string (count badge, linked names, and an
        "and N more" suffix when the list was truncated).
    """
    output = ""
    if not objects:
        return output
    # Keep the objects of the requested type: the separator marks products
    if _type == "products":
        objects = [o for o in objects if PRODUCT_SEPARATOR in o]
    else:
        objects = [o for o in objects if not PRODUCT_SEPARATOR in o]
    objects = sorted(objects)
    output += '<span class="badge badge-primary">{}</span> '.format(len(objects))
    # Keep the remains size and reduce the list to COUNT_EXCERPT items
    remains = len(objects[app.config["COUNT_EXCERPT"] :])
    if len(objects) > app.config["COUNT_EXCERPT"]:
        objects = objects[: app.config["COUNT_EXCERPT"]]
    # Construct the HTML
    for idx, obj in enumerate(objects):
        if _type == "products":
            vendor, product = obj.split(PRODUCT_SEPARATOR)
            url = url_for("main.cves", vendor=vendor, product=product)
            output += f"<a href='{url}'>{_humanize_filter(product)}</a>"
        elif _type == "vendors":
            url = url_for("main.cves", vendor=obj)
            output += f"<a href='{url}'>{_humanize_filter(obj)}</a>"
        else:
            # Tags carry the user's custom color; requires an authenticated user.
            url = url_for("main.cves", tag=obj)
            tag = UserTag.query.filter_by(user_id=current_user.id, name=obj).first()
            output += f"<a href='{url}'><span class='label label-tag' style='background-color: {tag.color};'>{obj}</span></a>"
        output += ", " if idx + 1 != len(objects) and _type != "tags" else " "
    if remains:
        output += "<i>and {} more</i>".format(remains)
    return output
def products_excerpt(s):
    """Render the products excerpt HTML for the CVEs list page."""
    html = _excerpt(s, "products")
    return html
160,542 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _excerpt(objects, _type):
    """
    This function takes a flat list of vendors and products and returns
    the HTML code used in the CVEs list page.

    :param objects: flat list of names; a product is encoded as
        ``<vendor><PRODUCT_SEPARATOR><product>``, vendors and tags are
        plain names.
    :param _type: "products", "vendors" or "tags" -- selects which items
        of `objects` are kept and how each link is rendered.
    :return: a raw HTML string (count badge, linked names, and an
        "and N more" suffix when the list was truncated).
    """
    output = ""
    if not objects:
        return output
    # Keep the objects of the requested type: the separator marks products
    if _type == "products":
        objects = [o for o in objects if PRODUCT_SEPARATOR in o]
    else:
        objects = [o for o in objects if not PRODUCT_SEPARATOR in o]
    objects = sorted(objects)
    output += '<span class="badge badge-primary">{}</span> '.format(len(objects))
    # Keep the remains size and reduce the list to COUNT_EXCERPT items
    remains = len(objects[app.config["COUNT_EXCERPT"] :])
    if len(objects) > app.config["COUNT_EXCERPT"]:
        objects = objects[: app.config["COUNT_EXCERPT"]]
    # Construct the HTML
    for idx, obj in enumerate(objects):
        if _type == "products":
            vendor, product = obj.split(PRODUCT_SEPARATOR)
            url = url_for("main.cves", vendor=vendor, product=product)
            output += f"<a href='{url}'>{_humanize_filter(product)}</a>"
        elif _type == "vendors":
            url = url_for("main.cves", vendor=obj)
            output += f"<a href='{url}'>{_humanize_filter(obj)}</a>"
        else:
            # Tags carry the user's custom color; requires an authenticated user.
            url = url_for("main.cves", tag=obj)
            tag = UserTag.query.filter_by(user_id=current_user.id, name=obj).first()
            output += f"<a href='{url}'><span class='label label-tag' style='background-color: {tag.color};'>{obj}</span></a>"
        output += ", " if idx + 1 != len(objects) and _type != "tags" else " "
    if remains:
        output += "<i>and {} more</i>".format(remains)
    return output
def tags_excerpt(s):
    """Render the tags excerpt HTML for the CVEs list page."""
    html = _excerpt(s, "tags")
    return html
160,543 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _report_excerpt(items):
def report_excerpt(s):
    """Jinja helper delegating to :func:`_report_excerpt`."""
    excerpt = _report_excerpt(s)
    return excerpt
160,544 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _event_excerpt(details):
if isinstance(details, list):
return f"<strong>{len(details)}</strong> added"
else:
output = []
if "changed" in details:
output.append(f"<strong>{len(details['changed'])}</strong> changed")
if "added" in details:
output.append(f"<strong>{len(details['added'])}</strong> added")
if "removed" in details:
output.append(f"<strong>{len(details['removed'])}</strong> removed")
return ", ".join(output)
def event_excerpt(s):
    """Jinja helper delegating to :func:`_event_excerpt`."""
    excerpt = _event_excerpt(s)
    return excerpt
160,545 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _event_description(code):
    """Return the human readable description of an event type *code*."""
    descriptions = dict(EVENT_TYPES)
    return descriptions[code]
def event_description(s):
    """Jinja helper delegating to :func:`_event_description`."""
    description = _event_description(s)
    return description
160,546 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
def _remove_product_separator(s):
    """Replace the internal vendor/product separator with a space."""
    cleaned = s.replace(PRODUCT_SEPARATOR, " ")
    return cleaned
def remove_product_separator(s):
    """Jinja helper delegating to :func:`_remove_product_separator`."""
    cleaned = _remove_product_separator(s)
    return cleaned
160,547 | import os
from flask import Blueprint
from jinja2.filters import do_mark_safe
from opencve.configuration import OPENCVE_WELCOME_FILES
from opencve.constants import EMAIL_CONFIRMATION_MESSAGE
from opencve.context import (
_cvss_bg,
_cvss_percent,
_excerpt,
_event_description,
_event_excerpt,
_humanize_filter,
_metric_bg,
_remove_product_separator,
_report_excerpt,
)
from flask import url_for
# HTML banner shown while the user's email address is unconfirmed.
EMAIL_CONFIRMATION_MESSAGE = (
    "Your email address has not yet been confirmed. Check your email Inbox "
    'and Spam folders for the confirmation email or <a href="/account/resend-'
    'email-confirmation">Re-send confirmation email</a>.'
)


def custom_safe(s):
    """Mark only the known confirmation banner as safe HTML; any other
    string is returned untouched."""
    if s != EMAIL_CONFIRMATION_MESSAGE:
        return s
    return do_mark_safe(s)
160,548 | import json
from flask import request, jsonify
from flask_user import current_user, login_required
from opencve.controllers.main import main
from opencve.extensions import db
from opencve.models.products import Product
from opencve.models.vendors import Vendor
from opencve.models import is_valid_uuid
from werkzeug.exceptions import HTTPException
db = SQLAlchemy(session_options={"autoflush": False})
class Product(BaseModel):
def human_name(self):
def __repr__(self):
class Vendor(BaseModel):
def human_name(self):
def __repr__(self):
def is_valid_uuid(val):
def subscribe_to_tag():
    """AJAX endpoint toggling a vendor or product subscription for the
    current user.

    Expects the form fields ``obj`` ("vendor" or "product"), ``id`` (a
    UUID) and ``action`` ("subscribe" or "unsubscribe"); returns a JSON
    string describing the result.
    NOTE(review): despite its name this view handles vendors/products,
    not tags; the route decorator is not visible in this extract.
    """
    def _bad_request(type, id):
        # 400 answer when the requested object does not exist.
        return (
            jsonify({"status": "error", "message": f"{type} {id} does not exist"}),
            400,
        )
    if not current_user.is_authenticated:
        return json.dumps({"status": "error", "message": "not allowed"})
    # Check the required fields
    if not request.form["obj"] or not request.form["id"]:
        return json.dumps({"status": "error", "message": "bad request"})
    if not request.form["action"] or request.form["action"] not in [
        "subscribe",
        "unsubscribe",
    ]:
        return json.dumps({"status": "error", "message": "bad request"})
    # Vendor
    if request.form["obj"] == "vendor":
        if not is_valid_uuid(request.form["id"]):
            return _bad_request(request.form["obj"], request.form["id"])
        vendor = Vendor.query.get(request.form["id"])
        if not vendor:
            return _bad_request(request.form["obj"], request.form["id"])
        # Subscribe
        if request.form["action"] == "subscribe":
            if vendor not in current_user.vendors:
                current_user.vendors.append(vendor)
                db.session.commit()
            return json.dumps({"status": "ok", "message": "vendor added"})
        # Unsubscribe
        if request.form["action"] == "unsubscribe":
            if vendor in current_user.vendors:
                current_user.vendors.remove(vendor)
                db.session.commit()
            return json.dumps({"status": "ok", "message": "vendor removed"})
    # Product
    elif request.form["obj"] == "product":
        if not is_valid_uuid(request.form["id"]):
            return _bad_request(request.form["obj"], request.form["id"])
        product = Product.query.get(request.form["id"])
        if not product:
            return _bad_request(request.form["obj"], request.form["id"])
        # Subscribe
        if request.form["action"] == "subscribe":
            if product not in current_user.products:
                current_user.products.append(product)
                db.session.commit()
            return json.dumps({"status": "ok", "message": "product added"})
        # Unsubscribe
        if request.form["action"] == "unsubscribe":
            if product in current_user.products:
                current_user.products.remove(product)
                db.session.commit()
            return json.dumps({"status": "ok", "message": "product removed"})
    return json.dumps({"status": "error", "message": "bad request"})
160,549 | import time
from datetime import datetime
import arrow
import requests
from celery.utils.log import get_task_logger
from opencve.checks import BaseCheck
from opencve.commands.utils import CveUtil
from opencve.extensions import cel, db
from opencve.models.cve import Cve
from opencve.models.metas import Meta
from opencve.models.tasks import Task
NVD_API_URL = "https://services.nvd.nist.gov/rest/json/cves/2.0"
logger = get_task_logger(__name__)
def get_last_cve():
    """Return the (cve_id, updated_at) pair of the last synchronized CVE.

    Both values come from the metas table; the date is parsed with arrow.
    """
    last_id = Meta.query.filter_by(name="nvd_last_cve_id").first().value
    last_update = Meta.query.filter_by(name="nvd_last_cve_updated_at").first().value
    return last_id, arrow.get(last_update)
def save_last_cve(cve_id, updated_at):
    """Persist the id and modification date of the last synchronized CVE."""
    meta_id = Meta.query.filter_by(name="nvd_last_cve_id").first()
    meta_id.value = cve_id
    meta_date = Meta.query.filter_by(name="nvd_last_cve_updated_at").first()
    meta_date.value = str(updated_at)
    db.session.commit()
def check_for_update(cve_json, task):
    """Create or update a CVE from its NVD JSON and record the change.

    :param cve_json: raw CVE dictionary from the NVD 2.0 API
    :param task: the current synchronization Task the change is linked to
    """
    cve_id = cve_json["id"]
    cve_obj = Cve.query.filter_by(cve_id=cve_id).first()
    events = []
    # A new CVE has been added
    if not cve_obj:
        cve_obj = CveUtil.create_cve(cve_json)
        logger.info("{} created (ID: {})".format(cve_id, cve_obj.id))
        events = [CveUtil.create_event(cve_obj, cve_json, "new_cve", {})]
    # Existing CVE has changed
    elif CveUtil.cve_has_changed(cve_obj, cve_json):
        logger.info("{} has changed, parsing it...".format(cve_obj.cve_id))
        events = []
        checks = BaseCheck.__subclasses__()
        # Loop on each kind of check and collect the resulting events
        for check in checks:
            c = check(cve_obj, cve_json)
            event = c.execute()
            if event:
                events.append(event)
        # Change the last updated date
        cve_obj.updated_at = arrow.get(cve_json["lastModified"]).datetime
        cve_obj.json = cve_json
        db.session.commit()
    # Create the change grouping the detected events
    if events:
        CveUtil.create_change(cve_obj, cve_json, task, events)
db = SQLAlchemy(session_options={"autoflush": False})
cel = FlaskCelery("opencve", include=["opencve.tasks"])
class Task(BaseModel):
    """A synchronization run; groups the changes detected during one pass."""
    __tablename__ = "tasks"
    # Relationships
    changes = db.relationship("Change", back_populates="task")
    def __repr__(self):
        return "<Task {}>".format(self.created_at)
def handle_events():
    """Synchronize the local CVE database with the NVD 2.0 API.

    Fetches all the CVEs modified since the last recorded one, creates or
    updates them (grouping the changes into a new Task), then persists the
    last CVE id/date for the next run.
    NOTE(review): probably registered as a Celery task -- the decorator is
    not visible in this extract.
    """
    cel.app.app_context().push()
    # Retrieve the last CVE to start the synchronization
    last_cve_id, last_updated_at = get_last_cve()
    logger.info(f"Parsing last events since {last_cve_id} (at {last_updated_at})")
    start = last_updated_at.strftime("%Y-%m-%dT%H:%M:%S")
    end = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
    url_template = (
        NVD_API_URL
        + f"?lastModStartDate={start}Z&lastModEndDate={end}Z"
        + "&startIndex={idx}"
    )
    # Create the task containing the changes
    task = Task()
    db.session.add(task)
    # Iterate over all new CVEs, page by page
    start_index = 0
    total_results = 0
    while start_index <= total_results:
        url = url_template.format(idx=start_index)
        logger.info(f"Fetching {url}")
        resp = requests.get(url)
        # Continue if status != 200: sleep then retry the same page
        if not resp.ok:
            logger.info(
                f"Bad response: {resp.status_code}, sleeping before retrying..."
            )
            time.sleep(10)
            continue
        data = resp.json()
        total_results = data.get("totalResults")
        for vulnerability in data.get("vulnerabilities"):
            cve = vulnerability.get("cve")
            check_for_update(cve, task)
            # Store the last CVE info (most recent modification wins)
            cve_last_modified = arrow.get(cve["lastModified"])
            if last_updated_at < cve_last_modified:
                last_cve_id = cve["id"]
                last_updated_at = cve_last_modified
        # NVD requirement is 2000 CVE per page and 6s between requests
        start_index += 2000
        time.sleep(6)
    # Save the last CVE information for the next handle_events tasks
    save_last_cve(last_cve_id, last_updated_at)
160,550 | from celery.utils.log import get_task_logger
from opencve.constants import PRODUCT_SEPARATOR
from opencve.extensions import cel, db
from opencve.models.alerts import Alert
from opencve.models.cve import Cve
from opencve.models.events import Event
from opencve.models.products import Product
from opencve.models.vendors import Vendor
logger = get_task_logger(__name__)
def filter_events(user, events):
    """Keep only the events matching the user's notification filters.

    Also drops a "first_time" event when none of the newly seen
    vendors/products belongs to the user's subscriptions.
    """
    wanted_types = user.filters_notifications["event_types"]
    # Index by type code; for duplicated codes the last event wins,
    # exactly like the original dict comprehension.
    filtered_events = {}
    for event in events:
        if event.type.code in wanted_types:
            filtered_events[event.type.code] = event
    # Check if new vendors/products match the user's subscriptions
    if "first_time" in filtered_events:
        # TODO: refactor with controllers.home::home (+tests)
        subscriptions = [v.name for v in user.vendors] + [
            f"{p.vendor.name}{PRODUCT_SEPARATOR}{p.name}" for p in user.products
        ]
        details = filtered_events["first_time"].details
        if not any(s in details for s in subscriptions):
            del filtered_events["first_time"]
    return list(filtered_events.values())
PRODUCT_SEPARATOR = "$PRODUCT$"
db = SQLAlchemy(session_options={"autoflush": False})
cel = FlaskCelery("opencve", include=["opencve.tasks"])
class Alert(BaseModel):
    """A notification item linking a user, a CVE and its triggering events."""
    __tablename__ = "alerts"
    details = db.Column(JSONType)  # {"vendors": [...], "products": [...], "filters": [...]}
    notify = db.Column(db.Boolean, default=False)  # set to True once included in a sent report
    # Relationships
    events = db.relationship("Event", secondary=alerts_events, cascade="all, delete")
    user_id = db.Column(UUIDType(binary=False), db.ForeignKey("users.id"), index=True)
    user = db.relationship("User", back_populates="alerts")
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"), index=True)
    cve = db.relationship("Cve", back_populates="alerts")
    report_id = db.Column(
        UUIDType(binary=False),
        db.ForeignKey("reports.id", ondelete="CASCADE"),
        nullable=True,
        index=True,
    )
    report = db.relationship("Report", back_populates="alerts")
    def __repr__(self):
        return "<Alert {}>".format(self.id)
class Cve(BaseModel):
__tablename__ = "cves"
# CVE are sorted by last modified date, we need to index it.
updated_at = db.Column(
db.DateTime(timezone=True),
default=db.func.now(),
onupdate=db.func.now(),
nullable=False,
index=True,
)
cve_id = db.Column(db.String(), nullable=False)
json = db.Column(JSONB)
# We used initially secondary relationships to fetch the list of
# associated vendors, products and cwes. But it was complicated
# to maintain, and the performance were poor. So we now use the
# JSONB data type associated to the GIN index type.
vendors = db.Column(JSONB)
cwes = db.Column(JSONB)
# Keep the summary separated when searching keywords
summary = db.Column(db.String(), nullable=False)
# Keep CVSS separated when searching a particupal score
cvss2 = db.Column(db.Float())
cvss3 = db.Column(db.Float())
# Relationships
events = db.relationship("Event", back_populates="cve")
changes = db.relationship("Change", back_populates="cve")
alerts = db.relationship("Alert", back_populates="cve")
# Index
__table_args__ = (
db.Index("ix_cves_vendors", vendors, postgresql_using="gin"),
db.Index("ix_cves_cwes", cwes, postgresql_using="gin"),
db.Index(
"ix_cves_summary",
summary,
postgresql_using="gin",
postgresql_ops={
"summary": "gin_trgm_ops",
},
),
db.Index(
"ix_cves_cve_id",
cve_id,
postgresql_using="gin",
postgresql_ops={
"cve_id": "gin_trgm_ops",
},
),
)
def __repr__(self):
return "<Cve {}>".format(self.cve_id)
def raw_tags(self):
if not current_user.is_authenticated:
return []
cve_tag = CveTag.query.filter_by(
user_id=current_user.id, cve_id=self.id
).first()
if not cve_tag:
return []
return cve_tag.tags
def tags(self):
if not current_user.is_authenticated:
return []
return [
UserTag.query.filter_by(user_id=current_user.id, name=t).first()
for t in self.raw_tags
]
def cvss_weight(self):
"""Only used to sort several CVE by their CVSS"""
w = 0
if self.cvss2:
w += self.cvss2
if self.cvss3:
w += self.cvss3
return w
def cvss2_score(self):
if "cve" in self.json.keys():
if "baseMetricV2" in self.json["impact"]:
return self.json["impact"]["baseMetricV2"]["cvssV2"]["baseScore"]
else:
if "cvssMetricV2" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV2"][0]["cvssData"]["baseScore"]
return None
def cvss3_score(self):
if "cve" in self.json.keys():
if "baseMetricV3" in self.json["impact"]:
return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseScore"]
else:
if "cvssMetricV31" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV31"][0]["cvssData"]["baseScore"]
elif "cvssMetricV30" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV30"][0]["cvssData"]["baseScore"]
return None
def cvss2_severity(self):
if "cve" in self.json.keys():
if "baseMetricV2" in self.json["impact"]:
return self.json["impact"]["baseMetricV2"]["severity"]
else:
if "cvssMetricV2" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV2"][0]["baseSeverity"]
return None
def cvss3_severity(self):
if "cve" in self.json.keys():
if "baseMetricV3" in self.json["impact"]:
return self.json["impact"]["baseMetricV3"]["cvssV3"]["baseSeverity"]
else:
if "cvssMetricV31" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV31"][0]["cvssData"][
"baseSeverity"
]
elif "cvssMetricV30" in self.json["metrics"]:
return self.json["metrics"]["cvssMetricV30"][0]["cvssData"][
"baseSeverity"
]
return None
def references(self):
if "cve" in self.json.keys():
return self.json["cve"]["references"]["reference_data"]
return self.json["references"]
class Event(BaseModel):
    """A single change detected on a CVE."""
    __tablename__ = "events"
    type = db.Column(ChoiceType(EVENT_TYPES))
    details = db.Column(JSONType)
    review = db.Column(db.Boolean, default=False)  # True once processed into alerts
    # Relationships
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"))
    cve = db.relationship("Cve", back_populates="events")
    change_id = db.Column(UUIDType(binary=False), db.ForeignKey("changes.id"))
    change = db.relationship("Change", back_populates="events")
    alerts = db.relationship("Alert", secondary=alerts_events, passive_deletes=True)
    def __repr__(self):
        return "<Event {}>".format(self.type)
class Product(BaseModel):
    """A product belonging to a vendor; users can subscribe to it."""
    __tablename__ = "products"
    name = db.Column(db.String(), nullable=False, index=True)
    # Relationships
    vendor_id = db.Column(UUIDType(binary=False), db.ForeignKey("vendors.id"))
    vendor = db.relationship("Vendor", back_populates="products")
    users = db.relationship("User", secondary=users_products)
    def human_name(self):
        # NOTE(review): likely decorated with @property upstream -- the
        # decorator is not visible in this extract.
        return _humanize_filter(self.name)
    def __repr__(self):
        return "<Product {}>".format(self.name)
class Vendor(BaseModel):
    """A vendor; users can subscribe to it to be alerted."""
    __tablename__ = "vendors"
    name = db.Column(db.String(), nullable=False, unique=True)
    # Relationships
    products = db.relationship("Product", back_populates="vendor")
    users = db.relationship("User", secondary=users_vendors)
    def human_name(self):
        # NOTE(review): likely decorated with @property upstream -- the
        # decorator is not visible in this extract.
        return _humanize_filter(self.name)
    def __repr__(self):
        return "<Vendor {}>".format(self.name)
def handle_alerts():
    """Create the alerts for the users subscribed to the vendors/products
    of the CVEs having unreviewed events.

    For each CVE with events to review: collect the subscribed users,
    apply their CVSS and event-type filters, create an Alert per user,
    then mark the events as reviewed.
    NOTE(review): probably registered as a Celery task -- the decorator
    is not visible in this extract.
    """
    cel.app.app_context().push()
    logger.info("Checking for new alerts...")
    # Retrieve the CVE list for which events no reviewed exist.
    cves = Cve.query.filter(Cve.events.any(Event.review == False)).all()
    if not cves:
        logger.info("No CVE to review")
        return
    # Check each CVE, get its events and create alerts
    logger.info("Checking {} CVE containing event(s) no reviewed...".format(len(cves)))
    for cve in cves:
        users = {}
        events = Event.query.filter_by(cve=cve, review=False).all()
        logger.info(
            "{} contains {} events to review...".format(cve.cve_id, len(events))
        )
        # Save the subscribers for each vendor of the CVE
        for v in cve.vendors:
            # Product contains the separator
            if PRODUCT_SEPARATOR in v:
                vendor = Vendor.query.filter_by(
                    name=v.split(PRODUCT_SEPARATOR)[0]
                ).first()
                product = Product.query.filter_by(
                    name=v.split(PRODUCT_SEPARATOR)[1], vendor_id=vendor.id
                ).first()
                for user in product.users:
                    if user not in users.keys():
                        users[user] = {"products": [], "vendors": []}
                    users[user]["products"].append(product.name)
            # Vendor
            else:
                vendor = Vendor.query.filter_by(name=v).first()
                for user in vendor.users:
                    if user not in users.keys():
                        users[user] = {"products": [], "vendors": []}
                    users[user]["vendors"].append(vendor.name)
        # No users concerned: review the events and move on
        if not users:
            logger.info("No users to alert.")
            for event in events:
                event.review = True
            db.session.commit()
            continue
        # Users need to be alerted
        logger.info("{} users found, creating the alerts...".format(len(users)))
        for user, details in users.items():
            # Filter by CVSS v3 score
            cvss_score = cve.cvss3
            if cvss_score and cvss_score < user.filters_notifications["cvss"]:
                logger.info(
                    "Skip alert for {0} because of CVSSv3 filter ({1} < {2})".format(
                        user.username, cvss_score, user.filters_notifications["cvss"]
                    )
                )
                continue
            # Keep the wanted filter by user
            events_copy = list(events)
            events_copy = filter_events(user, events_copy)
            if not events_copy:
                logger.info(
                    "No event matches the filters for {0}".format(user.username)
                )
            else:
                logger.info(
                    "Events match for {0} ({1})".format(
                        user.username, ",".join(e.type.code for e in events_copy)
                    )
                )
                # We add the filters in the details
                details["filters"] = [e.type.code for e in events_copy]
                # An alert is composed of a CVE, events for that CVE,
                # and details including vendors and products.
                alert = Alert(
                    user=user, cve=cve, details=details, events=events, notify=False
                )
                db.session.add(alert)
                db.session.commit()
                logger.info(
                    "Alert created for {} (ID: {})".format(user.username, alert.id)
                )
        # We can review the events
        for event in events:
            event.review = True
        db.session.commit()
160,551 | from collections import OrderedDict
from datetime import datetime, time
import arrow
from celery.utils.log import get_task_logger
from flask import render_template
from flask_user import EmailError
from sqlalchemy import delete
from opencve.context import _humanize_filter
from opencve.extensions import cel, db, user_manager
from opencve.models.alerts import Alert
from opencve.models.cve import Cve
from opencve.models.reports import Report
from opencve.models.users import User
logger = get_task_logger(__name__)
def get_users_with_alerts():
    """
    If we are between 11:00 AM and 11:15 AM, we get all the users. Otherwise
    we only select the 'always' frequency ones (todo: find a cleaner solution).
    """
    current_time = datetime.now().time()
    query = User.query.filter(User.alerts.any(Alert.notify == False))
    if time(11, 0) <= current_time <= time(11, 15):
        logger.info("We are between 11:00 AM and 11:15 AM, get all the users...")
        return query.all()
    logger.info("Get the users who want to always receive email...")
    return query.filter(User.frequency_notifications == "always").all()
def get_top_alerts(user, count=10):
    """
    Return the top X alerts for a given user.

    The alerts not yet notified are ranked by their CVE CVSSv3 score
    (descending) and at most *count* of them are returned.
    """
    top_alerts = (
        db.session.query(Alert.id)
        .filter_by(user=user, notify=False)
        .join(Alert.cve)
        .order_by(Cve.cvss3.desc())
        .limit(count)
        .all()
    )
    # Convert this list of ID (1-tuples) in a list of Alert objects
    top_alerts = [alert[0] for alert in top_alerts]
    top_alerts = db.session.query(Alert).filter(Alert.id.in_(top_alerts)).all()
    return top_alerts
def get_sorted_alerts(alerts):
    """
    Sort the alerts by vendors and products then extract their max score.
    """
    buckets = {}

    def _register(key, alert):
        # Lazily create the bucket for a vendor/product name.
        if key not in buckets:
            buckets[key] = {"name": _humanize_filter(key), "alerts": [], "max": 0}
        buckets[key]["alerts"].append(alert)

    for alert in alerts:
        for vendor in alert.details["vendors"]:
            _register(vendor, alert)
        for product in alert.details["products"]:
            _register(product, alert)

    # For each vendor/product, keep the highest known CVSSv3 score
    for data in buckets.values():
        scores = [a.cve.cvss3 for a in data["alerts"] if a.cve.cvss3]
        if scores:
            data["max"] = max(scores)

    return OrderedDict(
        sorted(buckets.items(), key=lambda item: item[1]["max"], reverse=True)
    )
def get_vendors_products(alerts):
    """
    Returns a sorted list of vendors given some alerts.

    The result is the deduplicated union of the vendors and products
    found in every alert's details.
    """
    names = set()
    for alert in alerts:
        names.update(alert.details["vendors"])
        names.update(alert.details["products"])
    return sorted(names)
def _humanize_filter(s):
return " ".join(map(lambda x: x.capitalize(), s.split("_")))
db = SQLAlchemy(session_options={"autoflush": False})
user_manager = CustomUserManager(None, None, None)
cel = FlaskCelery("opencve", include=["opencve.tasks"])
class Alert(BaseModel):
    """A notification item linking a user, a CVE and its triggering events."""
    __tablename__ = "alerts"
    details = db.Column(JSONType)  # {"vendors": [...], "products": [...], "filters": [...]}
    notify = db.Column(db.Boolean, default=False)  # set to True once included in a sent report
    # Relationships
    events = db.relationship("Event", secondary=alerts_events, cascade="all, delete")
    user_id = db.Column(UUIDType(binary=False), db.ForeignKey("users.id"), index=True)
    user = db.relationship("User", back_populates="alerts")
    cve_id = db.Column(UUIDType(binary=False), db.ForeignKey("cves.id"), index=True)
    cve = db.relationship("Cve", back_populates="alerts")
    report_id = db.Column(
        UUIDType(binary=False),
        db.ForeignKey("reports.id", ondelete="CASCADE"),
        nullable=True,
        index=True,
    )
    report = db.relationship("Report", back_populates="alerts")
    def __repr__(self):
        return "<Alert {}>".format(self.id)
class Report(BaseModel):
    """A batch of alerts generated for a user, shareable via a public link."""
    __tablename__ = "reports"
    public_link = db.Column(db.String(), default=generate_public_link)
    seen = db.Column(db.Boolean(), default=False)
    details = db.Column(JSONType)  # sorted list of the vendors/products involved
    user_id = db.Column(UUIDType(binary=False), db.ForeignKey("users.id"))
    user = db.relationship("User", back_populates="reports")
    alerts = db.relationship(
        "Alert",
        back_populates="report",
        cascade="all, delete",
        passive_deletes=True,
    )
    def __repr__(self):
        return "<Report {}>".format(self.id)
def handle_reports():
    """Create a Report per user having pending alerts and email a summary.

    The mail contains the top alerts grouped by vendor/product; alerts
    are flagged as notified afterwards.
    NOTE(review): probably registered as a Celery task -- the decorator
    is not visible in this extract.
    """
    cel.app.app_context().push()
    # The server name is mandatory to generate the mails
    if not cel.app.config.get("SERVER_NAME"):
        raise ValueError(
            "The `server_name` variable is not set in your `opencve.cf` file. "
            "Please configure it to allow OpenCVE to create reports and send the mails."
        )
    # Get users to notify
    users = get_users_with_alerts()
    if not users:
        logger.info("No alert to send.")
        return
    # Get alerts for all users, create a report containing it
    # and send a mail with the top alerts.
    logger.info("Checking {} users with alerts to send...".format(len(users)))
    for user in users:
        alerts = Alert.query.filter_by(user=user, notify=False).all()
        logger.info("{} alerts to notify for {}".format(len(alerts), user.username))
        top_alerts = get_top_alerts(user)
        sorted_alerts = get_sorted_alerts(top_alerts)
        all_vendors_products = get_vendors_products(alerts)
        # Create the report
        report = Report(user=user, alerts=alerts, details=all_vendors_products)
        db.session.add(report)
        db.session.commit()
        logger.info("Report {0} created.".format(report.id))
        if not user.enable_notifications:
            logger.info(
                "User {} do not want to receive email notifications, skip it.".format(
                    user.username
                )
            )
        else:
            alert_str = "alerts" if len(alerts) > 1 else "alert"
            subject = "{count} {alerts} on {vendors}".format(
                count=len(alerts),
                alerts=alert_str,
                vendors=", ".join(list(map(_humanize_filter, all_vendors_products))),
            )
            try:
                user_manager.email_manager.send_user_report(
                    user,
                    **{
                        "subject": subject,
                        "total_alerts": len(alerts),
                        "alerts_sorted": sorted_alerts,
                        "report_public_link": report.public_link,
                    },
                )
                logger.info("Mail sent for {}".format(user.email))
            except EmailError as e:
                logger.error(f"EmailError : {e}")
        # The alerts have been notified
        for alert in alerts:
            alert.notify = True
        db.session.commit()
160,552 | from collections import OrderedDict
from datetime import datetime, time
import arrow
from celery.utils.log import get_task_logger
from flask import render_template
from flask_user import EmailError
from sqlalchemy import delete
from opencve.context import _humanize_filter
from opencve.extensions import cel, db, user_manager
from opencve.models.alerts import Alert
from opencve.models.cve import Cve
from opencve.models.reports import Report
from opencve.models.users import User
logger = get_task_logger(__name__)
db = SQLAlchemy(session_options={"autoflush": False})
cel = FlaskCelery("opencve", include=["opencve.tasks"])
class Report(BaseModel):
    """SQLAlchemy model for a report grouping the alerts sent to one user.

    A report is created by the notification task together with a JSON
    summary (``details``) of the vendors/products impacted by its alerts.
    """

    __tablename__ = "reports"

    # Random token produced by ``generate_public_link``; used as the
    # report's shareable link (see the notification task's mail template).
    public_link = db.Column(db.String(), default=generate_public_link)
    # Whether the report has already been viewed; defaults to False.
    seen = db.Column(db.Boolean(), default=False)
    # JSON blob with the vendors/products details of the report's alerts.
    details = db.Column(JSONType)

    # Owning user; ``User.reports`` is the reverse side of the relationship.
    user_id = db.Column(UUIDType(binary=False), db.ForeignKey("users.id"))
    user = db.relationship("User", back_populates="reports")

    # Alerts bundled in this report; deleted along with the report
    # (cascade + passive_deletes lets the DB handle the FK cleanup).
    alerts = db.relationship(
        "Alert",
        back_populates="report",
        cascade="all, delete",
        passive_deletes=True,
    )

    def __repr__(self):
        return "<Report {}>".format(self.id)
def reports_cleanup():
    """Delete reports older than the configured retention period.

    Reads the ``REPORTS_CLEANUP_DAYS`` setting; when it is unset or falsy
    the task is a no-op.
    """
    cel.app.app_context().push()

    retention_days = cel.app.config.get("REPORTS_CLEANUP_DAYS")
    if not retention_days:
        return

    # Cutoff date: current UTC time shifted back by the retention period.
    cutoff = arrow.utcnow().shift(days=-abs(retention_days))
    logger.info(f"Removing old reports ({cutoff.humanize()})...")

    # SQLAlchemy core delete: avoids loading ORM objects one by one.
    db.engine.execute(delete(Report).where(Report.created_at < cutoff.datetime))
160,553 | from flask import current_app as app
from flask import request, url_for
from flask_user import current_user
from opencve.constants import EVENT_TYPES, PRODUCT_SEPARATOR
from opencve.models.tags import UserTag
def _is_active(route):
    """Return True when the current request endpoint is listed in *route*.

    *route* is a comma-separated string of Flask endpoint names.
    """
    endpoints = route.split(",")
    return request.endpoint in endpoints
160,554 | from flask import current_app as app
from flask_user import UserMixin
from sqlalchemy.sql import expression
from sqlalchemy_utils import ChoiceType, JSONType
from opencve.constants import FREQUENCIES_TYPES
from opencve.extensions import db
from opencve.models import BaseModel, users_products, users_vendors
def get_default_filters():
    """Return the default notification filters for a new user.

    ``cvss`` is the minimum CVSS score to notify on (0 disables the
    threshold) and ``event_types`` lists every event kind enabled by
    default.  A fresh dict is returned on each call.
    """
    default_event_types = [
        "new_cve",
        "first_time",
        "references",
        "cvss",
        "cpes",
        "summary",
        "cwes",
    ]
    return {"cvss": 0, "event_types": default_event_types}
160,555 | from flask import current_app as app
from flask_user import UserMixin
from sqlalchemy.sql import expression
from sqlalchemy_utils import ChoiceType, JSONType
from opencve.constants import FREQUENCIES_TYPES
from opencve.extensions import db
from opencve.models import BaseModel, users_products, users_vendors
def get_default_settings():
    """Return the default UI settings for a new user (fresh dict per call)."""
    return dict(activities_view="all")
160,556 | import random
import string
from sqlalchemy_utils import JSONType, UUIDType
from opencve.extensions import db
from opencve.models import BaseModel
def generate_public_link(size=12):
    """Return a random token of *size* characters for a report's public link.

    The alphabet is uppercase ASCII letters plus digits.  The link grants
    unauthenticated access to a report, so it must be unpredictable:
    ``secrets.choice`` (a CSPRNG) is used instead of ``random.choice``,
    whose Mersenne-Twister output is not suitable for tokens.

    :param size: length of the generated token (default 12).
    :return: the random token string.
    """
    # Local import keeps this change self-contained; the rest of the
    # module still uses the module-level ``random`` import.
    import secrets

    chars = string.ascii_uppercase + string.digits
    return "".join(secrets.choice(chars) for _ in range(size))
160,557 | import os
import sys
def skip(app, what, name, obj, would_skip, options):
    """autodoc-skip-member handler: always document members in ``specials``.

    Falls back to Sphinx's own decision (*would_skip*) for everything else.
    """
    return False if name in specials else would_skip
def setup(app):
    # Sphinx extension entry point: register the handler above so members
    # listed in ``specials`` are never skipped by autodoc.
    app.connect("autodoc-skip-member", skip)
160,558 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `apply_lambda` function. Write a Python function `def apply_lambda( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, aug_function: Callable[..., Tuple[np.ndarray, int]] = lambda x, y: (x, y), output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, **kwargs, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Apply a user-defined lambda to the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param aug_function: the augmentation function to be applied onto the audio (should expect the audio np.ndarray & sample rate int as input, and return the transformed audio & sample rate) @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @param **kwargs: the input attributes to be passed into `aug_function` @returns: the augmented audio array and sample rate
Here is the function:
def apply_lambda(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    aug_function: Callable[..., Tuple[np.ndarray, int]] = lambda x, y: (x, y),
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
    **kwargs,
) -> Tuple[np.ndarray, int]:
    """
    Runs a caller-supplied transformation over the audio

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param aug_function: callable taking the audio np.ndarray and the sample
        rate (plus any **kwargs) and returning the transformed audio and its
        sample rate

    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended

    @param **kwargs: extra keyword arguments forwarded to `aug_function`

    @returns: the augmented audio array and sample rate
    """
    # Same message as repr(...) + " object is not callable".
    assert callable(aug_function), (
        f"{type(aug_function).__name__!r} object is not callable"
    )

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    transformed, transformed_sr = aug_function(audio, sample_rate, **kwargs)

    audutils.get_metadata(
        metadata=metadata,
        function_name="apply_lambda",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=transformed,
        dst_sample_rate=transformed_sr,
        aug_function=aug_function.__name__,
        output_path=output_path,
    )

    return audutils.ret_and_save_audio(transformed, output_path, transformed_sr)
160,559 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `change_volume` function. Write a Python function `def change_volume( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, volume_db: float = 0.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Changes the volume of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param volume_db: the decibel amount by which to either increase (positive value) or decrease (negative value) the volume of the audio @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def change_volume(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    volume_db: float = 0.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Raises or lowers the loudness of the audio by `volume_db` decibels

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param volume_db: decibel gain to apply; positive values increase the
        volume, negative values decrease it

    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended

    @returns: the augmented audio array and sample rate
    """
    assert isinstance(volume_db, (int, float)), "Expected 'volume_db' to be a number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    # sox expects a (channels, samples) tensor, so give mono input one row.
    channels = 1 if audio.ndim == 1 else audio.shape[0]
    stacked = audio.reshape((channels, -1))

    # Apply the "vol" sox effect; the returned sample rate is unused and the
    # input `sample_rate` is kept downstream (matching the original code).
    aug_tensor, _ = sox_effects.apply_effects_tensor(
        torch.Tensor(stacked), sample_rate, [["vol", str(volume_db), "dB"]]
    )
    aug_audio = aug_tensor.numpy()

    if channels == 1:
        # Restore the original 1-D mono shape.
        aug_audio = aug_audio.reshape((aug_audio.shape[-1],))

    audutils.get_metadata(
        metadata=metadata,
        function_name="change_volume",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        volume_db=volume_db,
        output_path=output_path,
    )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,560 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
def add_background_noise(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    background_audio: Optional[Union[str, np.ndarray]] = None,
    snr_level_db: float = 10.0,
    seed: Optional[audutils.RNGSeed] = None,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Mixes in a background sound into the audio

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param background_audio: the path to the background audio or a
        np.ndarray containing the background audio. If set to `None`, the background
        audio will be white noise

    @param snr_level_db: signal-to-noise ratio in dB between the foreground
        audio and the (scaled) background

    @param seed: a random seed/generator so the generated white noise can
        remain reproducible

    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended

    @returns: the augmented audio array and sample rate
    """
    assert isinstance(
        snr_level_db, (int, float)
    ), "Expected 'snr_level_db' to be a number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        # Snapshot the call arguments for the metadata entry; 'metadata'
        # and 'seed' themselves are not recorded.
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")
        func_kwargs.pop("seed")

    random_generator = audutils.check_random_state(seed)

    if background_audio is None:
        # No background provided: use Gaussian white noise of the same shape.
        background_audio = random_generator.standard_normal(audio.shape)
    else:
        background_audio, background_sr = audutils.validate_and_load_audio(
            background_audio, sample_rate
        )
        if background_sr != sample_rate:
            # Resample the background so both signals share one sample rate.
            background_audio = resample(
                torch.tensor(background_audio), background_sr, sample_rate
            ).numpy()

    if metadata is not None:
        func_kwargs["background_duration"] = background_audio.shape[-1] / sample_rate

    # Scale the background so the mix hits the requested SNR:
    # desired_bg_rms = audio_rms / 10^(snr_level_db / 20).
    audio_rms = np.sqrt(np.mean(np.square(audio), axis=-1))
    bg_rms = np.sqrt(np.mean(np.square(background_audio), axis=-1))
    desired_bg_rms = audio_rms / (10 ** (snr_level_db / 20))

    # Reconcile mono vs multi-channel combinations so the division below
    # broadcasts; per-channel RMS is averaged when only one side is stereo.
    if isinstance(bg_rms, np.number) and isinstance(desired_bg_rms, np.ndarray):
        desired_bg_rms = desired_bg_rms.mean()
    elif isinstance(bg_rms, np.ndarray) and isinstance(desired_bg_rms, np.number):
        bg_rms = bg_rms.mean()
    elif isinstance(bg_rms, np.ndarray) and isinstance(desired_bg_rms, np.ndarray):
        bg_rms = bg_rms.reshape((bg_rms.shape[0], 1))
        desired_bg_rms = desired_bg_rms.reshape((desired_bg_rms.shape[0], 1))

        assert bg_rms.shape == desired_bg_rms.shape, (
            "Handling stereo audio and stereo background audio with different "
            "amounts of channels is currently unsupported"
        )

    background_audio *= desired_bg_rms / bg_rms

    # Tile the background until it is at least as long as the audio...
    while background_audio.shape[-1] < audio.shape[-1]:
        axis = 0 if background_audio.ndim == 1 else 1
        background_audio = np.concatenate(
            (background_audio, background_audio), axis=axis
        )

    # ...then trim it to exactly the audio's length.
    background_audio = (
        background_audio[: audio.shape[-1]]
        if background_audio.ndim == 1
        else background_audio[:, : audio.shape[-1]]
    )

    aug_audio = audio + background_audio

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="add_background_noise",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
The provided code snippet includes necessary dependencies for implementing the `clicks` function. Write a Python function `def clicks( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, seconds_between_clicks: float = 0.5, snr_level_db: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Adds clicks to the audio at a given regular interval @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param seconds_between_clicks: the amount of time between each click that will be added to the audio, in seconds @param snr_level_db: signal-to-noise ratio in dB @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def clicks(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    seconds_between_clicks: float = 0.5,
    snr_level_db: float = 1.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Overlays a regular click track on top of the audio

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param seconds_between_clicks: spacing between consecutive clicks, in
        seconds

    @param snr_level_db: signal-to-noise ratio in dB used when mixing the
        click track into the audio

    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended

    @returns: the augmented audio array and sample rate
    """
    assert isinstance(
        seconds_between_clicks, (int, float)
    ), "Expected 'seconds_between_clicks' to be a number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    # Click timestamps (in seconds), evenly spaced over the whole clip.
    total_seconds = audio.shape[-1] / sample_rate
    click_times = np.arange(0, total_seconds, seconds_between_clicks)
    click_track = librosa.clicks(times=click_times, sr=sample_rate)

    # Reuse add_background_noise to mix the click track in at the given SNR.
    aug_audio, out_sample_rate = add_background_noise(
        audio,
        sample_rate=sample_rate,
        background_audio=click_track,
        snr_level_db=snr_level_db,
    )

    audutils.get_metadata(
        metadata=metadata,
        function_name="clicks",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=out_sample_rate,
        seconds_between_clicks=seconds_between_clicks,
        output_path=output_path,
        clicks_duration=click_track.shape[-1] / sample_rate,
    )

    return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
160,561 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `clip` function. Write a Python function `def clip( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, offset_factor: float = 0.0, duration_factor: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Clips the audio using the specified offset and duration factors @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param offset_factor: start point of the crop relative to the audio duration (this parameter is multiplied by the audio duration) @param duration_factor: the length of the crop relative to the audio duration (this parameter is multiplied by the audio duration) @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def clip(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    offset_factor: float = 0.0,
    duration_factor: float = 1.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Crops the audio to a window given as fractions of its duration

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param offset_factor: where the crop starts, as a fraction of the audio
        duration

    @param duration_factor: how long the crop is, as a fraction of the audio
        duration

    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended

    @returns: the augmented audio array and sample rate
    """
    assert (
        0.0 <= (offset_factor + duration_factor) <= 1.0
    ), "Combination of offset and duration factors exceed audio length"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        # Snapshot the call arguments before any new locals are created, so
        # only the original parameters end up in the metadata entry.
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    total_samples = audio.shape[-1]
    first_sample = int(offset_factor * total_samples)
    last_sample = int((offset_factor + duration_factor) * total_samples)
    aug_audio = audio[..., first_sample:last_sample]

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="clip",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            start_sample=first_sample,
            end_sample=last_sample,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,562 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `harmonic` function. Write a Python function `def harmonic( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, kernel_size: int = 31, power: float = 2.0, margin: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Extracts the harmonic part of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param kernel_size: kernel size for the median filters @param power: exponent for the Wiener filter when constructing soft mask matrices @param margin: margin size for the masks @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def harmonic(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    kernel_size: int = 31,
    power: float = 2.0,
    margin: float = 1.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Keeps only the harmonic component of the audio

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param kernel_size: kernel size for the median filters

    @param power: exponent for the Wiener filter when constructing the soft
        mask matrices

    @param margin: margin size for the masks

    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended

    @returns: the augmented audio array and sample rate
    """
    assert isinstance(kernel_size, int), "Expected 'kernel_size' to be an int"
    assert isinstance(power, (int, float)), "Expected 'power' to be a number"
    assert isinstance(margin, (int, float)), "Expected 'margin' to be a number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        # Snapshot the call arguments before any new locals are created.
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    # NOTE: single-channel detection intentionally mirrors the original —
    # a (1, n) array also counts as one channel and takes the first branch.
    channel_count = 1 if audio.ndim == 1 else audio.shape[0]
    if channel_count == 1:
        aug_audio = librosa.effects.harmonic(
            audio, kernel_size=kernel_size, power=power, margin=margin
        )
    else:
        # Filter each channel independently, then stack the results back up.
        stems = []
        for ch in range(channel_count):
            stems.append(
                librosa.effects.harmonic(
                    np.asfortranarray(audio[ch]),
                    kernel_size=kernel_size,
                    power=power,
                    margin=margin,
                )
            )
        aug_audio = np.vstack(stems)

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="harmonic",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.