id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
4815634 | from cdxj_indexer.main import CDXJIndexer, iter_file_or_dir
from cdxj_indexer.postquery import append_method_query_from_req_resp
from cdxj_indexer.bufferiter import buffering_record_iter
| StarcoderdataPython |
1653391 | from events import EventManager
from exchange.public import ExchangePublic
class FTX(ExchangePublic):
    """Public-market-data client for the FTX exchange.

    Thin subclass of ExchangePublic that pins the exchange id to 'ftx'
    and widens the event mailbox for this exchange.
    """
    def __init__(self, conf=None):
        """Initialize the FTX client.

        :param conf: optional configuration object forwarded to ExchangePublic
        """
        exchange_id = 'ftx'
        super().__init__(exchange_id, conf)
        # self.em is presumably the EventManager created by the base class
        # (EventManager is imported at module level) -- TODO confirm.
        # Mailbox size 7 appears FTX-specific; rationale not visible here.
        self.em.modify_mailbox_size(exchange_id, 7)
if __name__ == "__main__":
    # Run the FTX client as a standalone process.
    b = FTX()
    # listen() is defined in the base class; presumably blocks consuming
    # exchange events -- verify against ExchangePublic.
    b.listen()
| StarcoderdataPython |
199852 | import asyncio
import itertools
from collections import defaultdict
from collections.abc import Iterable
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Union, Dict, Any, Sequence
import os
import logging
from aiomultiprocess import Pool
from packaging import version as packaging_version
from prettytable import PrettyTable, SINGLE_BORDER
from checkov.common.bridgecrew.severities import Severities
from checkov.common.models.enums import CheckResult
from checkov.common.output.record import Record, DEFAULT_SEVERITY
from checkov.common.typing import _CheckResult
from checkov.runner_filter import RunnerFilter
from checkov.common.bridgecrew.vulnerability_scanning.integrations.package_scanning import PackageScanningIntegration
from checkov.common.bridgecrew.platform_integration import BcPlatformIntegration
UNFIXABLE_VERSION = "N/A"  # sentinel shown when a CVE has no fixed version
@dataclass
class CveCount:
    """Aggregated CVE statistics for one scanned dependency file."""
    total: int = 0
    critical: int = 0
    high: int = 0
    medium: int = 0
    low: int = 0
    skipped: int = 0
    has_fix: int = 0
    to_fix: int = 0
    fixable: bool = True
    def output_row(self) -> List[str]:
        """Return the cells of the one-row CLI summary table."""
        cells = [
            ("Total CVEs", self.total),
            ("critical", self.critical),
            ("high", self.high),
            ("medium", self.medium),
            ("low", self.low),
            ("skipped", self.skipped),
        ]
        return [f"{label}: {count}" for label, count in cells]
def create_report_record(
    rootless_file_path: str, file_abs_path: str, check_class: str, vulnerability_details: Dict[str, Any],
    runner_filter: Union[RunnerFilter, None] = None
) -> Record:
    """Create a checkov Record for a single CVE found in a package file.

    :param rootless_file_path: file path relative to the scanned root
    :param file_abs_path: absolute path of the scanned file
    :param check_class: fully qualified name of the originating check class
    :param vulnerability_details: raw CVE entry from the scanner feed
    :param runner_filter: optional filter deciding which CVEs are skipped;
        a fresh default is created per call (the previous signature used a
        single shared ``RunnerFilter()`` default instance -- a mutable
        default argument evaluated once at import time)
    :return: a Record with FAILED or SKIPPED result and CVE details attached
    """
    if runner_filter is None:
        runner_filter = RunnerFilter()
    package_name = vulnerability_details["packageName"]
    package_version = vulnerability_details["packageVersion"]
    cve_id = vulnerability_details["id"].upper()
    severity = vulnerability_details.get("severity", DEFAULT_SEVERITY)
    # sanitize severity names: the feed reports "moderate", Severities knows "medium"
    if severity == "moderate":
        severity = "medium"
    description = vulnerability_details.get("description")
    resource = f"{rootless_file_path}.{package_name}"
    check_result: _CheckResult = {
        "result": CheckResult.FAILED,
    }
    # Downgrade to SKIPPED when the package or severity is filtered out.
    if runner_filter.skip_cve_package and package_name in runner_filter.skip_cve_package:
        check_result = {
            "result": CheckResult.SKIPPED,
            "suppress_comment": f"Filtered by package '{package_name}'"
        }
    elif not runner_filter.within_threshold(Severities[severity.upper()]):
        check_result = {
            "result": CheckResult.SKIPPED,
            "suppress_comment": "Filtered by severity"
        }
    code_block = [(0, f"{package_name}: {package_version}")]
    lowest_fixed_version = UNFIXABLE_VERSION
    fixed_versions: List[Union[packaging_version.Version, packaging_version.LegacyVersion]] = []
    status = vulnerability_details.get("status") or "open"
    if status != "open":
        # a non-open status looks like "fixed in 1.2.3, 2.0.0"
        fixed_versions = [
            packaging_version.parse(version.strip()) for version in status.replace("fixed in", "").split(",")
        ]
        lowest_fixed_version = str(min(fixed_versions))
    details = {
        "id": cve_id,
        "status": status,
        "severity": severity,
        "package_name": package_name,
        "package_version": package_version,
        "link": vulnerability_details.get("link"),
        "cvss": vulnerability_details.get("cvss"),
        "vector": vulnerability_details.get("vector"),
        "description": description,
        "risk_factors": vulnerability_details.get("riskFactors"),
        # fall back to publishedDays (age in days) when no explicit date given
        "published_date": vulnerability_details.get("publishedDate")
        or (datetime.now() - timedelta(days=vulnerability_details.get("publishedDays", 0))).isoformat(),
        "lowest_fixed_version": lowest_fixed_version,
        "fixed_versions": fixed_versions,
    }
    record = Record(
        check_id=f"CKV_{cve_id.replace('-', '_')}",
        bc_check_id=f"BC_{cve_id.replace('-', '_')}",
        check_name="SCA package scan",
        check_result=check_result,
        code_block=code_block,
        file_path=f"/{rootless_file_path}",
        file_line_range=[0, 0],
        resource=resource,
        check_class=check_class,
        evaluations=None,
        file_abs_path=file_abs_path,
        severity=Severities[severity.upper()],
        description=description,
        short_description=f"{cve_id} - {package_name}: {package_version}",
        vulnerability_details=details,
    )
    return record
def calculate_lowest_compliant_version(
    fix_versions_lists: List[List[Union[packaging_version.Version, packaging_version.LegacyVersion]]]
) -> str:
    """A best effort approach to find the lowest compliant version.

    Takes, per CVE, the list of versions that fix it, and picks a single
    version that satisfies all of them (preferring the same major line).

    :param fix_versions_lists: one list of fix versions per CVE
    :return: the chosen version as a string, or UNFIXABLE_VERSION when no
        CVE has any fix version (previously this case fell through and
        implicitly returned None, despite the ``-> str`` annotation)
    """
    package_min_versions = set()
    package_versions = set()
    for fix_versions in fix_versions_lists:
        if fix_versions:
            package_min_versions.add(min(fix_versions))
            package_versions.update(fix_versions)
    if not package_min_versions:
        # bug fix: no fixable CVE at all -> report "N/A" instead of None
        return UNFIXABLE_VERSION
    package_min_version = min(package_min_versions)
    package_max_version = max(package_min_versions)
    # LegacyVersion has no reliable .major attribute, so fall back to the
    # highest of the per-CVE minimum versions.
    if isinstance(package_min_version, packaging_version.LegacyVersion) or isinstance(
        package_max_version, packaging_version.LegacyVersion
    ):
        return str(package_max_version)
    elif package_min_version.major == package_max_version.major:
        # all minimum fixes share a major line -> take the highest of them
        return str(package_max_version)
    else:
        # majors differ: take the highest known fix version within the
        # newest major line, so every CVE's fix is covered
        lowest_version = max(
            version
            for version in package_versions
            if isinstance(version, packaging_version.Version) and version.major == package_max_version.major
        )
        return str(lowest_version)
def compare_cve_severity(cve: Dict[str, str]) -> int:
    """Sort key for CVE entries: the numeric level of their severity."""
    severity_name = cve.get("severity") or DEFAULT_SEVERITY
    return Severities[severity_name.upper()].level
def create_cli_output(fixable=True, *cve_records: List[Record]) -> str:
    """Build the full CLI text report for the given CVE scan records.

    Records are grouped by file path, then by package; per file one
    summary/fixable/package table trio is rendered via create_cli_table().

    :param fixable: whether to show the "To fix ..." hint row per file
    :param cve_records: one or more lists of CVE Records to aggregate
    :return: concatenated table output for all files
    """
    cli_outputs = []
    # file_path -> {package_name: [records]}
    group_by_file_path_package_map = defaultdict(dict)
    for record in itertools.chain(*cve_records):
        group_by_file_path_package_map[record.file_path].setdefault(
            record.vulnerability_details["package_name"], []
        ).append(record)
    for file_path, packages in group_by_file_path_package_map.items():
        cve_count = CveCount(fixable=fixable)
        package_details_map = defaultdict(dict)
        for package_name, records in packages.items():
            package_version = None
            fix_versions_lists = []
            for record in records:
                cve_count.total += 1
                if record.check_result["result"] == CheckResult.SKIPPED:
                    # skipped records only bump the counters, not the table
                    cve_count.skipped += 1
                    continue
                else:
                    cve_count.to_fix += 1
                # dynamically bump the per-severity counter attribute
                severity_str = record.severity.name.lower()
                setattr(cve_count, severity_str, getattr(cve_count, severity_str) + 1)
                if record.vulnerability_details["lowest_fixed_version"] != UNFIXABLE_VERSION:
                    cve_count.has_fix += 1
                    fix_versions_lists.append(record.vulnerability_details["fixed_versions"])
                if package_version is None:
                    package_version = record.vulnerability_details["package_version"]
                package_details_map[package_name].setdefault("cves", []).append(
                    {
                        "id": record.vulnerability_details["id"],
                        "severity": severity_str,
                        "fixed_version": record.vulnerability_details["lowest_fixed_version"],
                    }
                )
            # only present when at least one record was not skipped
            if package_name in package_details_map.keys():
                package_details_map[package_name]["cves"].sort(key=compare_cve_severity, reverse=True)
                package_details_map[package_name]["current_version"] = package_version
                package_details_map[package_name]["compliant_version"] = calculate_lowest_compliant_version(
                    fix_versions_lists
                )
        cli_outputs.append(
            create_cli_table(
                file_path=file_path,
                cve_count=cve_count,
                package_details_map=package_details_map,
            )
        )
    return "".join(cli_outputs)
def create_cli_table(file_path: str, cve_count: CveCount, package_details_map: Dict[str, Dict[str, Any]]) -> str:
    """Render the per-file report: summary, fixable-hint and package tables."""
    column_count = 6
    table_width = 120
    column_width = table_width // column_count
    summary_lines = create_cve_summary_table_part(
        table_width=table_width, column_width=column_width, cve_count=cve_count
    )
    fixable_lines = create_fixable_cve_summary_table_part(
        table_width=table_width,
        column_count=column_count,
        cve_count=cve_count,
        vulnerable_packages=bool(package_details_map),
    )
    package_lines = create_package_overview_table_part(
        table_width=table_width, column_width=column_width, package_details_map=package_details_map
    )
    summary_part = "".join(summary_lines)
    fixable_part = "".join(fixable_lines)
    package_part = "".join(package_lines)
    return f"\t{file_path}\n{summary_part}\n{fixable_part}{package_part}\n"
def create_cve_summary_table_part(table_width: int, column_width: int, cve_count: CveCount) -> List[str]:
    """Render the one-row CVE summary table, tab-indented, as a list of lines.

    The bottom border is rewritten with side-junction characters so the
    table visually connects to the table printed directly beneath it.
    """
    table = PrettyTable(
        header=False,
        padding_width=1,
        min_table_width=table_width,
        max_table_width=table_width,
    )
    table.set_style(SINGLE_BORDER)
    table.add_row(cve_count.output_row())
    table.align = "l"
    table.min_width = column_width
    table.max_width = column_width
    lines = [f"\t{line}" for line in table.get_string().splitlines(keepends=True)]
    # hack to make multiple tables look like one: swap the bottom corners
    # for left/right junction characters
    last_line = lines[-1]
    last_line = last_line.replace(table.bottom_left_junction_char, table.left_junction_char)
    last_line = last_line.replace(table.bottom_right_junction_char, table.right_junction_char)
    lines[-1] = last_line
    return lines
def create_fixable_cve_summary_table_part(
    table_width: int, column_count: int, cve_count: CveCount, vulnerable_packages: bool
) -> List[str]:
    """Render the optional "To fix ..." hint row as tab-indented lines.

    The top border is always dropped, and the bottom border is dropped as
    well when a package table follows, so all tables appear joined.
    """
    width = table_width + column_count * 2
    table = PrettyTable(header=False, min_table_width=width, max_table_width=width)
    table.set_style(SINGLE_BORDER)
    if cve_count.fixable:
        table.add_row([f"To fix {cve_count.has_fix}/{cve_count.to_fix} CVEs, go to https://www.bridgecrew.cloud/"])
        table.align = "l"
    lines = [f"\t{line}" for line in table.get_string().splitlines(keepends=True)]
    # hack to make multiple tables look like one: strip the top border
    del lines[0]
    if vulnerable_packages:
        # strip the bottom border too, the package table supplies it
        del lines[-1]
    return lines
def create_package_overview_table_part(
    table_width: int, column_width: int, package_details_map: Dict[str, Dict[str, Any]]
) -> List[str]:
    """Render one table per vulnerable package, joined to look like a single
    table: the header is only printed for the first package, and shared
    border lines between consecutive package tables are deduplicated.

    :param table_width: overall table width in characters
    :param column_width: width of each of the six columns
    :param package_details_map: package name -> {"cves", "current_version",
        "compliant_version"} as built by create_cli_output()
    :return: tab-indented output lines
    """
    package_table_lines: List[str] = []
    package_table = PrettyTable(min_table_width=table_width, max_table_width=table_width)
    package_table.set_style(SINGLE_BORDER)
    package_table.field_names = [
        "Package",
        "CVE ID",
        "Severity",
        "Current version",
        "Fixed version",
        "Compliant version",
    ]
    for package_idx, (package_name, details) in enumerate(package_details_map.items()):
        if package_idx > 0:
            # drop the previous table's bottom border; this table's top
            # border takes its place
            del package_table_lines[-1]
            package_table.header = False
        package_table.clear_rows()
        for cve_idx, cve in enumerate(details["cves"]):
            # package name and version columns only on the first CVE row
            col_package = ""
            col_current_version = ""
            col_compliant_version = ""
            if cve_idx == 0:
                col_package = package_name
                col_current_version = details["current_version"]
                col_compliant_version = details["compliant_version"]
            package_table.add_row(
                [
                    col_package,
                    cve["id"],
                    cve["severity"],
                    col_current_version,
                    cve["fixed_version"],
                    col_compliant_version,
                ]
            )
        package_table.align = "l"
        package_table.min_width = column_width
        package_table.max_width = column_width
        for idx, line in enumerate(package_table.get_string().splitlines(keepends=True)):
            if idx == 0:
                # hack to make multiple tables look like one
                line = line.replace(package_table.top_left_junction_char, package_table.left_junction_char).replace(
                    package_table.top_right_junction_char, package_table.right_junction_char
                )
                if package_idx > 0:
                    # hack to make multiple package tables look like one
                    line = line.replace(package_table.top_junction_char, package_table.junction_char)
            package_table_lines.append(f"\t{line}")
    return package_table_lines
async def _report_results_to_bridgecrew_async(
    scan_results: "Iterable[Dict[str, Any]]",
    bc_integration: BcPlatformIntegration,
    bc_api_key: str
) -> "Sequence[int]":
    """Report each scan result to the Bridgecrew platform.

    Results are reported in parallel via an aiomultiprocess pool, except
    when running under PyCharm, where multiprocessing in debug mode crashes
    and the results are reported sequentially instead.

    :param scan_results: scan result dicts; each must have a "repository" key
    :param bc_integration: platform integration handle passed through
    :param bc_api_key: Bridgecrew API key passed through
    :return: one exit code per scan result
    """
    package_scanning_int = PackageScanningIntegration()
    args = [
        (result, bc_integration, bc_api_key, Path(result["repository"]))
        for result in scan_results
    ]
    if os.getenv("PYCHARM_HOSTED") == "1":
        # The PYCHARM_HOSTED env variable equals "1" when running via PyCharm.
        # This avoids the crash that happens when using multiprocessing
        # under PyCharm's debug mode.
        logging.warning("reporting the results in sequence for avoiding crashing when running via Pycharm")
        exit_codes = []
        for curr_arg in args:
            exit_codes.append(await package_scanning_int.report_results_async(*curr_arg))
    else:
        async with Pool() as pool:
            exit_codes = await pool.starmap(package_scanning_int.report_results_async, args)
    return exit_codes
def report_results_to_bridgecrew(
    scan_results: "Iterable[Dict[str, Any]]",
    bc_integration: BcPlatformIntegration,
    bc_api_key: str
) -> "Sequence[int]":
    """Synchronous wrapper: run the async reporting coroutine to completion."""
    coroutine = _report_results_to_bridgecrew_async(scan_results, bc_integration, bc_api_key)
    return asyncio.run(coroutine)
| StarcoderdataPython |
3231531 | <reponame>AWSCookbook/Containers<filename>605-Updating-Containers-With-BlueGreen/cdk-AWS-Cookbook-605/app.py
#!/usr/bin/env python3
# AWS CDK entry point: synthesizes the AWS Cookbook recipe 605 stack.
import aws_cdk as cdk
from cdk_aws_cookbook_605.cdk_aws_cookbook_605_stack import CdkAwsCookbook605Stack
app = cdk.App()
# Instantiating the stack registers it with the app under this stack name.
CdkAwsCookbook605Stack(app, "cdk-aws-cookbook-605")
# Emit the CloudFormation template(s) to cdk.out.
app.synth()
| StarcoderdataPython |
3240007 | load("@bazel_skylib//lib:versions.bzl", "versions")
# Repository rule implementation: validate the running Bazel version and
# expose it as a loadable constant.
def _store_bazel_version(repository_ctx):
    bazel_version = versions.get()
    # versions.get() returns "" on development builds of Bazel, where the
    # version cannot be checked -- warn instead of failing.
    if len(bazel_version) == 0:
        print("You're using development build of Bazel, make sure it's at least version 0.17.1")
    elif versions.is_at_most("0.17.0", bazel_version):
        fail("Bazel {} is too old to use with rules_rust, please use at least Bazel 0.17.1, preferably newer.".format(bazel_version))
    # Generated repository contents: a BUILD file exporting def.bzl, which
    # holds the detected version as BAZEL_VERSION.
    repository_ctx.file("BUILD", "exports_files(['def.bzl'])")
    repository_ctx.file("def.bzl", "BAZEL_VERSION='" + bazel_version + "'")
# Repository rule: call from WORKSPACE to snapshot the running Bazel
# version into @<name>//:def.bzl as BAZEL_VERSION.
bazel_version = repository_rule(
    implementation = _store_bazel_version,
)
| StarcoderdataPython |
70415 | <gh_stars>0
from src.localLib.paymentGateway import FwGateway as paymentGateway
from src.models.plans import Plans
def runPlansBilling():
    """
    Process all payment requests for all clients of all plans.

    For every active plan, each subscriber is charged through the payment
    gateway; the returned confirmation URL is printed so it can be mailed
    to the subscriber.
    """
    pg = paymentGateway()  # Instantiating the paymentGateway
    plans = Plans()
    activePlans = plans.getPlans()
    for activePlanId in activePlans.keys():
        activePlanData = activePlans[activePlanId]
        subscribers = plans.getSubscribers(activePlanId)
        for subscriberId in subscribers.keys():
            subscriberData = subscribers[subscriberId]
            nextUrl = ""
            try:
                nextUrl = pg.processPlanPayment(
                    subscriberData['mail'],
                    subscriberData['currency'],
                    subscriberData['amount'],
                    activePlanData['redirectUrl'],
                    subscriberData['paymentMethod'],
                    activePlanId
                )
            except Exception as e:
                # Bug fix: "print e.message" was Python 2-only syntax and kept
                # this module from even parsing under Python 3; e.message is
                # also long deprecated -- str(e) carries the same text.
                print(str(e))
                # Logs and alerts
            # nextUrl stays "" when the payment request failed above.
            print(subscriberData['mail'] + ' : ' + nextUrl)
            # Send the 'nextUrl' to the subscriber mail in order to confirm the payment
runPlansBilling() | StarcoderdataPython |
1737921 | # -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import logging
import time
import numpy as np
import tensorflow as tf
from easy_rec.python.input.input import Input
from easy_rec.python.utils import odps_util
from easy_rec.python.utils.tf_utils import get_tf_type
try:
import common_io
except Exception:
common_io = None
try:
from datahub import DataHub
from datahub.exceptions import DatahubException
from datahub.models import RecordType
from datahub.models import CursorType
except Exception:
logging.warning(
'DataHub is not installed. You can install it by: pip install pydatahub')
DataHub = None
class DataHubInput(Input):
  """Input source streaming tuple records from Aliyun DataHub.

  Common IO based interface, could run at local or on data science.
  Records are pulled shard by shard and yielded as fixed-size numpy
  batches, which _build() wraps into a tf.data.Dataset.
  """

  def __init__(self,
               data_config,
               feature_config,
               datahub_config,
               task_index=0,
               task_num=1,
               check_mode=False):
    """Initialize the input; datahub connection errors are logged, not raised.

    Args:
      data_config: dataset config (batch size, field types, parallelism).
      feature_config: feature config forwarded to the Input base class.
      datahub_config: object with akId/akSecret/region/project/topic.
      task_index: worker index, forwarded to the base class.
      task_num: total worker count, forwarded to the base class.
      check_mode: forwarded to the base class.
    """
    super(DataHubInput, self).__init__(data_config, feature_config, '',
                                       task_index, task_num, check_mode)
    if DataHub is None:
      # Bug fix: the second positional argument used to be passed as a
      # %-format argument to a format string without placeholders, which
      # garbles the log call -- use a single message string instead.
      logging.error(
          'please install datahub: pip install pydatahub ; Python 3.6 recommended')
    try:
      self._datahub_config = datahub_config
      if self._datahub_config is None:
        # NOTE(review): this branch is a no-op; a missing config still falls
        # through to the DataHub() call below and is caught as an exception.
        pass
      self._datahub = DataHub(self._datahub_config.akId,
                              self._datahub_config.akSecret,
                              self._datahub_config.region)
      self._num_epoch = 0
    except Exception as ex:
      # Bug fix: extra args to logging require a %s placeholder; the old
      # call logged a malformed message. Deliberately best-effort: the
      # input stays constructible without a working datahub connection.
      logging.info('exception in init datahub: %s', str(ex))

  def _parse_record(self, *fields):
    """Map positional field tensors to a {field_name: tensor} dict,
    keeping effective (feature) fields and label fields."""
    fields = list(fields)
    inputs = {self._input_fields[x]: fields[x] for x in self._effective_fids}
    for x in self._label_fids:
      inputs[self._input_fields[x]] = fields[x]
    return inputs

  def _datahub_generator(self):
    """Generator yielding one tuple of numpy arrays per batch.

    Iterates every shard from its OLDEST cursor and polls forever (sleeping
    1s when a read returns no records), so the stream never terminates on
    its own. Empty/'Null'/None cell values keep the per-field default.
    """
    logging.info('start epoch[%d]' % self._num_epoch)
    self._num_epoch += 1
    odps_util.check_input_field_and_types(self._data_config)
    # Per-field default value, replicated to a full batch-sized array.
    record_defaults = [
        self.get_type_defaults(x, v)
        for x, v in zip(self._input_field_types, self._input_field_defaults)
    ]
    batch_defaults = [
        np.array([x] * self._data_config.batch_size) for x in record_defaults
    ]
    try:
      self._datahub.wait_shards_ready(self._datahub_config.project,
                                      self._datahub_config.topic)
      topic_result = self._datahub.get_topic(self._datahub_config.project,
                                             self._datahub_config.topic)
      if topic_result.record_type != RecordType.TUPLE:
        # NOTE(review): only logged; reading continues on a non-TUPLE topic.
        logging.error('topic type illegal !')
      record_schema = topic_result.record_schema
      shard_result = self._datahub.list_shard(self._datahub_config.project,
                                              self._datahub_config.topic)
      shards = shard_result.shards
      for shard in shards:
        shard_id = shard._shard_id
        cursor_result = self._datahub.get_cursor(self._datahub_config.project,
                                                 self._datahub_config.topic,
                                                 shard_id, CursorType.OLDEST)
        cursor = cursor_result.cursor
        limit = self._data_config.batch_size
        while True:
          get_result = self._datahub.get_tuple_records(
              self._datahub_config.project, self._datahub_config.topic,
              shard_id, record_schema, cursor, limit)
          # Start from the defaults and overwrite cells that carry a value.
          batch_data_np = [x.copy() for x in batch_defaults]
          for row_id, record in enumerate(get_result.records):
            for col_id in range(len(record_defaults)):
              if record.values[col_id] not in ['', 'Null', None]:
                batch_data_np[col_id][row_id] = record.values[col_id]
          yield tuple(batch_data_np)
          if 0 == get_result.record_count:
            time.sleep(1)  # back off while the shard has no new records
          cursor = get_result.next_cursor
    except DatahubException as e:
      logging.error(e)

  def _build(self, mode, params):
    """Build the tf.data.Dataset pipeline over the datahub generator."""
    # get input type
    list_type = [get_tf_type(x) for x in self._input_field_types]
    list_type = tuple(list_type)
    list_shapes = [tf.TensorShape([None]) for x in range(0, len(list_type))]
    list_shapes = tuple(list_shapes)
    # read datahub
    dataset = tf.data.Dataset.from_generator(
        self._datahub_generator,
        output_types=list_type,
        output_shapes=list_shapes)
    if mode == tf.estimator.ModeKeys.TRAIN:
      dataset = dataset.shuffle(
          self._data_config.shuffle_buffer_size,
          seed=2020,
          reshuffle_each_iteration=True)
      dataset = dataset.repeat(self.num_epochs)
    else:
      dataset = dataset.repeat(1)
    dataset = dataset.map(
        self._parse_record,
        num_parallel_calls=self._data_config.num_parallel_calls)
    # preprocess is necessary to transform data
    # so that they could be feed into FeatureColumns
    dataset = dataset.map(
        map_func=self._preprocess,
        num_parallel_calls=self._data_config.num_parallel_calls)
    dataset = dataset.prefetch(buffer_size=self._prefetch_size)
    if mode != tf.estimator.ModeKeys.PREDICT:
      dataset = dataset.map(lambda x:
                            (self._get_features(x), self._get_labels(x)))
    else:
      dataset = dataset.map(lambda x: (self._get_features(x)))
    return dataset
| StarcoderdataPython |
3292672 | # -*- coding: utf-8 -*-
"""
GUI frame template:
- auto-accelerated control shortcuts, "&OK" will turn Alt-O into shortcut
- Python console window, initially hidden,
with auto-saved command history kept in conf.ConsoleHistoryCommands
- wx widget inspector window, initially hidden
- option for log panel, handles logging messages via wx events
------------------------------------------------------------------------------
This file is part of h3sed - Heroes3 Savegame Editor.
Released under the MIT License.
@created 14.03.2020
@modified 09.01.2022
------------------------------------------------------------------------------
"""
import datetime
import logging
import os
import re
import traceback
import wx
import wx.lib.inspection
import wx.lib.newevent
import wx.py
from . lib.controls import ColourManager
from . lib import util, wx_accel
from . import conf
logger = logging.getLogger(__name__)
def status(text, *args, **kwargs):
    """
    Sets main window status text, optionally logs the message.

    @param   args   string format arguments, if any, to substitute in text
    @param   flash  whether to clear the status after timeout,
                    by default after conf.StatusFlashLength if not given seconds
    @param   log    whether to log the message to main window
    """
    app = wx.GetApp()
    window = app and app.GetTopWindow()
    if not window:
        return
    try:
        msg = text % args if args else text
    except UnicodeError:
        # Retry with all arguments coerced to unicode.
        args = tuple(util.to_unicode(a) for a in args)
        msg = text % args if args else text
    msg = re.sub("[\n\r\t]+", " ", msg)  # Collapse linefeeds/tabs for the status bar
    if kwargs.get("log"):
        logger.info(msg)
    window.set_status(msg, timeout=kwargs.get("flash"))
class GUILogHandler(logging.Handler):
    """Logging handler that forwards logging messages to GUI log window."""
    def __init__(self):
        self.deferred = [] # Messages logged before main window available
        # NOTE(review): super(self.__class__, ...) breaks if this class is
        # ever subclassed (infinite recursion); plain GUILogHandler is safer.
        super(self.__class__, self).__init__()
    def emit(self, record):
        """Adds message to GUI log window, or postpones if window unavailable."""
        now = datetime.datetime.now()
        # %-format the record lazily, falling back to unicode-coerced args.
        try: text = record.msg % record.args if record.args else record.msg
        except UnicodeError:
            args = tuple(map(util.to_unicode, record.args or ()))
            text = record.msg % args if args else record.msg
        if record.exc_info:
            # Append the full traceback below the message.
            text += "\n\n" + "".join(traceback.format_exception(*record.exc_info))
        if "\n" in text:
            text = text.replace("\n", "\n\t\t") # Indent linebreaks
            text = re.sub(r"^\s+$", "", text, flags=re.M) # Unindent whitespace-only lines
        # Timestamp with millisecond precision, tab-separated from the text.
        msg = "%s.%03d\t%s" % (now.strftime("%H:%M:%S"), now.microsecond // 1000, text)
        window = wx.GetApp() and wx.GetApp().GetTopWindow()
        if window:
            # Flush anything deferred first, in original order.
            msgs = self.deferred + [msg]
            for m in msgs: wx.CallAfter(window.log_message, m)
            del self.deferred[:]
        else: self.deferred.append(msg)
class TemplateFrameMixIn(wx_accel.AutoAcceleratorMixIn):
    """Application main window.

    Mix-in providing a hidden Python console, a hidden wx widget inspector,
    an optional log panel, recent-files menu support, and status bar
    handling. Expects the concrete class to provide open_file().
    """
    def __init__(self):
        wx_accel.AutoAcceleratorMixIn.__init__(self)
        self.Bind(wx.EVT_CLOSE, self.on_exit)
        self.console_commands = set() # Commands from run_console()
        self.frame_console = wx.py.shell.ShellFrame(parent=self,
            title=u"%s Console" % conf.Title, size=conf.ConsoleSize)
        # Closing the console only hides it, it is created once.
        self.frame_console.Bind(wx.EVT_CLOSE, self.on_toggle_console)
        self.frame_console_shown = False # Init flag
        console = self.console = self.frame_console.shell
        if not isinstance(conf.ConsoleHistoryCommands, list):
            conf.ConsoleHistoryCommands = []
        for cmd in conf.ConsoleHistoryCommands:
            console.addHistory(cmd)
        console.Bind(wx.EVT_KEY_DOWN, self.on_keydown_console)
        self.widget_inspector = wx.lib.inspection.InspectionTool()
        self.CreateStatusBar()
    def create_log_panel(self, parent):
        """Creates and returns the log output panel."""
        panel = wx.Panel(parent)
        sizer = panel.Sizer = wx.BoxSizer(wx.VERTICAL)
        ColourManager.Manage(panel, "BackgroundColour", wx.SYS_COLOUR_BTNFACE)
        button_clear = wx.Button(parent=panel, label="C&lear log", size=(100, -1))
        button_clear.Bind(wx.EVT_BUTTON, lambda event: self.log.Clear())
        edit_log = self.log = wx.TextCtrl(panel, style=wx.TE_MULTILINE)
        edit_log.SetEditable(False)
        # Read-only controls tend to be made grey by default
        ColourManager.Manage(edit_log, "ForegroundColour", wx.SYS_COLOUR_GRAYTEXT)
        ColourManager.Manage(edit_log, "BackgroundColour", wx.SYS_COLOUR_WINDOW)
        sizer.Add(button_clear, border=5, flag=wx.ALIGN_RIGHT | wx.TOP |
                  wx.RIGHT)
        sizer.Add(edit_log, border=5, proportion=1, flag=wx.GROW | wx.ALL)
        return panel
    def create_menu(self):
        """Creates the program menu."""
        menu = wx.MenuBar()
        menu_file = wx.Menu()
        menu.Insert(0, menu_file, "&File")
        menu_recent = self.menu_recent = wx.Menu()
        menu_file.AppendMenu(id=wx.NewIdRef().Id, text="&Recent files",
                             submenu=menu_recent, help="Recently opened files.")
        menu_file.AppendSeparator()
        menu_console = self.menu_console = menu_file.Append(
            id=wx.NewIdRef().Id, kind=wx.ITEM_CHECK, text="Show &console\tCtrl-E",
            help="Show/hide a Python shell environment window")
        menu_inspect = self.menu_inspect = menu_file.Append(
            id=wx.NewIdRef().Id, kind=wx.ITEM_CHECK, text="Show &widget inspector",
            help="Show/hide the widget inspector")
        self.file_history = wx.FileHistory(conf.MaxRecentFiles)
        self.file_history.UseMenu(menu_recent)
        for f in conf.RecentFiles[::-1]: # Backwards - FileHistory is a stack
            os.path.exists(f) and self.file_history.AddFileToHistory(f)
        wx.EVT_MENU_RANGE(self, wx.ID_FILE1, wx.ID_FILE9, self.on_recent_file)
        menu_file.AppendSeparator()
        m_exit = menu_file.Append(-1, "E&xit\tAlt-X", "Exit")
        self.Bind(wx.EVT_MENU, self.on_toggle_console, menu_console)
        self.Bind(wx.EVT_MENU, self.on_open_widget_inspector, menu_inspect)
        self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
        self.SetMenuBar(menu)
    def on_exit(self, event):
        """Handler on application exit, saves configuration."""
        conf.save()
        self.Destroy()
    def on_keydown_console(self, event):
        """Handler for keydown in console, saves entered command in history."""
        event.Skip()
        if (event.KeyCode in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER)
                and not event.ShiftDown() and self.console.history):
            # Defer saving until command is inserted into console history
            wx.CallAfter(self.save_last_command)
    def run_console(self, command):
        """
        Runs the command in the Python console. Will not be saved to console
        commands history.
        """
        self.console.run(command)
        self.console_commands.add(command)
    def save_last_command(self):
        """
        Saves the last console command in conf, minus the commands given via
        run_console().
        """
        h = [x for x in self.console.history if x not in self.console_commands]
        history = h[:conf.MaxConsoleHistory][::-1]
        if history != conf.ConsoleHistoryCommands:
            conf.ConsoleHistoryCommands[:] = history
            conf.save()
    def set_status(self, text, timeout=False):
        """Sets main window status bar text, optionally clears after timeout."""
        self.SetStatusText(text)
        if not timeout or not text: return
        if timeout is True: timeout = conf.StatusFlashLength
        # Only clear if the status has not been overwritten meanwhile.
        clear = lambda sb: sb and sb.StatusText == text and self.SetStatusText("")
        wx.CallLater(timeout * 1000, clear, self.StatusBar)
    def log_message(self, text):
        """Adds a message to the log control."""
        if not hasattr(self, "log") \
        or hasattr(conf, "LogEnabled") and not conf.LogEnabled: return
        try: self.log.AppendText(text + "\n")
        except Exception:
            # Fall back for byte strings with non-ASCII content.
            try: self.log.AppendText(text.decode("utf-8", "replace") + "\n")
            except Exception as e: print("Exception %s: %s in log_message" %
                                         (e.__class__.__name__, e))
    def on_toggle_console(self, *_):
        """Toggles the console shown/hidden."""
        show = not self.frame_console.IsShown()
        if show and not self.frame_console_shown:
            # First showing of console, set height to a fraction of main
            # form, and position it immediately under the main form, or
            # covering its bottom if no room.
            self.frame_console_shown = True
            size = wx.Size(self.Size.width, max(200, self.Size.height // 3))
            self.frame_console.Size = size
            display = wx.GetDisplaySize()
            y = 0
            min_bottom_space = 130 # Leave space for autocomplete dropdown
            if size.height > display.height - self.Size.height \
            - self.Position.y - min_bottom_space:
                y = display.height - self.Size.height - self.Position.y \
                    - size.height - min_bottom_space
            self.frame_console.Position = (
                self.Position.x, self.Position.y + self.Size.height + y
            )
        if show: self.console.ScrollToLine(self.console.LineCount + 3 - (
            self.console.Size.height // self.console.GetTextExtent(" ")[1]
        )) # Scroll to the last line
        self.frame_console.Show(show)
        if hasattr(self, "menu_console"): self.menu_console.Check(show)
    def on_open_widget_inspector(self, *_):
        """Toggles the widget inspection tool shown/hidden."""
        visible = not (self.widget_inspector.initialized
                       and self.widget_inspector._frame)
        if visible:
            self.widget_inspector.Init()
            self.widget_inspector.Show(selectObj=self, refreshTree=True)
            # Let the inspector frame close normally instead of hiding.
            self.widget_inspector._frame.Bind(wx.EVT_CLOSE, lambda e: e.Skip())
        else:
            self.widget_inspector._frame.Close()
        if hasattr(self, "menu_inspect"):
            self.menu_inspect.Check(visible)
    def on_recent_file(self, event):
        """Handler for clicking an entry in Recent Files menu."""
        filename = self.file_history.GetHistoryFile(event.GetId() - wx.ID_FILE1)
        # open_file() is expected from the concrete window class -- not
        # defined in this mix-in.
        self.open_file(filename)
| StarcoderdataPython |
4800688 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
:author: 秋荏苒
:copyright: © 2019 by 秋荏苒 <<EMAIL>>.
:license: MIT, see LICENSE for more details.
"""
import os
import sys
from urllib.parse import urlparse, urljoin
from flask import request, redirect, url_for, current_app
from app.configs import basedir
def is_safe_url(target):
    """
    Make sure the redirect URLs safely: the target must stay on the current
    host and use http or https.

    :param target: url address
    """
    host_url = urlparse(request.host_url)
    redirect_url = urlparse(urljoin(request.host_url, target))
    same_host = host_url.netloc == redirect_url.netloc
    return redirect_url.scheme in ('http', 'https') and same_host
def redirect_back(default='web.index', **kwargs):
    """
    Redirect to the 'next' parameter or the referrer when safe,
    otherwise to the *default* endpoint.
    """
    candidates = (request.args.get('next'), request.referrer)
    for target in candidates:
        if target and is_safe_url(target):
            return redirect(target)
    return redirect(url_for(default, **kwargs))
def allowed_file(filename):
    """Return whether *filename* has an allowed image extension."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in current_app.config['BLOG_ALLOWED_IMAGE_EXTENSIONS']
def upload_file(file, prefix):
    """
    Save an uploaded file under the static images folder, renamed to
    *prefix* plus the original extension.
    """
    extension = file.filename.rsplit('.', 1)[1]
    file.filename = prefix + '.' + extension
    # Build the images directory path with the platform's separator style.
    if sys.platform.startswith('win'):
        images_dir = os.path.join(basedir, r'app\static\images')
    else:
        images_dir = os.path.join(basedir, 'app/static/images')
    file.save(os.path.join(images_dir, file.filename))
| StarcoderdataPython |
94640 | <gh_stars>10-100
# In-place merge sort: recursively halve the list, then merge sorted halves.
def merge_sort(arr):
    """Sort *arr* in place using merge sort."""
    if len(arr) <= 1:
        return
    middle = len(arr) // 2
    left_half = arr[:middle]
    right_half = arr[middle:]
    merge_sort(left_half)
    merge_sort(right_half)
    merge_array(arr, left_half, right_half)


def merge_array(arr, left, right):
    """Merge the sorted lists *left* and *right* back into *arr*."""
    write = left_pos = right_pos = 0
    # Take the smaller head element while both halves have items left.
    while left_pos < len(left) and right_pos < len(right):
        if left[left_pos] < right[right_pos]:
            arr[write] = left[left_pos]
            left_pos += 1
        else:
            arr[write] = right[right_pos]
            right_pos += 1
        write += 1
    # Copy whatever remains of either half.
    while left_pos < len(left):
        arr[write] = left[left_pos]
        left_pos += 1
        write += 1
    while right_pos < len(right):
        arr[write] = right[right_pos]
        right_pos += 1
        write += 1
# Print an array on one line, elements separated (and followed) by a space.
def print_array(arr):
    """Print *arr* elements space-separated on a single line."""
    line = "".join("{} ".format(element) for element in arr)
    print(line)
# Interactive driver: read an integer array, sort it, print before/after.
total_element = int(input("Number of element in array "))
arr = []
for i in range(total_element):
    arr.append(int(input(f"Enter {i}th element ")))
print("Input array is ", end="\n")
print_array(arr)
merge_sort(arr)
print("array after sort is: ", end="\n")
print_array(arr)
| StarcoderdataPython |
1625740 | from cosymlib.file_io import get_geometry_from_file_cor
from cosymlib.file_io import errors
import os
import tempfile
import warnings
def read_old_input(file_name):
    """
    Reads the old Shape's program input
    :param file_name: file name
    :return: list of Geometry objects and options
    """
    # Known option keywords with their defaults; '%'-prefixed lines in the
    # input override these.
    options = {'%out': None, '%conquest': None, '%external': False, '%fullout': False, '%test': False,
               '%n_atoms': 0, '%central_atom': 0, '%labels': 0, '%path': False}
    idl = 0  # current input line number (used in warnings below)
    with open(file_name, mode='r') as lines:
        # Header loop: consume comment ('$'/'!'), option ('%') and count
        # lines until the first line that is not one of them.
        while True:
            line = lines.readline().split()
            if '$' in line or '!' in line:
                pass  # comment line, ignore
            elif any('%' in word for word in line):
                # "%key value" or a bare "%flag"
                if len(line) > 1:
                    options[line[0]] = line[1]
                else:
                    options[line[0]] = True
            else:
                try:
                    int(line[0])
                    # First numeric line: atom count and central atom index;
                    # any later numeric-leading line is taken as labels.
                    if options['%n_atoms'] == 0:
                        options['%n_atoms'] = int(line[0])
                        options['%central_atom'] = int(line[1])
                    else:
                        options['%labels'] = line
                except (ValueError, IndexError):
                    # Non-numeric line: header is over, geometry data begins.
                    break
            idl += 1
        n_atoms = options['%n_atoms']
        if options['%central_atom'] != 0:
            # presumably the central atom is an extra row per structure --
            # TODO confirm against the .cor format
            n_atoms += 1
        if options['%conquest'] is not None:
            # Geometry lives in a separate <conquest>.cor file next to the input.
            dir = os.path.dirname(os.path.abspath(file_name))
            structures = get_geometry_from_file_cor(os.path.join(dir, options['%conquest'] + '.cor'), read_multiple=True)
        else:
            # Inline geometry: copy the remainder (including the line that
            # broke the header loop) into a temp file and parse that.
            tmp = tempfile.NamedTemporaryFile(mode='w+t', dir=os.getcwd())
            tmp_lines = lines.readlines()
            try:
                # Write data to the temporary file
                tmp.write(line[0]+'\n')
                idl += 1
                for line in tmp_lines:
                    if line.strip() == '':
                        warnings.warn('Line {} is empty'.format(idl + 1), errors.EmptyLineWarning)
                    else:
                        tmp.write(line)
                    idl += 1
                tmp.seek(0)
                structures = get_geometry_from_file_cor(tmp.name, read_multiple=True)
            finally:
                tmp.close()
    return [structures, options]
| StarcoderdataPython |
141769 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import SlackAuthView, DefaultAddSuccessView, DefaultSigninSuccessView
urlpatterns = [
url('add/', SlackAuthView.as_view(auth_type="add"), name='slack_add'),
url('signin/', SlackAuthView.as_view(auth_type="signin"), name='slack_signin'),
url('add-success/', DefaultAddSuccessView.as_view(), name='slack_add_success'),
url('signin-success/', DefaultSigninSuccessView.as_view(), name='slack_signin_success')
]
| StarcoderdataPython |
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy
def run():
workspace = os.path.join("lake")
# make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
fext = "png"
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == "--pdf":
fext = "pdf"
# save the starting path
cwdpth = os.getcwd()
# change to the working directory
os.chdir(workspace)
# We are creating a square model with a specified head equal to `h1` along all boundaries.
# The head at the cell in the center in the top layer is fixed to `h2`. First, set the name
# of the model and the parameters of the model: the number of layers `Nlay`, the number of rows
# and columns `N`, lengths of the sides of the model `L`, aquifer thickness `H`, hydraulic
# conductivity `Kh`
name = "lake_example"
h1 = 100
h2 = 90
Nlay = 10
N = 101
L = 400.0
H = 50.0
Kh = 1.0
# Create a MODFLOW model and store it (in this case in the variable `ml`, but you can call it
# whatever you want). The modelname will be the name given to all MODFLOW files (input and output).
# The exe_name should be the full path to your MODFLOW executable. The version is either 'mf2k'
# for MODFLOW2000 or 'mf2005'for MODFLOW2005.
ml = flopy.modflow.Modflow(
modelname=name, exe_name="mf2005", version="mf2005"
)
# Define the discretization of the model. All layers are given equal thickness. The `bot` array
# is build from the `Hlay` values to indicate top and bottom of each layer, and `delrow` and
# `delcol` are computed from model size `L` and number of cells `N`. Once these are all computed,
# the Discretization file is built.
bot = np.linspace(-H / Nlay, -H, Nlay)
delrow = delcol = L / (N - 1)
dis = flopy.modflow.ModflowDis(
ml,
nlay=Nlay,
nrow=N,
ncol=N,
delr=delrow,
delc=delcol,
top=0.0,
botm=bot,
laycbd=0,
)
# Next we specify the boundary conditions and starting heads with the Basic package. The `ibound`
# array will be `1` in all cells in all layers, except for along the boundary and in the cell at
# the center in the top layer where it is set to `-1` to indicate fixed heads. The starting heads
# are used to define the heads in the fixed head cells (this is a steady simulation, so none of
# the other starting values matter). So we set the starting heads to `h1` everywhere, except for
# the head at the center of the model in the top layer.
Nhalf = int((N - 1) / 2)
ibound = np.ones((Nlay, N, N), dtype=int)
ibound[:, 0, :] = -1
ibound[:, -1, :] = -1
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
ibound[0, Nhalf, Nhalf] = -1
start = h1 * np.ones((N, N))
start[Nhalf, Nhalf] = h2
# create external ibound array and starting head files
files = []
hfile = f"{name}_strt.ref"
np.savetxt(hfile, start)
hfiles = []
for kdx in range(Nlay):
file = f"{name}_ib{kdx + 1:02d}.ref"
files.append(file)
hfiles.append(hfile)
np.savetxt(file, ibound[kdx, :, :], fmt="%5d")
bas = flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)
# The aquifer properties (really only the hydraulic conductivity) are defined with the
# LPF package.
lpf = flopy.modflow.ModflowLpf(ml, hk=Kh)
# Finally, we need to specify the solver we want to use (PCG with default values), and the
# output control (using the default values). Then we are ready to write all MODFLOW input
# files and run MODFLOW.
pcg = flopy.modflow.ModflowPcg(ml)
oc = flopy.modflow.ModflowOc(ml)
ml.write_input()
ml.run_model()
# change back to the starting directory
os.chdir(cwdpth)
# Once the model has terminated normally, we can read the heads file. First, a link to the heads
# file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by
# specifying, in this case, the step number and period number for which we want to retrieve data.
# A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions
# are used to make contours of the layers or a cross-section.
hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
h = hds.get_data(kstpkper=(0, 0))
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%2.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake1.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%1.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake2.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake3.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
return 0
if __name__ == "__main__":
success = run()
| StarcoderdataPython |
31159 | <filename>pythonlearn/input.py
# Write a program that asks the user what kind of rental car they
# would like. Print a message about that car, such as “Let me see if I can find you
# a Subaru.”
car = input("What type of rental rental car would you like? ")
print(f"Checking database to find a {car}")
# Write a program that asks the user how many people
# are in their dinner group. If the answer is more than eight, print a message saying
# they’ll have to wait for a table. Otherwise, report that their table is ready.
num_guests = input("Goodevening, how many in your dinner party group? ")
num_guests = int(num_guests)
if num_guests > 8:
print("I'm sorry, you will have to wait for a table")
else:
print("Right this way, we have an open table for you")
# Ask the user for a number, and then report whether the
# number is a multiple of 10 or not.
number = input("Please enter a number and I'll tell you if its a multiple of 10: ")
number = int(number)
if number % 10 == 0:
print(f"The number {number} is a multiple of 10")
else:
print(f"The number {number} is not a multiple of 10")
| StarcoderdataPython |
import os
import datetime
import requests
def fetch_remote_file(url, cache = '', expire = 0):
if cache and expire:
expire = (datetime.datetime.now() - datetime.timedelta(minutes=expire)).strftime('%s')
if not os.path.isfile(cache) or int(os.path.getmtime(cache)) < int(expire):
try:
content = requests.get(url, verify=False).text.encode('utf-8')
file_put_contents(cache, content)
except Exception, e:
print e
else:
content = file_get_contents(cache)
else:
content = requests.get(url, verify=False).text.encode('utf-8')
return content
def file_get_contents(file):
if os.path.isfile(file):
file = open(file, 'r')
content = file.read()
file.close()
return content
def file_put_contents(file, content):
file = open(file, 'w')
file.write(content)
file.close()
return content
| StarcoderdataPython |
import math
import cv2
class DistanceToCamera(object):
def __init__(self):
# camera params
self.alpha = 8.0 * math.pi / 180 # degree measured manually
self.v0 = 119.865631204 # from camera matrix
self.ay = 332.262498472 # from camera matrix
def calculate(self, v, h, x_shift, image):
# compute and return the distance from the target point to the camera
d = h / math.tan(self.alpha + math.atan((v - self.v0) / self.ay))
if d > 0:
cv2.putText(image, "%.1fcm" % d,
(image.shape[1] - x_shift, image.shape[0] - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
return d
| StarcoderdataPython |
3267522 | <reponame>CodeVsZombie/code-vs-zombie
from codeingame import Point, Line, PointId, Segment, Ash, Human, Zombie, Field
import math
import pytest
def test_calculate_distances():
a = Point(0, 0)
b = Point(1, 0)
c = Point(0, 1)
d = Point(1, 1)
assert a.distance(b) == 1
assert a.distance(c) == 1
assert a.distance(d) == 1 * math.sqrt(2)
def test_calculate_angles():
a = Point(0, 0)
b = Point(1, 0) # 0
c = Point(0, 1) # 90
d = Point(1, 1) # 45
assert a.angle(b) == 0
assert a.angle(c) == 90
assert a.angle(d) == 45
def test_calculate_trajectories():
a = Point(0, 0)
b = Point(100, 100)
e = a.angle(b)
assert e == 45
def test_invalid_line():
with pytest.raises(ValueError):
Line.from_points(Point(0, 0), Point(0, 0))
def test_calculate_not_parallel():
a = Line(1, 1)
b = Line(4, 1)
assert not a.parallel(b)
a = Line(2, 1)
b = Line(-2, 1)
assert not a.parallel(b)
def test_calculate_parallel():
a = Point(0, 0)
b = Point(0, 1)
l = Line.from_points(a, b)
c = Point(1, 0)
d = Point(1, 1)
g = Line.from_points(c, d)
assert l.parallel(g)
a = Point(0, 0)
b = Point(1, 0)
l = Line.from_points(a, b)
c = Point(0, 1)
d = Point(1, 1)
g = Line.from_points(c, d)
assert l.parallel(g)
def test_calculate_not_perpendicular():
a = Line(2, 1)
b = Line(4, 1)
assert not a.perpendicular(b)
a = Line(3, 1)
b = Line(5, 1)
assert not a.perpendicular(b)
def test_calculate_perpendicular():
a = Point(0, 0)
b = Point(0, 1)
l = Line.from_points(a, b)
c = Point(0, 1)
d = Point(1, 1)
g = Line.from_points(c, d)
assert l.perpendicular(g)
a = Point(0, 0)
b = Point(1, 0)
l = Line.from_points(a, b)
c = Point(1, 0)
d = Point(1, 1)
g = Line.from_points(c, d)
assert l.perpendicular(g)
def test_line_not_intersect_point():
a = Point(0, 0)
b = Point(5, 5)
l = Line.from_points(a, b)
c = Point(3, 1)
d = Point(2, 1)
e = Point(-2, -1)
assert not l.intersect(c)
assert not l.intersect(d)
assert not l.intersect(e)
def test_line_intersect_point():
a = Point(0, 0)
b = Point(2, 2)
l = Line.from_points(a, b)
c = Point(1, 1)
d = Point(3, 3)
e = Point(-1, -1)
assert l.intersect(c)
assert l.intersect(d)
assert l.intersect(e)
def test_segment_not_intersect_point():
a = Point(0, 0)
b = Point(5, 5)
s = Segment(a, b)
c = Point(0, 1)
d = Point(1, 0)
e = Point(-2, -2)
e = Point(7, 7)
assert not s.intersect(c)
assert not s.intersect(d)
assert not s.intersect(e)
def test_segment_intersect_point():
a = Point(0, 0)
b = Point(4, 4)
s = Segment(a, b)
c = Point(1, 1)
d = Point(2, 2)
e = Point(3, 3)
assert s.intersect(c)
assert s.intersect(d)
assert s.intersect(e)
def test_segment_intersect_point_on_y():
a = Point(0, 8999)
b = Point(0, 4500)
s = Segment(a, b)
c = Point(0, 7999)
d = Point(0, 6999)
e = Point(0, 5999)
external = Point(8250, 9999) # this is external
assert s.intersect(c)
assert s.intersect(d)
assert s.intersect(e)
assert not s.intersect(external)
def test_split_segment_equals():
s = Segment(Point(0,0), Point(10,0))
ss = s / 2
assert len(ss) == 2
assert ss[0] == Segment(Point(0, 0), Point(5, 0))
assert ss[1] == Segment(Point(5, 0), Point(10, 0))
def test_split_segment_size():
s = Segment(Point(0,0), Point(3,0))
ss = s // 1
assert len(ss) == 3
assert ss[0] == Segment(Point(0, 0), Point(1, 0))
assert ss[1] == Segment(Point(1, 0), Point(2, 0))
assert ss[2] == Segment(Point(2, 0), Point(3, 0))
s = Segment(Point(0, 0), Point(10, 0))
ss = s // 2
assert len(ss) == 5
assert ss[0] == Segment(Point(0, 0), Point(2, 0))
assert ss[1] == Segment(Point(2, 0), Point(4, 0))
assert ss[2] == Segment(Point(4, 0), Point(6, 0))
assert ss[3] == Segment(Point(6, 0), Point(8, 0))
assert ss[4] == Segment(Point(8, 0), Point(10, 0))
def test_reprs():
line = Line(3, 5)
assert line == eval(repr(line))
segment = Segment(Point(0, 0), Point(1, 1))
assert segment == eval(repr(segment))
point = Point(0, 0)
assert point == eval(repr(point))
point_id = PointId(3, 3, 3)
assert point_id == eval(repr(point_id))
ash = Ash(5, 7)
assert ash == eval(repr(ash))
human = Human(5, 7, 9)
assert human == eval(repr(human))
zombie = Zombie(7, 1, 5, 2, 6)
print("zombie", repr(zombie))
assert zombie == eval(repr(zombie))
# field = Field(ash, [human], [zombie])
# assert field == eval(repr(field))
def test_in_operator_for_line():
line = Line.from_points(Point(0, 0), Point(2, 2))
assert Point(-1, -1) in line
assert Point(3, 3) in line
assert Point(1, 1) in line
assert Point(1, 5) not in line
def test_in_operator_for_segment():
s = Segment(Point(0, 0),Point(2, 2))
assert Point(-1, -1) not in s
assert Point(3, 3) not in s
assert Point(1, 1) in s
assert Point(1, 5) not in s
def test_midpoint_segment():
a = Point(0, 0)
b = Point(2, 2)
assert Segment(a, b).midpoint() == Point(1, 1)
a = Point(0, 0)
b = Point(2, 0)
assert Segment(a, b).midpoint() == Point(1, 0)
a = Point(0, 2)
b = Point(0, 0)
assert Segment(a, b).midpoint() == Point(0, 1)
a = Point(0, 0)
b = Point(3, 3)
assert Segment(a, b).midpoint() == Point(2, 2)
def test_simulation_encoding_decoding():
a = Ash(0, 0)
h1 = Human(0, 1, 1)
h2 = Human(1, 2, 2)
h3 = Human(2, 3, 3)
z1 = Zombie(0, 3, 3, 4, 4, human_target=h1)
z2 = Zombie(0, 3, 3, 4, 4)
f = Field(a, [h1, h2, h3], [z1, z2])
def test_nearest_coordinate():
pass
"""@pytest.mark.skipif(False, reason='i want to skip')
def test_win_simulations():
from simulator import main
simulations = ['simple']
for simulation in simulations:
assert main(simulation, enable_graphics=False)
""" | StarcoderdataPython |
1708350 | <reponame>gamozolabs/flounder
import requests, json, time, sys
# This script takes in a search query and a bing subscription key and
# generates a file containing all the links from the query. This file with
# links can then be used by download.py to download the files mentioned by
# the links
market_codes = [
"es-AR",
"en-AU",
"de-AT",
"nl-BE",
"fr-BE",
"pt-BR",
"en-CA",
"fr-CA",
"es-CL",
"da-DK",
"fi-FI",
"fr-FR",
"de-DE",
"zh-HK",
"en-IN",
"en-ID",
"it-IT",
"ja-JP",
"ko-KR",
"en-MY",
"es-MX",
"nl-NL",
"en-NZ",
"no-NO",
"zh-CN",
"pl-PL",
"en-PH",
"ru-RU",
"en-ZA",
"es-ES",
"sv-SE",
"fr-CH",
"de-CH",
"zh-TW",
"tr-TR",
"en-GB",
"en-US",
"es-US",
]
if len(sys.argv) < 3:
print("""
Usage:
For non-image files: flounder.py <bing subscription key> <search query>
For image files: flounder.py <bing subscription key> <search query> --imagesearch=<image file type>
For example: flounder.py BINGKEYHERE \"filetype:rtf\"
For searching for images: flounder.py BINGKEYHERE bananas --imagesearch=png
""")
quit()
subscription_key = sys.argv[1]
image_search = None
if len(sys.argv) == 4:
assert sys.argv[3].startswith("--imagesearch=")
image_search = sys.argv[3].split("--imagesearch=", maxsplit=1)[1]
url_log = open("urllog_%s.txt" % time.time(), "wb")
for offset in range(0, 1000000, 50):
for market in market_codes:
if image_search == None:
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/search?count=50&mkt=%s&offset=%d" % (market, offset)
else:
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/images/search?count=50&mkt=%s&offset=%d" % (market, offset)
search_term = sys.argv[2] # Example: "some keywords filetype:rtf"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": search_term, "textDecorations":True, "textFormat":"HTML"}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
#print(json.dumps(search_results, indent=4, sort_keys=True))
if search_results["_type"] == "SearchResponse":
for result in search_results["webPages"]["value"]:
print(result["url"])
url_log.write(result["url"].encode() + b"738ced42e85db6ed9095b29dc94b9253")
url_log.flush()
time.sleep(0.5)
elif search_results["_type"] == "Images":
for result in search_results["value"]:
# Filter to only save images of the type requested
if "encodingFormat" in result and result["encodingFormat"] == image_search:
print(result["contentUrl"])
url_log.write(result["contentUrl"].encode() + b"738ced42e85db6ed9095b29dc94b9253")
url_log.flush()
else:
assert 1==2, "Unexpected search result type"
| StarcoderdataPython |
4813964 | __all__ = ['coffee']
# Needed for South
from .coffee import *
| StarcoderdataPython |
from entities.entity import Entity
import random
#An entity that receives ticks
class TickingTrait(Entity):
delta_time = 0.0 #Time passed per frame, secs
time = 0.0 #time passed since simulation start, secs
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.at_most_funcs = {}
self.after_funcs = {}
#Main game logic
def tick(self):
pass
def update_graphics_model(self):
pass
#Rate limits a function to at most time seconds
def at_most(self, task_name, func, limit):
if(TickingTrait.time - self.at_most_funcs.get(task_name, -limit) >= limit):
func()
self.at_most_funcs[task_name] = TickingTrait.time #We last fired this func now
def chance(self, chance, func):
if random.random() < chance:
func()
def once(self, task_name, func):
self.once_after(task_name,func,0)
#Runs a function once after a length of time has passed
def once_after(self,task_name, func, limit):
fireTime = self.after_funcs.get(task_name, None)
if fireTime is None:
self.after_funcs[task_name] = self.time + limit
return
elif self.time >= fireTime:
func() #fire it
self.after_funcs[task_name] = float('inf')
| StarcoderdataPython |
1733131 | <filename>TermGenerator.py
"""
Generates the terms to be used in the graph.
Supposed goals:
- Retrieve raw sentences and decide how to process them
- Cross-reference with entities to get all valid terms
-
"""
import logging
import os
import spacy
import time
from collections import Counter, OrderedDict
from utils import set_up_logger, check_table_existence
from MongoConnector import MongoConnector
from PostgresConnector import PostgresConnector
from spacy.lang.en.stop_words import STOP_WORDS
from nltk.corpus import stopwords
from psycopg2 import ProgrammingError, IntegrityError
from psycopg2.extras import execute_values
class TermGenerator:
def __init__(self,
num_distinct_documents=5000,
replace_entities=True,
max_term_length=127,
remove_stopwords=True,
custom_stopwords=[',', '.', '-', '\xa0', '“', '”', '"', '\n', '—', ':', '?', 'I', '(', ')'],
analyze=False,
document_tabe_name="documents",
sentence_table_name="sentences",
sentence_fields=OrderedDict({"doc_id":"document_id",
"sen_id":"sentence_id",
"content":"sentence_text"
}),
term_table_name="terms",
term_sql_format=("term_id", "term_text", "is_entity"),
term_occurrence_table_name="term_occurrence",
term_occurrence_sql_format=("document_id","sentence_id","term_id"),
entity_table_name="entities",
entity_sql_format=("entity_id", "entity_type"),
database="postgres",
user="postgres",
password="<PASSWORD>",
host="127.0.0.1",
port=5435,
log_file=os.path.join(os.path.dirname(__file__), "logs/TermGenerator.log"),
log_level=logging.INFO,
log_verbose=True):
"""
Initializes various parameters, registers logger and MongoConnector, and sets up the limit.
:param num_distinct_documents: (int) The number of distinct documents retrieved from the queries.
For performance reasons, this should be limited during debugging/development.
0 (Zero) represents no limit, in accordance with the MongoDB standard for .limit().
:param replace_entities: (boolean) Whether or not the entities in the text should be replaced/recognised.
The reason for this is that single terms might be merged together to one term, i.e. first and last name:
"Dennis" "Aumiller" would be two separate terms in the traditional splitting (replace_entities=False),
whereas - if set to true - "<NAME>" would represent only one entity.
:param max_term_length: (int) Indicator of how long the terms are supposed to be (varchar property in table).
:param remove_stopwords: (boolean) Determines whether or not stop words are removed. Currently, we are still
deciding on the final set, but likely either one (or both) of NLTK and SpaCy's stop word lists.
:param custom_stopwords: (list of strings) Additional words that will not be considered at adding-time.
:param analyze: (boolean) Whether or not to include analytically relevant metrics.
:param document_tabe_name: (str) Name of the table where the document information is stored.
:param sentence_table_name: (str) Name of the table where the sentence information will be stored.
:param sentence_fields: (OrderedDict) Structure of input to output values from MongoDB to postgres for the
sentence table and its fields.
:param term_table_name: (str) Name of the Postgres tables for the terms.
:param term_sql_format: (tuple) Since those are generated locally, only a tuple of the PostgresColumns suffices.
:param term_occurrence_table_name: (str) Name of the Postgres table for the term occurrences
:param term_occurrence_sql_format: (tuple) Same as term_sql_format, but for the term occurrences.
:param entity_table_name: (str) (Not implemented yet) Name of the table for the entity meta information.
:param entity_sql_format: (str) Same as term_sql_format, but for entities.
:param database: (str) database name.
:param user: (str) User name to get access to the Postgres database.
:param password: (<PASSWORD>) <PASSWORD>.
:param host: (IP) IP address (in string format) for the host of the postgres database.
:param port: (integer) Port at which to access the database.
"""
# set up logger
self.logger = set_up_logger(__name__, log_file, log_level, log_verbose)
self.logger.info("Successfully registered logger to TermGenerator.")
# register a MongoConnector
self.mc = MongoConnector()
self.logger.info("Successfully registered MongoConnector to TermGenerator.")
# PostgresConnector
self.pc = PostgresConnector(database, user, password, host, port)
self.logger.info("Successfully registered PostgresConnector to DocumentGenerator.")
self.num_distinct_documents = num_distinct_documents
# do this earlier since we need it already for the distinct documents.
self.document_table_name = document_tabe_name
# get the distinct IDs for the documents so we can match against them later
# since we have removed parts of the document collection, we have to make sure to get this from Postgres.
self.logger.info("Parsing relevant documents from Postgres...")
with self.pc as open_pc:
open_pc.cursor.execute("SELECT document_id FROM {}".format(self.document_table_name))
self.first_distinct_documents = list(open_pc.cursor.fetchall())
# extract from the tuple structure
self.first_distinct_documents = [el[0] for el in self.first_distinct_documents]
self.logger.info("Retrieved all relevant documents from Postgres.")
# additionally restrict if we want only a number of documents.
if self.num_distinct_documents != 0:
self.logger.info("Non-zero limit detected. Limiting to the first N entries.")
self.first_distinct_documents = self.first_distinct_documents[:self.num_distinct_documents]
self.replace_entities = replace_entities
self.analyze = analyze
self.max_term_length = max_term_length
self.nlp = spacy.load("en")
# construct dictionary with the entries per document/sentence id pair. Thus, we can later check whether
# there are any entities in the current sentence with higher efficiency.
self.occurrence_dict = {}
self.occurring_entities = []
# start building the term dictionary/set, as well as an occurence map. Since terms will be "post-processed",
# it is first created as a list and later cast to Counter and set.
self.terms = [] # cast into a set later on.
self.term_in_sentence = set()
self.term_id = {}
self.term_is_entity = {}
if self.analyze:
self.term_count = Counter()
self.entity_count = Counter()
self.entities = []
self.sentences = []
self.processed_sentences = []
# Postgres tables
if not sentence_fields:
self.logger.error("No sentence fields specified!")
self.sentence_table_name = sentence_table_name
self.sentence_fields = sentence_fields
if not term_sql_format:
self.logger.error("No term fields specified!")
self.term_table_name = term_table_name
self.term_sql_format = ", ".join(term_sql_format)
if not term_occurrence_sql_format:
self.logger.error("No term occurrence fields specified!")
self.term_occurrence_table_name = term_occurrence_table_name
self.term_occurrence_sql_format = ", ".join(term_occurrence_sql_format)
if not entity_sql_format:
self.logger.error("No entity fields specified!")
self.entity_table_name = entity_table_name
self.entity_sql_format = ", ".join(entity_sql_format)
# value retrieving parse:
self.sentence_values_to_retrieve = {key: 1 for key in self.sentence_fields.keys()}
# suppress _id if not present:
if "_id" not in self.sentence_values_to_retrieve.keys():
self.sentence_values_to_retrieve["_id"] = 0
self.sentence_sql_format = ", ".join([value for value in self.sentence_fields.values()])
# create union of stop words, and add potentially custom stop words
self.remove_stopwords = remove_stopwords
self.removed_counter = 0
self.stopwords = STOP_WORDS.union(set(stopwords.words("english")))
# add custom stopwords.
for word in custom_stopwords:
self.stopwords.add(word)
self.logger.info("Successfully initialized TermGenerator.")
def get_relevant_documents_and_entities(self):
"""
TODO!
:return:
"""
with self.mc as open_mc:
sentences = open_mc.client[open_mc.news].sentences
# distinction for (un)limited documents:
if self.first_distinct_documents:
self.sentences = list(sentences.find({"doc_id": {"$in": self.first_distinct_documents}},
self.sentence_values_to_retrieve))
else:
self.sentences = list(sentences.find({}, self.sentence_values_to_retrieve))
# get entities only if we actually want to replace them.
if self.replace_entities:
self.replace_procedure(open_mc)
def replace_procedure(self, open_mc):
"""
TODO!
:param open_mc:
:return:
"""
entities = open_mc.client[open_mc.news].entities
# potentially RAM-hazardous for larger results:
if self.first_distinct_documents:
self.occurring_entities = list(entities.find({"doc_id": {"$in": self.first_distinct_documents}}))
else:
self.occurring_entities = list(entities.find({}))
self.logger.info("Retrieved relevant entities. Found a total of {} occurrences.".
format(len(self.occurring_entities)))
# do "blind pass" through the dict to collect all possible keys. The alternative is a single pass,
# but requires a "for x in dict.keys()" check for every element, which is more costly,
# especially for large results.
for ent in self.occurring_entities:
self.occurrence_dict[(ent["doc_id"], ent["sen_id"])] = []
# now insert in the second pass
for ent in self.occurring_entities:
self.occurrence_dict[(ent["doc_id"], ent["sen_id"])].append(ent)
def process_unreplaced(self):
"""
TODO
:return:
"""
for doc in self.sentences:
parsed = self.nlp(doc["content"], disable=['parser', 'tagger', 'ner'])
for token in parsed:
self.add_token(doc["doc_id"], doc["sen_id"], token.text, False)
def process_replaced(self):
"""
TODO!
:return:
"""
for doc in self.sentences:
parsed = self.nlp(doc["content"], disable=['parser', 'tagger', 'ner'])
# check whether there are any entities in the current sentence:
try:
self.process_document(doc, parsed)
# no entities in the current sentence means we can "proceed as normal"
except KeyError:
for token in parsed:
self.add_token(doc["doc_id"], doc["sen_id"], token.text)
def process_document(self, doc, parsed):
"""
TODO!
:param doc:
:param parsed:
:return:
"""
# Get ascending order of elements
current_entities = self.occurrence_dict[(doc["doc_id"], doc["sen_id"])]
# Since they aren't quite sorted in ascending order within the document (sorting runs out of memory,
# we have to "offline-sort" with respect to the starting position key.
# This is probably also smarter, since we know that each sentence only has a very limited number of
# entities, whereas the sort on the whole document collection is way way bigger. (Plus, we already
# have some sort of sorting, and just need to have the last key.
current_entities = sorted(current_entities, key=lambda k: k['start_sen'])
current_el = current_entities.pop(0)
# character position of start and end.
current_start = current_el["start_sen"]
current_end = current_el["end_sen"]
# the last .pop() could be problematic. Avoid this with this boolean.
reached_end = False
for token in parsed:
# before element to insert
if token.idx < current_start or reached_end:
self.add_token(doc["doc_id"], doc["sen_id"], token.text)
# this means we hit the "coveredText" area.
elif current_start <= token.idx < current_end:
continue
# we have covered all the entity, and now add the current_entity_text, as well as the next
# element which was currently encountered (but to a separate entity)
else:
# also differentiate between dates and everything else.
if current_el["neClass"] == "DAT":
current_entity_text = current_el["normalized"]
else:
current_entity_text = current_el["normalized_label"]
# add both the covered text, as well as the element that was not in it anymore
self.add_token(doc["doc_id"], doc["sen_id"], current_entity_text,
True, current_el["neClass"])
self.add_token(doc["doc_id"], doc["sen_id"], token.text)
# reset entity elements. Be careful with popping, as the last element will still reach this.
if current_entities:
current_el = current_entities.pop(0)
current_start = current_el["start_sen"]
current_end = current_el["end_sen"]
else:
reached_end = True
def postprocessing(self):
"""
TODO
:return:
"""
# this allows us to later analyze the term frequency count.
if self.analyze:
self.term_count = Counter(self.terms)
self.entity_count = Counter([el for el in self.terms if self.term_is_entity[el][0]])
self.terms = set(self.terms)
self.term_id = {term: i for i, term in enumerate(self.terms)}
# get the corresponding entity information. Since term_id and entity_id have to match, we have to re-iterate
self.entities = [(self.term_id[k], v[1]) for k, v in self.term_is_entity.items() if v[0]]
# replace the words with the indexed term.
self.term_in_sentence = [(el[0], el[1], self.term_id[el[2]]) for el in self.term_in_sentence]
# "polish" the raw sentences as tuples that we can fit:
self.sentences = [list(sent.values()) for sent in self.sentences]
def parse(self):
"""
Retrieves the data from the MongoDB, and locally matches entities (if enabled). Cleans them, and puts them into
a term dictionary.
:return: (None) Internally generates a list of terms, including their sentence and document position.
"""
# open connection and retrieve sentences, as well as the corresponding occurring entities.
self.logger.info("Starting to parse results...")
start_time = time.time()
self.get_relevant_documents_and_entities()
# moved if to the outer part, since we'd otherwise do a re-check every iteration, even if it causes some
# minor code duplication.
self.logger.info("Starting to place parsed sentences in term dictionary...")
if not self.replace_entities:
self.process_unreplaced()
else:
self.process_replaced()
self.postprocessing()
self.logger.info("In total {} words were not inserted.".format(self.removed_counter))
self.logger.info("Successfully parsed all relevant sentences.")
end_time = time.time()
self.logger.info("Total time taken for parsing results: {:.4f} s".format(end_time-start_time))
def add_token(self, doc_id, sen_id, text, is_entity=False, entity_type=None):
"""
Helper function that adds the given text to the set of terms, and term_in_sentence dictionary
:param doc_id: (int) Document ID from the document containing the current sentence.
:param sen_id: (int) Sentence position of the current sentence within the article it was processed from.
:param text: (string) Text of the term to be appended.
:param is_entity: (boolean) Indicator whether or not the entry is an entity
:param entity_type: (string) If it is an entity, what entity class it belongs to.
:return: None. Only internally adds the terms.
"""
# if the word appears in the list of stopwords, don't add it.
if self.remove_stopwords and text in self.stopwords:
self.removed_counter += 1
return None
self.terms.append(text)
# fill information on entity
self.term_is_entity[text] = (is_entity, entity_type)
# somehow fails if both of that is done in a single line.
self.term_in_sentence.add((doc_id, sen_id, text))
    def push_sentences(self):
        """
        Puts the sentences in a Postgres table. Specifically in a separate function as this requires potentially less
        updates than the parsed terms.
        :return: (None) Internally puts up the documents in the Postgres table. Returns 0 on any failure path.
        """
        self.logger.info("Starting to push sentences in Postgres...")
        # Guard: .parse() must have populated self.sentences first.
        if not self.sentences:
            self.logger.error("No data found to be pushed! Please call .parse() first!")
            return 0
        # NOTE(review): self.pc presumably wraps a psycopg2 connection/cursor -- confirm.
        with self.pc as open_pc:
            # TODO: Maybe check whether number of insertions matches feed.
            if not check_table_existence(self.logger, open_pc, self.sentence_table_name):
                return 0
            self.logger.info("Found sentence table.")
            self.logger.info("Inserting values.")
            # build query
            start_time = time.time()
            try:
                # Batched multi-row INSERT; column list comes from self.sentence_sql_format.
                execute_values(open_pc.cursor,
                               "INSERT INTO {} ({}) VALUES %s".format(self.sentence_table_name, self.sentence_sql_format),
                               self.sentences)
                end_time = time.time()
                self.logger.info("Successfully inserted values in {:.4f} s".format(end_time - start_time))
            except IntegrityError as err:
                # Duplicate primary keys abort the whole batch; nothing is retried.
                self.logger.error("Values with previously inserted primary key detected!\n {}".format(err))
                return 0
def push_terms(self):
"""
Puts the terms into a Postgres table.
:return: (None) Internally pushes to Postgres.
"""
self.logger.info("Starting to push terms into Postgres...")
if not self.term_id:
self.logger.error("No terms found to be pushed! Please call .parse() first!")
return 0
# prepare values for insertion. Also force length for test run.
push_terms = [(val, key[:self.max_term_length], self.term_is_entity[key][0]) for key, val in self.term_id.items()]
with self.pc as open_pc:
# TODO: Maybe check whether number of insertions matches feed.
if not check_table_existence(self.logger, open_pc, self.term_table_name):
return 0
self.logger.info("Found term table.")
self.logger.info("Inserting values.")
# build query
start_time = time.time()
try:
execute_values(open_pc.cursor,
"INSERT INTO {} ({}) VALUES %s".format(self.term_table_name, self.term_sql_format),
push_terms)
end_time = time.time()
self.logger.info("Successfully inserted values in {:.4f} s".format(end_time - start_time))
except IntegrityError as err:
self.logger.error("Values with previously inserted primary key detected!\n {}".format(err))
return 0
def push_term_occurrences(self):
"""
Puts the term occurrences into a Postgres table.
:return: (None) Internally pushes to Postgres.
"""
self.logger.info("Starting to push term occurrences into Postgres...")
if not self.term_in_sentence:
self.logger.error("No term occurrences found to be pushed! Please call .parse() first!")
return 0
with self.pc as open_pc:
# TODO: Maybe check whether number of insertions matches feed.
if not check_table_existence(self.logger, open_pc, self.term_occurrence_table_name):
return 0
self.logger.info("Found term table.")
self.logger.info("Inserting values.")
# build query
start_time = time.time()
try:
execute_values(open_pc.cursor,
"INSERT INTO {} ({}) VALUES %s".format(self.term_occurrence_table_name,
self.term_occurrence_sql_format),
self.term_in_sentence)
end_time = time.time()
self.logger.info("Successfully inserted values in {:.4f} s".format(end_time - start_time))
except IntegrityError as err:
self.logger.error("Values with previously inserted primary key detected!\n {}".format(err))
return 0
def push_entities(self):
"""
Puts the entities into a Postgres table.
:return: (None) Internally pushes to Postgres.
"""
self.logger.info("Starting to push entities into Postgres...")
if not self.entities:
self.logger.error("No entities found to be pushed! Please call .parse() first!")
return 0
# prepare values for insertion. Also force length for test run.
with self.pc as open_pc:
# TODO: Maybe check whether number of insertions matches feed.
if not check_table_existence(self.logger, open_pc, self.entity_table_name):
return 0
self.logger.info("Found entity table.")
self.logger.info("Inserting values.")
# build query
start_time = time.time()
try:
execute_values(open_pc.cursor,
"INSERT INTO {} ({}) VALUES %s".format(self.entity_table_name, self.entity_sql_format),
self.entities)
end_time = time.time()
self.logger.info("Successfully inserted values in {:.4f} s".format(end_time - start_time))
except IntegrityError as err:
self.logger.error("Values with previously inserted primary key detected!\n {}".format(err))
return 0
def clear_table(self, table_name):
"""
Deletes previously inserted values from the specified table.
:param table_name: (str) Self-explanatory; Name of the table that should be cleared.
:return: (None). Calls Postgres table with prepared DELETE-statement.
"""
with self.pc as open_pc:
if not check_table_existence(self.logger, open_pc, table_name):
return 0
self.logger.info("Found {} table.".format(table_name))
self.logger.info("Deleting all previously inserted {}...".format(table_name))
# Careful! This will remove ALL DATA!
open_pc.cursor.execute("DELETE FROM {}".format(table_name))
# TODO: Check whether document count is actually 0!
self.logger.info("Successfully deleted all previously inserted {}.".format(table_name))
if __name__ == "__main__":
    # Ad-hoc driver: parse everything, wipe the target tables, then re-push.
    tg = TermGenerator(num_distinct_documents=0)
    tg.parse()
    # Debug output: terms longer than 127 characters (presumably the term
    # column width -- TODO confirm against the table schema).
    print([el for el in tg.terms if len(el) > 127])
    # print(tg.terms)
    # print("\n---------------------")
    # print(tg.sentences[0])
    # print("---------------------\n")
    # print(tg.term_in_sentence)
    # print(tg.term_id)
    # print(tg.term_count)
    # Clear referencing tables (occurrences, entities) before the tables they
    # point at, then push in the reverse order.
    tg.clear_table(tg.term_occurrence_table_name)
    tg.clear_table(tg.entity_table_name)
    tg.clear_table(tg.term_table_name)
    tg.clear_table(tg.sentence_table_name)
    tg.push_sentences()
    tg.push_terms()
    tg.push_entities()
    tg.push_term_occurrences()
| StarcoderdataPython |
144945 | <filename>crystalgodgenerator.py
#!/usr/bin/env python
""" Generate The Corpus Cloud with Page Elements, to be Styled """
import jinja2
import arrow
# Mapping of tag-cloud terms to the URLs they link to in the generated page.
corpus = {
    "5Cars": "http://5cars.world",
    "Astral Seed": "http://trinitysoulstars.com",
    "Ascension Symptoms": "http://ascension.fyi",
    "Amethyst Grills": "http://amethystgrills.com/",
    "Bubblin": "http://bubblin.life",
    "Clouds": "http://clouds.zone",
    "decause": "http://twitter.com/remy_d",
    "Five Cars": "http://5cars.world",
    "Guarav": "http://trinitysoulstars.com",
    "Higher Self": "http://highself.solutions",
    "Juice Brew": "http://juicebrew.life",
    "LightBody": "http://lightbodytherapy.life",
    "Manifest": "http://trinitysoulstars.com",
    "Mt Meru": "http://mtmeru.life",
    "Nino": "http://nino.movie",
    "Realms": "http://trinitysoulstars.com",
    "Starseed": "http://trinitysoulstars.com",
    "Soulstar": "http://trinitysoulstars.com",
    "Theosyn": "http://trinitysoulstars.com",
    "TRS": "http://truthreignsupreme.club",
    "Source": "http://github.com/trinitysoulstars",
}
# Page metadata fed into the template below.
terms = []
titles = ["Welcome to the Trinity Node - the most lit sector in the multiverse"]
metadesc = ["Welcome to the Trinity Node - the most lit sector in the multiverse"]
authors = ["<NAME> - https://github.com/trinitysoulstars"]
videos = ['<iframe width="560" height="315" src="https://www.youtube.com/embed/3V8mfIDWy1M" frameborder="0" allowfullscreen></iframe>']
# Terms rendered with emphasis in the cloud.
boldwords = {
    "Crystal God": "http://thecrystalgod.com/",
}
# analytics = ['''
# ''']
# Flatten the corpus keys into the ``terms`` list (also echoed for debugging).
# NOTE(review): Python 2 syntax (print statement, dict.iteritems).
for term, link in corpus.iteritems():
    print term, link
    terms.append(term)
print "terms = %s " % terms
print "titles = %s " % titles
print "metadesc = %s " % metadesc
print "authors = %s " % authors
print "videos = %s " % videos
#print "analytics = %s " % analytics
for term, link in boldwords.iteritems():
    print term, link
# Jinja2 page template: single-page tag cloud with navbar, social links and
# Piwik analytics. The HTML is a runtime string literal -- edit with care.
template = jinja2.Template("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<title>
{%- for title in titles: -%}
{{title}}
{%- endfor -%}
</title>
<meta name="description" content="
{%- for desc in metadesc: -%}
{{desc}}
{%- endfor -%}"/>
<meta name="keywords" content="
{%- for term in terms: -%}
{{term}},
{%- endfor %}"/>
<meta name="author" content="
{%- for author in authors: -%}
{{author}}
{%- endfor -%}"/>
<link rel="stylesheet" type="text/css" href="style.css" media="screen"/>
<!-- Bootstrap Core CSS -->
<link href="vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Lora:400,700,400italic,700italic" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Montserrat:400,700" rel="stylesheet" type="text/css">
<!-- Theme CSS -->
<link href="css/grayscale.min.css" rel="stylesheet">
<!-- Font for Stars Background -->
<link href='http://fonts.googleapis.com/css?family=Lato:300,400,700' rel='stylesheet' type='text/css'>
<!-- Custom CSS -->
<link href="css/custom.css" rel="stylesheet" type="text/css">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
<!-- Piwik -->
<script type="text/javascript">
var _paq = _paq || [];
_paq.push(["setDomains", ["*.thecrystalgod.com","*.trinitysoulstars.github.io/thecrystalgod"]]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="//piwik-decause.rhcloud.com/";
_paq.push(['setTrackerUrl', u+'piwik.php']);
_paq.push(['setSiteId', '7']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<noscript><p><img src="//piwik-decause.rhcloud.com/piwik.php?idsite=7" style="border:0;" alt="" /></p></noscript>
<!-- End Piwik Code -->
</head>
<div id='stars'></div>
<div id='stars2'></div>
<div id='stars3'></div>
<body id="page-top" data-spy="scroll" data-target=".navbar-fixed-top">
<!-- Navigation -->
<nav class="navbar navbar-custom navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-main-collapse">
Menu <i class="fa fa-bars"></i>
</button>
<a class="navbar-brand page-scroll" href="#page-top">
<i class="fa fa-codepen"></i> <span class="light">Trinity</span> NODE
</a>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse navbar-right navbar-main-collapse">
<ul class="nav navbar-nav">
<!-- Hidden li included to remove active class from about link when scrolled up past about section -->
<li class="hidden">
<a href="#page-top"></a>
</li>
<li>
<a target="_blank" class="page-scroll" href="https://soundcloud.com/trinitysoulstars"><i style="margin-right: 3px;" class="fa fa-soundcloud"></i> <span style="font-size:13px;">SoundCloud</span></a>
</li>
</ul>
</div>
<!-- /.navbar-collapse -->
</div>
<!-- /.container -->
</nav>
<!-- Body -->
<header class="intro" style="margin-top: 5%;">
<div class="intro-body">
<div class="container">
<div class="row">
<div class="col-md-8 col-md-offset-2">
<a class="logo" href ="http://trinitysoulstars.com/" target="_blank"><img style="width: 230px;" src="img/logo.png"/></a>
{% for video in videos: %}
{{video}},
{% endfor %}
<hr style="margin-top: 8px;margin-bottom: 13px;border: 0;border-top: 1px solid #eee;width: 500px;"/>
<p style="margin: 30px 0 40px;"><a style="margin-right:8px:" href="https://www.facebook.com/trinitysoulstars" class="btn btn-circle page-scroll" target="_blank">
<i class="fa fa-facebook"></i>
</a>
<a style="margin-left:4px;margin-right:4px;" href="https://twitter.com/trinitysoulstar" class="btn btn-circle page-scroll" target="_blank">
<i class="fa fa-twitter"></i>
</a>
<a href="https://www.instagram.com/trinitysoulstars/" class="btn btn-circle page-scroll" target="_blank">
<i class="fa fa-instagram"></i>
</a>
</p>
<!-- Tag Cloud -->
<p class='pcloud'>
{% for term, link in boldwords.iteritems(): -%}
<a class='boldwords btn-lg' target="_blank" href="{{link}}">{{term}}</a>
{% endfor -%}
{% for term, link in corpus.iteritems(): -%}
<a target="_blank" class="btn-lg" href="{{link}}">{{term}}</a>
{% endfor %}
</p>
</div>
</div>
</div>
</div>
</header>
<!-- jQuery -->
<script src="vendor/jquery/jquery.js"></script>
<!-- Bootstrap Core JavaScript -->
<script src="vendor/bootstrap/js/bootstrap.min.js"></script>
<!-- Plugin JavaScript -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-easing/1.3/jquery.easing.min.js"></script>
<!-- Google Maps API Key - Use your own API key to enable the map feature. More information on the Google Maps API can be found at https://developers.google.com/maps/ -->
<script type="text/javascript" src="https://maps.googleapis.com/maps/api/js?key=AIzaSyCRngKslUGJTlibkQ3FkfTxj3Xss1UlZDA&sensor=false"></script>
<!-- Theme JavaScript -->
<script src="js/grayscale.min.js"></script>
</body>
</html>
""")
# When you add new elements to the template, define them outside the template
# first, and then pass them into render() below.
rendered_page = template.render(
    corpus=corpus,
    terms=terms,
    titles=titles,
    metadesc=metadesc,
    authors=authors,
    videos=videos,
    boldwords=boldwords,
)
# Output file is named after today's date (first 10 chars of the ISO
# timestamp), e.g. "2017-01-31.html".
date_stamp = arrow.now().format()[0:10]
with open('{}.html'.format(date_stamp), "wb") as f:
    f.write(rendered_page)
1777643 | # For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [x86_const.py]
# X86 registers
X86_REG_INVALID = 0
X86_REG_AH = 1
X86_REG_AL = 2
X86_REG_AX = 3
X86_REG_BH = 4
X86_REG_BL = 5
X86_REG_BP = 6
X86_REG_BPL = 7
X86_REG_BX = 8
X86_REG_CH = 9
X86_REG_CL = 10
X86_REG_CS = 11
X86_REG_CX = 12
X86_REG_DH = 13
X86_REG_DI = 14
X86_REG_DIL = 15
X86_REG_DL = 16
X86_REG_DS = 17
X86_REG_DX = 18
X86_REG_EAX = 19
X86_REG_EBP = 20
X86_REG_EBX = 21
X86_REG_ECX = 22
X86_REG_EDI = 23
X86_REG_EDX = 24
X86_REG_EFLAGS = 25
X86_REG_EIP = 26
X86_REG_EIZ = 27
X86_REG_ES = 28
X86_REG_ESI = 29
X86_REG_ESP = 30
X86_REG_FPSW = 31
X86_REG_FS = 32
X86_REG_GS = 33
X86_REG_IP = 34
X86_REG_RAX = 35
X86_REG_RBP = 36
X86_REG_RBX = 37
X86_REG_RCX = 38
X86_REG_RDI = 39
X86_REG_RDX = 40
X86_REG_RIP = 41
X86_REG_RIZ = 42
X86_REG_RSI = 43
X86_REG_RSP = 44
X86_REG_SI = 45
X86_REG_SIL = 46
X86_REG_SP = 47
X86_REG_SPL = 48
X86_REG_SS = 49
X86_REG_CR0 = 50
X86_REG_CR1 = 51
X86_REG_CR2 = 52
X86_REG_CR3 = 53
X86_REG_CR4 = 54
X86_REG_CR5 = 55
X86_REG_CR6 = 56
X86_REG_CR7 = 57
X86_REG_CR8 = 58
X86_REG_CR9 = 59
X86_REG_CR10 = 60
X86_REG_CR11 = 61
X86_REG_CR12 = 62
X86_REG_CR13 = 63
X86_REG_CR14 = 64
X86_REG_CR15 = 65
X86_REG_DR0 = 66
X86_REG_DR1 = 67
X86_REG_DR2 = 68
X86_REG_DR3 = 69
X86_REG_DR4 = 70
X86_REG_DR5 = 71
X86_REG_DR6 = 72
X86_REG_DR7 = 73
X86_REG_DR8 = 74
X86_REG_DR9 = 75
X86_REG_DR10 = 76
X86_REG_DR11 = 77
X86_REG_DR12 = 78
X86_REG_DR13 = 79
X86_REG_DR14 = 80
X86_REG_DR15 = 81
X86_REG_FP0 = 82
X86_REG_FP1 = 83
X86_REG_FP2 = 84
X86_REG_FP3 = 85
X86_REG_FP4 = 86
X86_REG_FP5 = 87
X86_REG_FP6 = 88
X86_REG_FP7 = 89
X86_REG_K0 = 90
X86_REG_K1 = 91
X86_REG_K2 = 92
X86_REG_K3 = 93
X86_REG_K4 = 94
X86_REG_K5 = 95
X86_REG_K6 = 96
X86_REG_K7 = 97
X86_REG_MM0 = 98
X86_REG_MM1 = 99
X86_REG_MM2 = 100
X86_REG_MM3 = 101
X86_REG_MM4 = 102
X86_REG_MM5 = 103
X86_REG_MM6 = 104
X86_REG_MM7 = 105
X86_REG_R8 = 106
X86_REG_R9 = 107
X86_REG_R10 = 108
X86_REG_R11 = 109
X86_REG_R12 = 110
X86_REG_R13 = 111
X86_REG_R14 = 112
X86_REG_R15 = 113
X86_REG_ST0 = 114
X86_REG_ST1 = 115
X86_REG_ST2 = 116
X86_REG_ST3 = 117
X86_REG_ST4 = 118
X86_REG_ST5 = 119
X86_REG_ST6 = 120
X86_REG_ST7 = 121
X86_REG_XMM0 = 122
X86_REG_XMM1 = 123
X86_REG_XMM2 = 124
X86_REG_XMM3 = 125
X86_REG_XMM4 = 126
X86_REG_XMM5 = 127
X86_REG_XMM6 = 128
X86_REG_XMM7 = 129
X86_REG_XMM8 = 130
X86_REG_XMM9 = 131
X86_REG_XMM10 = 132
X86_REG_XMM11 = 133
X86_REG_XMM12 = 134
X86_REG_XMM13 = 135
X86_REG_XMM14 = 136
X86_REG_XMM15 = 137
X86_REG_XMM16 = 138
X86_REG_XMM17 = 139
X86_REG_XMM18 = 140
X86_REG_XMM19 = 141
X86_REG_XMM20 = 142
X86_REG_XMM21 = 143
X86_REG_XMM22 = 144
X86_REG_XMM23 = 145
X86_REG_XMM24 = 146
X86_REG_XMM25 = 147
X86_REG_XMM26 = 148
X86_REG_XMM27 = 149
X86_REG_XMM28 = 150
X86_REG_XMM29 = 151
X86_REG_XMM30 = 152
X86_REG_XMM31 = 153
X86_REG_YMM0 = 154
X86_REG_YMM1 = 155
X86_REG_YMM2 = 156
X86_REG_YMM3 = 157
X86_REG_YMM4 = 158
X86_REG_YMM5 = 159
X86_REG_YMM6 = 160
X86_REG_YMM7 = 161
X86_REG_YMM8 = 162
X86_REG_YMM9 = 163
X86_REG_YMM10 = 164
X86_REG_YMM11 = 165
X86_REG_YMM12 = 166
X86_REG_YMM13 = 167
X86_REG_YMM14 = 168
X86_REG_YMM15 = 169
X86_REG_YMM16 = 170
X86_REG_YMM17 = 171
X86_REG_YMM18 = 172
X86_REG_YMM19 = 173
X86_REG_YMM20 = 174
X86_REG_YMM21 = 175
X86_REG_YMM22 = 176
X86_REG_YMM23 = 177
X86_REG_YMM24 = 178
X86_REG_YMM25 = 179
X86_REG_YMM26 = 180
X86_REG_YMM27 = 181
X86_REG_YMM28 = 182
X86_REG_YMM29 = 183
X86_REG_YMM30 = 184
X86_REG_YMM31 = 185
X86_REG_ZMM0 = 186
X86_REG_ZMM1 = 187
X86_REG_ZMM2 = 188
X86_REG_ZMM3 = 189
X86_REG_ZMM4 = 190
X86_REG_ZMM5 = 191
X86_REG_ZMM6 = 192
X86_REG_ZMM7 = 193
X86_REG_ZMM8 = 194
X86_REG_ZMM9 = 195
X86_REG_ZMM10 = 196
X86_REG_ZMM11 = 197
X86_REG_ZMM12 = 198
X86_REG_ZMM13 = 199
X86_REG_ZMM14 = 200
X86_REG_ZMM15 = 201
X86_REG_ZMM16 = 202
X86_REG_ZMM17 = 203
X86_REG_ZMM18 = 204
X86_REG_ZMM19 = 205
X86_REG_ZMM20 = 206
X86_REG_ZMM21 = 207
X86_REG_ZMM22 = 208
X86_REG_ZMM23 = 209
X86_REG_ZMM24 = 210
X86_REG_ZMM25 = 211
X86_REG_ZMM26 = 212
X86_REG_ZMM27 = 213
X86_REG_ZMM28 = 214
X86_REG_ZMM29 = 215
X86_REG_ZMM30 = 216
X86_REG_ZMM31 = 217
X86_REG_R8B = 218
X86_REG_R9B = 219
X86_REG_R10B = 220
X86_REG_R11B = 221
X86_REG_R12B = 222
X86_REG_R13B = 223
X86_REG_R14B = 224
X86_REG_R15B = 225
X86_REG_R8D = 226
X86_REG_R9D = 227
X86_REG_R10D = 228
X86_REG_R11D = 229
X86_REG_R12D = 230
X86_REG_R13D = 231
X86_REG_R14D = 232
X86_REG_R15D = 233
X86_REG_R8W = 234
X86_REG_R9W = 235
X86_REG_R10W = 236
X86_REG_R11W = 237
X86_REG_R12W = 238
X86_REG_R13W = 239
X86_REG_R14W = 240
X86_REG_R15W = 241
X86_REG_ENDING = 242
# Sub-flags of EFLAGS
X86_EFLAGS_MODIFY_AF = 1<<0
X86_EFLAGS_MODIFY_CF = 1<<1
X86_EFLAGS_MODIFY_SF = 1<<2
X86_EFLAGS_MODIFY_ZF = 1<<3
X86_EFLAGS_MODIFY_PF = 1<<4
X86_EFLAGS_MODIFY_OF = 1<<5
X86_EFLAGS_MODIFY_TF = 1<<6
X86_EFLAGS_MODIFY_IF = 1<<7
X86_EFLAGS_MODIFY_DF = 1<<8
X86_EFLAGS_MODIFY_NT = 1<<9
X86_EFLAGS_MODIFY_RF = 1<<10
X86_EFLAGS_PRIOR_OF = 1<<11
X86_EFLAGS_PRIOR_SF = 1<<12
X86_EFLAGS_PRIOR_ZF = 1<<13
X86_EFLAGS_PRIOR_AF = 1<<14
X86_EFLAGS_PRIOR_PF = 1<<15
X86_EFLAGS_PRIOR_CF = 1<<16
X86_EFLAGS_PRIOR_TF = 1<<17
X86_EFLAGS_PRIOR_IF = 1<<18
X86_EFLAGS_PRIOR_DF = 1<<19
X86_EFLAGS_PRIOR_NT = 1<<20
X86_EFLAGS_RESET_OF = 1<<21
X86_EFLAGS_RESET_CF = 1<<22
X86_EFLAGS_RESET_DF = 1<<23
X86_EFLAGS_RESET_IF = 1<<24
X86_EFLAGS_RESET_SF = 1<<25
X86_EFLAGS_RESET_AF = 1<<26
X86_EFLAGS_RESET_TF = 1<<27
X86_EFLAGS_RESET_NT = 1<<28
X86_EFLAGS_RESET_PF = 1<<29
X86_EFLAGS_SET_CF = 1<<30
X86_EFLAGS_SET_DF = 1<<31
X86_EFLAGS_SET_IF = 1<<32
X86_EFLAGS_TEST_OF = 1<<33
X86_EFLAGS_TEST_SF = 1<<34
X86_EFLAGS_TEST_ZF = 1<<35
X86_EFLAGS_TEST_PF = 1<<36
X86_EFLAGS_TEST_CF = 1<<37
X86_EFLAGS_TEST_NT = 1<<38
X86_EFLAGS_TEST_DF = 1<<39
X86_EFLAGS_UNDEFINED_OF = 1<<40
X86_EFLAGS_UNDEFINED_SF = 1<<41
X86_EFLAGS_UNDEFINED_ZF = 1<<42
X86_EFLAGS_UNDEFINED_PF = 1<<43
X86_EFLAGS_UNDEFINED_AF = 1<<44
X86_EFLAGS_UNDEFINED_CF = 1<<45
X86_EFLAGS_RESET_RF = 1<<46
X86_EFLAGS_TEST_RF = 1<<47
X86_EFLAGS_TEST_IF = 1<<48
X86_EFLAGS_TEST_TF = 1<<49
X86_EFLAGS_TEST_AF = 1<<50
X86_EFLAGS_RESET_ZF = 1<<51
X86_EFLAGS_SET_OF = 1<<52
X86_EFLAGS_SET_SF = 1<<53
X86_EFLAGS_SET_ZF = 1<<54
X86_EFLAGS_SET_AF = 1<<55
X86_EFLAGS_SET_PF = 1<<56
X86_EFLAGS_RESET_0F = 1<<57
X86_EFLAGS_RESET_AC = 1<<58
X86_FPU_FLAGS_MODIFY_C0 = 1<<0
X86_FPU_FLAGS_MODIFY_C1 = 1<<1
X86_FPU_FLAGS_MODIFY_C2 = 1<<2
X86_FPU_FLAGS_MODIFY_C3 = 1<<3
X86_FPU_FLAGS_RESET_C0 = 1<<4
X86_FPU_FLAGS_RESET_C1 = 1<<5
X86_FPU_FLAGS_RESET_C2 = 1<<6
X86_FPU_FLAGS_RESET_C3 = 1<<7
X86_FPU_FLAGS_SET_C0 = 1<<8
X86_FPU_FLAGS_SET_C1 = 1<<9
X86_FPU_FLAGS_SET_C2 = 1<<10
X86_FPU_FLAGS_SET_C3 = 1<<11
X86_FPU_FLAGS_UNDEFINED_C0 = 1<<12
X86_FPU_FLAGS_UNDEFINED_C1 = 1<<13
X86_FPU_FLAGS_UNDEFINED_C2 = 1<<14
X86_FPU_FLAGS_UNDEFINED_C3 = 1<<15
X86_FPU_FLAGS_TEST_C0 = 1<<16
X86_FPU_FLAGS_TEST_C1 = 1<<17
X86_FPU_FLAGS_TEST_C2 = 1<<18
X86_FPU_FLAGS_TEST_C3 = 1<<19
# Operand type for instruction's operands
X86_OP_INVALID = 0
X86_OP_REG = 1
X86_OP_IMM = 2
X86_OP_MEM = 3
# XOP Code Condition type
X86_XOP_CC_INVALID = 0
X86_XOP_CC_LT = 1
X86_XOP_CC_LE = 2
X86_XOP_CC_GT = 3
X86_XOP_CC_GE = 4
X86_XOP_CC_EQ = 5
X86_XOP_CC_NEQ = 6
X86_XOP_CC_FALSE = 7
X86_XOP_CC_TRUE = 8
# AVX broadcast type
X86_AVX_BCAST_INVALID = 0
X86_AVX_BCAST_2 = 1
X86_AVX_BCAST_4 = 2
X86_AVX_BCAST_8 = 3
X86_AVX_BCAST_16 = 4
# SSE Code Condition type
X86_SSE_CC_INVALID = 0
X86_SSE_CC_EQ = 1
X86_SSE_CC_LT = 2
X86_SSE_CC_LE = 3
X86_SSE_CC_UNORD = 4
X86_SSE_CC_NEQ = 5
X86_SSE_CC_NLT = 6
X86_SSE_CC_NLE = 7
X86_SSE_CC_ORD = 8
# AVX Code Condition type
X86_AVX_CC_INVALID = 0
X86_AVX_CC_EQ = 1
X86_AVX_CC_LT = 2
X86_AVX_CC_LE = 3
X86_AVX_CC_UNORD = 4
X86_AVX_CC_NEQ = 5
X86_AVX_CC_NLT = 6
X86_AVX_CC_NLE = 7
X86_AVX_CC_ORD = 8
X86_AVX_CC_EQ_UQ = 9
X86_AVX_CC_NGE = 10
X86_AVX_CC_NGT = 11
X86_AVX_CC_FALSE = 12
X86_AVX_CC_NEQ_OQ = 13
X86_AVX_CC_GE = 14
X86_AVX_CC_GT = 15
X86_AVX_CC_TRUE = 16
X86_AVX_CC_EQ_OS = 17
X86_AVX_CC_LT_OQ = 18
X86_AVX_CC_LE_OQ = 19
X86_AVX_CC_UNORD_S = 20
X86_AVX_CC_NEQ_US = 21
X86_AVX_CC_NLT_UQ = 22
X86_AVX_CC_NLE_UQ = 23
X86_AVX_CC_ORD_S = 24
X86_AVX_CC_EQ_US = 25
X86_AVX_CC_NGE_UQ = 26
X86_AVX_CC_NGT_UQ = 27
X86_AVX_CC_FALSE_OS = 28
X86_AVX_CC_NEQ_OS = 29
X86_AVX_CC_GE_OQ = 30
X86_AVX_CC_GT_OQ = 31
X86_AVX_CC_TRUE_US = 32
# AVX static rounding mode type
X86_AVX_RM_INVALID = 0
X86_AVX_RM_RN = 1
X86_AVX_RM_RD = 2
X86_AVX_RM_RU = 3
X86_AVX_RM_RZ = 4
# Instruction prefixes - to be used in cs_x86.prefix[]
X86_PREFIX_LOCK = 0xf0
X86_PREFIX_REP = 0xf3
X86_PREFIX_REPE = 0xf3
X86_PREFIX_REPNE = 0xf2
X86_PREFIX_CS = 0x2e
X86_PREFIX_SS = 0x36
X86_PREFIX_DS = 0x3e
X86_PREFIX_ES = 0x26
X86_PREFIX_FS = 0x64
X86_PREFIX_GS = 0x65
X86_PREFIX_OPSIZE = 0x66
X86_PREFIX_ADDRSIZE = 0x67
# X86 instructions
X86_INS_INVALID = 0
X86_INS_AAA = 1
X86_INS_AAD = 2
X86_INS_AAM = 3
X86_INS_AAS = 4
X86_INS_FABS = 5
X86_INS_ADC = 6
X86_INS_ADCX = 7
X86_INS_ADD = 8
X86_INS_ADDPD = 9
X86_INS_ADDPS = 10
X86_INS_ADDSD = 11
X86_INS_ADDSS = 12
X86_INS_ADDSUBPD = 13
X86_INS_ADDSUBPS = 14
X86_INS_FADD = 15
X86_INS_FIADD = 16
X86_INS_FADDP = 17
X86_INS_ADOX = 18
X86_INS_AESDECLAST = 19
X86_INS_AESDEC = 20
X86_INS_AESENCLAST = 21
X86_INS_AESENC = 22
X86_INS_AESIMC = 23
X86_INS_AESKEYGENASSIST = 24
X86_INS_AND = 25
X86_INS_ANDN = 26
X86_INS_ANDNPD = 27
X86_INS_ANDNPS = 28
X86_INS_ANDPD = 29
X86_INS_ANDPS = 30
X86_INS_ARPL = 31
X86_INS_BEXTR = 32
X86_INS_BLCFILL = 33
X86_INS_BLCI = 34
X86_INS_BLCIC = 35
X86_INS_BLCMSK = 36
X86_INS_BLCS = 37
X86_INS_BLENDPD = 38
X86_INS_BLENDPS = 39
X86_INS_BLENDVPD = 40
X86_INS_BLENDVPS = 41
X86_INS_BLSFILL = 42
X86_INS_BLSI = 43
X86_INS_BLSIC = 44
X86_INS_BLSMSK = 45
X86_INS_BLSR = 46
X86_INS_BOUND = 47
X86_INS_BSF = 48
X86_INS_BSR = 49
X86_INS_BSWAP = 50
X86_INS_BT = 51
X86_INS_BTC = 52
X86_INS_BTR = 53
X86_INS_BTS = 54
X86_INS_BZHI = 55
X86_INS_CALL = 56
X86_INS_CBW = 57
X86_INS_CDQ = 58
X86_INS_CDQE = 59
X86_INS_FCHS = 60
X86_INS_CLAC = 61
X86_INS_CLC = 62
X86_INS_CLD = 63
X86_INS_CLFLUSH = 64
X86_INS_CLFLUSHOPT = 65
X86_INS_CLGI = 66
X86_INS_CLI = 67
X86_INS_CLTS = 68
X86_INS_CLWB = 69
X86_INS_CMC = 70
X86_INS_CMOVA = 71
X86_INS_CMOVAE = 72
X86_INS_CMOVB = 73
X86_INS_CMOVBE = 74
X86_INS_FCMOVBE = 75
X86_INS_FCMOVB = 76
X86_INS_CMOVE = 77
X86_INS_FCMOVE = 78
X86_INS_CMOVG = 79
X86_INS_CMOVGE = 80
X86_INS_CMOVL = 81
X86_INS_CMOVLE = 82
X86_INS_FCMOVNBE = 83
X86_INS_FCMOVNB = 84
X86_INS_CMOVNE = 85
X86_INS_FCMOVNE = 86
X86_INS_CMOVNO = 87
X86_INS_CMOVNP = 88
X86_INS_FCMOVNU = 89
X86_INS_CMOVNS = 90
X86_INS_CMOVO = 91
X86_INS_CMOVP = 92
X86_INS_FCMOVU = 93
X86_INS_CMOVS = 94
X86_INS_CMP = 95
X86_INS_CMPSB = 96
X86_INS_CMPSQ = 97
X86_INS_CMPSW = 98
X86_INS_CMPXCHG16B = 99
X86_INS_CMPXCHG = 100
X86_INS_CMPXCHG8B = 101
X86_INS_COMISD = 102
X86_INS_COMISS = 103
X86_INS_FCOMP = 104
X86_INS_FCOMIP = 105
X86_INS_FCOMI = 106
X86_INS_FCOM = 107
X86_INS_FCOS = 108
X86_INS_CPUID = 109
X86_INS_CQO = 110
X86_INS_CRC32 = 111
X86_INS_CVTDQ2PD = 112
X86_INS_CVTDQ2PS = 113
X86_INS_CVTPD2DQ = 114
X86_INS_CVTPD2PS = 115
X86_INS_CVTPS2DQ = 116
X86_INS_CVTPS2PD = 117
X86_INS_CVTSD2SI = 118
X86_INS_CVTSD2SS = 119
X86_INS_CVTSI2SD = 120
X86_INS_CVTSI2SS = 121
X86_INS_CVTSS2SD = 122
X86_INS_CVTSS2SI = 123
X86_INS_CVTTPD2DQ = 124
X86_INS_CVTTPS2DQ = 125
X86_INS_CVTTSD2SI = 126
X86_INS_CVTTSS2SI = 127
X86_INS_CWD = 128
X86_INS_CWDE = 129
X86_INS_DAA = 130
X86_INS_DAS = 131
X86_INS_DATA16 = 132
X86_INS_DEC = 133
X86_INS_DIV = 134
X86_INS_DIVPD = 135
X86_INS_DIVPS = 136
X86_INS_FDIVR = 137
X86_INS_FIDIVR = 138
X86_INS_FDIVRP = 139
X86_INS_DIVSD = 140
X86_INS_DIVSS = 141
X86_INS_FDIV = 142
X86_INS_FIDIV = 143
X86_INS_FDIVP = 144
X86_INS_DPPD = 145
X86_INS_DPPS = 146
X86_INS_RET = 147
X86_INS_ENCLS = 148
X86_INS_ENCLU = 149
X86_INS_ENTER = 150
X86_INS_EXTRACTPS = 151
X86_INS_EXTRQ = 152
X86_INS_F2XM1 = 153
X86_INS_LCALL = 154
X86_INS_LJMP = 155
X86_INS_FBLD = 156
X86_INS_FBSTP = 157
X86_INS_FCOMPP = 158
X86_INS_FDECSTP = 159
X86_INS_FEMMS = 160
X86_INS_FFREE = 161
X86_INS_FICOM = 162
X86_INS_FICOMP = 163
X86_INS_FINCSTP = 164
X86_INS_FLDCW = 165
X86_INS_FLDENV = 166
X86_INS_FLDL2E = 167
X86_INS_FLDL2T = 168
X86_INS_FLDLG2 = 169
X86_INS_FLDLN2 = 170
X86_INS_FLDPI = 171
X86_INS_FNCLEX = 172
X86_INS_FNINIT = 173
X86_INS_FNOP = 174
X86_INS_FNSTCW = 175
X86_INS_FNSTSW = 176
X86_INS_FPATAN = 177
X86_INS_FPREM = 178
X86_INS_FPREM1 = 179
X86_INS_FPTAN = 180
X86_INS_FFREEP = 181
X86_INS_FRNDINT = 182
X86_INS_FRSTOR = 183
X86_INS_FNSAVE = 184
X86_INS_FSCALE = 185
X86_INS_FSETPM = 186
X86_INS_FSINCOS = 187
X86_INS_FNSTENV = 188
X86_INS_FXAM = 189
X86_INS_FXRSTOR = 190
X86_INS_FXRSTOR64 = 191
X86_INS_FXSAVE = 192
X86_INS_FXSAVE64 = 193
X86_INS_FXTRACT = 194
X86_INS_FYL2X = 195
X86_INS_FYL2XP1 = 196
X86_INS_MOVAPD = 197
X86_INS_MOVAPS = 198
X86_INS_ORPD = 199
X86_INS_ORPS = 200
X86_INS_VMOVAPD = 201
X86_INS_VMOVAPS = 202
X86_INS_XORPD = 203
X86_INS_XORPS = 204
X86_INS_GETSEC = 205
X86_INS_HADDPD = 206
X86_INS_HADDPS = 207
X86_INS_HLT = 208
X86_INS_HSUBPD = 209
X86_INS_HSUBPS = 210
X86_INS_IDIV = 211
X86_INS_FILD = 212
X86_INS_IMUL = 213
X86_INS_IN = 214
X86_INS_INC = 215
X86_INS_INSB = 216
X86_INS_INSERTPS = 217
X86_INS_INSERTQ = 218
X86_INS_INSD = 219
X86_INS_INSW = 220
X86_INS_INT = 221
X86_INS_INT1 = 222
X86_INS_INT3 = 223
X86_INS_INTO = 224
X86_INS_INVD = 225
X86_INS_INVEPT = 226
X86_INS_INVLPG = 227
X86_INS_INVLPGA = 228
X86_INS_INVPCID = 229
X86_INS_INVVPID = 230
X86_INS_IRET = 231
X86_INS_IRETD = 232
X86_INS_IRETQ = 233
X86_INS_FISTTP = 234
X86_INS_FIST = 235
X86_INS_FISTP = 236
X86_INS_UCOMISD = 237
X86_INS_UCOMISS = 238
X86_INS_VCOMISD = 239
X86_INS_VCOMISS = 240
X86_INS_VCVTSD2SS = 241
X86_INS_VCVTSI2SD = 242
X86_INS_VCVTSI2SS = 243
X86_INS_VCVTSS2SD = 244
X86_INS_VCVTTSD2SI = 245
X86_INS_VCVTTSD2USI = 246
X86_INS_VCVTTSS2SI = 247
X86_INS_VCVTTSS2USI = 248
X86_INS_VCVTUSI2SD = 249
X86_INS_VCVTUSI2SS = 250
X86_INS_VUCOMISD = 251
X86_INS_VUCOMISS = 252
X86_INS_JAE = 253
X86_INS_JA = 254
X86_INS_JBE = 255
X86_INS_JB = 256
X86_INS_JCXZ = 257
X86_INS_JECXZ = 258
X86_INS_JE = 259
X86_INS_JGE = 260
X86_INS_JG = 261
X86_INS_JLE = 262
X86_INS_JL = 263
X86_INS_JMP = 264
X86_INS_JNE = 265
X86_INS_JNO = 266
X86_INS_JNP = 267
X86_INS_JNS = 268
X86_INS_JO = 269
X86_INS_JP = 270
X86_INS_JRCXZ = 271
X86_INS_JS = 272
X86_INS_KANDB = 273
X86_INS_KANDD = 274
X86_INS_KANDNB = 275
X86_INS_KANDND = 276
X86_INS_KANDNQ = 277
X86_INS_KANDNW = 278
X86_INS_KANDQ = 279
X86_INS_KANDW = 280
X86_INS_KMOVB = 281
X86_INS_KMOVD = 282
X86_INS_KMOVQ = 283
X86_INS_KMOVW = 284
X86_INS_KNOTB = 285
X86_INS_KNOTD = 286
X86_INS_KNOTQ = 287
X86_INS_KNOTW = 288
X86_INS_KORB = 289
X86_INS_KORD = 290
X86_INS_KORQ = 291
X86_INS_KORTESTB = 292
X86_INS_KORTESTD = 293
X86_INS_KORTESTQ = 294
X86_INS_KORTESTW = 295
X86_INS_KORW = 296
X86_INS_KSHIFTLB = 297
X86_INS_KSHIFTLD = 298
X86_INS_KSHIFTLQ = 299
X86_INS_KSHIFTLW = 300
X86_INS_KSHIFTRB = 301
X86_INS_KSHIFTRD = 302
X86_INS_KSHIFTRQ = 303
X86_INS_KSHIFTRW = 304
X86_INS_KUNPCKBW = 305
X86_INS_KXNORB = 306
X86_INS_KXNORD = 307
X86_INS_KXNORQ = 308
X86_INS_KXNORW = 309
X86_INS_KXORB = 310
X86_INS_KXORD = 311
X86_INS_KXORQ = 312
X86_INS_KXORW = 313
X86_INS_LAHF = 314
X86_INS_LAR = 315
X86_INS_LDDQU = 316
X86_INS_LDMXCSR = 317
X86_INS_LDS = 318
X86_INS_FLDZ = 319
X86_INS_FLD1 = 320
X86_INS_FLD = 321
X86_INS_LEA = 322
X86_INS_LEAVE = 323
X86_INS_LES = 324
X86_INS_LFENCE = 325
X86_INS_LFS = 326
X86_INS_LGDT = 327
X86_INS_LGS = 328
X86_INS_LIDT = 329
X86_INS_LLDT = 330
X86_INS_LMSW = 331
X86_INS_OR = 332
X86_INS_SUB = 333
X86_INS_XOR = 334
X86_INS_LODSB = 335
X86_INS_LODSD = 336
X86_INS_LODSQ = 337
X86_INS_LODSW = 338
X86_INS_LOOP = 339
X86_INS_LOOPE = 340
X86_INS_LOOPNE = 341
X86_INS_RETF = 342
X86_INS_RETFQ = 343
X86_INS_LSL = 344
X86_INS_LSS = 345
X86_INS_LTR = 346
X86_INS_XADD = 347
X86_INS_LZCNT = 348
X86_INS_MASKMOVDQU = 349
X86_INS_MAXPD = 350
X86_INS_MAXPS = 351
X86_INS_MAXSD = 352
X86_INS_MAXSS = 353
X86_INS_MFENCE = 354
X86_INS_MINPD = 355
X86_INS_MINPS = 356
X86_INS_MINSD = 357
X86_INS_MINSS = 358
X86_INS_CVTPD2PI = 359
X86_INS_CVTPI2PD = 360
X86_INS_CVTPI2PS = 361
X86_INS_CVTPS2PI = 362
X86_INS_CVTTPD2PI = 363
X86_INS_CVTTPS2PI = 364
X86_INS_EMMS = 365
X86_INS_MASKMOVQ = 366
X86_INS_MOVD = 367
X86_INS_MOVDQ2Q = 368
X86_INS_MOVNTQ = 369
X86_INS_MOVQ2DQ = 370
X86_INS_MOVQ = 371
X86_INS_PABSB = 372
X86_INS_PABSD = 373
X86_INS_PABSW = 374
X86_INS_PACKSSDW = 375
X86_INS_PACKSSWB = 376
X86_INS_PACKUSWB = 377
X86_INS_PADDB = 378
X86_INS_PADDD = 379
X86_INS_PADDQ = 380
X86_INS_PADDSB = 381
X86_INS_PADDSW = 382
X86_INS_PADDUSB = 383
X86_INS_PADDUSW = 384
X86_INS_PADDW = 385
X86_INS_PALIGNR = 386
X86_INS_PANDN = 387
X86_INS_PAND = 388
X86_INS_PAVGB = 389
X86_INS_PAVGW = 390
X86_INS_PCMPEQB = 391
X86_INS_PCMPEQD = 392
X86_INS_PCMPEQW = 393
X86_INS_PCMPGTB = 394
X86_INS_PCMPGTD = 395
X86_INS_PCMPGTW = 396
X86_INS_PEXTRW = 397
X86_INS_PHADDSW = 398
X86_INS_PHADDW = 399
X86_INS_PHADDD = 400
X86_INS_PHSUBD = 401
X86_INS_PHSUBSW = 402
X86_INS_PHSUBW = 403
X86_INS_PINSRW = 404
X86_INS_PMADDUBSW = 405
X86_INS_PMADDWD = 406
X86_INS_PMAXSW = 407
X86_INS_PMAXUB = 408
X86_INS_PMINSW = 409
X86_INS_PMINUB = 410
X86_INS_PMOVMSKB = 411
X86_INS_PMULHRSW = 412
X86_INS_PMULHUW = 413
X86_INS_PMULHW = 414
X86_INS_PMULLW = 415
X86_INS_PMULUDQ = 416
X86_INS_POR = 417
X86_INS_PSADBW = 418
X86_INS_PSHUFB = 419
X86_INS_PSHUFW = 420
X86_INS_PSIGNB = 421
X86_INS_PSIGND = 422
X86_INS_PSIGNW = 423
X86_INS_PSLLD = 424
X86_INS_PSLLQ = 425
X86_INS_PSLLW = 426
X86_INS_PSRAD = 427
X86_INS_PSRAW = 428
X86_INS_PSRLD = 429
X86_INS_PSRLQ = 430
X86_INS_PSRLW = 431
X86_INS_PSUBB = 432
X86_INS_PSUBD = 433
X86_INS_PSUBQ = 434
X86_INS_PSUBSB = 435
X86_INS_PSUBSW = 436
X86_INS_PSUBUSB = 437
X86_INS_PSUBUSW = 438
X86_INS_PSUBW = 439
X86_INS_PUNPCKHBW = 440
X86_INS_PUNPCKHDQ = 441
X86_INS_PUNPCKHWD = 442
X86_INS_PUNPCKLBW = 443
X86_INS_PUNPCKLDQ = 444
X86_INS_PUNPCKLWD = 445
X86_INS_PXOR = 446
X86_INS_MONITOR = 447
X86_INS_MONTMUL = 448
X86_INS_MOV = 449
X86_INS_MOVABS = 450
X86_INS_MOVBE = 451
X86_INS_MOVDDUP = 452
X86_INS_MOVDQA = 453
X86_INS_MOVDQU = 454
X86_INS_MOVHLPS = 455
X86_INS_MOVHPD = 456
X86_INS_MOVHPS = 457
X86_INS_MOVLHPS = 458
X86_INS_MOVLPD = 459
X86_INS_MOVLPS = 460
X86_INS_MOVMSKPD = 461
X86_INS_MOVMSKPS = 462
X86_INS_MOVNTDQA = 463
X86_INS_MOVNTDQ = 464
X86_INS_MOVNTI = 465
X86_INS_MOVNTPD = 466
X86_INS_MOVNTPS = 467
X86_INS_MOVNTSD = 468
X86_INS_MOVNTSS = 469
X86_INS_MOVSB = 470
X86_INS_MOVSD = 471
X86_INS_MOVSHDUP = 472
X86_INS_MOVSLDUP = 473
X86_INS_MOVSQ = 474
X86_INS_MOVSS = 475
X86_INS_MOVSW = 476
X86_INS_MOVSX = 477
X86_INS_MOVSXD = 478
X86_INS_MOVUPD = 479
X86_INS_MOVUPS = 480
X86_INS_MOVZX = 481
X86_INS_MPSADBW = 482
X86_INS_MUL = 483
X86_INS_MULPD = 484
X86_INS_MULPS = 485
X86_INS_MULSD = 486
X86_INS_MULSS = 487
X86_INS_MULX = 488
X86_INS_FMUL = 489
X86_INS_FIMUL = 490
X86_INS_FMULP = 491
X86_INS_MWAIT = 492
X86_INS_NEG = 493
X86_INS_NOP = 494
X86_INS_NOT = 495
X86_INS_OUT = 496
X86_INS_OUTSB = 497
X86_INS_OUTSD = 498
X86_INS_OUTSW = 499
X86_INS_PACKUSDW = 500
X86_INS_PAUSE = 501
X86_INS_PAVGUSB = 502
X86_INS_PBLENDVB = 503
X86_INS_PBLENDW = 504
X86_INS_PCLMULQDQ = 505
X86_INS_PCMPEQQ = 506
X86_INS_PCMPESTRI = 507
X86_INS_PCMPESTRM = 508
X86_INS_PCMPGTQ = 509
X86_INS_PCMPISTRI = 510
X86_INS_PCMPISTRM = 511
X86_INS_PCOMMIT = 512
X86_INS_PDEP = 513
X86_INS_PEXT = 514
X86_INS_PEXTRB = 515
X86_INS_PEXTRD = 516
X86_INS_PEXTRQ = 517
X86_INS_PF2ID = 518
X86_INS_PF2IW = 519
X86_INS_PFACC = 520
X86_INS_PFADD = 521
X86_INS_PFCMPEQ = 522
X86_INS_PFCMPGE = 523
X86_INS_PFCMPGT = 524
X86_INS_PFMAX = 525
X86_INS_PFMIN = 526
X86_INS_PFMUL = 527
X86_INS_PFNACC = 528
X86_INS_PFPNACC = 529
X86_INS_PFRCPIT1 = 530
X86_INS_PFRCPIT2 = 531
X86_INS_PFRCP = 532
X86_INS_PFRSQIT1 = 533
X86_INS_PFRSQRT = 534
X86_INS_PFSUBR = 535
X86_INS_PFSUB = 536
X86_INS_PHMINPOSUW = 537
X86_INS_PI2FD = 538
X86_INS_PI2FW = 539
X86_INS_PINSRB = 540
X86_INS_PINSRD = 541
X86_INS_PINSRQ = 542
X86_INS_PMAXSB = 543
X86_INS_PMAXSD = 544
X86_INS_PMAXUD = 545
X86_INS_PMAXUW = 546
X86_INS_PMINSB = 547
X86_INS_PMINSD = 548
X86_INS_PMINUD = 549
X86_INS_PMINUW = 550
X86_INS_PMOVSXBD = 551
X86_INS_PMOVSXBQ = 552
X86_INS_PMOVSXBW = 553
X86_INS_PMOVSXDQ = 554
X86_INS_PMOVSXWD = 555
X86_INS_PMOVSXWQ = 556
X86_INS_PMOVZXBD = 557
X86_INS_PMOVZXBQ = 558
X86_INS_PMOVZXBW = 559
X86_INS_PMOVZXDQ = 560
X86_INS_PMOVZXWD = 561
X86_INS_PMOVZXWQ = 562
X86_INS_PMULDQ = 563
X86_INS_PMULHRW = 564
X86_INS_PMULLD = 565
X86_INS_POP = 566
X86_INS_POPAW = 567
X86_INS_POPAL = 568
X86_INS_POPCNT = 569
X86_INS_POPF = 570
X86_INS_POPFD = 571
X86_INS_POPFQ = 572
X86_INS_PREFETCH = 573
X86_INS_PREFETCHNTA = 574
X86_INS_PREFETCHT0 = 575
X86_INS_PREFETCHT1 = 576
X86_INS_PREFETCHT2 = 577
X86_INS_PREFETCHW = 578
X86_INS_PSHUFD = 579
X86_INS_PSHUFHW = 580
X86_INS_PSHUFLW = 581
X86_INS_PSLLDQ = 582
X86_INS_PSRLDQ = 583
X86_INS_PSWAPD = 584
X86_INS_PTEST = 585
X86_INS_PUNPCKHQDQ = 586
X86_INS_PUNPCKLQDQ = 587
X86_INS_PUSH = 588
X86_INS_PUSHAW = 589
X86_INS_PUSHAL = 590
X86_INS_PUSHF = 591
X86_INS_PUSHFD = 592
X86_INS_PUSHFQ = 593
X86_INS_RCL = 594
X86_INS_RCPPS = 595
X86_INS_RCPSS = 596
X86_INS_RCR = 597
X86_INS_RDFSBASE = 598
X86_INS_RDGSBASE = 599
X86_INS_RDMSR = 600
X86_INS_RDPMC = 601
X86_INS_RDRAND = 602
X86_INS_RDSEED = 603
X86_INS_RDTSC = 604
X86_INS_RDTSCP = 605
X86_INS_ROL = 606
X86_INS_ROR = 607
X86_INS_RORX = 608
X86_INS_ROUNDPD = 609
X86_INS_ROUNDPS = 610
X86_INS_ROUNDSD = 611
X86_INS_ROUNDSS = 612
X86_INS_RSM = 613
X86_INS_RSQRTPS = 614
X86_INS_RSQRTSS = 615
X86_INS_SAHF = 616
X86_INS_SAL = 617
X86_INS_SALC = 618
X86_INS_SAR = 619
X86_INS_SARX = 620
X86_INS_SBB = 621
X86_INS_SCASB = 622
X86_INS_SCASD = 623
X86_INS_SCASQ = 624
X86_INS_SCASW = 625
X86_INS_SETAE = 626
X86_INS_SETA = 627
X86_INS_SETBE = 628
X86_INS_SETB = 629
X86_INS_SETE = 630
X86_INS_SETGE = 631
X86_INS_SETG = 632
X86_INS_SETLE = 633
X86_INS_SETL = 634
X86_INS_SETNE = 635
X86_INS_SETNO = 636
X86_INS_SETNP = 637
X86_INS_SETNS = 638
X86_INS_SETO = 639
X86_INS_SETP = 640
X86_INS_SETS = 641
X86_INS_SFENCE = 642
X86_INS_SGDT = 643
X86_INS_SHA1MSG1 = 644
X86_INS_SHA1MSG2 = 645
X86_INS_SHA1NEXTE = 646
X86_INS_SHA1RNDS4 = 647
X86_INS_SHA256MSG1 = 648
X86_INS_SHA256MSG2 = 649
X86_INS_SHA256RNDS2 = 650
X86_INS_SHL = 651
X86_INS_SHLD = 652
X86_INS_SHLX = 653
X86_INS_SHR = 654
X86_INS_SHRD = 655
X86_INS_SHRX = 656
X86_INS_SHUFPD = 657
X86_INS_SHUFPS = 658
X86_INS_SIDT = 659
X86_INS_FSIN = 660
X86_INS_SKINIT = 661
X86_INS_SLDT = 662
X86_INS_SMSW = 663
X86_INS_SQRTPD = 664
X86_INS_SQRTPS = 665
X86_INS_SQRTSD = 666
X86_INS_SQRTSS = 667
X86_INS_FSQRT = 668
X86_INS_STAC = 669
X86_INS_STC = 670
X86_INS_STD = 671
X86_INS_STGI = 672
X86_INS_STI = 673
X86_INS_STMXCSR = 674
X86_INS_STOSB = 675
X86_INS_STOSD = 676
X86_INS_STOSQ = 677
X86_INS_STOSW = 678
X86_INS_STR = 679
X86_INS_FST = 680
X86_INS_FSTP = 681
X86_INS_FSTPNCE = 682
X86_INS_FXCH = 683
X86_INS_SUBPD = 684
X86_INS_SUBPS = 685
X86_INS_FSUBR = 686
X86_INS_FISUBR = 687
X86_INS_FSUBRP = 688
X86_INS_SUBSD = 689
X86_INS_SUBSS = 690
X86_INS_FSUB = 691
X86_INS_FISUB = 692
X86_INS_FSUBP = 693
X86_INS_SWAPGS = 694
X86_INS_SYSCALL = 695
X86_INS_SYSENTER = 696
X86_INS_SYSEXIT = 697
X86_INS_SYSRET = 698
X86_INS_T1MSKC = 699
X86_INS_TEST = 700
X86_INS_UD2 = 701
X86_INS_FTST = 702
X86_INS_TZCNT = 703
X86_INS_TZMSK = 704
X86_INS_FUCOMIP = 705
X86_INS_FUCOMI = 706
X86_INS_FUCOMPP = 707
X86_INS_FUCOMP = 708
X86_INS_FUCOM = 709
X86_INS_UD2B = 710
X86_INS_UNPCKHPD = 711
X86_INS_UNPCKHPS = 712
X86_INS_UNPCKLPD = 713
X86_INS_UNPCKLPS = 714
X86_INS_VADDPD = 715
X86_INS_VADDPS = 716
X86_INS_VADDSD = 717
X86_INS_VADDSS = 718
X86_INS_VADDSUBPD = 719
X86_INS_VADDSUBPS = 720
X86_INS_VAESDECLAST = 721
X86_INS_VAESDEC = 722
X86_INS_VAESENCLAST = 723
X86_INS_VAESENC = 724
X86_INS_VAESIMC = 725
X86_INS_VAESKEYGENASSIST = 726
X86_INS_VALIGND = 727
X86_INS_VALIGNQ = 728
X86_INS_VANDNPD = 729
X86_INS_VANDNPS = 730
X86_INS_VANDPD = 731
X86_INS_VANDPS = 732
X86_INS_VBLENDMPD = 733
X86_INS_VBLENDMPS = 734
X86_INS_VBLENDPD = 735
X86_INS_VBLENDPS = 736
X86_INS_VBLENDVPD = 737
X86_INS_VBLENDVPS = 738
X86_INS_VBROADCASTF128 = 739
X86_INS_VBROADCASTI32X4 = 740
X86_INS_VBROADCASTI64X4 = 741
X86_INS_VBROADCASTSD = 742
X86_INS_VBROADCASTSS = 743
X86_INS_VCOMPRESSPD = 744
X86_INS_VCOMPRESSPS = 745
X86_INS_VCVTDQ2PD = 746
X86_INS_VCVTDQ2PS = 747
X86_INS_VCVTPD2DQX = 748
X86_INS_VCVTPD2DQ = 749
X86_INS_VCVTPD2PSX = 750
X86_INS_VCVTPD2PS = 751
X86_INS_VCVTPD2UDQ = 752
X86_INS_VCVTPH2PS = 753
X86_INS_VCVTPS2DQ = 754
X86_INS_VCVTPS2PD = 755
X86_INS_VCVTPS2PH = 756
X86_INS_VCVTPS2UDQ = 757
X86_INS_VCVTSD2SI = 758
X86_INS_VCVTSD2USI = 759
X86_INS_VCVTSS2SI = 760
X86_INS_VCVTSS2USI = 761
X86_INS_VCVTTPD2DQX = 762
X86_INS_VCVTTPD2DQ = 763
X86_INS_VCVTTPD2UDQ = 764
X86_INS_VCVTTPS2DQ = 765
X86_INS_VCVTTPS2UDQ = 766
X86_INS_VCVTUDQ2PD = 767
X86_INS_VCVTUDQ2PS = 768
X86_INS_VDIVPD = 769
X86_INS_VDIVPS = 770
X86_INS_VDIVSD = 771
X86_INS_VDIVSS = 772
X86_INS_VDPPD = 773
X86_INS_VDPPS = 774
X86_INS_VERR = 775
X86_INS_VERW = 776
X86_INS_VEXP2PD = 777
X86_INS_VEXP2PS = 778
X86_INS_VEXPANDPD = 779
X86_INS_VEXPANDPS = 780
X86_INS_VEXTRACTF128 = 781
X86_INS_VEXTRACTF32X4 = 782
X86_INS_VEXTRACTF64X4 = 783
X86_INS_VEXTRACTI128 = 784
X86_INS_VEXTRACTI32X4 = 785
X86_INS_VEXTRACTI64X4 = 786
X86_INS_VEXTRACTPS = 787
X86_INS_VFMADD132PD = 788
X86_INS_VFMADD132PS = 789
X86_INS_VFMADDPD = 790
X86_INS_VFMADD213PD = 791
X86_INS_VFMADD231PD = 792
X86_INS_VFMADDPS = 793
X86_INS_VFMADD213PS = 794
X86_INS_VFMADD231PS = 795
X86_INS_VFMADDSD = 796
X86_INS_VFMADD213SD = 797
X86_INS_VFMADD132SD = 798
X86_INS_VFMADD231SD = 799
X86_INS_VFMADDSS = 800
X86_INS_VFMADD213SS = 801
X86_INS_VFMADD132SS = 802
X86_INS_VFMADD231SS = 803
X86_INS_VFMADDSUB132PD = 804
X86_INS_VFMADDSUB132PS = 805
X86_INS_VFMADDSUBPD = 806
X86_INS_VFMADDSUB213PD = 807
X86_INS_VFMADDSUB231PD = 808
X86_INS_VFMADDSUBPS = 809
X86_INS_VFMADDSUB213PS = 810
X86_INS_VFMADDSUB231PS = 811
X86_INS_VFMSUB132PD = 812
X86_INS_VFMSUB132PS = 813
X86_INS_VFMSUBADD132PD = 814
X86_INS_VFMSUBADD132PS = 815
X86_INS_VFMSUBADDPD = 816
X86_INS_VFMSUBADD213PD = 817
X86_INS_VFMSUBADD231PD = 818
X86_INS_VFMSUBADDPS = 819
X86_INS_VFMSUBADD213PS = 820
X86_INS_VFMSUBADD231PS = 821
X86_INS_VFMSUBPD = 822
X86_INS_VFMSUB213PD = 823
X86_INS_VFMSUB231PD = 824
X86_INS_VFMSUBPS = 825
X86_INS_VFMSUB213PS = 826
X86_INS_VFMSUB231PS = 827
X86_INS_VFMSUBSD = 828
X86_INS_VFMSUB213SD = 829
X86_INS_VFMSUB132SD = 830
X86_INS_VFMSUB231SD = 831
X86_INS_VFMSUBSS = 832
X86_INS_VFMSUB213SS = 833
X86_INS_VFMSUB132SS = 834
X86_INS_VFMSUB231SS = 835
X86_INS_VFNMADD132PD = 836
X86_INS_VFNMADD132PS = 837
X86_INS_VFNMADDPD = 838
X86_INS_VFNMADD213PD = 839
X86_INS_VFNMADD231PD = 840
X86_INS_VFNMADDPS = 841
X86_INS_VFNMADD213PS = 842
X86_INS_VFNMADD231PS = 843
X86_INS_VFNMADDSD = 844
X86_INS_VFNMADD213SD = 845
X86_INS_VFNMADD132SD = 846
X86_INS_VFNMADD231SD = 847
X86_INS_VFNMADDSS = 848
X86_INS_VFNMADD213SS = 849
X86_INS_VFNMADD132SS = 850
X86_INS_VFNMADD231SS = 851
X86_INS_VFNMSUB132PD = 852
X86_INS_VFNMSUB132PS = 853
X86_INS_VFNMSUBPD = 854
X86_INS_VFNMSUB213PD = 855
X86_INS_VFNMSUB231PD = 856
X86_INS_VFNMSUBPS = 857
X86_INS_VFNMSUB213PS = 858
X86_INS_VFNMSUB231PS = 859
X86_INS_VFNMSUBSD = 860
X86_INS_VFNMSUB213SD = 861
X86_INS_VFNMSUB132SD = 862
X86_INS_VFNMSUB231SD = 863
X86_INS_VFNMSUBSS = 864
X86_INS_VFNMSUB213SS = 865
X86_INS_VFNMSUB132SS = 866
X86_INS_VFNMSUB231SS = 867
X86_INS_VFRCZPD = 868
X86_INS_VFRCZPS = 869
X86_INS_VFRCZSD = 870
X86_INS_VFRCZSS = 871
X86_INS_VORPD = 872
X86_INS_VORPS = 873
X86_INS_VXORPD = 874
X86_INS_VXORPS = 875
X86_INS_VGATHERDPD = 876
X86_INS_VGATHERDPS = 877
X86_INS_VGATHERPF0DPD = 878
X86_INS_VGATHERPF0DPS = 879
X86_INS_VGATHERPF0QPD = 880
X86_INS_VGATHERPF0QPS = 881
X86_INS_VGATHERPF1DPD = 882
X86_INS_VGATHERPF1DPS = 883
X86_INS_VGATHERPF1QPD = 884
X86_INS_VGATHERPF1QPS = 885
X86_INS_VGATHERQPD = 886
X86_INS_VGATHERQPS = 887
X86_INS_VHADDPD = 888
X86_INS_VHADDPS = 889
X86_INS_VHSUBPD = 890
X86_INS_VHSUBPS = 891
X86_INS_VINSERTF128 = 892
X86_INS_VINSERTF32X4 = 893
X86_INS_VINSERTF32X8 = 894
X86_INS_VINSERTF64X2 = 895
X86_INS_VINSERTF64X4 = 896
X86_INS_VINSERTI128 = 897
X86_INS_VINSERTI32X4 = 898
X86_INS_VINSERTI32X8 = 899
X86_INS_VINSERTI64X2 = 900
X86_INS_VINSERTI64X4 = 901
X86_INS_VINSERTPS = 902
X86_INS_VLDDQU = 903
X86_INS_VLDMXCSR = 904
X86_INS_VMASKMOVDQU = 905
X86_INS_VMASKMOVPD = 906
X86_INS_VMASKMOVPS = 907
X86_INS_VMAXPD = 908
X86_INS_VMAXPS = 909
X86_INS_VMAXSD = 910
X86_INS_VMAXSS = 911
X86_INS_VMCALL = 912
X86_INS_VMCLEAR = 913
X86_INS_VMFUNC = 914
X86_INS_VMINPD = 915
X86_INS_VMINPS = 916
X86_INS_VMINSD = 917
X86_INS_VMINSS = 918
X86_INS_VMLAUNCH = 919
X86_INS_VMLOAD = 920
X86_INS_VMMCALL = 921
X86_INS_VMOVQ = 922
X86_INS_VMOVDDUP = 923
X86_INS_VMOVD = 924
X86_INS_VMOVDQA32 = 925
X86_INS_VMOVDQA64 = 926
X86_INS_VMOVDQA = 927
X86_INS_VMOVDQU16 = 928
X86_INS_VMOVDQU32 = 929
X86_INS_VMOVDQU64 = 930
X86_INS_VMOVDQU8 = 931
X86_INS_VMOVDQU = 932
X86_INS_VMOVHLPS = 933
X86_INS_VMOVHPD = 934
X86_INS_VMOVHPS = 935
X86_INS_VMOVLHPS = 936
X86_INS_VMOVLPD = 937
X86_INS_VMOVLPS = 938
X86_INS_VMOVMSKPD = 939
X86_INS_VMOVMSKPS = 940
X86_INS_VMOVNTDQA = 941
X86_INS_VMOVNTDQ = 942
X86_INS_VMOVNTPD = 943
X86_INS_VMOVNTPS = 944
X86_INS_VMOVSD = 945
X86_INS_VMOVSHDUP = 946
X86_INS_VMOVSLDUP = 947
X86_INS_VMOVSS = 948
X86_INS_VMOVUPD = 949
X86_INS_VMOVUPS = 950
X86_INS_VMPSADBW = 951
X86_INS_VMPTRLD = 952
X86_INS_VMPTRST = 953
X86_INS_VMREAD = 954
X86_INS_VMRESUME = 955
X86_INS_VMRUN = 956
X86_INS_VMSAVE = 957
X86_INS_VMULPD = 958
X86_INS_VMULPS = 959
X86_INS_VMULSD = 960
X86_INS_VMULSS = 961
X86_INS_VMWRITE = 962
X86_INS_VMXOFF = 963
X86_INS_VMXON = 964
X86_INS_VPABSB = 965
X86_INS_VPABSD = 966
X86_INS_VPABSQ = 967
X86_INS_VPABSW = 968
X86_INS_VPACKSSDW = 969
X86_INS_VPACKSSWB = 970
X86_INS_VPACKUSDW = 971
X86_INS_VPACKUSWB = 972
X86_INS_VPADDB = 973
X86_INS_VPADDD = 974
X86_INS_VPADDQ = 975
X86_INS_VPADDSB = 976
X86_INS_VPADDSW = 977
X86_INS_VPADDUSB = 978
X86_INS_VPADDUSW = 979
X86_INS_VPADDW = 980
X86_INS_VPALIGNR = 981
X86_INS_VPANDD = 982
X86_INS_VPANDND = 983
X86_INS_VPANDNQ = 984
X86_INS_VPANDN = 985
X86_INS_VPANDQ = 986
X86_INS_VPAND = 987
X86_INS_VPAVGB = 988
X86_INS_VPAVGW = 989
X86_INS_VPBLENDD = 990
X86_INS_VPBLENDMB = 991
X86_INS_VPBLENDMD = 992
X86_INS_VPBLENDMQ = 993
X86_INS_VPBLENDMW = 994
X86_INS_VPBLENDVB = 995
X86_INS_VPBLENDW = 996
X86_INS_VPBROADCASTB = 997
X86_INS_VPBROADCASTD = 998
X86_INS_VPBROADCASTMB2Q = 999
X86_INS_VPBROADCASTMW2D = 1000
X86_INS_VPBROADCASTQ = 1001
X86_INS_VPBROADCASTW = 1002
X86_INS_VPCLMULQDQ = 1003
X86_INS_VPCMOV = 1004
X86_INS_VPCMPB = 1005
X86_INS_VPCMPD = 1006
X86_INS_VPCMPEQB = 1007
X86_INS_VPCMPEQD = 1008
X86_INS_VPCMPEQQ = 1009
X86_INS_VPCMPEQW = 1010
X86_INS_VPCMPESTRI = 1011
X86_INS_VPCMPESTRM = 1012
X86_INS_VPCMPGTB = 1013
X86_INS_VPCMPGTD = 1014
X86_INS_VPCMPGTQ = 1015
X86_INS_VPCMPGTW = 1016
X86_INS_VPCMPISTRI = 1017
X86_INS_VPCMPISTRM = 1018
X86_INS_VPCMPQ = 1019
X86_INS_VPCMPUB = 1020
X86_INS_VPCMPUD = 1021
X86_INS_VPCMPUQ = 1022
X86_INS_VPCMPUW = 1023
X86_INS_VPCMPW = 1024
X86_INS_VPCOMB = 1025
X86_INS_VPCOMD = 1026
X86_INS_VPCOMPRESSD = 1027
X86_INS_VPCOMPRESSQ = 1028
X86_INS_VPCOMQ = 1029
X86_INS_VPCOMUB = 1030
X86_INS_VPCOMUD = 1031
X86_INS_VPCOMUQ = 1032
X86_INS_VPCOMUW = 1033
X86_INS_VPCOMW = 1034
X86_INS_VPCONFLICTD = 1035
X86_INS_VPCONFLICTQ = 1036
X86_INS_VPERM2F128 = 1037
X86_INS_VPERM2I128 = 1038
X86_INS_VPERMD = 1039
X86_INS_VPERMI2D = 1040
X86_INS_VPERMI2PD = 1041
X86_INS_VPERMI2PS = 1042
X86_INS_VPERMI2Q = 1043
X86_INS_VPERMIL2PD = 1044
X86_INS_VPERMIL2PS = 1045
X86_INS_VPERMILPD = 1046
X86_INS_VPERMILPS = 1047
X86_INS_VPERMPD = 1048
X86_INS_VPERMPS = 1049
X86_INS_VPERMQ = 1050
X86_INS_VPERMT2D = 1051
X86_INS_VPERMT2PD = 1052
X86_INS_VPERMT2PS = 1053
X86_INS_VPERMT2Q = 1054
X86_INS_VPEXPANDD = 1055
X86_INS_VPEXPANDQ = 1056
X86_INS_VPEXTRB = 1057
X86_INS_VPEXTRD = 1058
X86_INS_VPEXTRQ = 1059
X86_INS_VPEXTRW = 1060
X86_INS_VPGATHERDD = 1061
X86_INS_VPGATHERDQ = 1062
X86_INS_VPGATHERQD = 1063
X86_INS_VPGATHERQQ = 1064
X86_INS_VPHADDBD = 1065
X86_INS_VPHADDBQ = 1066
X86_INS_VPHADDBW = 1067
X86_INS_VPHADDDQ = 1068
X86_INS_VPHADDD = 1069
X86_INS_VPHADDSW = 1070
X86_INS_VPHADDUBD = 1071
X86_INS_VPHADDUBQ = 1072
X86_INS_VPHADDUBW = 1073
X86_INS_VPHADDUDQ = 1074
X86_INS_VPHADDUWD = 1075
X86_INS_VPHADDUWQ = 1076
X86_INS_VPHADDWD = 1077
X86_INS_VPHADDWQ = 1078
X86_INS_VPHADDW = 1079
X86_INS_VPHMINPOSUW = 1080
X86_INS_VPHSUBBW = 1081
X86_INS_VPHSUBDQ = 1082
X86_INS_VPHSUBD = 1083
X86_INS_VPHSUBSW = 1084
X86_INS_VPHSUBWD = 1085
X86_INS_VPHSUBW = 1086
X86_INS_VPINSRB = 1087
X86_INS_VPINSRD = 1088
X86_INS_VPINSRQ = 1089
X86_INS_VPINSRW = 1090
X86_INS_VPLZCNTD = 1091
X86_INS_VPLZCNTQ = 1092
X86_INS_VPMACSDD = 1093
X86_INS_VPMACSDQH = 1094
X86_INS_VPMACSDQL = 1095
X86_INS_VPMACSSDD = 1096
X86_INS_VPMACSSDQH = 1097
X86_INS_VPMACSSDQL = 1098
X86_INS_VPMACSSWD = 1099
X86_INS_VPMACSSWW = 1100
X86_INS_VPMACSWD = 1101
X86_INS_VPMACSWW = 1102
X86_INS_VPMADCSSWD = 1103
X86_INS_VPMADCSWD = 1104
X86_INS_VPMADDUBSW = 1105
X86_INS_VPMADDWD = 1106
X86_INS_VPMASKMOVD = 1107
X86_INS_VPMASKMOVQ = 1108
X86_INS_VPMAXSB = 1109
X86_INS_VPMAXSD = 1110
X86_INS_VPMAXSQ = 1111
X86_INS_VPMAXSW = 1112
X86_INS_VPMAXUB = 1113
X86_INS_VPMAXUD = 1114
X86_INS_VPMAXUQ = 1115
X86_INS_VPMAXUW = 1116
X86_INS_VPMINSB = 1117
X86_INS_VPMINSD = 1118
X86_INS_VPMINSQ = 1119
X86_INS_VPMINSW = 1120
X86_INS_VPMINUB = 1121
X86_INS_VPMINUD = 1122
X86_INS_VPMINUQ = 1123
X86_INS_VPMINUW = 1124
X86_INS_VPMOVDB = 1125
X86_INS_VPMOVDW = 1126
X86_INS_VPMOVM2B = 1127
X86_INS_VPMOVM2D = 1128
X86_INS_VPMOVM2Q = 1129
X86_INS_VPMOVM2W = 1130
X86_INS_VPMOVMSKB = 1131
X86_INS_VPMOVQB = 1132
X86_INS_VPMOVQD = 1133
X86_INS_VPMOVQW = 1134
X86_INS_VPMOVSDB = 1135
X86_INS_VPMOVSDW = 1136
X86_INS_VPMOVSQB = 1137
X86_INS_VPMOVSQD = 1138
X86_INS_VPMOVSQW = 1139
X86_INS_VPMOVSXBD = 1140
X86_INS_VPMOVSXBQ = 1141
X86_INS_VPMOVSXBW = 1142
X86_INS_VPMOVSXDQ = 1143
X86_INS_VPMOVSXWD = 1144
X86_INS_VPMOVSXWQ = 1145
X86_INS_VPMOVUSDB = 1146
X86_INS_VPMOVUSDW = 1147
X86_INS_VPMOVUSQB = 1148
X86_INS_VPMOVUSQD = 1149
X86_INS_VPMOVUSQW = 1150
X86_INS_VPMOVZXBD = 1151
X86_INS_VPMOVZXBQ = 1152
X86_INS_VPMOVZXBW = 1153
X86_INS_VPMOVZXDQ = 1154
X86_INS_VPMOVZXWD = 1155
X86_INS_VPMOVZXWQ = 1156
X86_INS_VPMULDQ = 1157
X86_INS_VPMULHRSW = 1158
X86_INS_VPMULHUW = 1159
X86_INS_VPMULHW = 1160
X86_INS_VPMULLD = 1161
X86_INS_VPMULLQ = 1162
X86_INS_VPMULLW = 1163
X86_INS_VPMULUDQ = 1164
X86_INS_VPORD = 1165
X86_INS_VPORQ = 1166
X86_INS_VPOR = 1167
X86_INS_VPPERM = 1168
X86_INS_VPROTB = 1169
X86_INS_VPROTD = 1170
X86_INS_VPROTQ = 1171
X86_INS_VPROTW = 1172
X86_INS_VPSADBW = 1173
X86_INS_VPSCATTERDD = 1174
X86_INS_VPSCATTERDQ = 1175
X86_INS_VPSCATTERQD = 1176
X86_INS_VPSCATTERQQ = 1177
X86_INS_VPSHAB = 1178
X86_INS_VPSHAD = 1179
X86_INS_VPSHAQ = 1180
X86_INS_VPSHAW = 1181
X86_INS_VPSHLB = 1182
X86_INS_VPSHLD = 1183
X86_INS_VPSHLQ = 1184
X86_INS_VPSHLW = 1185
X86_INS_VPSHUFB = 1186
X86_INS_VPSHUFD = 1187
X86_INS_VPSHUFHW = 1188
X86_INS_VPSHUFLW = 1189
X86_INS_VPSIGNB = 1190
X86_INS_VPSIGND = 1191
X86_INS_VPSIGNW = 1192
X86_INS_VPSLLDQ = 1193
X86_INS_VPSLLD = 1194
X86_INS_VPSLLQ = 1195
X86_INS_VPSLLVD = 1196
X86_INS_VPSLLVQ = 1197
X86_INS_VPSLLW = 1198
X86_INS_VPSRAD = 1199
X86_INS_VPSRAQ = 1200
X86_INS_VPSRAVD = 1201
X86_INS_VPSRAVQ = 1202
X86_INS_VPSRAW = 1203
X86_INS_VPSRLDQ = 1204
X86_INS_VPSRLD = 1205
X86_INS_VPSRLQ = 1206
X86_INS_VPSRLVD = 1207
X86_INS_VPSRLVQ = 1208
X86_INS_VPSRLW = 1209
X86_INS_VPSUBB = 1210
X86_INS_VPSUBD = 1211
X86_INS_VPSUBQ = 1212
X86_INS_VPSUBSB = 1213
X86_INS_VPSUBSW = 1214
X86_INS_VPSUBUSB = 1215
X86_INS_VPSUBUSW = 1216
X86_INS_VPSUBW = 1217
X86_INS_VPTESTMD = 1218
X86_INS_VPTESTMQ = 1219
X86_INS_VPTESTNMD = 1220
X86_INS_VPTESTNMQ = 1221
X86_INS_VPTEST = 1222
X86_INS_VPUNPCKHBW = 1223
X86_INS_VPUNPCKHDQ = 1224
X86_INS_VPUNPCKHQDQ = 1225
X86_INS_VPUNPCKHWD = 1226
X86_INS_VPUNPCKLBW = 1227
X86_INS_VPUNPCKLDQ = 1228
X86_INS_VPUNPCKLQDQ = 1229
X86_INS_VPUNPCKLWD = 1230
X86_INS_VPXORD = 1231
X86_INS_VPXORQ = 1232
X86_INS_VPXOR = 1233
X86_INS_VRCP14PD = 1234
X86_INS_VRCP14PS = 1235
X86_INS_VRCP14SD = 1236
X86_INS_VRCP14SS = 1237
X86_INS_VRCP28PD = 1238
X86_INS_VRCP28PS = 1239
X86_INS_VRCP28SD = 1240
X86_INS_VRCP28SS = 1241
X86_INS_VRCPPS = 1242
X86_INS_VRCPSS = 1243
X86_INS_VRNDSCALEPD = 1244
X86_INS_VRNDSCALEPS = 1245
X86_INS_VRNDSCALESD = 1246
X86_INS_VRNDSCALESS = 1247
X86_INS_VROUNDPD = 1248
X86_INS_VROUNDPS = 1249
X86_INS_VROUNDSD = 1250
X86_INS_VROUNDSS = 1251
X86_INS_VRSQRT14PD = 1252
X86_INS_VRSQRT14PS = 1253
X86_INS_VRSQRT14SD = 1254
X86_INS_VRSQRT14SS = 1255
X86_INS_VRSQRT28PD = 1256
X86_INS_VRSQRT28PS = 1257
X86_INS_VRSQRT28SD = 1258
X86_INS_VRSQRT28SS = 1259
X86_INS_VRSQRTPS = 1260
X86_INS_VRSQRTSS = 1261
X86_INS_VSCATTERDPD = 1262
X86_INS_VSCATTERDPS = 1263
X86_INS_VSCATTERPF0DPD = 1264
X86_INS_VSCATTERPF0DPS = 1265
X86_INS_VSCATTERPF0QPD = 1266
X86_INS_VSCATTERPF0QPS = 1267
X86_INS_VSCATTERPF1DPD = 1268
X86_INS_VSCATTERPF1DPS = 1269
X86_INS_VSCATTERPF1QPD = 1270
X86_INS_VSCATTERPF1QPS = 1271
X86_INS_VSCATTERQPD = 1272
X86_INS_VSCATTERQPS = 1273
X86_INS_VSHUFPD = 1274
X86_INS_VSHUFPS = 1275
X86_INS_VSQRTPD = 1276
X86_INS_VSQRTPS = 1277
X86_INS_VSQRTSD = 1278
X86_INS_VSQRTSS = 1279
X86_INS_VSTMXCSR = 1280
X86_INS_VSUBPD = 1281
X86_INS_VSUBPS = 1282
X86_INS_VSUBSD = 1283
X86_INS_VSUBSS = 1284
X86_INS_VTESTPD = 1285
X86_INS_VTESTPS = 1286
X86_INS_VUNPCKHPD = 1287
X86_INS_VUNPCKHPS = 1288
X86_INS_VUNPCKLPD = 1289
X86_INS_VUNPCKLPS = 1290
X86_INS_VZEROALL = 1291
X86_INS_VZEROUPPER = 1292
X86_INS_WAIT = 1293
X86_INS_WBINVD = 1294
X86_INS_WRFSBASE = 1295
X86_INS_WRGSBASE = 1296
X86_INS_WRMSR = 1297
X86_INS_XABORT = 1298
X86_INS_XACQUIRE = 1299
X86_INS_XBEGIN = 1300
X86_INS_XCHG = 1301
X86_INS_XCRYPTCBC = 1302
X86_INS_XCRYPTCFB = 1303
X86_INS_XCRYPTCTR = 1304
X86_INS_XCRYPTECB = 1305
X86_INS_XCRYPTOFB = 1306
X86_INS_XEND = 1307
X86_INS_XGETBV = 1308
X86_INS_XLATB = 1309
X86_INS_XRELEASE = 1310
X86_INS_XRSTOR = 1311
X86_INS_XRSTOR64 = 1312
X86_INS_XRSTORS = 1313
X86_INS_XRSTORS64 = 1314
X86_INS_XSAVE = 1315
X86_INS_XSAVE64 = 1316
X86_INS_XSAVEC = 1317
X86_INS_XSAVEC64 = 1318
X86_INS_XSAVEOPT = 1319
X86_INS_XSAVEOPT64 = 1320
X86_INS_XSAVES = 1321
X86_INS_XSAVES64 = 1322
X86_INS_XSETBV = 1323
X86_INS_XSHA1 = 1324
X86_INS_XSHA256 = 1325
X86_INS_XSTORE = 1326
X86_INS_XTEST = 1327
X86_INS_FDISI8087_NOP = 1328
X86_INS_FENI8087_NOP = 1329
X86_INS_CMPSS = 1330
X86_INS_CMPEQSS = 1331
X86_INS_CMPLTSS = 1332
X86_INS_CMPLESS = 1333
X86_INS_CMPUNORDSS = 1334
X86_INS_CMPNEQSS = 1335
X86_INS_CMPNLTSS = 1336
X86_INS_CMPNLESS = 1337
X86_INS_CMPORDSS = 1338
X86_INS_CMPSD = 1339
X86_INS_CMPEQSD = 1340
X86_INS_CMPLTSD = 1341
X86_INS_CMPLESD = 1342
X86_INS_CMPUNORDSD = 1343
X86_INS_CMPNEQSD = 1344
X86_INS_CMPNLTSD = 1345
X86_INS_CMPNLESD = 1346
X86_INS_CMPORDSD = 1347
X86_INS_CMPPS = 1348
X86_INS_CMPEQPS = 1349
X86_INS_CMPLTPS = 1350
X86_INS_CMPLEPS = 1351
X86_INS_CMPUNORDPS = 1352
X86_INS_CMPNEQPS = 1353
X86_INS_CMPNLTPS = 1354
X86_INS_CMPNLEPS = 1355
X86_INS_CMPORDPS = 1356
X86_INS_CMPPD = 1357
X86_INS_CMPEQPD = 1358
X86_INS_CMPLTPD = 1359
X86_INS_CMPLEPD = 1360
X86_INS_CMPUNORDPD = 1361
X86_INS_CMPNEQPD = 1362
X86_INS_CMPNLTPD = 1363
X86_INS_CMPNLEPD = 1364
X86_INS_CMPORDPD = 1365
X86_INS_VCMPSS = 1366
X86_INS_VCMPEQSS = 1367
X86_INS_VCMPLTSS = 1368
X86_INS_VCMPLESS = 1369
X86_INS_VCMPUNORDSS = 1370
X86_INS_VCMPNEQSS = 1371
X86_INS_VCMPNLTSS = 1372
X86_INS_VCMPNLESS = 1373
X86_INS_VCMPORDSS = 1374
X86_INS_VCMPEQ_UQSS = 1375
X86_INS_VCMPNGESS = 1376
X86_INS_VCMPNGTSS = 1377
X86_INS_VCMPFALSESS = 1378
X86_INS_VCMPNEQ_OQSS = 1379
X86_INS_VCMPGESS = 1380
X86_INS_VCMPGTSS = 1381
X86_INS_VCMPTRUESS = 1382
X86_INS_VCMPEQ_OSSS = 1383
X86_INS_VCMPLT_OQSS = 1384
X86_INS_VCMPLE_OQSS = 1385
X86_INS_VCMPUNORD_SSS = 1386
X86_INS_VCMPNEQ_USSS = 1387
X86_INS_VCMPNLT_UQSS = 1388
X86_INS_VCMPNLE_UQSS = 1389
X86_INS_VCMPORD_SSS = 1390
X86_INS_VCMPEQ_USSS = 1391
X86_INS_VCMPNGE_UQSS = 1392
X86_INS_VCMPNGT_UQSS = 1393
X86_INS_VCMPFALSE_OSSS = 1394
X86_INS_VCMPNEQ_OSSS = 1395
X86_INS_VCMPGE_OQSS = 1396
X86_INS_VCMPGT_OQSS = 1397
X86_INS_VCMPTRUE_USSS = 1398
X86_INS_VCMPSD = 1399
X86_INS_VCMPEQSD = 1400
X86_INS_VCMPLTSD = 1401
X86_INS_VCMPLESD = 1402
X86_INS_VCMPUNORDSD = 1403
X86_INS_VCMPNEQSD = 1404
X86_INS_VCMPNLTSD = 1405
X86_INS_VCMPNLESD = 1406
X86_INS_VCMPORDSD = 1407
X86_INS_VCMPEQ_UQSD = 1408
X86_INS_VCMPNGESD = 1409
X86_INS_VCMPNGTSD = 1410
X86_INS_VCMPFALSESD = 1411
X86_INS_VCMPNEQ_OQSD = 1412
X86_INS_VCMPGESD = 1413
X86_INS_VCMPGTSD = 1414
X86_INS_VCMPTRUESD = 1415
X86_INS_VCMPEQ_OSSD = 1416
X86_INS_VCMPLT_OQSD = 1417
X86_INS_VCMPLE_OQSD = 1418
X86_INS_VCMPUNORD_SSD = 1419
X86_INS_VCMPNEQ_USSD = 1420
X86_INS_VCMPNLT_UQSD = 1421
X86_INS_VCMPNLE_UQSD = 1422
X86_INS_VCMPORD_SSD = 1423
X86_INS_VCMPEQ_USSD = 1424
X86_INS_VCMPNGE_UQSD = 1425
X86_INS_VCMPNGT_UQSD = 1426
X86_INS_VCMPFALSE_OSSD = 1427
X86_INS_VCMPNEQ_OSSD = 1428
X86_INS_VCMPGE_OQSD = 1429
X86_INS_VCMPGT_OQSD = 1430
X86_INS_VCMPTRUE_USSD = 1431
X86_INS_VCMPPS = 1432
X86_INS_VCMPEQPS = 1433
X86_INS_VCMPLTPS = 1434
X86_INS_VCMPLEPS = 1435
X86_INS_VCMPUNORDPS = 1436
X86_INS_VCMPNEQPS = 1437
X86_INS_VCMPNLTPS = 1438
X86_INS_VCMPNLEPS = 1439
X86_INS_VCMPORDPS = 1440
X86_INS_VCMPEQ_UQPS = 1441
X86_INS_VCMPNGEPS = 1442
X86_INS_VCMPNGTPS = 1443
X86_INS_VCMPFALSEPS = 1444
X86_INS_VCMPNEQ_OQPS = 1445
X86_INS_VCMPGEPS = 1446
X86_INS_VCMPGTPS = 1447
X86_INS_VCMPTRUEPS = 1448
X86_INS_VCMPEQ_OSPS = 1449
X86_INS_VCMPLT_OQPS = 1450
X86_INS_VCMPLE_OQPS = 1451
X86_INS_VCMPUNORD_SPS = 1452
X86_INS_VCMPNEQ_USPS = 1453
X86_INS_VCMPNLT_UQPS = 1454
X86_INS_VCMPNLE_UQPS = 1455
X86_INS_VCMPORD_SPS = 1456
X86_INS_VCMPEQ_USPS = 1457
X86_INS_VCMPNGE_UQPS = 1458
X86_INS_VCMPNGT_UQPS = 1459
X86_INS_VCMPFALSE_OSPS = 1460
X86_INS_VCMPNEQ_OSPS = 1461
X86_INS_VCMPGE_OQPS = 1462
X86_INS_VCMPGT_OQPS = 1463
X86_INS_VCMPTRUE_USPS = 1464
X86_INS_VCMPPD = 1465
X86_INS_VCMPEQPD = 1466
X86_INS_VCMPLTPD = 1467
X86_INS_VCMPLEPD = 1468
X86_INS_VCMPUNORDPD = 1469
X86_INS_VCMPNEQPD = 1470
X86_INS_VCMPNLTPD = 1471
X86_INS_VCMPNLEPD = 1472
X86_INS_VCMPORDPD = 1473
X86_INS_VCMPEQ_UQPD = 1474
X86_INS_VCMPNGEPD = 1475
X86_INS_VCMPNGTPD = 1476
X86_INS_VCMPFALSEPD = 1477
X86_INS_VCMPNEQ_OQPD = 1478
X86_INS_VCMPGEPD = 1479
X86_INS_VCMPGTPD = 1480
X86_INS_VCMPTRUEPD = 1481
X86_INS_VCMPEQ_OSPD = 1482
X86_INS_VCMPLT_OQPD = 1483
X86_INS_VCMPLE_OQPD = 1484
X86_INS_VCMPUNORD_SPD = 1485
X86_INS_VCMPNEQ_USPD = 1486
X86_INS_VCMPNLT_UQPD = 1487
X86_INS_VCMPNLE_UQPD = 1488
X86_INS_VCMPORD_SPD = 1489
X86_INS_VCMPEQ_USPD = 1490
X86_INS_VCMPNGE_UQPD = 1491
X86_INS_VCMPNGT_UQPD = 1492
X86_INS_VCMPFALSE_OSPD = 1493
X86_INS_VCMPNEQ_OSPD = 1494
X86_INS_VCMPGE_OQPD = 1495
X86_INS_VCMPGT_OQPD = 1496
X86_INS_VCMPTRUE_USPD = 1497
X86_INS_UD0 = 1498
X86_INS_ENDING = 1499
# Group of X86 instructions
X86_GRP_INVALID = 0
# Generic groups
X86_GRP_JUMP = 1
X86_GRP_CALL = 2
X86_GRP_RET = 3
X86_GRP_INT = 4
X86_GRP_IRET = 5
X86_GRP_PRIVILEGE = 6
X86_GRP_BRANCH_RELATIVE = 7
# Architecture-specific groups
X86_GRP_VM = 128
X86_GRP_3DNOW = 129
X86_GRP_AES = 130
X86_GRP_ADX = 131
X86_GRP_AVX = 132
X86_GRP_AVX2 = 133
X86_GRP_AVX512 = 134
X86_GRP_BMI = 135
X86_GRP_BMI2 = 136
X86_GRP_CMOV = 137
X86_GRP_F16C = 138
X86_GRP_FMA = 139
X86_GRP_FMA4 = 140
X86_GRP_FSGSBASE = 141
X86_GRP_HLE = 142
X86_GRP_MMX = 143
X86_GRP_MODE32 = 144
X86_GRP_MODE64 = 145
X86_GRP_RTM = 146
X86_GRP_SHA = 147
X86_GRP_SSE1 = 148
X86_GRP_SSE2 = 149
X86_GRP_SSE3 = 150
X86_GRP_SSE41 = 151
X86_GRP_SSE42 = 152
X86_GRP_SSE4A = 153
X86_GRP_SSSE3 = 154
X86_GRP_PCLMUL = 155
X86_GRP_XOP = 156
X86_GRP_CDI = 157
X86_GRP_ERI = 158
X86_GRP_TBM = 159
X86_GRP_16BITMODE = 160
X86_GRP_NOT64BITMODE = 161
X86_GRP_SGX = 162
X86_GRP_DQI = 163
X86_GRP_BWI = 164
X86_GRP_PFI = 165
X86_GRP_VLX = 166
X86_GRP_SMAP = 167
X86_GRP_NOVLX = 168
X86_GRP_FPU = 169
X86_GRP_ENDING = 170
| StarcoderdataPython |
5425 | import unittest
from routes import Mapper
class TestMapperStr(unittest.TestCase):
    """Verify the human-readable route table rendered by ``str(Mapper)``."""

    def test_str(self):
        mapper = Mapper()
        mapper.connect('/{controller}/{action}')
        mapper.connect('entries', '/entries', controller='entry', action='index')
        mapper.connect('entry', '/entries/{id}', controller='entry', action='show')
        # NOTE(review): the column alignment inside this expected table may
        # have been lost when this file was whitespace-mangled -- confirm it
        # against the actual str(Mapper) output.
        expected = """\
Route name Methods Path
/{controller}/{action}
entries /entries
entry /entries/{id}"""
        # Compare line by line; only trailing padding emitted by Mapper is
        # ignored (via rstrip), leading alignment must match exactly.
        for want, got in zip(expected.splitlines(), str(mapper).splitlines()):
            assert want == got.rstrip()
| StarcoderdataPython |
# Split the bulk input file "8.in" into one file per test case
# (p0.in .. p11.in), each prefixed with its own one-line header.
n = [1] + [50] * 10 + [1]  # number of data lines that go into each output file

with open('8.in', 'r') as src:
    # First line of the header: total count, m lines to skip, k, op
    # (totn and op are read but not used below).
    totn, m, k, op = [int(tok) for tok in src.readline().split()]
    # Discard the next m header lines.
    for _ in range(m):
        src.readline()
    for idx, count in enumerate(n):
        with open('p%d.in' % idx, 'w') as out:
            out.write('%d 0 %d 2\n' % (count, k))
            for _ in range(count):
                # NOTE(review): readline() keeps its trailing newline, so
                # the '+ "\n"' writes every record followed by a blank
                # line -- confirm the doubled newline is intended.
                out.write(src.readline() + '\n')
| StarcoderdataPython |
# Read a temperature in degrees Celsius from stdin and print its
# Fahrenheit and Kelvin equivalents, separated by a space.
c = float(input())
f = (9 / 5) * c + 32  # Celsius -> Fahrenheit
k = c + 273.15        # Celsius -> Kelvin
# BUG FIX: this line was fused with a dataset-separator artifact
# ("| StarcoderdataPython"), which made it raise NameError at runtime.
print(f, k)
3367691 | # -*- coding: utf-8 -*-
from distutils.core import setup
import settings
# Package metadata collected in one mapping so it is easy to scan and extend.
_PACKAGE_META = {
    'name': 'nebula_web',
    'version': settings.Nebula_Web_Version,
    'description': 'nebula_web is nebula web server',
    'author': 'nebula',
    'author_email': '<EMAIL>',
    'url': 'http://www.threathunter.cn',
    'packages': [],
}

setup(**_PACKAGE_META)
| StarcoderdataPython |
52166 | <gh_stars>1-10
#!/usr/bin/python
# Raspberry Pi GPIO-controlled video looper
# Copyright (c) 2019 <NAME>
# License MIT
import RPi.GPIO as GPIO
import os
import sys
from subprocess import Popen, PIPE, call
import time
from threading import Lock
import signal
import argparse
class _GpioParser(argparse.Action):
    """argparse action that parses a GPIO spec of the form ``IN[:OUT],...``.

    Each comma-separated entry maps a GPIO input pin (button) to an
    optional GPIO output pin (indicator LED).  The value stored on the
    namespace is a dict ``{input_pin: output_pin_or_None}``.

    Raises ValueError on a malformed entry, a non-integer pin number, or a
    duplicate input pin.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        gpio_dict = {}
        pin_pairs = values.split(',')
        for pair in pin_pairs:
            pair_split = pair.split(':')
            # BUG FIX: the original test was ``0 == len(pair_split) > 2``, a
            # chained comparison that can never be true, so entries such as
            # "1:2:3" were silently accepted with the extra field dropped.
            # Reject anything that is not IN or IN:OUT.
            if not 1 <= len(pair_split) <= 2:
                raise ValueError('Invalid GPIO pin format')
            try:
                in_pin = int(pair_split[0])
            except ValueError:
                raise ValueError('GPIO input pin must be numeric integer')
            try:
                out_pin = int(pair_split[1])
            except ValueError:
                raise ValueError('GPIO output pin must be numeric integer')
            except IndexError:
                # No ":OUT" part: this input pin has no indicator LED.
                out_pin = None
            if in_pin in gpio_dict:
                raise ValueError('Duplicate GPIO input pin: {}'.format(in_pin))
            gpio_dict[in_pin] = out_pin
        setattr(namespace, self.dest, gpio_dict)
class VidLooper(object):
    """GPIO-driven looping video player built on omxplayer.

    Each GPIO input pin selects one video; while that video plays, the
    input's optional companion output pin is driven HIGH (useful for
    buttons with built-in LEDs).  omxplayer must be installed separately.
    """

    # Debounce interval for button presses, in milliseconds.
    _GPIO_BOUNCE_TIME = 200

    # File extensions recognised when scanning a directory for videos.
    _VIDEO_EXTS = ('.mp4', '.m4v', '.mov', '.avi', '.mkv')

    # Default BCM input-pin -> output-pin (LED) mapping.
    _GPIO_PIN_DEFAULT = {
        26: 21,
        19: 20,
        13: 16,
        6: 12
    }

    # Serialises player-state changes so that rapid button presses cannot
    # update the player concurrently.
    _mutex = Lock()

    # Filename of the currently playing video (None when idle).
    _active_vid = None

    # subprocess.Popen handle of the active omxplayer (None when idle).
    _p = None

    def __init__(self, audio='hdmi', autostart=True, restart_on_press=False,
                 video_dir=None, videos=None, gpio_pins=None, loop=True,
                 no_osd=False, splash=None, debug=False):
        """Collect the playlist and validate the configuration.

        :param audio: omxplayer audio output: 'hdmi', 'local' or 'both'.
        :param autostart: start the splash image or first video in start().
        :param restart_on_press: restart the active video when its own
            button is pressed (otherwise the press is ignored).
        :param video_dir: directory scanned for videos when *videos* is not
            given; defaults to the current working directory.
        :param videos: explicit list of video paths, used instead of
            scanning *video_dir*.
        :param gpio_pins: ``{input_pin: output_pin_or_None}``; defaults to
            a copy of _GPIO_PIN_DEFAULT.
        :param loop: loop the active video.
        :param no_osd: pass --no-osd to omxplayer.
        :param splash: optional splash image shown (via fbi) on autostart.
        :param debug: keep terminal output and the cursor visible.
        :raises FileNotFoundError: if an explicit video path does not exist.
        """
        # BUG FIX: the default used to be ``video_dir=os.getcwd()``, which
        # froze the working directory at import time; resolve it at call
        # time instead (unchanged behaviour for normal use).
        if video_dir is None:
            video_dir = os.getcwd()

        # Use default GPIO pins, if needed
        if gpio_pins is None:
            gpio_pins = self._GPIO_PIN_DEFAULT.copy()
        self.gpio_pins = gpio_pins

        # Assemble the list of videos to play
        if videos:
            self.videos = videos
            for video in videos:
                if not os.path.exists(video):
                    raise FileNotFoundError('Video "{}" not found'.format(video))
        else:
            self.videos = [os.path.join(video_dir, f)
                           for f in sorted(os.listdir(video_dir))
                           if os.path.splitext(f)[1] in self._VIDEO_EXTS]
        if not self.videos:
            raise Exception('No videos found in "{}". Please specify a different '
                            'directory or filename(s).'.format(video_dir))

        # Check that we have enough GPIO input pins for every video.
        # BUG FIX: this assertion used ``len(videos)``, which raised
        # TypeError whenever *videos* was None and the playlist came from a
        # directory scan; it must check the assembled ``self.videos``.
        assert len(self.videos) <= len(self.gpio_pins), \
            "Not enough GPIO pins configured for number of videos"

        self.debug = debug

        assert audio in ('hdmi', 'local', 'both'), "Invalid audio choice"
        self.audio = audio
        self.autostart = autostart
        self.restart_on_press = restart_on_press
        self.loop = loop
        self.no_osd = no_osd
        self.splash = splash
        self._splashproc = None

    def _kill_process(self):
        """Kill the active video player's process tree, if any.

        SIGINT is sent to the whole process group (omxplayer spawns
        children); it shuts omxplayer down most cleanly.
        """
        if self._p is not None:
            os.killpg(os.getpgid(self._p.pid), signal.SIGINT)
            self._p = None

    def switch_vid(self, pin):
        """Switch playback to the video mapped to GPIO input *pin*.

        Registered as the GPIO edge-detection callback; also used to start
        the first video on autostart.
        """
        # Use a mutex lock to avoid a race condition when multiple buttons
        # are pressed quickly.
        with self._mutex:
            # Light only the LED that belongs to the newly selected video.
            for in_pin, out_pin in self.gpio_pins.items():
                if out_pin is not None:
                    GPIO.output(out_pin,
                                GPIO.HIGH if in_pin == pin else GPIO.LOW)
            filename = self.videos[self.in_pins.index(pin)]
            if filename != self._active_vid or self.restart_on_press:
                # Kill any previous video player process
                self._kill_process()
                # Start a new player process.  Capture STDOUT to keep the
                # screen clear, and give it its own session id (os.setsid)
                # so the whole process tree can be killed later.
                cmd = ['omxplayer', '-b', '-o', self.audio]
                if self.loop:
                    cmd += ['--loop']
                if self.no_osd:
                    cmd += ['--no-osd']
                self._p = Popen(cmd + [filename],
                                stdout=None if self.debug else PIPE,
                                preexec_fn=os.setsid)
                self._active_vid = filename

    @property
    def in_pins(self):
        """Tuple of the GPIO input pins, in configuration order."""
        return tuple(self.gpio_pins.keys())

    def start(self):
        """Configure GPIO, optionally autostart playback, then loop forever.

        Blocks until interrupted; terminal and GPIO state are restored by
        __del__ via the ``finally`` clause.
        """
        if not self.debug:
            # Clear the screen
            os.system('clear')
            # Disable the (blinking) cursor
            os.system('tput civis')

        # Set up GPIO: inputs with pull-ups (buttons short the pin to
        # ground), outputs driven LOW initially.
        GPIO.setmode(GPIO.BCM)
        for in_pin, out_pin in self.gpio_pins.items():
            GPIO.setup(in_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
            if out_pin is not None:
                GPIO.setup(out_pin, GPIO.OUT)
                GPIO.output(out_pin, GPIO.LOW)

        if self.autostart:
            if self.splash is not None:
                # Show a splash image (via fbi) instead of starting a video.
                self._splashproc = Popen(['fbi', '--noverbose', '-a',
                                          self.splash])
            else:
                # Start playing first video
                self.switch_vid(self.in_pins[0])

        # Enable event detection on each input pin
        for pin in self.in_pins:
            GPIO.add_event_detect(pin, GPIO.FALLING, callback=self.switch_vid,
                                  bouncetime=self._GPIO_BOUNCE_TIME)

        # Loop forever
        try:
            while True:
                time.sleep(0.5)
                if not self.loop:
                    # Non-looping mode: wait for the player to exit, then
                    # reset the LEDs -- unless another video was started
                    # while we were waiting (detected via the pid check).
                    pid = -1
                    if self._p:
                        pid = self._p.pid
                        self._p.communicate()
                    if self._p:
                        if self._p.pid == pid:
                            # Reset LEDs
                            for out_pin in self.gpio_pins.values():
                                if out_pin is not None:
                                    GPIO.output(out_pin, GPIO.LOW)
                            self._active_vid = None
                            self._p = None
        finally:
            self.__del__()

    def __del__(self):
        """Restore terminal/GPIO state and kill any child processes."""
        if not self.debug:
            # Reset the terminal cursor to normal
            os.system('tput cnorm')
        # Cleanup the GPIO pins (reset them)
        GPIO.cleanup()
        # Kill any active video process
        self._kill_process()
        # Kill any active splash screen
        if self._splashproc:
            os.killpg(os.getpgid(self._splashproc.pid), signal.SIGKILL)
def main():
    """Parse command-line options, run the optional countdown and start
    the :class:`VidLooper` event loop.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""Raspberry Pi video player controlled by GPIO pins
This program is designed to power a looping video display, where the active
video can be changed by pressing a button (i.e. by shorting a GPIO pin).
The active video can optionally be indicated by an LED (one output for each
input pin; works well with switches with built-in LEDs, but separate LEDs work
too).
This video player uses omxplayer, a hardware-accelerated video player for the
Raspberry Pi, which must be installed separately.
"""
    )
    parser.add_argument('--audio', default='hdmi',
                        choices=('hdmi', 'local', 'both'),
                        help='Output audio over HDMI, local (headphone jack),'
                             'or both')
    parser.add_argument('--no-autostart', action='store_false',
                        dest='autostart', default=True,
                        help='Don\'t start playing a video on startup')
    parser.add_argument('--no-loop', action='store_false', default=True,
                        dest='loop', help='Don\'t loop the active video')
    parser.add_argument(
        '--restart-on-press', action='store_true', default=False,
        help='If True, restart the current video if the button for the active '
             'video is pressed. If False, pressing the button for the active '
             'video will be ignored.')
    # Either a directory of videos or an explicit list, never both.
    vidmode = parser.add_mutually_exclusive_group()
    vidmode.add_argument(
        '--video-dir', default=os.getcwd(),
        help='Directory containing video files. Use this or specify videos one '
             'at a time at the end of the command.')
    vidmode.add_argument('videos', action="store", nargs='*', default=(),
                         help='List of video paths (local, rtsp:// or rtmp://)')
    # _GpioParser converts the "IN:OUT,IN,..." string into pin pairs.
    parser.add_argument('--gpio-pins', default=VidLooper._GPIO_PIN_DEFAULT,
                        action=_GpioParser,
                        help='List of GPIO pins. Either INPUT:OUTPUT pairs, or '
                             'just INPUT pins (no output), separated by '
                             'commas.')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Debug mode (don\'t clear screen or suppress '
                             'terminal output)')
    parser.add_argument('--countdown', type=int, default=0,
                        help='Add a countdown before start (time in seconds)')
    parser.add_argument('--splash', type=str, default=None,
                        help='Splash screen image to show when no video is '
                             'playing')
    parser.add_argument('--no-osd', action='store_true', default=False,
                        help='Don\'t show on-screen display when changing '
                             'videos')
    # Invoke the videoplayer
    args = parser.parse_args()
    # Apply any countdown
    countdown = args.countdown
    while countdown > 0:
        sys.stdout.write(
            '\rrpi-vidlooper starting in {} seconds '
            '(Ctrl-C to abort)...'.format(countdown))
        sys.stdout.flush()
        time.sleep(1)
        countdown -= 1
    # 'countdown' is not a VidLooper constructor argument, so drop it
    # before forwarding the parsed namespace as keyword arguments.
    del args.countdown
    VidLooper(**vars(args)).start()
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1699199 | <reponame>aasensio/pyiacsun
from .prox_rank1_box import *
from .prox_rank1_hinge import *
from .prox_rank1_l0 import *
from .prox_rank1_l1 import *
from .prox_rank1_l1pos import *
from .prox_rank1_linf import *
from .prox_rank1_Rplus import *
| StarcoderdataPython |
3250388 | <filename>test_scripts/test2.py<gh_stars>1-10
# -*- coding:utf-8 -*-
import json
import array
import requests
url = "http://54.180.120.132:5000/"
#url = "http://1172.16.31.10:5000/"
def test():
    """POST the sample WAV file, prefixed with an STT keyword, to the server.

    Payload layout is ``<keyword utf-8 bytes> b'!' <raw wav bytes>``.

    Returns:
        The response body text on success, or ``None`` if the request failed
        (callers must handle the ``None`` case).
    """
    # Read the audio file with a context manager so the handle is always
    # closed (the original left it open and took a pointless detour through
    # array.array to obtain the same bytes).
    with open("../data/sample_sound.wav", 'rb') as audio_file:
        body = audio_file.read()
    stt = '카스'
    try:
        # Prepend the keyword and the '!' separator expected by the server.
        body = stt.encode() + b'!' + body
        response = requests.post(url + "cmd", data=body,
                                 headers={'Content-Type': 'application/octet-stream'})
        print("url : ", url + "cmd")
        print("file len : ", len(body))
        print("status code :", response.status_code)
        return response.text
    except Exception as e:
        print("ERROR! ", str(e))
        return None  # explicit failure marker (was an implicit None)
def main():
    """Run one request against the server and pretty-print the JSON reply."""
    resp = test()
    # test() returns None when the HTTP request fails; json.loads(None)
    # would raise a TypeError, so bail out cleanly instead.
    if resp is None:
        print("no response from server")
        return
    cmd = json.loads(resp)
    print(cmd)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3319864 | """ test the slip correction factor calculation
"""
import pytest
from particula import u
from particula.util.knudsen_number import knu
from particula.util.slip_correction import scf
def test_slip_correction():
    """Test the slip correction factor (scf) calculation.

    The slip correction factor is approximately:
      ~1   if radius >~ 1e-6 m (continuum regime, Kn -> 0)
      ~100 if radius <~ 1e-9 m (free-molecular regime)

    Also checks that scf broadcasts radius/mfp/temperature arrays
    into the expected output shapes.
    """
    radius_micron = 1e-6 * u.m
    radius_nano = 1e-9 * u.m
    # mean free path air
    mfp_air = 66.4e-9 * u.m
    knu_val = knu(radius=radius_micron, mfp=mfp_air)
    # Large particle: correction factor is essentially 1.
    assert (
        scf(radius=radius_micron) ==
        pytest.approx(1, rel=1e-1)
    )
    # Nanometre particle: correction factor on the order of 100.
    assert (
        scf(radius=radius_nano) ==
        pytest.approx(100, rel=1e0)
    )
    # Passing a precomputed Knudsen number gives the same result.
    assert (
        scf(radius=radius_micron, knu=knu_val) ==
        pytest.approx(1, rel=1e-1)
    )
    # Broadcasting behaviour: scalar/array combinations of inputs.
    assert scf(radius=[1, 2, 3]).m.shape == (3,)
    assert scf(radius=1, mfp=[1, 2, 3]).m.shape == (3, 1)
    assert scf(radius=[1, 2, 3], mfp=[1, 2, 3]).m.shape == (3, 3)
    assert scf(radius=[1, 2, 3], temperature=[1, 2, 3]).m.shape == (3, 3)
164325 | import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from datasets.dataset import Dataset
class AdultDataset(Dataset):
    """Adult Census Income dataset (https://archive.ics.uci.edu/ml/datasets/Adult).

    Downloads the raw data, coarsens several categorical columns into a small
    set of grouped levels, and exposes integer-coded and one-hot-encoded views
    of the features for optimizers and classifiers.

    NOTE(review): attributes such as ``self.dataset``, ``self.labels``,
    ``self.columns``, ``self.real_features`` and ``self.cat_features`` are
    presumably populated by the ``Dataset`` base class from ``load()`` /
    ``extract_info()`` — confirm against ``datasets.dataset``.
    """

    def __init__(self):
        super().__init__(name="Adult Census", description="The Adult Census dataset")
        # Integer codes for every (grouped) categorical level.  These are the
        # values produced by ``preprocess`` and reversed by
        # ``inv_cat_mappings`` in ``get_processed_orig_data``.
        self.cat_mappings = {
            "education": {
                "School": 0,
                "HS-grad": 1,
                "Some-college": 2,
                "Prof-school": 3,
                "Assoc": 4,
                "Bachelors": 5,
                "Masters": 6,
                "Doctorate": 7,
            },
            "marital_status": {
                "Divorced": 0,
                "Married": 1,
                "Separated": 2,
                "Single": 3,
                "Widowed": 4,
            },
            "workclass": {
                "Other/Unknown": 0,
                "Government": 1,
                "Private": 2,
                "Self-Employed": 3,
            },
            "occupation": {
                "Other/Unknown": 0,
                "Blue-Collar": 1,
                "Professional": 2,
                "Sales": 3,
                "Service": 4,
                "White-Collar": 5,
            },
            "race": {
                "White": 0,
                "Other": 1,
            },
            "gender": {
                "Male": 0,
                "Female": 1,
            },
            "native_country": {
                "?": 0,
                "Cambodia": 1,
                "Canada": 2,
                "China": 3,
                "Columbia": 4,
                "Cuba": 5,
                "Dominican-Republic": 6,
                "Ecuador": 7,
                "El-Salvador": 8,
                "England": 9,
                "France": 10,
                "Germany": 11,
                "Greece": 12,
                "Guatemala": 13,
                "Haiti": 14,
                "Holand-Netherlands": 15,
                "Honduras": 16,
                "Hong": 17,
                "Hungary": 18,
                "India": 19,
                "Iran": 20,
                "Ireland": 21,
                "Italy": 22,
                "Jamaica": 23,
                "Japan": 24,
                "Laos": 25,
                "Mexico": 26,
                "Nicaragua": 27,
                "Outlying-US(Guam-USVI-etc)": 28,
                "Peru": 29,
                "Philippines": 30,
                "Poland": 31,
                "Portugal": 32,
                "Puerto-Rico": 33,
                "Scotland": 34,
                "South": 35,
                "Taiwan": 36,
                "Thailand": 37,
                "Trinadad&Tobago": 38,
                "United-States": 39,
                "Vietnam": 40,
                "Yugoslavia": 41,
            },
        }
        # Reverse lookup: per column, integer code -> level name.
        self.inv_cat_mappings = {
            key: {v: k for k, v in mapping.items()}
            for key, mapping in self.cat_mappings.items()
        }
        self.__init_encoder()

    def load(self) -> tuple:
        """Download the Adult income data and return ``(features, labels)``.

        Column groupings follow https://rpubs.com/H_Zhu/235617: many raw
        categorical levels are merged into a handful of coarser groups.

        Returns:
            A pair ``(X, y)`` where ``X`` is a DataFrame of features and
            ``y`` is a Series of binary income labels (1 means ``>50K``).
        """
        raw_data = np.genfromtxt(
            "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
            delimiter=", ",
            dtype=str,
        )
        # column names from "https://archive.ics.uci.edu/ml/datasets/Adult"
        column_names = [
            "age",
            "workclass",
            "fnlwgt",
            "education",
            "educational-num",
            "marital-status",
            "occupation",
            "relationship",
            "race",
            "gender",
            "capital-gain",
            "capital-loss",
            "hours-per-week",
            "native-country",
            "income",
        ]
        adult_data = pd.DataFrame(raw_data, columns=column_names)
        adult_data = adult_data.astype(
            {"age": np.int64, "educational-num": np.int64, "hours-per-week": np.int64}
        )
        # Collapse rare/ambiguous workclass levels into coarse groups.
        adult_data = adult_data.replace(
            {
                "workclass": {
                    "Without-pay": "Other/Unknown",
                    "Never-worked": "Other/Unknown",
                }
            }
        )
        adult_data = adult_data.replace(
            {
                "workclass": {
                    "Federal-gov": "Government",
                    "State-gov": "Government",
                    "Local-gov": "Government",
                }
            }
        )
        adult_data = adult_data.replace(
            {
                "workclass": {
                    "Self-emp-not-inc": "Self-Employed",
                    "Self-emp-inc": "Self-Employed",
                }
            }
        )
        adult_data = adult_data.replace({"workclass": {"?": "Other/Unknown"}})
        # Map detailed occupations onto five broad occupation classes.
        adult_data = adult_data.replace(
            {
                "occupation": {
                    "Adm-clerical": "White-Collar",
                    "Craft-repair": "Blue-Collar",
                    "Exec-managerial": "White-Collar",
                    "Farming-fishing": "Blue-Collar",
                    "Handlers-cleaners": "Blue-Collar",
                    "Machine-op-inspct": "Blue-Collar",
                    "Other-service": "Service",
                    "Priv-house-serv": "Service",
                    "Prof-specialty": "Professional",
                    "Protective-serv": "Service",
                    "Tech-support": "Service",
                    "Transport-moving": "Blue-Collar",
                    "Unknown": "Other/Unknown",
                    "Armed-Forces": "Other/Unknown",
                    "?": "Other/Unknown",
                }
            }
        )
        adult_data = adult_data.replace(
            {
                "marital-status": {
                    "Married-civ-spouse": "Married",
                    "Married-AF-spouse": "Married",
                    "Married-spouse-absent": "Married",
                    "Never-married": "Single",
                }
            }
        )
        # Race is binarized into White / Other.
        adult_data = adult_data.replace(
            {
                "race": {
                    "Black": "Other",
                    "Asian-Pac-Islander": "Other",
                    "Amer-Indian-Eskimo": "Other",
                }
            }
        )
        # Keep only the feature columns used downstream (order matters:
        # extract_info() indexes columns positionally).
        adult_data = adult_data[
            [
                "age",
                "capital-gain",
                "hours-per-week",
                "workclass",
                "education",
                "marital-status",
                "occupation",
                "race",
                "gender",
                "capital-loss",
                "native-country",
                "income",
            ]
        ]
        adult_data = adult_data.replace({"income": {"<=50K": 0, ">50K": 1}})
        # Collapse education levels below HS-grad into a single "School" bin.
        adult_data = adult_data.replace(
            {
                "education": {
                    "Assoc-voc": "Assoc",
                    "Assoc-acdm": "Assoc",
                    "11th": "School",
                    "10th": "School",
                    "7th-8th": "School",
                    "9th": "School",
                    "12th": "School",
                    "5th-6th": "School",
                    "1st-4th": "School",
                    "Preschool": "School",
                }
            }
        )
        # Rename to underscore style so column names match cat_mappings keys.
        adult_data = adult_data.rename(
            columns={
                "marital-status": "marital_status",
                "hours-per-week": "hours_per_week",
                "capital-gain": "capital_gain",
                "native-country": "native_country",
                "capital-loss": "capital_loss",
            }
        )
        return adult_data.drop("income", axis=1), adult_data["income"]

    def extract_info(self):
        """Return ``(columns, target, real_feat, cat_feat)``.

        ``real_feat`` / ``cat_feat`` are positional column indices into the
        frame produced by ``load()``.
        """
        columns = self.dataset.columns
        target = "income"
        real_feat = np.array(
            [
                0,  # age
                1,  # capital-gain
                2,  # hours-per-week
                9,  # capital-loss
            ]
        )
        cat_feat = np.array(
            [
                3,  # workclass
                4,  # education
                5,  # marital
                6,  # occupation
                7,  # race
                8,  # gender
                10,  # native-country
            ]
        )
        # Sanity check: the two index sets together cover 0..max contiguously
        # with no gaps or duplicates.
        _both = np.concatenate([real_feat, cat_feat])
        _cond = (np.sort(_both) == np.arange(0, max(_both) + 1)).all()
        assert _cond
        return columns, target, real_feat, cat_feat

    def __init_encoder(self):
        """Fit a dense one-hot encoder on the categorical feature columns."""
        # NOTE(review): ``sparse=False`` was renamed to ``sparse_output`` in
        # scikit-learn >= 1.2 — confirm the pinned scikit-learn version.
        self.encoder = OneHotEncoder(sparse=False)
        X = self.get_optimizer_data().copy()
        self.encoder.fit(X[:, self.cat_features])
        return self.encoder

    def encode_features(self, X: np.ndarray) -> np.ndarray:
        """One-hot encode categorical columns; real columns come first."""
        onehot = self.encoder.transform(X[:, self.cat_features])
        n_real = len(self.real_features)
        n_onehot = onehot.shape[1]
        _X = np.zeros((X.shape[0], n_real + n_onehot))
        _X[:, :n_real] = X[:, self.real_features]
        _X[:, n_real:] = onehot
        return _X.astype(int)

    def decode_features(self, X: np.ndarray) -> np.ndarray:
        """Inverse of :meth:`encode_features`: restore original column layout."""
        _X = np.zeros((X.shape[0], self.dataset.shape[1]))
        n_real = len(self.real_features)
        orig_cat = self.encoder.inverse_transform(X[:, n_real:])
        _X[:, self.real_features] = X[:, :n_real].copy()
        _X[:, self.cat_features] = orig_cat
        return _X.astype(int)

    def preprocess(self, X: pd.DataFrame) -> pd.DataFrame:
        """Map categorical level names to their integer codes.

        NOTE(review): the ``X`` argument is ignored and ``self.dataset`` is
        always used — confirm this is intentional before relying on ``X``.
        """
        df = self.dataset.copy()
        return df.replace(self.cat_mappings)

    def get_optimizer_data(self) -> np.ndarray:
        """Integer-coded feature matrix (all columns cast to int)."""
        X = self.get_numpy_representation()
        X[:, self.real_features] = X[:, self.real_features].astype(float)
        X[:, self.cat_features] = X[:, self.cat_features].astype(int)
        return X.astype(int)

    def get_classifier_data(self):
        """Return ``(one-hot encoded features, labels)`` for model training."""
        X = self.get_optimizer_data().copy()
        return self.encode_features(X), self.labels

    def get_processed_orig_data(self, X: np.ndarray) -> pd.DataFrame:
        """Map integer-coded rows back to human-readable category labels."""
        df = pd.DataFrame(X, columns=self.columns)
        df = df.replace(self.inv_cat_mappings)
        return df
3288741 | <gh_stars>0
import requests
import re

# Fetch the DSU news page and scrape headline text with a regex:
# headlines appear as the text between '>' and '</a></h2>' in the markup.
r = requests.get("https://dsu.edu/news")
news = re.findall(">([^<]+)</a></h2>", r.text)
for n in news:
    # NOTE(review): this is Python 2 code — ``print n`` is a print
    # *statement*, and ``encode`` here yields a py2 byte string for safe
    # console output.  It will not run under Python 3 as written.
    n = n.encode("ascii", "ignore")
    print n
| StarcoderdataPython |
197595 | import logging
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes
from core.nodes.base import CoreNode
from core.nodes.network import SwitchNode
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    # setup basic network: two nodes joined by a switch on 10.83.0.0/16
    prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
    options = NodeOptions(model="nothing")
    coreemu = CoreEmu()
    session = coreemu.create_session()
    # Nodes may only be added while the session is in configuration state.
    session.set_state(EventTypes.CONFIGURATION_STATE)
    switch = session.add_node(SwitchNode)
    # node one (both nodes share the same NodeOptions/services)
    options.config_services = ["DefaultRoute", "IPForward"]
    node_one = session.add_node(CoreNode, options=options)
    interface = prefixes.create_interface(node_one)
    session.add_link(node_one.id, switch.id, interface_one=interface)
    # node two
    node_two = session.add_node(CoreNode, options=options)
    interface = prefixes.create_interface(node_two)
    session.add_link(node_two.id, switch.id, interface_one=interface)
    # start session and run services; block until the user exits
    session.instantiate()
    input("press enter to exit")
    session.shutdown()
| StarcoderdataPython |
159977 | <gh_stars>1-10
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the initial Sentry tables
    ``sentry_groupedmessage`` and ``sentry_message``.
    """

    def forwards(self, orm):
        """Apply the migration: create both tables and the unique constraint."""
        # Adding model 'GroupedMessage'
        db.create_table('sentry_groupedmessage', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
            ('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)),
            ('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
            ('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
            ('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
        ))
        db.send_create_signal('sentry', ['GroupedMessage'])
        # Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
        db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
        # Adding model 'Message'
        db.create_table('sentry_message', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
            ('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)),
            ('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
            ('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('sentry', ['Message'])

    def backwards(self, orm):
        """Reverse the migration: drop both tables.

        NOTE(review): the unique constraint is removed *after* its table is
        deleted — South normally drops constraints before the table; confirm
        this ordering works on the targeted database backends.
        """
        # Deleting model 'GroupedMessage'
        db.delete_table('sentry_groupedmessage')
        # Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
        db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
        # Deleting model 'Message'
        db.delete_table('sentry_message')

    # Frozen ORM model definitions used by South for this migration.
    # NOTE(review): 'sentry.message'.view declares max_length 255 here but the
    # table was created with max_length 200 above — likely a generated-code
    # inconsistency; confirm before relying on either value.
    models = {
        'sentry.groupedmessage': {
            'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
        },
        'sentry.message': {
            'Meta': {'object_name': 'Message'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        }
    }
    complete_apps = ['sentry']
| StarcoderdataPython |
1709136 | <gh_stars>1-10
#! /usr/bin/python3
# Copyright © 2017 <NAME> <<EMAIL>>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
""" ...
Usage:
figLidar.py <FIGURE_DIR>
Arguments:
<FIGURE_DIR> Directory for saved figures.
Options:
-h, --help
"""
import matplotlib.pyplot as plt
from docopt import docopt
import pandas as pd
# Output directory parsed from the docopt usage string at the top of the file.
FIGURE_DIR = docopt(__doc__)['<FIGURE_DIR>']
# One colour per measurement distance (1 m .. 5 m).
PLOT_COLORS = ['cornflowerblue', 'darkorange', 'forestgreen', 'tomato', 'gold']
# Fixed plot extents [xmin, xmax, ymin, ymax] in metres.
PLOT_AXIS = [-0.5, 5.5, -2.2, 2.2]
def main(data_dir="/home/sami/work/memo/data/processed"):
    """Plot the LiDAR range-test point clouds and save PNG/PDF figures.

    Args:
        data_dir: directory holding the ``lidar{1..5}m.pickle`` DataFrames.
            Defaults to the original hard-coded location, so existing callers
            are unaffected, but the path is now configurable.
    """
    # Load the five captures taken at 1 m .. 5 m target distance
    # (replaces five duplicated hard-coded read_pickle lines).
    frames = [pd.read_pickle("{}/lidar{}m.pickle".format(data_dir, i))
              for i in range(1, 6)]
    # Mark the sensor position at the origin with a black triangle.
    plt.scatter(0, 0, color='black', facecolor='black', marker=(3, 0, -90), s=400)
    for i, d in enumerate(frames):
        # Keep only points in the forward half-plane, farther than 0.2 m.
        d = d[(d.angle > 90) & (d.angle < 270) & (d.distance > 0.2)]
        label = "{:d} m".format(i + 1)
        color = PLOT_COLORS[i]
        plt.scatter(d.lidarX, d.lidarY, color=color, label=label,
                    s=30, marker='.',)
        if len(d) > 0:
            # Report the spread of the measured X positions per distance.
            s = "dist: {:d}m, data: {:3d}, variance: {:1.2f}m"
            print(s.format(i + 1, len(d), max(d.lidarX) - min(d.lidarX)))
    plt.xlabel('x [m]')
    plt.ylabel('y [m]')
    plt.axis(PLOT_AXIS)
    plt.savefig("{}/lidarRangeTest.png".format(FIGURE_DIR), bbox_inches='tight')
    plt.savefig("{}/lidarRangeTest.pdf".format(FIGURE_DIR), bbox_inches='tight')
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1716328 | <filename>test.py
###############################################################################
#
# Copyright (c) 2018, <NAME>,
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# #############################################################################
""" Play against a network.
Some arguments can be passed by command line (see --help) while other can be
modified in the config.py file.
To run:
$ python test.py --mode=tictactoe
Or choose another valid mode (see --help).
"""
import argparse
import numpy as np
import os.path as osp
import sys
import tensorflow as tf
import utils
from game_manager_io import GameManagerIO
from mcts import MCTS
# NOTE(review): TF 1.x-era API (tf.contrib / ConfigProto / enable_eager) —
# this module will not run on TensorFlow 2.x without changes.
tfe = tf.contrib.eager
# Let TensorFlow grow GPU memory on demand instead of reserving it all.
config_proto = tf.ConfigProto()
config_proto.gpu_options.allow_growth = True
tf.enable_eager_execution(config=config_proto)
def main():
    """Run an interactive game between the user and/or the MCTS player.

    Builds the game manager and IO helper from the chosen mode, then loops
    turns until the game ends or the user exits, finally printing the result.
    """
    args = parse_args()
    valid_modes_list = utils.get_valid_game_modes()
    valid_modes_string = utils.get_valid_game_modes_string()
    if args.mode not in valid_modes_list:
        print('Invalid game mode informed. Please inform a mode with ' +
              '--mode=mode_name, where mode_name is one of the following ' +
              '{%s}' % valid_modes_string)
        sys.exit()
    gconf = utils.get_game_config(args.mode, 'test')
    if args.game_type == 'moku':
        (game_config_string, game_manager_module, game_manager_kwargs,
         game_manager_io_module, game_manager_io_kwargs) = \
            utils.generate_moku_manager_params(
                gconf.drop_mode, gconf.moku_size, gconf.board_size,
                args.gpu_id, gconf.num_res_layers, gconf.num_channels)
    else:
        raise NotImplementedError(
            'Game type %s is not supported.' % args.game_type)
    train_dir = osp.join('train_files', game_config_string)
    ckpt_path = utils.get_checkpoint_path(train_dir, args.num_iters_ckpt)
    game_manager_kwargs['ckpt_path'] = ckpt_path
    # Game manager / IO classes are resolved dynamically from
    # (module_name, class_name) pairs returned above.
    gm_module = __import__(game_manager_module[0])
    gm_class = getattr(gm_module, game_manager_module[1])
    game_manager = gm_class(**game_manager_kwargs)
    gmio_module = __import__(game_manager_io_module[0])
    gmio_class = getattr(gmio_module, game_manager_io_module[1])
    game_manager_io = gmio_class(**game_manager_io_kwargs)
    state = game_manager.initial_state()
    mctss = [MCTS(game_manager, gconf.max_simulations_per_move, gconf.cpuct,
                  gconf.virtual_loss, state, gconf.root_noise_weight,
                  gconf.dirichlet_noise_param, gconf.eval_batch_size,
                  game_manager_kwargs['tf_device'])]
    iplayer = 0
    iplay = 0
    moves = []
    last_played_imove = None
    while not game_manager.is_over(state.state[np.newaxis])[0]:
        imove = None
        # Early turns may use a relaxed (exploratory) temperature.
        if iplay < gconf.num_relaxed_turns:
            turn_temperature = 1.0
        else:
            turn_temperature = gconf.move_temperature
        imc = iplayer % len(mctss)
        print('===== New turn =====')
        game_manager_io.print_board(state, last_played_imove)
        if args.iuser == 2 or iplayer == args.iuser:
            # User types a move
            imove = game_manager_io.get_input(state)
            if imove == GameManagerIO.IEXIT:
                break
        if imove == GameManagerIO.ICOMPUTER_MOVE or \
                (args.iuser != 2 and iplayer != args.iuser):
            # Computer chooses a move
            stats = mctss[imc].simulate(state, gconf.max_seconds_per_move)
            if args.show_mcts:
                print('MCTS stats')
                game_manager_io.print_stats(stats)
                print()
            if args.show_win_prob or imove == GameManagerIO.ICOMPUTER_MOVE:
                # Network value head output in [-1, 1]; rescale to [0, 1].
                with tf.device(game_manager_kwargs['tf_device']):
                    _, value_prior = game_manager.predict(
                        tf.constant(state.state[np.newaxis], tf.float32))
                win_prob = (value_prior[0] + 1.0) / 2.0
                print('Estimated win probability: %.03f\n' % win_prob)
            if args.show_move_prob or imove == GameManagerIO.ICOMPUTER_MOVE:
                print('Move probabilities:')
                game_manager_io.print_stats_on_board(stats, 1)
                print()
            if args.show_move_prob_temp:
                print('Move probabilities with temperature ' +
                      '%.1e' % turn_temperature)
                game_manager_io.print_stats_on_board(stats, turn_temperature)
                print()
            if imove == GameManagerIO.ICOMPUTER_MOVE:
                # If user asked for computer prediction,
                # escape before actually choosing a move
                continue
            imove, _ = mctss[imc].choose_move(turn_temperature)
        moves.append((imove, iplayer))
        last_played_imove = imove
        state = game_manager.update_state(state, last_played_imove)
        iplayer = (iplayer + 1) % 2
        # Keep every MCTS tree in sync with the move just played.
        for imc2 in range(len(mctss)):
            mctss[imc2].update_root(last_played_imove, state)
        iplay += 1
    if imove == GameManagerIO.IEXIT:
        print('Game unfinished')
    else:
        game_manager_io.print_board(state, imove)
        iwinner = game_manager.get_iwinner(state.state[np.newaxis])
        if iwinner < 0:
            print('DRAW')
        else:
            if args.iuser == 2:
                print('Player %d WON.' % (iwinner + 1))
            elif iwinner == args.iuser:
                print('You WON!')
            else:
                print('You LOST!')
def parse_args():
    """Build and parse the command-line arguments for the test player.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    valid_modes = utils.get_valid_game_modes_string()
    parser.add_argument(
        '--mode',
        help=('A valid game mode name. valid modes are {%s}.' % valid_modes),
        default=None
    )
    parser.add_argument(
        '--gpu_id',
        help=('GPU id to use, or -1 to use the CPU.'),
        default=0,
        type=int
    )
    parser.add_argument(
        '--game_type',
        help=('Type is a more general term which may include many game ' +
              'modes. For example, moku is the type of tictactoe, connect4 ' +
              'and gomoku modes.'),
        default='moku'
    )
    parser.add_argument(
        '--iuser',
        help=('Index of the user, 0 to play first and 1 to play second. ' +
              'Or you can also use -1 to let the computer play as both ' +
              'players or 2 if you want to play as both players.'),
        default=0,
        type=int
    )
    parser.add_argument(
        '--num_iters_ckpt',
        help=('Number of iterations in the checkpoint to load. ' +
              'e.g. if the file is called moku3_3x3_1000.ckpt, type 1000. ' +
              'Use -1 to load the latest checkpoint or 0 to use a naive ' +
              'network.'),
        default=-1,
        type=int
    )
    # The display flags below use nargs='?' + const=True so they behave as
    # boolean switches that can also be passed an explicit value.
    parser.add_argument(
        '-sm',
        '--show_mcts',
        help=('If set, the MCTS stats for the current state will ' +
              'be displayed.'),
        nargs='?',
        const=True,
        default=False,
        type=bool
    )
    parser.add_argument(
        '-sp',
        '--show_move_prob',
        help=('If set, the probabilities of playing at each position will ' +
              'be displayed.'),
        nargs='?',
        const=True,
        default=False,
        type=bool
    )
    parser.add_argument(
        '-spt',
        '--show_move_prob_temp',
        help=('If set, the probabilities of playing at each position ' +
              'rebalanced by the temperature will be displayed.'),
        nargs='?',
        const=True,
        default=False,
        type=bool
    )
    parser.add_argument(
        '-sw',
        '--show_win_prob',
        help=('If set, the winning probability estimated by the network ' +
              'will be displayed.'),
        nargs='?',
        const=True,
        default=False,
        type=bool
    )
    args = parser.parse_args()
    return args
return args
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1682659 | <filename>dashboard/dashboard/debug_alert.py<gh_stars>0
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface for debugging the anomaly detection function."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import urllib
from dashboard import find_anomalies
from dashboard import find_change_points
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import graph_data
from dashboard.sheriff_config_client import SheriffConfigClient
# Default number of points before and after a point to analyze.
# Overridable per request via the num_before / num_after query parameters.
_NUM_BEFORE = 40
_NUM_AFTER = 10
class QueryParameterError(Exception):
    """Raised when a request query parameter is missing or invalid."""
class DebugAlertHandler(request_handler.RequestHandler):
"""Request handler for the /debug_alert page."""
def get(self):
"""Displays UI for debugging the anomaly detection function.
Request parameters:
test_path: Full test path (Master/bot/suite/chart) for test with alert.
rev: A revision (Row id number) to center the graph on.
num_before: Maximum number of points after the given revision to get.
num_after: Maximum number of points before the given revision.
config: Config parameters for in JSON form.
Outputs:
A HTML page with a chart (if test_path is given) and a form.
"""
try:
test = self._GetTest()
num_before, num_after = self._GetNumBeforeAfter()
config_name = self._GetConfigName(test)
config_dict = anomaly_config.CleanConfigDict(self._GetConfigDict(test))
except QueryParameterError as e:
self.RenderHtml('debug_alert.html', {'error': e.message})
return
revision = self.request.get('rev')
if revision:
rows = _FetchRowsAroundRev(test, int(revision), num_before, num_after)
else:
rows = _FetchLatestRows(test, num_before)
chart_series = _ChartSeries(rows)
lookup = _RevisionList(rows)
# Get the anomaly data from the new anomaly detection module. This will
# also be passed to the template so that it can be shown on the page.
change_points = SimulateAlertProcessing(chart_series, **config_dict)
anomaly_indexes = [c.x_value for c in change_points]
anomaly_points = [(i, chart_series[i][1]) for i in anomaly_indexes]
anomaly_segments = _AnomalySegmentSeries(change_points)
plot_data = _GetPlotData(chart_series, anomaly_points, anomaly_segments)
subscriptions, err_msg = SheriffConfigClient().Match(test.test_path)
subscription_names = ','.join([s.name for s in subscriptions or []])
if err_msg is not None:
self.RenderHtml('debug_alert.html', {'error': err_msg})
# Render the debug_alert page with all of the parameters filled in.
self.RenderHtml('debug_alert.html', {
'test_path': test.test_path,
'rev': revision or '',
'num_before': num_before,
'num_after': num_after,
'sheriff_name': subscription_names,
'config_name': config_name,
'config_json': json.dumps(config_dict, indent=2, sort_keys=True),
'plot_data': json.dumps(plot_data),
'lookup': json.dumps(lookup),
'anomalies': json.dumps([c.AsDict() for c in change_points]),
'csv_url': _CsvUrl(test.test_path, rows),
'graph_url': _GraphUrl(test, revision),
'stored_anomalies': _FetchStoredAnomalies(test, lookup),
})
def post(self):
  """Handles a POST request identically to a GET request (see get())."""
  return self.get()
def _GetTest(self):
  """Fetches the TestMetadata entity named by the 'test_path' parameter.

  Returns:
    The TestMetadata entity for the requested test path.

  Raises:
    QueryParameterError: If no test path is given, or no such test exists.
  """
  test_path = self.request.get('test_path')
  if not test_path:
    raise QueryParameterError('No test specified.')
  test = utils.TestKey(test_path).get()
  if not test:
    raise QueryParameterError('Test "%s" not found.' % test_path)
  return test
def _GetNumBeforeAfter(self):
  """Parses the 'num_before' and 'num_after' query parameters as ints.

  Falls back to the module-level defaults _NUM_BEFORE/_NUM_AFTER when a
  parameter is absent.

  Returns:
    A (num_before, num_after) pair of ints.

  Raises:
    QueryParameterError: If either parameter is not a valid integer.
  """
  try:
    num_before = int(self.request.get('num_before', _NUM_BEFORE))
    num_after = int(self.request.get('num_after', _NUM_AFTER))
  except ValueError:
    raise QueryParameterError('Invalid "num_before" or "num_after".')
  return num_before, num_after
def _GetConfigName(self, test):
  """Gets the name of the custom anomaly threshold, just for display.

  Args:
    test: A TestMetadata entity.

  Returns:
    A human-readable label for the anomaly config that applies to |test|.
  """
  if test.overridden_anomaly_config:
    return test.overridden_anomaly_config.string_id()
  if self.request.get('config'):
    return 'Custom config'
  return 'Default config'
def _GetConfigDict(self, test):
  """Gets the anomaly threshold config dict to use.

  If a 'config' query parameter is given, it is parsed as JSON; otherwise
  the config registered for the test is used. (The original docstring said
  this returns a "name"; it actually returns the dict itself.)

  Args:
    test: A TestMetadata entity.

  Returns:
    An anomaly threshold config dict.

  Raises:
    QueryParameterError: If the supplied 'config' parameter is invalid JSON.
  """
  input_config_json = self.request.get('config')
  if not input_config_json:
    return anomaly_config.GetAnomalyConfigDict(test)
  try:
    return json.loads(input_config_json)
  except ValueError:
    raise QueryParameterError('Invalid JSON.')
def SimulateAlertProcessing(chart_series, **config_dict):
  """Finds the same alerts as would be found normally as points are added.

  Each time a new point is added to a data series on dashboard, the
  FindChangePoints function is called with some points from that series.
  In order to simulate this here, we need to repeatedly call FindChangePoints.

  Args:
    chart_series: A list of (x, y) pairs.
    **config_dict: An alert threshold config dict.

  Returns:
    A list of find_change_points.ChangePoint objects, one for each alert found.
  """
  all_change_points = []
  highest_x = None  # Highest x-value alerted on so far; avoids duplicates.
  # The number of points that are passed in to FindChangePoints normally may
  # depend on either the specific "max_window_size" value or another default
  # used in find_anomalies.
  window = config_dict.get('max_window_size', find_anomalies.DEFAULT_NUM_POINTS)
  for end in range(1, len(chart_series)):
    start = max(0, end - window)
    series = chart_series[start:end]
    change_points = find_change_points.FindChangePoints(series, **config_dict)
    # Fix: the original compared c.x_value > None on the first iteration,
    # which only works under Python 2 ordering rules and raises TypeError
    # on Python 3. Handle the "nothing alerted yet" case explicitly.
    change_points = [
        c for c in change_points
        if highest_x is None or c.x_value > highest_x
    ]
    if change_points:
      highest_x = max(c.x_value for c in change_points)
      all_change_points.extend(change_points)
  return all_change_points
def _AnomalySegmentSeries(change_points):
"""Makes a list of data series for showing segments next to anomalies.
Args:
change_points: A list of find_change_points.ChangePoint objects.
Returns:
A list of data series (lists of pairs) to be graphed by Flot.
"""
# We make a separate series for each anomaly, since segments may overlap.
anomaly_series_list = []
for change_point in change_points:
anomaly_series = []
# In a Flot data series, null is treated as a special value which
# indicates a discontinuity. We want to end each segment with null
# so that they show up as separate segments on the graph.
anomaly_series.append([change_point.window_start, None])
for x in range(change_point.window_start + 1, change_point.x_value):
anomaly_series.append([x, change_point.median_before])
anomaly_series.append([change_point.x_value, None])
for x in range(change_point.x_value + 1, change_point.window_end + 1):
anomaly_series.append([x, change_point.median_after])
anomaly_series.append([change_point.window_end, None])
anomaly_series_list.append(anomaly_series)
return anomaly_series_list
def _GetPlotData(chart_series, anomaly_points, anomaly_segments):
"""Returns data to embed on the front-end for the chart.
Args:
chart_series: A series, i.e. a list of (index, value) pairs.
anomaly_points: A series which contains the list of points where the
anomalies were detected.
anomaly_segments: A list of series, each of which represents one segment,
which is a horizontal line across a range of values used in finding
an anomaly.
Returns:
A list of data series, in the format accepted by Flot, which can be
serialized as JSON and embedded on the page.
"""
data = [
{
'data': chart_series,
'color': '#666',
'lines': {'show': True},
'points': {'show': False},
},
{
'data': anomaly_points,
'color': '#f90',
'lines': {'show': False},
'points': {'show': True, 'radius': 4}
},
]
for series in anomaly_segments:
data.append({
'data': series,
'color': '#f90',
'lines': {'show': True},
'points': {'show': False},
})
return data
def _ChartSeries(rows):
"""Returns a data series and index to revision map."""
return [(i, r.value) for i, r in enumerate(rows)]
def _RevisionList(rows):
"""Returns a list of revisions."""
return [r.revision for r in rows]
def _FetchLatestRows(test, num_points):
  """Does a query for the latest Row entities in the given test.

  Args:
    test: A TestMetadata entity to fetch Row entities for.
    num_points: Number of points to fetch.

  Returns:
    A list of Row entities, ordered by revision. The number to fetch is limited
    to the number that is expected to be processed at once by GASP.
  """
  # Internal-only tests may only be read by internal users.
  assert utils.IsInternalUser() or not test.internal_only
  datastore_hooks.SetSinglePrivilegedRequest()
  # reversed(): GetLatestRowsForTest presumably returns newest-first;
  # flip to ascending revision order — TODO confirm against graph_data.
  return list(reversed(
      graph_data.GetLatestRowsForTest(test.key, num_points)))
def _FetchRowsAroundRev(test, revision, num_before, num_after):
  """Fetches Row entities before and after a given revision.

  Args:
    test: A TestMetadata entity.
    revision: A Row ID.
    num_before: Maximum number of Rows before |revision| to fetch.
    num_after: Max number of Rows starting from |revision| to fetch.

  Returns:
    A list of Row entities ordered by ID. The Row entities will have at least
    the "revision" and "value" properties, which are the only ones relevant
    to their use in this module.
  """
  # Internal-only tests may only be read by internal users.
  assert utils.IsInternalUser() or not test.internal_only
  return graph_data.GetRowsForTestBeforeAfterRev(
      test.key, revision, num_before, num_after)
def _FetchStoredAnomalies(test, revisions):
  """Makes a list of data about Anomaly entities for a Test.

  Args:
    test: A TestMetadata entity.
    revisions: The list of revisions in the displayed range; only anomalies
        whose end_revision is after the first (oldest) entry are included.

  Returns:
    A list of dicts with display data for each stored anomaly.
  """
  stored_anomalies, _, _ = anomaly.Anomaly.QueryAsync(
      test=test.key).get_result()
  stored_anomaly_dicts = []
  for a in stored_anomalies:
    # Skip anomalies that ended before the displayed revision range.
    if a.end_revision > revisions[0]:
      stored_anomaly_dicts.append({
          'revision': a.end_revision,
          'median_before': a.median_before_anomaly,
          'median_after': a.median_after_anomaly,
          'percent_changed': a.percent_changed,
          'bug_id': _GetDisplayBugId(a.bug_id),
          'timestamp': a.timestamp,
      })
  return stored_anomaly_dicts
def _CsvUrl(test_path, rows):
  """Constructs an URL for requesting data from /graph_csv for |rows|."""
  # Using a list of pairs ensures a predictable order for the parameters.
  params = [('test_path', test_path)]
  if rows:
    params += [
        ('num_points', len(rows)),
        ('rev', rows[-1].revision),
    ]
  # urllib.urlencode is the Python 2 API (urllib.parse.urlencode in Python 3).
  return '/graph_csv?%s' % urllib.urlencode(params)
def _GraphUrl(test, revision):
  """Constructs a /report URL for viewing the given test's chart.

  (The previous docstring was copied from _CsvUrl; this builds a /report
  URL, not a /graph_csv URL.)

  Args:
    test: A TestMetadata entity.
    revision: A revision number to select, or a falsy value for none.

  Returns:
    A /report URL string.
  """
  params = [
      ('masters', test.master_name),
      ('bots', test.bot_name),
      # The test path is master/bot/suite/...; drop master and bot here.
      ('tests', '/'.join(test.test_path.split('/')[2:])),
  ]
  if revision:
    params.append(('rev', revision))
  return '/report?%s' % urllib.urlencode(params)
def _GetDisplayBugId(bug_id):
"""Returns a display string for the given bug ID property of an anomaly."""
special_ids = {-1: 'INVALID', -2: 'IGNORE', None: 'NONE'}
return special_ids.get(bug_id, str(bug_id))
| StarcoderdataPython |
1732765 | # -*- coding: utf-8 -*-
"""
Spyder Editor
京东手机TOP10数据分析.
问题列表:
%matplotlib widget在这里如何使用?或者说Spyder中如何便利的使用matplotlib
1. 长度
"""
# Part I. Basic charts
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
import pandas as pd
import numpy as np
# Use a CJK-capable font so Chinese labels render; keep minus signs visible.
matplotlib.rcParams['font.family'] = ['DengXian', 'sans-serif']
matplotlib.rcParams['axes.unicode_minus'] = False
#%%1. Data preparation
"""markdown
基础图表1 - 长宽比例图
每一条折线表示一款手机,其有三个顶点,左下原点,屏幕右上点,手机右上点。
屏幕尺寸的计算方法:
> $\frac{\sqrt{x^2+y^2}}{Z}$
计算对角线的像素数除以对角线长度算出PPI,之后计算屏幕长与宽。
"""
fn = r'E:\notebooks\data_visualization_notebooks\phone_data2.csv'
df = pd.read_csv(fn).iloc[0:15]  # keep the first 15 phones
row, col = df.shape
c = df['CPU'].astype('category')
ec = list(enumerate(c.cat.categories))  # (index, CPU name) pairs for coloring
# Pixels per inch: diagonal pixel count divided by the diagonal length;
# the screen-size column is in inches and 25.4 converts mm per inch.
ppi = np.sqrt(df['分辨率宽']**2 + df['分辨率长']**2) / (df['屏'] * 25.4)
x1 = df['宽']
y1 = df['长']
x2 = df['分辨率宽'] / ppi
y2 = df['分辨率长'] / ppi
# Each phone becomes a 3-point polyline: origin -> screen corner -> body corner.
px = list(zip([0]*15, x2, x1))
py = list(zip([0]*15, y2, y1))
#%%2. Width/length polyline chart
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(121)
ax.set_aspect(1)
for i in range(15):
    ax.plot(px[i], py[i], lw=0.35, marker='o', alpha=0.75)
# Zoomed inset (4x) focusing on the cluster of corner points.
axins = zoomed_inset_axes(ax, 4, loc=2, borderpad=0,
                          bbox_to_anchor = (1.2, .3, .8, .7),
                          bbox_transform = ax.transAxes
                          )
for i in range(15):
    axins.plot(px[i], py[i], lw=0.35, marker='o', alpha=0.75)
#ax.set_aspect(1)
axins.set_xlim(65, 80)
axins.set_ylim(145, 165)
mark_inset(ax, axins, loc1=2, loc2=2, fc="none", ec="0.5")
#%%3. Weight bar chart
"""
横坐标怎样添加偏置?
heft 重量
"""
fig2, ax2 = plt.subplots(figsize=(3,5))
heft_df = df.sort_values(by = '重')  # sort phones by weight
heft_df.index = np.arange(row)
for n, r in ec:
    tdf = heft_df[heft_df['CPU'] == r]
    # Bars are offset by 180 (grams); the tick labels below restore the
    # real axis values.
    ax2.barh(tdf.index, tdf['重'] - 180,
             color='C%d' % n,
             height=0.7
             )
ax2.set_yticks(heft_df.index.values)
ax2.set_yticklabels(heft_df['name'])
ax2.set_xticklabels(['140','160','180','200','220'])
137393 | <gh_stars>0
#!/usr/bin/env python
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.link import Link, TCLink,Intf
from subprocess import Popen, PIPE
from mininet.log import setLogLevel
if '__main__' == __name__:
    setLogLevel('info')
    net = Mininet(link=TCLink)
    # MPTCP sysctl toggle, kept commented for reference (requires an
    # MPTCP-capable kernel):
    # key = "net.mptcp.mptcp_enabled"
    # value = 1
    # p = Popen("sysctl -w %s=%s" % (key, value), shell=True, stdout=PIPE, stderr=PIPE)
    # stdout, stderr = p.communicate()
    # print "stdout=",stdout,"stderr=", stderr
    # Topology: h1 is dual-homed to router r1 over a "wifi" and a "4g"
    # link; h2 hangs off r1 on a single fast link.
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    r1 = net.addHost('r1')
    linkopt_wifi={'bw':10, 'delay':'50ms', "loss":0}
    linkopt_4g={'bw':10, 'delay':'50ms', "loss":0}
    linkopt2={'bw':100}
    net.addLink(r1,h1,cls=TCLink, **linkopt_wifi)
    net.addLink(r1,h1,cls=TCLink, **linkopt_4g)
    net.addLink(r1,h2,cls=TCLink, **linkopt2)
    net.build()
    # Clear the auto-assigned addresses before re-addressing each interface.
    r1.cmd("ifconfig r1-eth0 0")
    r1.cmd("ifconfig r1-eth1 0")
    r1.cmd("ifconfig r1-eth2 0")
    h1.cmd("ifconfig h1-eth0 0")
    h1.cmd("ifconfig h1-eth1 0")
    h2.cmd("ifconfig h2-eth0 0")
    # r1 routes between the three /24 subnets.
    r1.cmd("echo 1 > /proc/sys/net/ipv4/ip_forward")
    r1.cmd("ifconfig r1-eth0 10.0.0.1 netmask 255.255.255.0")
    r1.cmd("ifconfig r1-eth1 10.0.1.1 netmask 255.255.255.0")
    r1.cmd("ifconfig r1-eth2 10.0.2.1 netmask 255.255.255.0")
    h1.cmd("ifconfig h1-eth0 10.0.0.2 netmask 255.255.255.0")
    h1.cmd("ifconfig h1-eth1 10.0.1.2 netmask 255.255.255.0")
    h2.cmd("ifconfig h2-eth0 10.0.2.2 netmask 255.255.255.0")
    # Source-based policy routing on h1: traffic sourced from each address
    # leaves through its own interface (tables 1 and 2).
    h1.cmd("ip rule add from 10.0.0.2 table 1")
    h1.cmd("ip rule add from 10.0.1.2 table 2")
    h1.cmd("ip route add 10.0.0.0/24 dev h1-eth0 scope link table 1")
    h1.cmd("ip route add default via 10.0.0.1 dev h1-eth0 table 1")
    h1.cmd("ip route add 10.0.1.0/24 dev h1-eth1 scope link table 2")
    h1.cmd("ip route add default via 10.0.1.1 dev h1-eth1 table 2")
    h1.cmd("ip route add default scope global nexthop via 10.0.0.1 dev h1-eth0")
    h2.cmd("ip rule add from 10.0.2.2 table 1")
    h2.cmd("ip route add 10.0.2.0/24 dev h2-eth0 scope link table 1")
    h2.cmd("ip route add default via 10.0.2.1 dev h2-eth0 table 1")
    h2.cmd("ip route add default scope global nexthop via 10.0.2.1 dev h2-eth0")
    # Drop into the interactive Mininet CLI; tear down the network on exit.
    CLI(net)
    net.stop()
| StarcoderdataPython |
3274629 | #--------------------------------------------#
# 该部分代码用于看网络结构
#--------------------------------------------#
import torch
from torchsummary import summary
from nets.deeplabv3_plus import DeepLab
if __name__ == "__main__":
    # Select the GPU when available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # DeepLabV3+ with a MobileNet backbone, 21 output classes, no
    # pretrained weights.
    model = DeepLab(num_classes=21, backbone="mobilenet", downsample_factor=16, pretrained=False).to(device)
    # Print a per-layer summary for a 3x512x512 input.
    summary(model, (3,512,512))
| StarcoderdataPython |
57480 | import os
from argparse import RawTextHelpFormatter, ArgumentTypeError, ArgumentParser
from cfg_exporter.const import ExportType, ExtensionType, TEMPLATE_EXTENSION
def valid_source(source):
    """argparse type-checker: the configuration source path must exist."""
    if not os.path.exists(source):
        raise ArgumentTypeError(_('the source path does not exists `{source}`').format(source=source))
    return source
def valid_export(export):
    """argparse type-checker: map an export-type name to its enum member."""
    if export not in ExportType.__members__:
        raise ArgumentTypeError(_('the export file type does not exits {export}').format(export=export))
    return ExportType[export]
def valid_table(row_num):
    """argparse type-checker: a table row number must be a positive int."""
    try:
        parsed = int(row_num)
    except ValueError:
        parsed = 0  # force the failure branch below
    if parsed <= 0:
        raise ArgumentTypeError(_('{row_num} is not a valid line number').format(row_num=row_num))
    return parsed
def valid_lang_template(lang_template):
    """argparse type-checker: the language template path must exist.

    Args:
        lang_template: path string given on the command line.

    Returns:
        The path unchanged, when it exists.

    Raises:
        ArgumentTypeError: if the path does not exist.
    """
    if os.path.exists(lang_template):
        return lang_template
    else:
        # Bug fix: the message placeholder is {lang_template}, but .format()
        # was called with `source=lang_template`, so a missing path raised
        # KeyError instead of the intended ArgumentTypeError.
        raise ArgumentTypeError(_('the lang template path does not exists `{lang_template}`')
                                .format(lang_template=lang_template))
# Top-level CLI parser; all user-visible strings go through the gettext
# alias _() for translation. RawTextHelpFormatter preserves the newlines
# embedded in the multi-line help texts below.
parser = ArgumentParser(description=_('Configuration table export toolset'), formatter_class=RawTextHelpFormatter)
base_group = parser.add_argument_group(title=_('Base options'))
base_group.add_argument('--clear_dir', default=False, action='store_true',
                        help=_('clear the output directory.'))
base_group.add_argument('--exclude_files', default=[], nargs="*",
                        help=_('specify a list of file names not to load.'))
base_group.add_argument('-e', '--export_type', type=valid_export,
                        metavar=f'[{",".join(ExportType.__members__.keys())}]',
                        help=_('specify the configuration table export type.'))
base_group.add_argument('--file_prefix', default='',
                        help=_('specify the prefix of the output filename.'))
base_group.add_argument('--force', default=False, action='store_true',
                        help=_('force all configuration tables to be generated.'))
base_group.add_argument('-o', '--output', type=str, default="",
                        help=_('specify the configuration table output path.'))
base_group.add_argument('-r', '--recursive', default=False, action='store_true',
                        help=_('recursively search the source path.'))
base_group.add_argument('--verification', default=False, action='store_true',
                        help=_('verify only the correctness of the configuration table.'))
base_group.add_argument('-s', '--source', type=valid_source, required=True,
                        help=_(
                            'specify the configuration table source path.\nsupported file types [{extensions}]').format(
                            extensions=",".join(ExtensionType.__members__.keys())))
base_group.add_argument('--template_path',
                        help=_('specify the extension template path.\n'
                               'the template name consists of the table name, export type, '
                               'and {template_extension} extension\n'
                               'e.g:\n'
                               '`item.erl.{template_extension}` `item.hrl.{template_extension}` '
                               '`item.lua.{template_extension}`\n'
                               'loads the template based on the specified export type\n'
                               'e.g:\n'
                               '`--export_type erl` templates ending with `.erl.{template_extension}` '
                               'and `.hrl.{template_extension}` will be loaded\n'
                               '`--export_type lua` templates ending with `.lua.{template_extension}` will be loaded'
                               ).format(template_extension=TEMPLATE_EXTENSION))
base_group.add_argument('--verbose', default=False, action='store_true',
                        help=_('show the details.'))
# Row-layout options: which sheet line holds data, descriptions, fields, etc.
table_group = parser.add_argument_group(title=_('Table options'))
table_group.add_argument('--data_row', type=valid_table, required=True,
                         help=_('specify the start line number of the configuration table body data.'))
table_group.add_argument('--desc_row', type=valid_table,
                         help=_('specify the line number of the configuration table column description.'))
table_group.add_argument('--field_row', type=valid_table, required=True,
                         help=_('specify the line number of the configuration table field name.'))
table_group.add_argument('--rule_row', type=valid_table,
                         help=_('specify the line number of the configuration table check rule.'))
table_group.add_argument('--type_row', type=valid_table, required=True,
                         help=_('specify the line number of the configuration table data type.'))
lang_group = parser.add_argument_group(title=_('Multi languages options'))
lang_group.add_argument('--lang_template', type=valid_lang_template,
                        help=_('specify the language template path.'))
lang_group.add_argument('--export_lang_template',
                        help=_('output language template.'))
csv_group = parser.add_argument_group(title=_('CSV options'))
csv_group.add_argument('--csv_encoding', default='utf-8-sig', metavar='ENCODING',
                       help=_('specify the default encoding format for CSV files.\nDEFAULT UTF-8'))
erl_group = parser.add_argument_group(title=_('Erlang options'))
erl_group.add_argument('--erl_dir', default='',
                       help=_('specify output directory for where to generate the .erl.'))
erl_group.add_argument('--hrl_dir', default='',
                       help=_('specify output directory for where to generate the .hrl.'))
lua_group = parser.add_argument_group(title=_('LUA options'))
lua_group.add_argument('--lua_optimize', default=False, action='store_true',
                       help=_('remove default value fields ( store them into metatable ) '
                              'and reuse all table values to save memory'))
py_group = parser.add_argument_group(title=_('PYTHON options'))
py_group.add_argument('--py_optimize', default=False, action='store_true',
                      help=_('remove default value fields and reuse all table values to save memory'))
# Parsed at import time; `args` is this module's only public name.
args = parser.parse_args()
__all__ = ('args',)
| StarcoderdataPython |
1760893 | <reponame>elisarchodorov/ML-Recipes<filename>experiments_track/propancity/src/plots.py<gh_stars>0
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Removed a stray no-op list literal ("#27c1d1", "#217883", "#FF0000",
# "#6473ff") that was evaluated and discarded at import time.
# Palette mapping each credit-card company to its chart color.
color_scheme = {"Visa": "#6473ff", "MasterCard":"#217883"}
def create_model_plots(df, feature_plots, desc):
    """Plot one overlaid histogram per feature, split by credit-card company.

    Args:
        df: DataFrame with a `formatted_credit_card_company` column plus the
            feature columns listed in `feature_plots`.
        feature_plots: iterable of column names to histogram.
        desc: label used in each chart title and output file name.

    Side effects:
        Shows each figure and writes it to
        outputs/<desc>_<feature>_histogram.html.
    """
    for feature_plot in feature_plots:
        fig = go.Figure()
        for card_name, card_df in df.groupby("formatted_credit_card_company"):
            fig.add_trace(
                go.Histogram(
                    x=card_df[feature_plot],
                    name=card_name,
                    # Companies outside color_scheme fall back to the default color.
                    marker_color=color_scheme.get(card_name)
                )
            )
        # Overlay both histograms
        fig.update_layout(
            barmode='overlay',
            title_text=f"{desc} {feature_plot.replace('_', ' ')}",
        )
        # Reduce opacity to see both histograms
        fig.update_traces(opacity=0.75)
        fig.show()
        fig.write_html(f"outputs/{desc}_{feature_plot}_histogram.html")
def plot_smd(smd_scores):
    """Plot a dumbbell-style chart of SMD scores, one subplot per feature.

    Each feature row draws a line from its unmatched SMD score to its
    matched score, with differently colored end markers.

    Args:
        smd_scores: DataFrame indexed by feature name with `unmatched` and
            `matched` columns.

    Side effects:
        Shows the figure and writes it to outputs/smd_scores.html.
    """
    fig = make_subplots(
        rows=len(smd_scores.index),
        cols=1,
        shared_xaxes=True,
        vertical_spacing=0.02
    )
    for row, feature in enumerate(smd_scores.index, 1):
        # Only the first subplot contributes a legend entry.
        show_legend = True if row == 1 else False
        fig.add_trace(
            go.Scatter(
                x=smd_scores.loc[feature, ["unmatched", "matched"]],
                y=[feature, feature],
                mode='lines+markers',
                showlegend=show_legend,
                # NOTE(review): the trace is labelled "unmatched" although it
                # spans both scores; confirm the intended legend text.
                name="unmatched",
                line=dict(color="#217883"),
                # First marker = unmatched score, second = matched score.
                marker=dict(
                    size=[20, 20],
                    color=["#27c1d1", "#217883"]
                )
            ),
            row=row,
            col=1
        )
    fig.update_layout(
        height=1000, width=1200,
        title_text="smd scores for propensity score matching"
    )
    fig.update_traces(textposition="bottom right")
    fig.show()
    fig.write_html(f"outputs/smd_scores.html")
def plot_smd_old(smd_scores):
    """Legacy SMD plot: one scatter trace per score column on a single axes.

    Args:
        smd_scores: DataFrame indexed by feature name with `unmatched` and
            `matched` columns.

    Side effects:
        Writes the figure to outputs/smd_scores_old.html.
    """
    fig = go.Figure()
    for score in ["unmatched", "matched"]:
        fig.add_trace(
            go.Scatter(
                x=smd_scores[score],
                y=smd_scores.index,
                # Bug fix: both traces were hard-coded to name="unmatched";
                # use the loop variable so each trace is named after its column.
                name=score,
                mode='markers',
                marker=dict(size=16)
            )
        )
    fig.update_traces(textposition="bottom right")
    fig.write_html(f"outputs/smd_scores_old.html")
| StarcoderdataPython |
1733581 | <gh_stars>0
#coding=utf-8
import xlsxwriter
from xlsxwriter.workbook import Workbook
from xlrd.sheet import Sheet
def demo1():
    """Minimal xlsxwriter example: text, formats, numbers, and an image."""
    import xlsxwriter
    # Create the Excel file.
    workbook = xlsxwriter.Workbook('demo.xlsx')
    # Add worksheets; a name can be given explicitly.
    worksheet = workbook.add_worksheet()
    worksheet = workbook.add_worksheet('Test')
    # Set the width of the first column.
    worksheet.set_column('A:A', len('hello ')+1)
    # Add a bold format for later use.
    bold = workbook.add_format({'bold': True})
    # Write plain text into cell A1.
    worksheet.write('A1', 'Hello')
    # Write formatted text into cell A2.
    worksheet.write('A2', 'World', bold)
    # Write numbers by (row, col); indices start at 0.
    worksheet.write(2, 0, 123)
    worksheet.write(3, 0, 123.456)
    # Insert an image at cell B5 (requires python-logo.png on disk).
    worksheet.insert_image('B5', 'python-logo.png')
    workbook.close()
def charts():
    """xlsxwriter demo: column, stacked, percent-stacked, and pie charts."""
    workbook = xlsxwriter.Workbook('chart_column.xlsx')
    worksheet = workbook.add_worksheet()
    bold = workbook.add_format({'bold': 1})
    # Column headings of the data table.
    headings = ['Number', 'Batch 1', 'Batch 2']
    data = [
        [2, 3, 4, 5, 6, 7],
        [10, 40, 50, 20, 10, 50],
        [30, 60, 70, 50, 40, 30],
    ]
    # Write a row.
    worksheet.write_row('A1', headings, bold)
    # Write the columns.
    worksheet.write_column('A2', data[0])
    worksheet.write_column('B2', data[1])
    worksheet.write_column('C2', data[2])
    ############################################
    # Create a chart of type 'column'.
    chart1 = workbook.add_chart({'type': 'column'})
    # Configure the series; the ranges refer to the worksheet filled above.
    # Specify the chart's data ranges.
    chart1.add_series({
        'name': '=Sheet1!$B$1',
        'categories': '=Sheet1!$A$2:$A$7',
        'values': '=Sheet1!$B$2:$B$7',
    })
    chart1.add_series({
        'name': "=Sheet1!$C$1",
        'categories': '=Sheet1!$A$2:$A$7',
        'values': '=Sheet1!$C$2:$C$7',
    })
    # Alternative way to configure a series:
    # # [sheetname, first_row, first_col, last_row, last_col]
    # chart1.add_series({
    #     'name': ['Sheet1',0,1],
    #     'categories': ['Sheet1',1,0,6,0],
    #     'values': ['Sheet1',1,1,6,1],
    # })
    #
    #
    #
    # chart1.add_series({
    #     'name': ['Sheet1', 0, 2],
    #     'categories': ['Sheet1', 1, 0, 6, 0],
    #     'values': ['Sheet1', 1, 2, 6, 2],
    # })
    # Add a chart title and some axis labels.
    chart1.set_title ({'name': 'Results of sample analysis'})
    chart1.set_x_axis({'name': 'Test number'})
    chart1.set_y_axis({'name': 'Sample length (mm)'})
    # Set an Excel chart style.
    chart1.set_style(11)
    # Insert the chart at cell D2 (with an offset).
    worksheet.insert_chart('D2', chart1, {'x_offset': 25, 'y_offset': 10})
    #######################################################################
    #
    # Create a stacked chart sub-type.
    chart2 = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
    # Configure the first series.
    chart2.add_series({
        'name': '=Sheet1!$B$1',
        'categories': '=Sheet1!$A$2:$A$7',
        'values': '=Sheet1!$B$2:$B$7',
    })
    # Configure second series.
    chart2.add_series({
        'name': '=Sheet1!$C$1',
        'categories': '=Sheet1!$A$2:$A$7',
        'values': '=Sheet1!$C$2:$C$7',
    })
    # Add a chart title and some axis labels.
    chart2.set_title ({'name': 'Stacked Chart'})
    chart2.set_x_axis({'name': 'Test number'})
    chart2.set_y_axis({'name': 'Sample length (mm)'})
    # Set an Excel chart style.
    chart2.set_style(12)
    # Insert the chart into the worksheet (with an offset).
    worksheet.insert_chart('D18', chart2, {'x_offset': 25, 'y_offset': 10})
    #######################################################################
    #
    # Create a percentage stacked chart sub-type.
    #
    chart3 = workbook.add_chart({'type': 'column', 'subtype': 'percent_stacked'})
    # Configure the first series.
    chart3.add_series({
        'name': '=Sheet1!$B$1',
        'categories': '=Sheet1!$A$2:$A$7',
        'values': '=Sheet1!$B$2:$B$7',
    })
    # Configure second series.
    chart3.add_series({
        'name': '=Sheet1!$C$1',
        'categories': '=Sheet1!$A$2:$A$7',
        'values': '=Sheet1!$C$2:$C$7',
    })
    # Add a chart title and some axis labels.
    chart3.set_title ({'name': 'Percent Stacked Chart'})
    chart3.set_x_axis({'name': 'Test number'})
    chart3.set_y_axis({'name': 'Sample length (mm)'})
    # Set an Excel chart style.
    chart3.set_style(13)
    # Insert the chart into the worksheet (with an offset).
    worksheet.insert_chart('D34', chart3, {'x_offset': 25, 'y_offset': 10})
    # Create a pie chart.
    chart4 = workbook.add_chart({'type':'pie'})
    # Define the data.
    data = [
        ['Pass','Fail','Warn','NT'],
        [333,11,12,22],
    ]
    # Write the data.
    worksheet.write_row('A51',data[0],bold)
    worksheet.write_row('A52',data[1])
    # Per-slice fill colors: green/red/yellow/gray for Pass/Fail/Warn/NT.
    chart4.add_series({
        'name': '接口测试报表图',
        'categories': '=Sheet1!$A$51:$D$51',
        'values': '=Sheet1!$A$52:$D$52',
        'points':[
            {'fill':{'color':'#00CD00'}},
            {'fill':{'color':'red'}},
            {'fill':{'color':'yellow'}},
            {'fill':{'color':'gray'}},
        ],
    })
    # Add a chart title and some axis labels.
    chart4.set_title ({'name': '接口测试统计'})
    chart4.set_style(3)
    # chart3.set_y_axis({'name': 'Sample length (mm)'})
    worksheet.insert_chart('E52', chart4, {'x_offset': 25, 'y_offset': 10})
    workbook.close()
| StarcoderdataPython |
1760304 | # Generated by Django 3.1.13 on 2021-12-10 13:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds nullable buffer_time_before/after DurationFields to ReservationUnit."""

    dependencies = [
        ('reservation_units', '0034_fix_reservation_start_interval_help_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='reservationunit',
            name='buffer_time_after',
            field=models.DurationField(blank=True, null=True, verbose_name='Buffer time after reservation'),
        ),
        migrations.AddField(
            model_name='reservationunit',
            name='buffer_time_before',
            field=models.DurationField(blank=True, null=True, verbose_name='Buffer time before reservation'),
        ),
    ]
| StarcoderdataPython |
51435 | <reponame>lean-delivery/tf-readme-validator<filename>tests/optional-neg/test.py
#!/usr/bin/env python
import unittest
import sys
sys.path.append('../../bin')
# Import the validator script under test by file name (it is not a package).
target = __import__('tf_readme_validator')
main = target.main
readme = target.cfg['readme']


class Test1(unittest.TestCase):
    """Negative case: optional README sections should not validate as 'ok'."""

    def test(self):
        result = main()
        # Non-zero result signals that validation failed.
        self.assertEqual(result, 1)
        # None of the optional sections should have been marked 'ok'.
        self.assertEqual('ok' in readme['Conditional creation'], False)
        self.assertEqual('ok' in readme['Code included in this module'], False)
        self.assertEqual('ok' in readme['Known issues / Limitations'], False)
        self.assertEqual('ok' in readme['Tests'], False)
        self.assertEqual('ok' in readme['Examples'], False)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1682524 | <gh_stars>1-10
import _plotly_utils.basevalidators
class ValuessrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the `valuessrc` property of `table.header`."""

    def __init__(self, plotly_name="valuessrc", parent_name="table.header", **kwargs):
        super(ValuessrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class ValuesValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Auto-generated validator for the `values` property of `table.header`."""

    def __init__(self, plotly_name="values", parent_name="table.header", **kwargs):
        super(ValuesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
import _plotly_utils.basevalidators
class SuffixsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the `suffixsrc` property of `table.header`."""

    def __init__(self, plotly_name="suffixsrc", parent_name="table.header", **kwargs):
        super(SuffixsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class SuffixValidator(_plotly_utils.basevalidators.StringValidator):
    """Auto-generated validator for the `suffix` property of `table.header`."""

    def __init__(self, plotly_name="suffix", parent_name="table.header", **kwargs):
        super(SuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
import _plotly_utils.basevalidators
class PrefixsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the `prefixsrc` property of `table.header`."""

    def __init__(self, plotly_name="prefixsrc", parent_name="table.header", **kwargs):
        super(PrefixsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class PrefixValidator(_plotly_utils.basevalidators.StringValidator):
    """Auto-generated validator for the `prefix` property of `table.header`."""

    def __init__(self, plotly_name="prefix", parent_name="table.header", **kwargs):
        super(PrefixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated compound validator for `table.header.line`."""

    def __init__(self, plotly_name="line", parent_name="table.header", **kwargs):
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on plot.ly for color
                .
            width
            widthsrc
                Sets the source reference on plot.ly for width
                .
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class HeightValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for the `height` property of `table.header`."""

    def __init__(self, plotly_name="height", parent_name="table.header", **kwargs):
        super(HeightValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
import _plotly_utils.basevalidators
class FormatsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the `formatsrc` property of `table.header`."""

    def __init__(self, plotly_name="formatsrc", parent_name="table.header", **kwargs):
        super(FormatsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class FormatValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Auto-generated validator for the `format` property of `table.header`."""

    def __init__(self, plotly_name="format", parent_name="table.header", **kwargs):
        super(FormatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated compound validator for `table.header.font`."""

    def __init__(self, plotly_name="font", parent_name="table.header", **kwargs):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on plot.ly for color
                .
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The plotly service (at https://plot.ly
                or on-premise) generates images on a server,
                where only a select number of fonts are
                installed and supported. These include "Arial",
                "Balto", "Courier New", "Droid Sans",, "Droid
                Serif", "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on plot.ly for
                family .
            size
            sizesrc
                Sets the source reference on plot.ly for size
                .
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated compound validator for `table.header.fill`."""

    def __init__(self, plotly_name="fill", parent_name="table.header", **kwargs):
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Fill"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the cell fill color. It accepts either a
                specific color or an array of colors or a 2D
                array of colors.
            colorsrc
                Sets the source reference on plot.ly for color
                .
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the `alignsrc` property of `table.header`."""

    def __init__(self, plotly_name="alignsrc", parent_name="table.header", **kwargs):
        super(AlignsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Auto-generated validator for the `align` property of `table.header`."""

    def __init__(self, plotly_name="align", parent_name="table.header", **kwargs):
        super(AlignValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            values=kwargs.pop("values", ["left", "center", "right"]),
            **kwargs
        )
| StarcoderdataPython |
25906 | """
Created on June 19th, 2017
@author: rouxpn
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default', DeprecationWarning)
import os
import re
from decimal import Decimal
class DecayParser():
"""
Parses the PHISICS xml decay file and replaces the nominal values by the perturbed values.
"""
def __init__(self, inputFiles, workingDir, **pertDict):
    """
    Constructor
    @ In, inputFiles, string, .dat Decay file.
    @ In, workingDir, string, absolute path to the working directory
    @ In, pertDict, dictionary, dictionary of perturbed variables
    @ Out, None
    """
    self.allDecayList = []  # All possible types of decay for actinides and FP
    self.isotopeClassifier = {}  # String, FP or Actinide
    self.decayModeNumbering = {}  # Gives the column number of each decay type
    self.isotopeParsed = ['Actinide', 'FP']
    self.listedDict = {}  # Nested dictionary of perturbed variables
    self.inputFiles = inputFiles
    # scientificNotation/characterizeLibrary/fileReconstruction/printInput
    # are helper methods presumably defined elsewhere in this class.
    self.pertDict = self.scientificNotation(pertDict)
    # open the unperturbed file
    openInputFile = open(self.inputFiles, "r")
    lines = openInputFile.readlines()
    openInputFile.close()
    # Pipeline: characterize the library, build the perturbed-value dict,
    # then write the perturbed file into the working directory.
    self.characterizeLibrary(lines)
    self.fileReconstruction()
    self.printInput(workingDir,lines)
def matrixPrinter(self, line, outfile, atomicNumber):
"""
The xml files is split into two categories: hardcopied lines (banner, column labels etc.) that cannot
be modified by RAVEN variable definition, and matrix lines that can be modified by RAVEN variable definition.
This method treats the matrix lines, and print them into the perturbed file.
@ In, line, list, unperturbed input file line
@ In, outfile, file object, output file in file object format
@ In, atomicNumber, integer, indicates if the isotope parsed is an actinide (0) or a fission product (1)
@ Out, None
"""
line = line.upper().split()
line[0] = re.sub(r'(.*?)(\w+)(-)(\d+M?)', r'\1\2\4', line[0]) # remove isotope dashes
for isotopeID in self.listedDict.iterkeys():
if line[0] == isotopeID:
typeOfDecayPerturbed = []
typeOfDecayPerturbed = self.listedDict.get(isotopeID, {}).keys()
for i in range(len(typeOfDecayPerturbed)):
try:
if self.isotopeClassifier.get(isotopeID) == self.isotopeParsed[0]: # it means the isotope is an actinide
line[self.decayModeNumbering.get(atomicNumber).get(typeOfDecayPerturbed[i])] = str(self.listedDict.get(isotopeID).get(typeOfDecayPerturbed[i]))
elif self.isotopeClassifier.get(isotopeID) == self.isotopeParsed[1]: # it means the isotope is a FP
line[self.decayModeNumbering.get(atomicNumber).get(typeOfDecayPerturbed[i])] = str(self.listedDict.get(isotopeID).get(typeOfDecayPerturbed[i]))
except (KeyError, TypeError):
raise KeyError('you used the decay mode' + str(typeOfDecayPerturbed) +'Check if the decay mode ' + str(typeOfDecayPerturbed) +'exist in the decay library. You can also check if you perturbed dictionary is under the format |DECAY|DECAYMODE|ISOTOPEID.')
if any('ACTINIDES' in s for s in line):
flag = self.isotopeParsed[0]
elif any('FPRODUCTS' in s for s in line):
flag = self.isotopeParsed[1]
try:
if self.isotopeClassifier[line[0]] == atomicNumber:
line[0] = "{0:<7s}".format(line[0])
i = 1
while i <= len(self.decayModeNumbering[atomicNumber]):
line[i] = "{0:<11s}".format(line[i])
i = i + 1
outfile.writelines(' ' + ''.join(
line[0:len(self.decayModeNumbering[atomicNumber]) + 1]) + "\n")
except KeyError: # happens for all the unperturbed isotopes
pass
def hardcopyPrinter(self, atomicNumber, lines):
"""
The files are split into two categories: hardcopied lines (banner, column labels etc.) that cannot
be modified by RAVEN variable definition, and matrix lines that can be modified by RAVEN variable definition.
This method treats the hardcopied lines, and then call the matrix line handler method.
@ In, atomicNumber, integer, indicates if the isotope parsed is an actinide (0) or a fission product (1)
@ In, lines, list, unperturbed input file lines
@ Out, None
"""
flag = 0
count = 0
with open(self.inputFiles, 'a+') as outfile:
for line in lines:
# if the line is blank, ignore it
if not line.split():
continue
if re.match(r'(.*?)' + atomicNumber + 's', line.strip()) and atomicNumber == self.isotopeParsed[0]:
flag = 2 # if the word Actinides is found
outfile.writelines(line)
if re.match(r'(.*?)' + atomicNumber + 'roducts', line.strip())and atomicNumber == self.isotopeParsed[1]:
flag = 1 # if the word FProducts is found
outfile.writelines(line)
if flag == 2:
# for the actinides decay section
if re.match(r'(.*?)\s+\w+(\W)\s+\w+(\W)', line) and any(
s in 'BETA' for s in
line.split()) and atomicNumber == self.isotopeParsed[0] and count == 0:
count = count + 1
outfile.writelines(line)
self.matrixPrinter(line, outfile, atomicNumber)
if flag == 1:
#for the FP decay section
if re.match(r'(.*?)\s+\w+(\W)\s+\w+(\W)', line) and any(
s in 'BETA' for s in
line.split()) and atomicNumber == self.isotopeParsed[1]:
outfile.writelines(line)
self.matrixPrinter(line, outfile, atomicNumber)
def characterizeLibrary(self,lines):
"""
Characterizes the structure of the library. Teaches the type of decay available for the actinide family and FP family.
@ In, lines, list, unperturbed input file lines
@ Out, None
"""
concatenateDecayList = []
for line in lines:
if re.match(r'(.*?)Actinides', line):
typeOfIsotopeParsed = self.isotopeParsed[0]
elif re.match(r'(.*?)FProducts', line):
typeOfIsotopeParsed = self.isotopeParsed[1]
if (
re.match(r'(.*?)\w+(\W?)\s+\w+(\W?)\s+\w', line)
and any(s in "BETA" for s in line)
): # create dynamic column detector, the search for 'BETA' ensures this is the label line.
count = 0 # reset the counter and the dictionary numbering if new colum sequence is detected
numbering = {}
decayList = []
line = re.sub(r'(Yy?)ield', r'',
line) # Remove the word 'yield' in the decay type lines
splitStringDecayType = line.upper().split(
) # Split the words into individual strings
for decayType in splitStringDecayType: # replace + and * by strings
decayList.append(
decayType.replace('*', 'S').replace('+', 'PLUS').replace(
'_', ''))
concatenateDecayList = concatenateDecayList + decayList # concatenate all the possible decay type (including repetition among actinides and FP)
self.allDecayList = list(set(concatenateDecayList))
for i in range(len(decayList)):
count = count + 1
numbering[decayList[
i]] = count # assign the column position of the given decay types
if typeOfIsotopeParsed == self.isotopeParsed[0]:
self.decayModeNumbering[self.isotopeParsed[0]] = numbering
if typeOfIsotopeParsed == self.isotopeParsed[1]:
self.decayModeNumbering[self.isotopeParsed[1]] = numbering
if re.match(r'(.*?)\D+(-?)\d+(M?)\s+\d', line):
splitString = line.upper().split()
for i, decayConstant in enumerate(splitString):
try:
splitString[i] = float(decayConstant)
except ValueError:
pass # the element is a string (isotope tag). It can be ignored
splitString[0] = re.sub(
r'(.*?)(\w+)(-)(\d+M?)', r'\1\2\4', splitString[
0]) # remove the dash if it the key (isotope ID) contains it
if typeOfIsotopeParsed == self.isotopeParsed[0]:
self.isotopeClassifier[splitString[0]] = self.isotopeParsed[0]
elif typeOfIsotopeParsed == self.isotopeParsed[1]:
self.isotopeClassifier[splitString[0]] = self.isotopeParsed[1]
def scientificNotation(self, pertDict):
"""
Converts the numerical values into a scientific notation.
@ In, pertDict, dictionary, perturbed variables
@ Out, pertDict, dictionary, perturbed variables in scientific format
"""
for key, value in pertDict.iteritems():
pertDict[key] = '%.3E' % Decimal(str(value))
return pertDict
def fileReconstruction(self):
"""
Converts the formatted dictionary pertdict -> {'DECAY|ALPHA|U235':1.30}.
into a dictionary of dictionaries that has the format -> {'DECAY':{'ALPHA':{'U235'1.30}}}
@ In, None
@ Out, None
"""
perturbedIsotopes = []
for key in self.pertDict.iterkeys():
splittedDecayKeywords = key.split('|')
perturbedIsotopes.append(splittedDecayKeywords[2])
for i in range(len(perturbedIsotopes)):
self.listedDict[perturbedIsotopes[i]] = {}
for decayTypeKey, decayValue in self.pertDict.iteritems():
decayKeyWords = decayTypeKey.split('|')
for i in range(len(self.allDecayList)):
self.listedDict[decayKeyWords[2]][decayKeyWords[1]] = decayValue
def printInput(self, workingDir,lines):
"""
Prints out the pertubed decay library into a file. The workflow is:
Open a new file with a dummy name; parse the unperturbed library; print the line in the dummy,
replace with perturbed variables if necessary. Change the name of the dummy file.
@ In, workingDir, string, path to working directory
@ In, lines, list, unperturbed input file lines
@ Out, None
"""
if os.path.exists(self.inputFiles):
os.remove(self.inputFiles) # remove the file if was already existing
for atomicNumber in self.isotopeParsed:
self.hardcopyPrinter(atomicNumber, lines)
with open(self.inputFiles, 'a') as outfile:
outfile.writelines(' end')
| StarcoderdataPython |
3341141 | ##############################################################################################################################################################
##############################################################################################################################################################
"""
Training scripts for autoencoder models presented in our paper.
Replace or modify the config file in the following part of the code to make changes to train different models.
# load the config file
config = toml.load("cfg/extractor_ae.toml")
"""
##############################################################################################################################################################
##############################################################################################################################################################
import os
import sys
import time
import toml
import torch
import random
import numpy as np
from joblib import dump
from torch import optim
from torch.cuda.amp import autocast, GradScaler
from pathlib import Path
from shutil import copyfile
from sklearn.neighbors import KNeighborsClassifier
import torchvision.transforms.functional as TF
from torch.nn import functional as F
import utils as utils
from model import create_ae_model
from dataset import create_dataset
##############################################################################################################################################################
##############################################################################################################################################################
def folder_setup(config):
    """
    Create the experiment folder hierarchy, redirect stdout to a log file,
    back up the training scripts and save the config file.

    Args:
        config (dict): parsed .toml config; "model"/"dataset" sections are
            used to build the experiment name.

    Returns:
        dict: mapping of folder purpose -> Path (plus the "accuracy" log file).
    """
    # define the name of the experiment
    if config["model"]["type"] == "classification":
        experiment_name = time.strftime("%Y%m%d-%H%M%S") + "_" + config["model"]["architecture"] + "_" + config["dataset"]["name"] + "_pretrained_" + str(config["model"]["use_pretrained"])
    else:
        experiment_name = time.strftime("%Y%m%d-%H%M%S") + "_" + config["model"]["type"] + "_" + config["dataset"]["name"] + "_latentDimension_" + str(config["model"]["dimension"])

    # define the paths to save the experiment
    save_folder = dict()
    Path("results").mkdir(exist_ok=True)
    save_folder["main"] = Path("results") / experiment_name
    save_folder["checkpoints"] = save_folder["main"] / "checkpoints"
    save_folder["images"] = save_folder["main"] / "images" / "train"
    save_folder["data"] = save_folder["main"] / "data"
    save_folder["latent"] = save_folder["main"] / "data" / "latent"
    save_folder["scripts"] = save_folder["main"] / "scripts"
    save_folder["logs"] = save_folder["main"] / "logs"

    # create all the folders
    for item in save_folder.values():
        item.mkdir(parents=True)

    # save the console output to a file and to the console
    sys.stdout = utils.Tee(original_stdout=sys.stdout, file=save_folder["logs"] / "training.log")

    # save the accuracy for each evaluation to a file
    save_folder["accuracy"] = save_folder["logs"] / "accuracy.log"
    save_folder["accuracy"].touch()

    # copy files as a version backup; this way we know exactly what we did.
    # these can also be loaded automatically for testing the models.
    # this script may run under a different name, so it is backed up from
    # __file__ while the sibling scripts are copied from the working directory
    copyfile(Path(__file__).absolute(), save_folder["scripts"] / "train_ae.py")
    for script_name in (
        "dataset.py",
        "model.py",
        "utils.py",
        "pretrained_model.py",
        "repeat_train_classifier.py",
        "repeat_train_ae.py",
        "train_classifier.py",
    ):
        copyfile(Path().absolute() / script_name, save_folder["scripts"] / script_name)

    # save config file
    # remove device info, as it is not properly saved
    config_to_save = config.copy()
    del config_to_save["device"]
    utils.write_toml_to_file(config_to_save, save_folder["main"] / "cfg.toml")

    return save_folder
##############################################################################################################################################################
def model_setup(config, save_folder):
    """
    Build the autoencoder model, its optimizer(s) and the training data loader.
    Args:
        config (dict): full experiment config ("model"/"dataset"/"training").
        save_folder (dict): folder paths from folder_setup(); used to save the
            label-to-integer dictionary next to the checkpoints.
    Returns:
        tuple: (model, optimizer dict with "method" list, train DataLoader,
        string-label -> integer dictionary of the training dataset).
    Raises:
        ValueError: if the config describes a classification model.
    """
    if config["model"]["type"] == "classification":
        raise ValueError("Your config is for a classification model, but this script is for autoencoders. Please use train_classifier.py instead.")
    # define model and print it
    model = create_ae_model(config["model"], config["dataset"], config["training"]).to(config["device"])
    model.print_model()
    # get the optimizer defined in the config file
    # load it from the torch module
    optim_def = getattr(optim, config["training"]["optimizer"])
    # create the optimizer
    # optimizer["method"] is a list: index 0 drives the autoencoder; for
    # FactorVAE a second optimizer (index 1) drives the discriminator
    optimizer = dict()
    optimizer["method"] = []
    if model.type == "factorvae":
        # FactorVAE optimizes the AE parts and the discriminator separately,
        # with different Adam betas for each (0.9/0.999 vs 0.5/0.9)
        ae_param = list(model.encoder.parameters()) + list(model.encoder_fc21.parameters()) + list(model.encoder_fc22.parameters()) + list(model.decoder_fc.parameters()) + list(model.decoder_conv.parameters())
        optimizer["method"].append(optim_def(ae_param, lr=config["training"]["learning_rate"], weight_decay=config["training"]["weight_decay"], betas=(0.9, 0.999)))
        optimizer["method"].append(optim_def(model.discriminator.parameters(), lr=config["training"]["learning_rate"], weight_decay=config["training"]["weight_decay"], betas=(0.5, 0.9)))
    else:
        if config["training"]["optimizer"] == "SGD":
            optimizer["method"].append(optim_def(model.parameters(), lr=config["training"]["learning_rate"], weight_decay=config["training"]["weight_decay"], momentum=0.9, nesterov=True))
        else:
            optimizer["method"].append(optim_def(model.parameters(), lr=config["training"]["learning_rate"], weight_decay=config["training"]["weight_decay"]))
    print('=' * 73)
    print(optimizer["method"])
    print('=' * 73)
    # load data
    # triplets are only needed for the triplet autoencoder variation ("tae")
    train_loader = create_dataset(
        which_dataset=config["dataset"]["name"],
        which_factor=config["dataset"]["factor"],
        use_triplet=True if config["model"]["variation"]=="tae" else False,
        should_augment=config["training"]["augment"],
        make_scene_impossible=config["training"]["make_scene_impossible"],
        make_instance_impossible=config["training"]["make_instance_impossible"],
        batch_size=config["training"]["batch_size"],
        shuffle=True,
        get_all=False
    )
    # save the dict to transform labels to int
    np.save(save_folder["checkpoints"] / 'label_dict.npy', train_loader.dataset.string_labels_to_integer_dict)
    return model, optimizer, train_loader, train_loader.dataset.string_labels_to_integer_dict
##############################################################################################################################################################
def get_test_loader(config, real_only=False, get_train_loader=False):
    """
    Create the evaluation data loader(s) for the configured dataset.
    Args:
        config (dict): full experiment config.
        real_only (bool): for vehicle datasets, skip the synthetic
            sviro_illumination test vehicles and only load TICAM (real images).
        get_train_loader (bool): additionally include a non-augmented,
            non-shuffled loader over the training data (key "train").
    Returns:
        dict: name -> DataLoader; keys are vehicle names / "ticam" / "train"
        for vehicle datasets, or "real" for mpi3d / mnist / fonts-style data.
    """
    # check whether we need vehicle images or mpi3d
    vehicle_images=False if config["dataset"]["name"].lower() in ["mpi3d", "mnist", "fonts"] else True
    # dict to keep a loader for each test vehicle
    test_loader = dict()
    # either we train on vehicle images, or on mpi3d
    if vehicle_images:
        if not real_only:
            # synthetic test vehicles, full dataset per vehicle (get_all=True)
            for vehicle in ["cayenne", "kona", "kodiaq"]:
                test_loader[vehicle] = create_dataset(
                    which_dataset="sviro_illumination",
                    which_factor=vehicle,
                    use_triplet=False,
                    should_augment=False,
                    batch_size=config["training"]["batch_size"],
                    shuffle=True,
                    get_all=True
                )
        # real recordings used to score transfer performance
        test_loader["ticam"] = create_dataset(
            which_dataset="ticam",
            which_factor="all",
            use_triplet=False,
            should_augment=False,
            batch_size=512,
            shuffle=True,
            get_all=False
        )
        if get_train_loader:
            # clean (no augmentation, no shuffling) pass over the training data,
            # e.g. for fitting a classifier on the latent space
            test_loader["train"] = create_dataset(
                which_dataset=config["dataset"]["name"],
                which_factor=config["dataset"]["factor"],
                use_triplet=False,
                should_augment=False,
                make_scene_impossible=False,
                make_instance_impossible=False,
                batch_size=512,
                shuffle=False,
                get_all=False
            )
    # mpi3d
    else:
        if config["dataset"]["name"].lower() == "mpi3d":
            test_loader["real"] = create_dataset(
                which_dataset="mpi3d",
                which_factor="real",
                use_triplet=False,
                should_augment=False,
                batch_size=config["training"]["batch_size"],
                shuffle=True,
                get_all=False
            )
        elif config["dataset"]["name"].lower() == "mnist":
            # NOTE(review): when training on mnist the "real" evaluation set is
            # the fonts test split -- confirm this cross-dataset pairing is intended
            test_loader["real"] = create_dataset(
                which_dataset="fonts",
                which_factor="test",
                use_triplet=False,
                should_augment=False,
                batch_size=config["training"]["batch_size"],
                shuffle=True,
                get_all=False
            )
        else:
            test_loader["real"] = create_dataset(
                which_dataset=config["dataset"]["name"].lower(),
                which_factor="validation",
                use_triplet=False,
                should_augment=False,
                batch_size=config["training"]["batch_size"],
                shuffle=True,
                get_all=False
            )
    return test_loader
##############################################################################################################################################################
def train_one_epoch(model, optimizer, scaler, train_loader, config, save_folder, nbr_epoch):
    """
    Train the autoencoder for one epoch with mixed precision (AMP).
    Supports the plain AE, the triplet AE ("tae" variation), the VAE and the
    FactorVAE (which additionally trains a discriminator on a second batch
    drawn from the same iterator).
    Args:
        model: project autoencoder (exposes .type, .variation, .loss, ...).
        optimizer (dict): {"method": [...]}; index 0 is the AE optimizer,
            index 1 (FactorVAE only) the discriminator optimizer.
        scaler (GradScaler): AMP gradient scaler shared across the epoch.
        train_loader (DataLoader): training data.
        config (dict): full config (device, loss type, loss weights, ...).
        save_folder (dict): output folders for progress plots.
        nbr_epoch (int): current epoch index (0-based).
    Returns:
        The model (trained in place).
    """
    # make sure we are training
    model.train()
    # init
    total_loss = 0
    total_recon_loss = 0
    total_kl_loss = 0
    total_vae_tc_loss = 0
    total_d_tc_loss = 0
    total_tp_loss = 0
    # for each batch
    # explicit iterator so the FactorVAE branch below can pull a second,
    # independent batch with next() (FactorVAE thus consumes two batches per step)
    train_loader = iter(train_loader)
    for batch_idx, batch_images in enumerate(train_loader):
        # init
        batch_loss = 0
        batch_recon_loss = 0
        batch_kl_loss = 0
        batch_vae_tc_loss = 0
        batch_d_tc_loss = 0
        batch_tp_loss = 0
        # set gradients to zero
        optimizer["method"][0].zero_grad()
        # push to gpu
        input_images = batch_images["image"].to(config["device"])
        output_images = batch_images["target"].to(config["device"])
        if model.variation == "tae":
            positive_input_images = batch_images["positive"].to(config["device"])
            positive_output_image = batch_images["positive_target"].to(config["device"])
            negative_input_images = batch_images["negative"].to(config["device"])
            negative_output_image = batch_images["negative_target"].to(config["device"])
        # inference
        with autocast():
            model_output = model(input_images)
            if model.variation == "tae":
                positive_output = model(positive_input_images)
                negative_output = model(negative_input_images)
            # calculate the loss
            # SSIM/BCE losses are already reduced; other losses are summed and
            # hence normalized by the batch size here
            batch_recon_loss = model.loss(prediction=model_output["xhat"].to(torch.float32), target=output_images)
            if config["training"]["loss"] == "SSIM" or config["training"]["loss"] == "BCE":
                batch_loss += batch_recon_loss
            else:
                batch_loss += batch_recon_loss / config["training"]["batch_size"]
            if model.variation == "tae":
                positive_recon_loss = model.loss(prediction=positive_output["xhat"].to(torch.float32), target=positive_output_image)
                negative_recon_loss = model.loss(prediction=negative_output["xhat"].to(torch.float32), target=negative_output_image)
                if config["training"]["loss"] == "SSIM" or config["training"]["loss"] == "BCE":
                    batch_loss += positive_recon_loss
                    batch_loss += negative_recon_loss
                else:
                    batch_loss += positive_recon_loss / config["training"]["batch_size"]
                    batch_loss += negative_recon_loss / config["training"]["batch_size"]
                # triplet loss
                batch_tp_loss = model.triplet_loss(anchor=model_output["mu"], positive=positive_output["mu"], negative=negative_output["mu"])
                batch_loss += batch_tp_loss
            if model.type == "vae" or model.type == "factorvae":
                batch_kl_loss = model.kl_divergence_loss(model_output["mu"], model_output["logvar"])
                batch_loss += config["training"]["kl_weight"]*batch_kl_loss
            if model.type == "factorvae":
                D_z_reserve = model.discriminator(model_output["z"])
                batch_vae_tc_loss = (D_z_reserve[:, 0] - D_z_reserve[:, 1]).mean()
                batch_loss += config["training"]["tc_weight"]*batch_vae_tc_loss
        # Scales loss. Calls backward() on scaled loss to create scaled gradients.
        # Backward passes under autocast are not recommended.
        # Backward ops run in the same dtype autocast chose for corresponding forward ops.
        if model.type == "factorvae":
            # retain_graph because D_z_reserve is reused below for the
            # discriminator's own loss
            scaler.scale(batch_loss).backward(retain_graph=True)
        else:
            scaler.scale(batch_loss).backward()
        # scaler.step() first unscales the gradients of the optimizer's assigned params.
        # If these gradients do not contain infs or NaNs, optimizer.step() is then called,
        # otherwise, optimizer.step() is skipped.
        scaler.step(optimizer["method"][0])
        # Updates the scale for next iteration.
        scaler.update()
        if model.type == "factorvae":
            # discriminator update on a fresh batch: real latents vs permuted latents
            batch_images_2 = next(train_loader)
            input_images_2 = batch_images_2["image"].to(config["device"])
            with autocast():
                model_output_2 = model(input_images_2)
            true_labels = torch.ones(input_images_2.size(0), dtype= torch.long, requires_grad=False).to(config["device"])
            false_labels = torch.zeros(input_images_2.size(0), dtype= torch.long, requires_grad=False).to(config["device"])
            z_perm = model.permute_latent(model_output_2["z"]).detach()
            D_z_perm = model.discriminator(z_perm)
            batch_d_tc_loss = 0.5 * (F.cross_entropy(D_z_reserve, false_labels) + F.cross_entropy(D_z_perm, true_labels))
            optimizer["method"][1].zero_grad()
            scaler.scale(batch_d_tc_loss).backward()
            scaler.step(optimizer["method"][1])
            scaler.update()
        # accumulate loss
        total_loss += batch_loss.item()
        total_recon_loss += batch_recon_loss.item()
        if model.variation == "tae":
            total_tp_loss += batch_tp_loss.item()
        if model.type == "vae" or model.type == "factorvae":
            total_kl_loss += batch_kl_loss.item()
        if model.type == "factorvae":
            total_vae_tc_loss += batch_vae_tc_loss.item()
            total_d_tc_loss += batch_d_tc_loss.item()
        # plot the first batch of plotting epochs (and once early at epoch 1)
        if ((nbr_epoch+1) % config["training"]["frequency"] == 0 or nbr_epoch == 1) and batch_idx == 0:
            utils.plot_progress(input_images, model_output["xhat"], save_folder, nbr_epoch, text="epoch")
            if model.variation == "tae":
                utils.plot_progress(positive_input_images, positive_output["xhat"], save_folder, nbr_epoch, text="epoch_positive")
                utils.plot_progress(negative_input_images, negative_output["xhat"], save_folder, nbr_epoch, text="epoch_negative")
            if "target" in batch_images:
                utils.plot_progress(input_images, output_images, save_folder, nbr_epoch, text="epoch_target")
    # NOTE(review): the two TC losses printed here are the *last batch* values
    # (batch_*), not the epoch totals -- confirm this is intended
    print(f"[Training] \tEpoch: {nbr_epoch+1} Total Loss: {total_loss:.2f} \tRecon Loss: {total_recon_loss:.2f} \tTriplet Loss: {total_tp_loss:.2f} \tKL Loss: {total_kl_loss:.2f} \tVAE-TC Loss: {batch_vae_tc_loss:.2f} \tD-TC Loss: {batch_d_tc_loss:.2f}")
    return model
##############################################################################################################################################################
def recon_one_batch(model, loader_dict, config, save_folder, nbr_epoch, split):
    """
    Reconstruct the first batch of every loader in `loader_dict` and save the
    reconstructions as progress images; also checkpoints the current weights.
    Only runs every config["training"]["frequency"] epochs (plus once at epoch 1).
    """
    # skip the non-plotting epochs entirely
    if (nbr_epoch + 1) % config["training"]["frequency"] != 0 and nbr_epoch != 1:
        return
    # checkpoint the current weights alongside the plots
    torch.save(model.state_dict(), save_folder["checkpoints"] / "last_model.pth")
    model.eval()
    with torch.no_grad():
        for vehicle, loader in loader_dict.items():
            # only the first batch of each loader is visualized
            for batch in loader:
                images = batch["image"].to(config["device"])
                reconstructions = model(images)
                utils.plot_progress(images, reconstructions["xhat"], save_folder, nbr_epoch, text=f"{split}_{vehicle}_0_epoch")
                break
##############################################################################################################################################################
def get_data(model, config, data_loader, labels_dict, is_train):
    """
    Encode all images of a loader (plus their horizontal flips) into the
    latent space and collect the matching integer labels.
    Each batch contributes twice: once as-is, once horizontally flipped; the
    flipped samples get mirrored labels (left/right seat ground truth swapped).
    Args:
        model: autoencoder whose forward pass returns a dict with key "mu".
        config (dict): provides the torch device.
        data_loader (DataLoader): batches with "image" and per-seat ground
            truth "gt_left"/"gt_middle"/"gt_right".
        labels_dict (dict): stringified (left, middle, right) tuple -> int.
        is_train (bool): currently unused; kept for interface compatibility.
    Returns:
        tuple: (mus, labels) as numpy arrays, aligned row-wise.
    """
    # keep track of latent space
    mus = []
    labels = []
    # make sure we are in eval mode
    model.eval()
    # we do not need to keep track of gradients
    with torch.no_grad():
        # for each batch of images
        for batch in data_loader:
            # push to gpu
            gt_left = batch["gt_left"].numpy()
            gt_middle = batch["gt_middle"].numpy()
            gt_right =batch["gt_right"].numpy()
            input_images = batch["image"].to(config["device"])
            # get the flipped versions as well
            flipped_input_images = torch.stack([TF.hflip(x) for x in input_images])
            # encode the images
            latent = model(input_images)["mu"]
            flipped_latent = model(flipped_input_images)["mu"]
            # keep track of latent space
            mus.extend(latent.cpu().numpy())
            curr_labels = [labels_dict[utils.stringify([x,y,z])] for x,y,z in zip(gt_left, gt_middle, gt_right)]
            labels.extend(curr_labels)
            mus.extend(flipped_latent.cpu().numpy())
            # flipped images get the left/right-swapped label (z,y,x)
            curr_flipped_labels = [labels_dict[utils.stringify([z,y,x])] for x,y,z in zip(gt_left, gt_middle, gt_right)]
            labels.extend(curr_flipped_labels)
    # otherwise not useable
    mus = np.array(mus)
    labels = np.array(labels)
    return mus, labels
def evaluate(model, labels_dict, train_loader, test_loader, config, save_folder, nbr_epoch, best_score):
    """
    Fit a kNN classifier on the training latent space and score it on the
    TICAM (real image) latent space. Only runs every
    config["training"]["frequency"] epochs; otherwise the current best score
    is returned unchanged. Persists model + classifier on improvement.
    """
    # nothing to do between evaluation epochs
    if (nbr_epoch + 1) % config["training"]["frequency"] != 0:
        return best_score
    # embed the training data and the real evaluation data
    train_mu, train_labels = get_data(model, config, train_loader, labels_dict=labels_dict, is_train=True)
    ticam_mu, ticam_labels = get_data(model, config, test_loader["ticam"], labels_dict=labels_dict, is_train=False)
    # fit a kNN classifier on the training latents and score it on TICAM
    classifier = KNeighborsClassifier(n_neighbors=config["model"]["knn"], n_jobs=-1)
    classifier.fit(train_mu, train_labels)
    score = classifier.score(ticam_mu, ticam_labels)
    print(f"[Testing] \tAccuracy Ticam: {100*score:.2f}% (best: {100*best_score:.2f}) (Nbr-Train: {train_labels.shape[0]}, Nbr-Ticam: {ticam_labels.shape[0]})")
    # append result to file
    utils.append_accuracy(save_folder, score)
    # persist the new best model and its classifier
    if score > best_score:
        torch.save(model.state_dict(), save_folder["checkpoints"] / "best_model.pth")
        dump(classifier, save_folder["checkpoints"] / f'best_classifier.joblib')
        best_score = score
    return best_score
##############################################################################################################################################################
def train(config):
    """
    Full training entry point: set up GPU/AMP, create folders, model and
    loaders, then train for the configured number of epochs with periodic
    reconstruction plots and (for vehicle datasets) kNN-transfer evaluation.
    Args:
        config (dict): parsed .toml config; a "device" entry is added here.
    """
    #########################################################
    # GPU
    #########################################################
    # specify which gpu should be visible
    os.environ["CUDA_VISIBLE_DEVICES"] = config["training"]["gpu"]
    # save the gpu settings
    config["device"] = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # gradscaler to improve speed performance with mixed precision training
    scaler = GradScaler()
    #########################################################
    # Setup
    #########################################################
    # create the folders for saving
    save_folder = folder_setup(config)
    # create the model, optimizer and data loader
    model, optimizer, train_loader, labels_dict = model_setup(config, save_folder)
    # get also a test loader for evaluation on unseen dataset
    test_loader = get_test_loader(config, real_only=True, get_train_loader=True)
    #########################################################
    # Training
    #########################################################
    # keep track of time
    timer = utils.TrainingTimer()
    # init
    best_score = 0
    # for each epoch
    for nbr_epoch in range(config["training"]["epochs"]):
        # train a single epoch
        model = train_one_epoch(model, optimizer, scaler, train_loader, config, save_folder, nbr_epoch)
        # reconstruct one batch for each loader for visualization purposes
        recon_one_batch(model, test_loader, config, save_folder, nbr_epoch, split="test")
        # get the latent space for all datasets for the current epoch
        # (kNN transfer evaluation only exists for the vehicle datasets)
        if config["dataset"]["name"].lower() not in ["mpi3d", "mnist", "fonts"]:
            best_score = evaluate(model, labels_dict, test_loader["train"], test_loader, config, save_folder, nbr_epoch, best_score)
    #########################################################
    # Aftermath
    #########################################################
    # save the last model
    torch.save(model.state_dict(), save_folder["checkpoints"] / "last_model.pth")
    print("=" * 37)
    timer.print_end_time()
    print("=" * 37)
    # reset the stdout with the original one
    # this is necessary when the train function is called several times
    # by another script
    sys.stdout = sys.stdout.end()
##############################################################################################################################################################
##############################################################################################################################################################
if __name__ == "__main__":
# reproducibility
seed = 42
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
# load the config file
# config = toml.load("cfg/conv_ae.toml")
# config = toml.load("cfg/vae.toml")
# config = toml.load("cfg/factorvae.toml")
config = toml.load("cfg/extractor_ae.toml")
# start the training using the config file
train(config) | StarcoderdataPython |
1675903 | <filename>backend/server.py
print("Starting server script")
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import flask
from flask_cors import CORS
import torch
import transformers
import traceback
# Flask app with CORS enabled so a separately served frontend can call the API.
app = flask.Flask(__name__)
app.config['TESTING'] = True
cors = CORS(app)
# UnifiedQA (T5-base) tokenizer and model are loaded once at import time,
# so the first request does not pay the download/initialization cost.
MODEL_NAME = "allenai/unifiedqa-t5-base"
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
model = transformers.T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
def run_model(question, context, **generator_args):
    """
    Answer `question` given `context` with the UnifiedQA model.
    Any extra keyword arguments are forwarded to model.generate().
    Returns the list of decoded generation(s).
    """
    # UnifiedQA expects "question \n context" with a *literal* backslash-n
    # separator (hence the escaped "\\n" in the source)
    prompt = " \\n ".join([question.strip(), context.strip()])
    encoded = tokenizer.encode(prompt, return_tensors="pt")
    generated = model.generate(encoded, **generator_args)
    return [tokenizer.decode(token_ids) for token_ids in generated]
@app.route("/predict", methods=["GET", "POST"])
def predict():
data = {"success": False}
try:
request = flask.request.get_json(force=True)
context = request["context"]
question = request["question"]
answer = run_model(question, context)
data["success"] = True
data["answer"] = answer
except Exception as e:
error_string = str(e) + " - " + str(traceback.format_exc())
print("Error:", error_string)
data["error"] = error_string
return flask.jsonify(data)
if __name__ == "__main__":
print("Starting Flask server")
app.run(host="0.0.0.0", port=5000)
| StarcoderdataPython |
1699402 | #!/usr/bin/env python3
"""
The main entry point for the package.
Module Attributes:
_NAME_MOD_OVERRIDE (str): Name to use as override for `__name__` in select
cases since, in this module, `__name__` is often expected to be `__main__`.
logger (Logger): Logger for this module.
(C) Copyright 2021 <NAME>. All Rights Reserved Worldwide.
"""
import argparse
import logging
import signal
import sys
from asana_extensions import version
from asana_extensions.general import config
from asana_extensions.rules import rules
_NAME_MOD_OVERRIDE = 'asana_extensions.main'
if __name__ == '__main__': # Ignored by CodeCov
    # Since no unit testing here, code kept at absolute minimum
    # When run as a script __name__ would be '__main__', which would give the
    # logger a misleading name; use the real module path instead.
    logger = logging.getLogger(_NAME_MOD_OVERRIDE)
else:
    logger = logging.getLogger(__name__)
def main(force_test_report_only, log_level, modules):
    """
    Launches the main app.
    Args:
      force_test_report_only (bool): True to force test report only mode; False
        to allow full execution (pending other settings).
      log_level (Level/int/str): The desired log level.  This can be specified
        as a level constant from the logging module, or it can be an int or str
        reprenting the numeric value (possibly as a str) or textual name
        (possibly with incorrect case) of the level.
      modules ([str]): The list of module names of what to execute.  See the
        arg parsing code in `_setup_and_call_main()` for details of options.
    """
    try:
        _config_root_logger(log_level)
    except (TypeError, ValueError) as ex:
        # fall back to NOTSET so logging still works even with a bad level
        _config_root_logger(logging.NOTSET)
        logger.warning(f'Logger setting failed (Exception: {ex}).  Defaulting'
                + ' to not set.')
    # tri-state result: None = no module ran, False = all succeeded,
    # True = at least one module reported errors
    any_errors = None
    if modules and any(x.lower() in ['rules', 'all'] for x in modules):
        # `not` turns the module's success bool into an error flag; the `or`
        # chain keeps any earlier True and collapses the initial None to False
        any_errors = not _main_rules(force_test_report_only) \
                or any_errors or False
    if any_errors is None:
        logger.info('Asana Extensions had no modules to run -- fully skipped.')
    elif any_errors:
        logger.warning('Asana Extensions run completed, but with errors...')
    else:
        logger.info('Asana Extensions run completed successfully!')
def _main_rules(force_test_report_only):
    """
    Run the rules module: load every configured rule and execute it.

    Args:
      force_test_report_only (bool): True to force test report only mode;
        False to allow full execution (pending other settings).

    Return:
      (bool): True if fully successful (even in test report only mode);
        False if any errors partially or fully prevented completion.
    """
    loaded_rules = rules.load_all_from_config()
    return rules.execute_rules(loaded_rules, force_test_report_only)
def _config_root_logger(log_level):
    """
    Configure the root logger.

    Sets the log level on the root logger (so it applies app-wide) and then
    attaches a stdout handler (up to INFO) and a stderr handler (WARNING and
    above), both sharing one formatter.

    The level is validated and applied BEFORE any handlers are attached.
    Previously handlers were attached first; when the level was invalid,
    `main()` retried this function with `logging.NOTSET`, attaching a second
    set of handlers and duplicating every log message.

    Args:
      log_level (Level/int/str): The desired log level, given either as a
        `logging` level constant, a numeric value (int or str), or a level
        name in any case.

    Raises:
      (TypeError): Invalid type provided for `log_level`.
      (ValueError): Correct type provided for `log_level`, but not a valid
        supported value.
    """
    root_logger = logging.getLogger()      # Root logger will config app-wide
    # May raise TypeError/ValueError -- in that case no handlers are added,
    # so the caller's retry attaches them exactly once.
    _apply_root_log_level(root_logger, log_level)

    handler_stdout = logging.StreamHandler(sys.stdout)
    handler_stdout.setLevel(logging.NOTSET)
    # Cap stdout at INFO; WARNING and above go to stderr only.
    handler_stdout.addFilter(config.LevelFilter(max_inc_level=logging.INFO))
    handler_stderr = logging.StreamHandler()
    handler_stderr.setLevel(logging.WARNING)

    formatter = logging.Formatter('<%(name)s> %(levelname)s: %(message)s')
    handler_stdout.setFormatter(formatter)
    handler_stderr.setFormatter(formatter)

    root_logger.addHandler(handler_stdout)
    root_logger.addHandler(handler_stderr)


def _apply_root_log_level(root_logger, log_level):
    """
    Validate `log_level` and set it on `root_logger`.

    Accepts a level name (any case), an int, or a numeric string; raises
    TypeError/ValueError exactly as documented on `_config_root_logger()`.
    """
    str_value_error = None
    try:
        root_logger.setLevel(log_level.upper())
        return
    except AttributeError:
        # Likely passed in an int, which has no method `upper()` -- retry below
        pass
    except ValueError as ex:
        # ValueError is probably "unknown level" from logger but might be intstr
        str_value_error = ex

    try:
        root_logger.setLevel(int(log_level))
        return
    except (TypeError, ValueError):
        # Probably an invalid type that couldn't be cast -- let fall thru
        pass

    if str_value_error is not None:
        raise str_value_error
    raise TypeError('Invalid log level type (somehow). See --help for -l.')
def _setup_and_call_main(_args=None):
    """
    Setup any pre-main operations, such as signals and input arg parsing, then
    call `main()`. This is basically what would normally be in
    `if __name__ == '__main__':` prior to `main()` call, but this allows unit
    testing a lot more easily.

    Args:
      _args ([str] or None): The list of input args to parse. Should only be
        used by unit testing. When executing, it is expected this stays as
        `None` so it will default to taking args from `sys.argv` (i.e. from
        CLI).
    """
    _register_shutdown_signals()

    # The argparse `dest`/defaults below map 1:1 onto `main()`'s keyword
    # arguments via the `vars()` expansion at the bottom.
    parser = argparse.ArgumentParser(description='Process inputs.',
            prog='asana_extensions')
    # Safety default: without -e/--execute, run in test-report-only mode.
    parser.add_argument('-e', '--execute',
            dest='force_test_report_only',
            action='store_const',
            const=False,
            default=True,
            help='Execute the module(s). Without this, it will run in test'
                + ' report only mode.')
    parser.add_argument('-l', '--log-level',
            default=logging.WARNING,
            help='Set the log level through the app. Will only report logged'
                + ' messages that are the specified level or more severe.'
                + ' Defaults to "Warning". Can specify by name or number to'
                + ' match python `logging` module: notset/0, debug/10, info/20,'
                + ' warning/30, error/40, critical/50.')
    parser.add_argument('-m', '--modules',
            nargs='+',
            help='The modules to run in this invocation. Required. Can'
                + ' specify "all" to run all modules. Otherwise, can provide a'
                + ' space-separate list of module names. Supported modules:'
                + ' rules.')
    parser.add_argument('--version',
            action='version',
            version='%(prog)s ' + version.get_full_version_string(),
            help='The version of this application/package.')

    main(**vars(parser.parse_args(_args)))
def _register_shutdown_signals(signals=None):
    """
    Register `_shutdown` as the handler for the supported shutdown signals,
    skipping any signal the current platform does not define.

    Args:
      signals ([str] or None): Names of signals in the `signal` module, or
        `None` to use the defaults.
    """
    if signals is None:
        signals = ('SIGINT', 'SIGTERM', 'SIGQUIT', 'SIGHUP')
    for sig_name in signals:
        sig_num = getattr(signal, sig_name, None)
        if sig_num is None:
            # Not every platform supports every signal -- skip gracefully.
            logger.debug(f'Signal "{sig_name}" not registered for shutdown. Likely'
                    + ' not supported by this OS.')
            continue
        signal.signal(sig_num, _shutdown)
def _shutdown(signum, _frame):
    """
    Signal handler: log the received signal and exit with a non-zero code.

    Registered via `_register_shutdown_signals()` when running as a script.

    Args:
      signum (int): Number of signal received.
      _frame (frame): See signal.signal python docs.
    """
    logger.warning(f'Exiting from signal {str(signum)} ...')
    sys.exit(1)
if __name__ == '__main__': # Ignored by CodeCov
# Since no unit testing here, code kept at absolute minimum
_setup_and_call_main()
| StarcoderdataPython |
3263805 | '''
Support module around logging functionality for the performance scripts.
'''
from datetime import datetime
from logging import FileHandler, Formatter, StreamHandler
from logging import getLogger
from logging import INFO, WARNING
from os import getpid, makedirs, path
from time import time
import sys
import __main__
from .common import get_repo_root_path
def setup_loggers(verbose: bool):
    '''Setup the root logger for the performance scripts.

    Attaches two handlers to the root logger: a console handler whose level
    depends on *verbose*, and a file handler writing a unique per-run log
    file under ``<repo-root>/logs``.
    '''

    def __formatter() -> Formatter:
        '''Build the formatter shared by the console and file handlers.'''
        fmt = '[%(asctime)s][%(levelname)s] %(message)s'
        datefmt = "%Y/%m/%d %H:%M:%S"
        return Formatter(fmt=fmt, datefmt=datefmt)

    def __initialize(verbose: bool):
        '''Initializes the loggers used by the script.'''
        launch_datetime = datetime.fromtimestamp(time())
        # Log level -- set once. (A second, redundant
        # `getLogger().setLevel(INFO)` call after the handlers was removed.)
        getLogger().setLevel(INFO)
        # Log console handler
        getLogger().addHandler(__get_console_handler(verbose))
        # Log file handler
        log_file_name = __generate_log_file_name(launch_datetime)
        getLogger().addHandler(__get_file_handler(log_file_name))
        start_msg = "Initializing logger {}".format(str(launch_datetime))
        getLogger().info('-' * len(start_msg))
        getLogger().info(start_msg)
        getLogger().info('-' * len(start_msg))

    def __generate_log_file_name(launch_datetime: datetime) -> str:
        '''Generates a unique log file name for the current script.'''
        log_dir = path.join(get_repo_root_path(), 'logs')
        if not path.exists(log_dir):
            makedirs(log_dir)
        if not hasattr(__main__, '__file__'):
            script_name = 'python_interactive_mode'
        else:
            # NOTE(review): the name is derived from sys.argv[0] even though
            # the guard checks __main__.__file__ -- confirm the two agree.
            script_name = path.splitext(path.basename(sys.argv[0]))[0]
        timestamp = launch_datetime.strftime("%Y%m%d%H%M%S")
        log_file_name = '{}-{}-pid{}.log'.format(
            timestamp, script_name, getpid())
        return path.join(log_dir, log_file_name)

    def __get_console_handler(verbose: bool) -> StreamHandler:
        '''Console handler: INFO when verbose, otherwise WARNING and above.'''
        console_handler = StreamHandler()
        level = INFO if verbose else WARNING
        console_handler.setLevel(level)
        console_handler.setFormatter(__formatter())
        return console_handler

    def __get_file_handler(file: str) -> FileHandler:
        '''File handler: always records INFO and above.'''
        file_handler = FileHandler(file)
        file_handler.setLevel(INFO)
        file_handler.setFormatter(__formatter())
        return file_handler

    __initialize(verbose)
| StarcoderdataPython |
3338793 | <gh_stars>1-10
"""Gentoo Security bug scraper and vulnerable package checker."""
__version__ = '0.1.3'
| StarcoderdataPython |
4809576 | <filename>peleffy/tests/test_mapper.py
"""
This module contains the tests to check peleffy's molecular mapper.
"""
import pytest
class TestMapper(object):
    """
    It wraps all tests that involve Mapper class.
    """

    def test_mapper_initializer(self):
        """
        It checks the initialization of the Mapper class: valid construction
        with two Molecule objects (with and without the include_hydrogens
        flag) and TypeError on non-Molecule arguments.
        """
        from peleffy.topology import Molecule
        from peleffy.topology import Mapper

        # Benzene and toluene built from SMILES.
        mol1 = Molecule(smiles='c1ccccc1', hydrogens_are_explicit=False)
        mol2 = Molecule(smiles='c1ccccc1C', hydrogens_are_explicit=False)

        # Check initializer with only the two molecules
        mapper = Mapper(mol1, mol2)

        # Check initializer with only include_hydrogens parameter
        mapper = Mapper(mol1, mol2, include_hydrogens=False)

        # Check initializer with bad types
        with pytest.raises(TypeError):
            mapper = Mapper(mol1.rdkit_molecule, mol2)

        with pytest.raises(TypeError):
            mapper = Mapper(mol1, "mol2")

    def test_mapper_mapping(self):
        """
        It validates the mapping: each expected list pairs an atom index of
        mol1 with the matched atom index of mol2.
        """
        from peleffy.topology import Molecule
        from peleffy.topology import Mapper

        # First mapping checker: heavy atoms only (benzene vs toluene)
        mol1 = Molecule(smiles='c1ccccc1', hydrogens_are_explicit=False)
        mol2 = Molecule(smiles='c1ccccc1C', hydrogens_are_explicit=False)
        mapper = Mapper(mol1, mol2, include_hydrogens=False)
        mapping = mapper.get_mapping()
        assert mapping == [(0, 0), (1, 1), (2, 2), (3, 3),
                           (4, 4), (5, 5)], 'Unexpected mapping'

        # Second mapping checker: heavy atoms only (two xylene orderings)
        mol1 = Molecule(smiles='c1(C)ccccc1C', hydrogens_are_explicit=False)
        mol2 = Molecule(smiles='c1c(C)cccc1C', hydrogens_are_explicit=False)
        mapper = Mapper(mol1, mol2, include_hydrogens=False)
        mapping = mapper.get_mapping()
        assert mapping == [(0, 1), (1, 2), (2, 0), (3, 6),
                           (4, 5), (5, 4), (6, 3)], 'Unexpected mapping'

        # Third mapping checker with hydrogens
        mol1 = Molecule(smiles='c1ccccc1', hydrogens_are_explicit=False)
        mol2 = Molecule(smiles='c1ccccc1C', hydrogens_are_explicit=False)
        mapper = Mapper(mol1, mol2, include_hydrogens=True)
        mapping = mapper.get_mapping()
        assert mapping == [(0, 0), (1, 1), (2, 2), (3, 3),
                           (4, 4), (5, 5), (11, 6), (10, 11),
                           (9, 10), (8, 9), (7, 8), (6, 7)], \
            'Unexpected mapping'

        # Fourth mapping checker with hydrogens
        mol1 = Molecule(smiles='c1(C)ccccc1C', hydrogens_are_explicit=False)
        mol2 = Molecule(smiles='c1c(C)cccc1C', hydrogens_are_explicit=False)
        mapper = Mapper(mol1, mol2, include_hydrogens=True)
        mapping = mapper.get_mapping()
        assert mapping == [(0, 1), (1, 2), (8, 9), (9, 10),
                           (10, 11), (2, 0), (3, 6), (4, 5),
                           (5, 4), (6, 3), (7, 12), (14, 13),
                           (13, 14), (12, 7), (11, 8)], 'Unexpected mapping'
| StarcoderdataPython |
1724393 | <gh_stars>1-10
"""api_v1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from api_v1.api import BucketListViewSet, BucketListItemViewSet,\
UserCreateViewSet
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
urlpatterns = [
    # Bucketlist collection: list all, or create a new one.
    url(r'^bucketlists/$',
        BucketListViewSet.as_view({'get': 'list_bucketlists',
                                   'post': 'create'})),
    # Single bucketlist: retrieve, create, update or delete by primary key.
    url(r'^bucketlists/(?P<pk>[0-9]+)/$',
        BucketListViewSet.as_view({'get': 'list_bucketlist',
                                   'post': 'create',
                                   'put': 'update',
                                   'delete': 'destroy'})),
    # Items within a bucketlist: list or create.
    url(r'^bucketlists/(?P<pk_bucketlist>[0-9]+)/items/$',
        BucketListItemViewSet.as_view({'get': 'list_items',
                                       'post': 'create'})),
    # Single item within a bucketlist: update or delete.
    url(r'^bucketlists/(?P<pk_bucketlist>[0-9]+)/items/(?P<pk_item>[0-9]+)/$',
        BucketListItemViewSet.as_view({'put': 'update',
                                       'delete': 'destroy'})),
    # JWT authentication endpoints: login, token refresh, registration.
    url(r'^auth/login/', obtain_jwt_token),
    url(r'^auth/token-refresh/', refresh_jwt_token),
    url(r'^auth/register/',
        UserCreateViewSet.as_view({'post': 'create'})),
]
| StarcoderdataPython |
1744321 | <filename>qiskit/circuit/library/boolean_logic.py
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-member
"""Implementations of boolean logic quantum circuits."""
from typing import List, Optional
import numpy as np
from qiskit.circuit import QuantumRegister, QuantumCircuit
from qiskit.circuit.exceptions import CircuitError
from qiskit.extensions.standard import MCXGate
class Permutation(QuantumCircuit):
    """An n_qubit circuit that permutes qubits."""

    def __init__(self,
                 num_qubits: int,
                 pattern: Optional[List[int]] = None,
                 seed: Optional[int] = None,
                 ) -> QuantumCircuit:
        """Return an n_qubit permutation circuit implemented using SWAPs.

        Args:
            num_qubits: circuit width.
            pattern: permutation pattern. If None, permute randomly.
            seed: random seed in case a random permutation is requested.

        Returns:
            A permutation circuit.

        Raises:
            CircuitError: if permutation pattern is malformed.

        Reference Circuit:
            .. jupyter-execute::
                :hide-code:

                from qiskit.circuit.library import Permutation
                import qiskit.tools.jupyter
                circuit = Permutation(5, seed=42)
                %circuit_library_info circuit
        """
        super().__init__(num_qubits, name="permutation")

        if pattern is not None:
            # A valid pattern is exactly some ordering of 0..num_qubits-1.
            if sorted(pattern) != list(range(num_qubits)):
                raise CircuitError("Permutation pattern must be some "
                                   "ordering of 0..num_qubits-1 in a list.")
            pattern = np.array(pattern)
        else:
            rng = np.random.RandomState(seed)
            pattern = np.arange(num_qubits)
            rng.shuffle(pattern)

        # Realize the permutation as SWAPs.  Positions already handled are
        # marked with -1 so each transposition is emitted only once.
        for i in range(num_qubits):
            if (pattern[i] != -1) and (pattern[i] != i):
                self.swap(i, int(pattern[i]))
                pattern[pattern[i]] = -1
class XOR(QuantumCircuit):
    """An n_qubit circuit for bitwise xor-ing the input with some integer ``amount``.

    The ``amount`` is xor-ed, in bitstring form, with the input.  This circuit
    can equivalently be read as addition by ``amount`` over the finite field
    GF(2).
    """

    def __init__(self,
                 num_qubits: int,
                 amount: Optional[int] = None,
                 seed: Optional[int] = None,
                 ) -> QuantumCircuit:
        """Return a circuit implementing bitwise xor.

        Args:
            num_qubits: the width of circuit.
            amount: the xor amount in decimal form.
            seed: random seed in case a random xor is requested.

        Returns:
            A circuit for bitwise XOR.

        Raises:
            CircuitError: if the xor bitstring exceeds available qubits.

        Reference Circuit:
            .. jupyter-execute::
                :hide-code:

                from qiskit.circuit.library import XOR
                import qiskit.tools.jupyter
                circuit = XOR(5, seed=42)
                %circuit_library_info circuit
        """
        super().__init__(num_qubits, name="xor")

        if amount is None:
            # No amount given: draw one uniformly at random.
            rng = np.random.RandomState(seed)
            amount = rng.randint(0, 2**num_qubits)
        elif len(bin(amount)[2:]) > num_qubits:
            raise CircuitError("Bits in 'amount' exceed circuit width")

        # XOR with a constant is just an X on every qubit whose bit is set.
        for qubit in range(num_qubits):
            if (amount >> qubit) & 1:
                self.x(qubit)
class InnerProduct(QuantumCircuit):
    """An n_qubit circuit that computes the inner product of two registers."""

    def __init__(self, num_qubits: int) -> QuantumCircuit:
        """Return a circuit to compute the inner product of 2 n-qubit registers.

        The implementation applies one CZ gate per qubit pair.

        Args:
            num_qubits: width of top and bottom registers (half total circuit
                width)

        Returns:
            A circuit computing inner product of two registers.

        Reference Circuit:
            .. jupyter-execute::
                :hide-code:

                from qiskit.circuit.library import InnerProduct
                import qiskit.tools.jupyter
                circuit = InnerProduct(5)
                %circuit_library_info circuit
        """
        register_top = QuantumRegister(num_qubits)
        register_bottom = QuantumRegister(num_qubits)
        super().__init__(register_top, register_bottom, name="inner_product")

        # Pairwise CZ between matching qubits of the two registers.
        for qubit in range(num_qubits):
            self.cz(register_top[qubit], register_bottom[qubit])
class OR(QuantumCircuit):
    r"""A circuit implementing the logical OR operation on a number of qubits.

    The state :math:`|1\rangle` is interpreted as ``True``; the result qubit
    is flipped whenever at least one participating variable qubit is ``True``.
    The OR is implemented with a multi-open-controlled X gate (i.e. one that
    fires when the controls are :math:`|0\rangle`) together with an X gate on
    the result qubit.  A list of per-qubit flags allows qubits to be skipped
    (``0``) or negated (``-1``).

    .. jupyter-execute::
        :hide-code:

        from qiskit.circuit.library import OR
        import qiskit.tools.jupyter
        circuit = OR(5)
        %circuit_library_info circuit

    With flags ``[-1, 0, 0, 1, 1]`` on 5 qubits, the circuit returns ``True``
    if the first qubit is ``False`` or one of the last two is ``True``:

    .. jupyter-execute::
        :hide-code:

        from qiskit.circuit.library import OR
        import qiskit.tools.jupyter
        circuit = OR(5, flags=[-1, 0, 0, 1, 1])
        %circuit_library_info circuit
    """

    def __init__(self, num_variable_qubits: int, flags: Optional[List[int]] = None,
                 mcx_mode: str = 'noancilla') -> None:
        """Create a new logical OR circuit.

        Args:
            num_variable_qubits: The qubits of which the OR is computed. The
                result will be written into an additional result qubit.
            flags: A list of +1/0/-1 marking negations or omissions of qubits.
            mcx_mode: The mode to be used to implement the multi-controlled X
                gate.
        """
        # store num_variable_qubits and flags
        self.num_variable_qubits = num_variable_qubits
        self.flags = flags

        qr_variable = QuantumRegister(num_variable_qubits, name='variable')
        qr_result = QuantumRegister(1, name='result')
        super().__init__(qr_variable, qr_result, name='or')

        # A missing flag list means every qubit participates un-negated.
        effective_flags = flags or [1] * num_variable_qubits
        # Controls: every qubit with a nonzero flag.
        controls = [qubit for qubit, flag in zip(qr_variable, effective_flags)
                    if flag != 0]
        # Qubits with a positive flag get an X before/after so the
        # multi-controlled X acts as an open control on them.
        flipped = [qubit for qubit, flag in zip(qr_variable, effective_flags)
                   if flag > 0]

        self.num_ancilla_qubits = MCXGate.get_num_ancilla_qubits(
            len(controls), mode=mcx_mode)
        if self.num_ancilla_qubits > 0:
            qr_ancilla = QuantumRegister(self.num_ancilla_qubits, 'ancilla')
            self.add_register(qr_ancilla)
        else:
            qr_ancilla = []

        self.x(qr_result)
        if flipped:
            self.x(flipped)
        self.mcx(controls, qr_result[:], qr_ancilla[:], mode=mcx_mode)
        if flipped:
            self.x(flipped)
class AND(QuantumCircuit):
    r"""A circuit implementing the logical AND operation on a number of qubits.

    The state :math:`|1\rangle` is interpreted as ``True``; the result qubit
    is flipped only when *all* participating variable qubits are ``True``.
    In this form the plain AND equals a multi-controlled X gate controlled on
    every variable qubit.  A list of per-qubit flags allows qubits to be
    skipped (``0``) or negated (``-1``) via pre- and post-applied X gates.

    .. jupyter-execute::
        :hide-code:

        from qiskit.circuit.library import AND
        import qiskit.tools.jupyter
        circuit = AND(5)
        %circuit_library_info circuit

    With flags ``[-1, 0, 0, 1, 1]`` on 5 qubits, the circuit returns ``True``
    if the first qubit is ``False`` and the last two are ``True``:

    .. jupyter-execute::
        :hide-code:

        from qiskit.circuit.library import AND
        import qiskit.tools.jupyter
        circuit = AND(5, flags=[-1, 0, 0, 1, 1])
        %circuit_library_info circuit
    """

    def __init__(self, num_variable_qubits: int, flags: Optional[List[int]] = None,
                 mcx_mode: str = 'noancilla') -> None:
        """Create a new logical AND circuit.

        Args:
            num_variable_qubits: The qubits of which the AND is computed. The
                result will be written into an additional result qubit.
            flags: A list of +1/0/-1 marking negations or omissions of qubits.
            mcx_mode: The mode to be used to implement the multi-controlled X
                gate.
        """
        # store num_variable_qubits and flags
        self.num_variable_qubits = num_variable_qubits
        self.flags = flags

        qr_variable = QuantumRegister(num_variable_qubits, name='variable')
        qr_result = QuantumRegister(1, name='result')
        super().__init__(qr_variable, qr_result, name='and')

        # A missing flag list means every qubit participates un-negated.
        effective_flags = flags or [1] * num_variable_qubits
        # Controls: every qubit with a nonzero flag.
        controls = [qubit for qubit, flag in zip(qr_variable, effective_flags)
                    if flag != 0]
        # Qubits with a negative flag are logically negated (X before/after).
        negated = [qubit for qubit, flag in zip(qr_variable, effective_flags)
                   if flag < 0]

        self.num_ancilla_qubits = MCXGate.get_num_ancilla_qubits(
            len(controls), mode=mcx_mode)
        if self.num_ancilla_qubits > 0:
            qr_ancilla = QuantumRegister(self.num_ancilla_qubits, 'ancilla')
            self.add_register(qr_ancilla)
        else:
            qr_ancilla = []

        if negated:
            self.x(negated)
        self.mcx(controls, qr_result[:], qr_ancilla[:], mode=mcx_mode)
        if negated:
            self.x(negated)
| StarcoderdataPython |
10876 | <reponame>mikedelong/aarhus
import json
import logging
import os
import pickle
import sys
import time
import pyzmail
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
# NOTE(review): Python 2 era workaround (see the URL above) -- `reload(sys)`
# re-exposes `setdefaultencoding` so the default codec can be forced to UTF-8.
# Neither call exists on Python 3; this script assumes Python 2 throughout.
reload(sys)
sys.setdefaultencoding("utf8")
# Root logging configured once at import time for this script.
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
def process_folder(arg_folder, arg_reference, arg_in_or_out, arg_document_count_limit):
    """
    Walk every file under arg_folder (up to arg_document_count_limit), parse
    each as a mail message, and collect those whose extracted header dict
    does (arg_in_or_out truthy) or does not (falsy) contain the key
    arg_reference.  Returns {file name: parsed message}.
    """
    matched_messages = {}
    total_docs = 0
    with_refs = 0
    without_refs = 0
    with_message_id = 0
    for dir_path, _subdirs, file_names in os.walk(arg_folder):
        for file_name in file_names:
            if total_docs >= arg_document_count_limit:
                continue
            full_path = os.path.join(dir_path, file_name)
            # Periodic progress logging.
            if total_docs % 1000 == 0 and total_docs > 0:
                logging.debug("%d %s", total_docs, full_path)
            headers, message = get_references(full_path)
            if 'references' in headers:
                with_refs += 1
            else:
                without_refs += 1
            total_docs += 1
            if 'message-id' in headers:
                with_message_id += 1
            # Keep the message when key presence matches the in/out request.
            if (arg_reference in headers) == bool(arg_in_or_out):
                matched_messages[file_name] = message
    logging.info('documents : %d message-id: %d references: %d no references: %d' % (
        total_docs, with_message_id, with_refs, without_refs))
    return matched_messages
def get_references(current_file):
    """
    Parse one mail message file and extract the threading-related headers.

    Args:
        current_file (str): path of the message file to parse.

    Returns:
        (dict, message): dict that may contain 'message-id', 'references'
        (list of str, split on spaces) and 'in-reply-to'; plus the parsed
        pyzmail message object.
    """
    result = {}
    with open(current_file, 'rb') as fp:
        message = pyzmail.message_from_file(fp)
    # Header access here is case-sensitive and the corpus uses several
    # capitalizations of Message-Id; first match wins, preserving the
    # precedence of the original chained checks.
    for variant in ('Message-Id', 'Message-ID', 'Message-id'):
        if variant in message.keys():
            result['message-id'] = message[variant]
            break
    else:
        # `warning` replaces the deprecated `warn` alias.
        logging.warning('no message id in file %s', current_file)
        logging.info([key for key in message.keys()])
    if 'References' in message.keys():
        result['references'] = message['References'].split(' ')
    if 'In-Reply-To' in message.keys():
        result['in-reply-to'] = message['In-Reply-To']
    return result, message
def run():
    """
    Script entry point: read roots-settings.json, scan the configured folder
    for matching messages, pickle the result dict, and log the elapsed time.
    """
    start_time = time.time()
    with open('roots-settings.json') as data_file:
        data = json.load(data_file)
        logging.debug(data)
        input_folder = data['input_folder']
        document_count_limit = data['document_count_limit']
        # -1 is a sentinel for "no limit".  NOTE(review): sys.maxint exists
        # only on Python 2 (consistent with the reload/setdefaultencoding
        # shim at the top of this script).
        if document_count_limit == -1:
            document_count_limit = sys.maxint
        reference_of_interest = data['reference']
        # our internal keys are always lowercase, so we want to be sure
        # to use a lowercase reference for comparisons
        reference_of_interest = reference_of_interest.lower()
        in_or_out = data['reference_in']
        in_or_out = bool(in_or_out)
        pickle_file = data['output_pickle_file']
    documents_of_interest = process_folder(input_folder, reference_of_interest, in_or_out, document_count_limit)
    logging.info(
        'found %d documents of interest: %s' % (len(documents_of_interest), sorted(documents_of_interest.keys())))
    with open(pickle_file, 'wb') as output_fp:
        pickle.dump(documents_of_interest, output_fp)
    logging.info('wrote pickled dictionary to %s.' % pickle_file)
    # Report wall-clock duration as HH:MM:SS.
    finish_time = time.time()
    elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
    elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
    logging.info("Time: {:0>2}:{:0>2}:{:05.2f}".format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))


if __name__ == '__main__':
    run()
| StarcoderdataPython |
1683660 | <reponame>CONABIO-audio/irekua-database
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from irekua_database.models import DeviceType
class MimeTypesInline(admin.TabularInline):
    """Inline editor for the DeviceType <-> MimeType many-to-many relation."""
    # No extra blank rows by default.
    extra = 0
    # Edit the M2M through table directly.
    model = DeviceType.mime_types.through
    autocomplete_fields = ('mimetype',)
    verbose_name = _('Mime type')
    verbose_name_plural = _('Mime types')
    # Rendered collapsed on the change page.
    classes = ('collapse', )
class DeviceTypeAdmin(admin.ModelAdmin):
    """Admin configuration for DeviceType, with its mime types inlined."""
    # Search by type name or by the name of an associated mime type.
    search_fields = ['name', 'mime_types__name']
    list_display = ('id', 'name', 'created_on')
    list_display_links = ('id', 'name')
    fieldsets = (
        (None, {
            'fields': (
                ('name', 'icon'),
                'description'
            ),
        }),
    )
    inlines = [
        MimeTypesInline
    ]
| StarcoderdataPython |
71638 | <gh_stars>0
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import user_story as user_story_module
class UserStorySet(object):
    # NOTE: deliberately no class docstring here -- Description() returns the
    # first docstring line of the class it is called on, and this base class
    # should report an empty description.

    def __init__(self):
        self.user_stories = []

    def AddUserStory(self, user_story):
        # Only telemetry UserStory instances may be stored in the set.
        assert isinstance(user_story, user_story_module.UserStory)
        self.user_stories.append(user_story)

    @classmethod
    def Name(cls):
        """Return the short (module) name of this UserStorySet.

        A classmethod so the benchmark_runner script can match a user story
        class with the name given on the command line:
        'Run <User story test name> <User story class name>'
        """
        return cls.__module__.rsplit('.', 1)[-1]

    @classmethod
    def Description(cls):
        """Return a one-line, human-readable description of the set.

        Taken from the first line of the class docstring, or '' when the
        class has none.  A classmethod so benchmark_runner can list names
        together with descriptions.
        """
        doc = cls.__doc__
        return doc.splitlines()[0] if doc else ''

    def __iter__(self):
        return iter(self.user_stories)

    def __len__(self):
        return len(self.user_stories)

    def __getitem__(self, key):
        return self.user_stories[key]

    def __setitem__(self, key, value):
        self.user_stories[key] = value
| StarcoderdataPython |
1657980 | <reponame>cuiliang0302/myblog
# Generated by Django 3.1.3 on 2020-11-22 14:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces the former single `Catalogue` model with a two-level
    # hierarchy: `FirstCatalogue` (attached to a Note) and `SecondCatalogue`
    # (attached to a Content and to its parent FirstCatalogue).

    dependencies = [
        ('blog', '0014_auto_20201122_1420'),
    ]

    operations = [
        migrations.CreateModel(
            name='FirstCatalogue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='名称')),
                ('order', models.IntegerField(verbose_name='序号')),
                ('note', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='blog.note', verbose_name='笔记名称')),
            ],
            options={
                'verbose_name': '笔记一级目录',
                'verbose_name_plural': '笔记一级目录',
            },
        ),
        migrations.CreateModel(
            name='SecondCatalogue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField(verbose_name='序号')),
                ('content', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='blog.content', verbose_name='笔记名称')),
                ('father', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='blog.firstcatalogue', verbose_name='一级目录名称')),
            ],
            options={
                'verbose_name': '笔记二级目录',
                'verbose_name_plural': '笔记二级目录',
            },
        ),
        # The old flat model is superseded by the two models above.
        migrations.DeleteModel(
            name='Catalogue',
        ),
    ]
| StarcoderdataPython |
3372541 | from __future__ import unicode_literals
# Django
from django.conf import settings
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
# Local Apps
from grapevine.admin.base import BaseModelAdmin, SendableAdminMixin
from .models import Email, EmailRecipient, EmailBackend, EmailVariable, \
RawEvent, Event, EmailEvent, UnsubscribedAddress
IS_SUIT_AVAILBLE = "suit" in settings.INSTALLED_APPS
class EmailableAdminMixin(SendableAdminMixin):
    """
    Admin mixin for Sendables specifically of the Emailable variety.

    Injects previews of the computed `reply_to`, `from_email` and `subject`
    into the change-form context (or per-field error strings when they
    cannot be generated) so template problems surface in the admin.
    """
    # Used for admin display purposes
    message_type_verbose = "Email"

    if IS_SUIT_AVAILBLE:
        change_form_template = 'admin/suit_change_emailable_form.html'
    else:
        change_form_template = 'admin/change_emailable_form.html'

    def get_test_recipient(self, request, obj_id):
        """Test sends go to the admin user performing the action."""
        return request.user.email

    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        """Add email preview values (or error strings) to the context before
        delegating to the standard admin change-form rendering."""
        if obj:
            try:
                context['reply_to'] = obj.get_reply_to()
            except ValueError as e:
                context['error_reply_to'] = "ERROR: %s" % (e.args[0],)
            except NotImplementedError:
                context['error_reply_to'] = "ERROR: Could not generate a `reply_to`. Does this template lack a value?"

            try:
                val = obj.get_from_email()
                # Escape angle brackets so an address like "Name <a@b.c>" is
                # displayed literally: the value is passed to mark_safe, and
                # the previous `replace("<", "<")` was a no-op that left raw
                # markup in the safe string.
                val = val.replace("<", "&lt;").replace(">", "&gt;")
                context['from_email'] = mark_safe(val)
            except ValueError as e:
                context['error_from_email'] = "ERROR: %s" % (e.args[0],)
            except NotImplementedError:
                context['error_from_email'] = "ERROR: Could not generate a `from_email`. Does this template lack a value?"

            try:
                context['subject'] = obj.get_subject()
            except ValueError:
                context['error_subject'] = "ERROR: Could not populate everything in \"%s\"" % (obj.get_raw_subject(),)
            except NotImplementedError:
                context['error_subject'] = "ERROR: Could not generate a subject. Does the template lack a subject?"

        return super(EmailableAdminMixin, self).render_change_form(request, context, add, change, form_url, obj)
class EmailInlineMixin(object):
    """Shared config for the read-only inlines shown on the Email admin."""
    # No extra blank rows -- these inlines only display existing history.
    extra = 0

    def has_add_permission(self, obj):
        # NOTE(review): Django passes `request` as the first positional
        # argument of this hook; the parameter is misleadingly named `obj`.
        # Works positionally, but confirm before relying on the name.
        return False

    def has_delete_permission(self, request, obj):
        # Records are immutable history; never deletable from the admin.
        # NOTE(review): Django's hook signature is (request, obj=None); the
        # missing default could raise if called without `obj` -- confirm.
        return False
class EmailVariableInline(EmailInlineMixin, admin.TabularInline):
    """Read-only inline listing the template variables of an email."""
    model = EmailVariable
    readonly_fields = ['key', 'value']
    verbose_name_plural = 'Variables'
class EmailRecipientInline(EmailInlineMixin, admin.TabularInline):
    """Read-only inline listing the recipients of an email."""
    model = EmailRecipient
    readonly_fields = ['address', 'domain', 'name', 'type']
    verbose_name_plural = 'Recipients'
class EmailEventInline(EmailInlineMixin, admin.TabularInline):
    """Read-only inline listing the delivery events of an email."""
    model = EmailEvent

    def admin_raw_event(self, obj):
        # Link through to the RawEvent change page holding the raw payload.
        url = reverse('admin:emails_rawevent_change', args=(obj.raw_event.pk,))
        return '<a href="%s">%s</a>' % (url, obj.raw_event.pk,)
    admin_raw_event.short_description = "Raw Event"
    admin_raw_event.allow_tags = True

    readonly_fields = ['event', 'admin_raw_event', 'happened_at']
    fields = ['admin_raw_event', 'event', 'happened_at']
    verbose_name_plural = 'Events'
class EmailAdmin(BaseModelAdmin):
    """
    Admin for sent emails.  Records are an immutable log of what was sent,
    so every field is read-only and the page only adds display helpers.
    """
    inlines = [EmailRecipientInline, EmailVariableInline, EmailEventInline]
    list_display = ['id', 'subject', 'sent_at', 'status', 'is_test']
    list_filter = ['status']
    search_fields = ('=from_email',)

    # Everything is readonly because this table is inherently immutable.
    # It makes no sense to edit the records of that which has already happened.
    readonly_fields = ['subject', 'html_body', 'text_body', 'from_email', 'reply_to', 'type', 'admin_text_body', 'is_real', 'admin_sendable',
                       'status', 'sent_at', 'is_test', 'communication_time', 'guid', 'admin_log', 'backend', 'admin_html_body']

    # (A commented-out iframe-based variant of admin_html_body was removed.)
    def admin_html_body(self, obj):
        """Raw HTML body, rendered directly on the detail page."""
        return obj.html_body
    admin_html_body.short_description = 'HTML'
    admin_html_body.allow_tags = True

    def admin_text_body(self, obj):
        """Text body with newlines converted to <br> for HTML display."""
        return obj.text_body.replace('\n', '<br>')
    admin_text_body.short_description = 'Text Body'
    admin_text_body.allow_tags = True

    def admin_log(self, obj):
        """Delivery log, preformatted; empty log renders as an empty block."""
        return '<pre>{}</pre>'.format(obj.log or '')
    admin_log.short_description = 'Log'
    admin_log.allow_tags = True

    def admin_sendable(self, obj):
        """Link to the originating Sendable's admin page, or '--' if none."""
        if obj.sendable:
            return """<a href="{0}">{1}</a>""".format(obj.sendable.admin_url, obj.sendable.__unicode__())
        return "--"
    admin_sendable.short_description = "Sendable"
    admin_sendable.allow_tags = True

    fieldsets = (
        ('Message Quick View', {
            'fields': ('sent_at', 'subject', 'from_email', 'reply_to', 'admin_sendable',)
        },),
        ('Full Message', {
            'fields': ('admin_html_body',)
        },),
        ('Other Data', {
            'classes': ('collapse',),
            'fields': ('type', 'is_real', 'communication_time', 'guid', 'backend', 'admin_log', 'admin_text_body',)
        },),
    )

    def is_real(self, obj):
        """Inverse of is_test, for friendlier list display."""
        return not obj.is_test
    is_real.short_description = "Real Message?"
class EmailRecipientAdmin(BaseModelAdmin):
    # Standalone admin for recipients; raw_id avoids loading every Email in a select.
    raw_id_fields = ['email']
    list_display = ['email', 'address', 'type']
class EmailBackendAdmin(BaseModelAdmin):
    # SECURITY NOTE(review): `password` is shown in plaintext in the changelist;
    # consider removing it from list_display.
    list_display = ['id', 'name', 'path', 'username', 'password']
    # Disable bulk actions and deletion: backends are configuration records.
    actions = None
    def has_delete_permission(self, request, obj=None):
        """Backends can never be deleted from the admin."""
        return False
class RawEventAdmin(BaseModelAdmin):
    """Read-only admin for raw webhook payloads received from email backends."""
    readonly_fields = ['backend', 'admin_detail_payload', 'processed_on', 'processed_in',
                       'is_queued', 'is_broken', 'remote_ip', 'created_at']
    list_display = ['id', 'backend', 'admin_list_payload', 'processed_on', 'processed_in',
                    'remote_ip', 'created_at']
    fieldsets = (
        ('Event',
            {'fields': ('backend', 'admin_detail_payload', 'remote_ip',)},
         ),
        ('Status',
            {'fields': ('processed_on', 'processed_in', 'is_queued', 'is_broken', 'created_at',)},
         )
    )
    def admin_list_payload(self, obj):
        """Changelist column: first 20 chars of the payload, newlines stripped."""
        payload = obj.payload.replace('\n', '')[:20]
        return payload
    def admin_detail_payload(self, obj):
        """Detail view: full payload, preformatted."""
        return "<pre>%s</pre>" % (obj.payload,)
    admin_detail_payload.short_description = "Payload"
    # NOTE(review): `allow_tags` was removed in Django 2.0 — see EmailEventInline.
    admin_detail_payload.allow_tags = True
class UnsubscribedAddressAdmin(BaseModelAdmin):
    # Admin for the suppression list of addresses that opted out.
    raw_id_fields = ['email']
    list_display = ['address', 'created_at']
class EventAdmin(BaseModelAdmin):
    # Event types and whether receiving one should stop further sends.
    list_display = ['name', 'should_stop_sending']
# Register every email model with its dedicated admin class.
admin.site.register(Email, EmailAdmin)
admin.site.register(EmailRecipient, EmailRecipientAdmin)
admin.site.register(EmailBackend, EmailBackendAdmin)
admin.site.register(RawEvent, RawEventAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(UnsubscribedAddress, UnsubscribedAddressAdmin)
| StarcoderdataPython |
7767 | <filename>cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py
import unittest
from find_x_in_listy import find_x_in_listy, Listy
class Test_Case_Find_X_In_Listy(unittest.TestCase):
    """Checks binary search over a Listy (indexable, length unknown up front)."""
    def test_case_find_x_in_listy(self):
        # NOTE(review): a 10**8-element fixture costs gigabytes of RAM and
        # dominates runtime; a much smaller range would test the same logic.
        listy = Listy(list(range(0, 1*10**8)))
        self.assertEqual(find_x_in_listy(listy, 5678), 5678)
3209139 | <filename>gbdxtools/rda/fetch/conc/libcurl/select.py
import os
from collections import defaultdict
import threading
from tempfile import NamedTemporaryFile
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from functools import lru_cache # python 3
except ImportError:
from cachetools.func import lru_cache
import pycurl
from skimage.io import imread
import numpy as np
import os
from collections import deque
try:
import signal
from signal import SIGPIPE, SIG_IGN
except ImportError:
pass
else:
signal.signal(SIGPIPE, SIG_IGN)
NUM_WORKERS = 5
MAX_RETRIES = 5
def _on_fail(shape=(8, 256, 256), dtype=np.float32):
    """Fallback chip: an all-zero (bands, rows, cols) array used when a
    tile download or decode fails."""
    return np.zeros(shape, dtype=dtype)
def _load_data(fp):
    """Read the image at path `fp` into a bands-first array, then delete the file.

    Returns the `_on_fail()` zero array if reading fails for any reason.
    """
    try:
        arr = imread(fp)
        if len(arr.shape) == 3:
            # skimage returns (rows, cols, bands); move bands to axis 0.
            arr = np.rollaxis(arr, 2, 0)
        else:
            # Single-band image: add a leading band axis for a uniform shape.
            arr = np.expand_dims(arr, axis=0)
    except Exception as e:
        # Deliberate best-effort: any decode error yields the zero fallback.
        # NOTE(review): `e` is never logged, so failures are invisible.
        arr = _on_fail()
    finally:
        # The temp file is consumed exactly once; always clean it up.
        os.remove(fp)
    return arr
def _init_curl(NOSIGNAL=1, CONNECTTIMEOUT=120, TIMEOUT=300):
    """Create a pycurl handle with signal handling off (thread safety) and
    connect/total timeouts in seconds."""
    _curl = pycurl.Curl()
    _curl.setopt(pycurl.NOSIGNAL, NOSIGNAL)
    _curl.setopt(pycurl.CONNECTTIMEOUT, CONNECTTIMEOUT)
    _curl.setopt(pycurl.TIMEOUT, TIMEOUT)
    return _curl
def _load_curl(url, token, index, _curl):
    """Configure a reusable curl handle for one tile fetch.

    Attaches a temp file as the write target and stashes url/token/index/fd
    on the handle so the multi-loop can recover them on completion/failure.
    """
    _, ext = os.path.splitext(urlparse(url).path)
    # delete=False: the file must outlive this handle; _load_data removes it.
    fd = NamedTemporaryFile(prefix='gbdxtools', suffix=ext, delete=False)
    _curl.setopt(pycurl.WRITEDATA, fd.file)
    _curl.setopt(pycurl.URL, url)
    _curl.setopt(pycurl.HTTPHEADER, ['Authorization: Bearer {}'.format(token)])
    _curl.index = index
    _curl.token = token
    _curl.url = url
    _curl.fd = fd
    return _curl
def _fd_handler(fd, delete=True):
    """Flush and close an open file object; unlink it from disk unless the
    caller wants to keep it (successful downloads are kept for _load_data)."""
    fd.flush()
    fd.close()
    if delete:
        os.remove(fd.name)
def _cleanup(crec, cmulti):
    """Close every easy handle in `crec`, then the multi handle itself."""
    for _curl in crec:
        _curl.close()
    cmulti.close()
def load_urls(collection, max_workers=64, max_retries=MAX_RETRIES, shape=(8,256,256),
              NOSIGNAL=1, CONNECTTIMEOUT=120, TIMEOUT=300):
    """Fetch many tiles concurrently with pycurl's multi interface.

    `collection` is an iterable of (url, token, index) triples. Each fetch is
    retried up to `max_retries` times; any index that never succeeds maps to
    the zero fallback array. Returns {index: ndarray}.

    NOTE(review): `shape`/NOSIGNAL/CONNECTTIMEOUT/TIMEOUT parameters are
    accepted but never forwarded to `_init_curl`/`_on_fail` — confirm intent.
    """
    ntasks = len(collection)
    taskq = deque(collection)
    # One easy handle per worker, capped at the number of tasks.
    crec = [_init_curl() for _ in range(min(max_workers, ntasks))]
    curlq = deque(crec)
    runcount = defaultdict(int)   # attempts per tile index
    results = defaultdict(_on_fail)  # index -> temp-file path (or zero array)
    cmulti = pycurl.CurlMulti()
    nprocessed = 0
    while ntasks > nprocessed:
        # Hand out queued tasks to idle handles.
        while taskq and curlq:
            url, token, index = taskq.popleft()
            index = tuple(index)
            _curl = curlq.popleft()
            _curl = _load_curl(url, token, index, _curl)
            # increment attempt number and add to multi
            runcount[index] += 1
            cmulti.add_handle(_curl)
        # Drive all transfers until curl asks us to wait.
        while True:
            ret, nhandles = cmulti.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        # Harvest completed transfers (successes and failures).
        while True:
            nq, suc, failed = cmulti.info_read()
            for _curl in suc:
                # Keep the downloaded file on disk; _load_data reads it later.
                results[_curl.index] = _curl.fd.name
                _fd_handler(_curl.fd, delete=False)
                _curl.fd = None
                cmulti.remove_handle(_curl)
                curlq.append(_curl)
                nprocessed += 1
            for _curl, err_num, err_msg in failed:
                # Discard the partial file; requeue unless retries exhausted.
                _fd_handler(_curl.fd)
                _curl.fd = None
                if runcount[_curl.index] < max_retries:
                    taskq.append([_curl.url, _curl.token, _curl.index])
                else:
                    nprocessed += 1
                cmulti.remove_handle(_curl)
                curlq.append(_curl)
            if nq == 0:
                break
    _cleanup(crec, cmulti)
    # Decode every attempted index; missing results fall back to zeros.
    return {idx: _load_data(results[idx]) if idx in results else _on_fail() for idx in runcount.keys()}
| StarcoderdataPython |
105219 | <gh_stars>0
from abc import ABC, abstractmethod
# NOTE - not making Pizza class as an ABC as we want subclasses to inherent current print messages that are linked
# to each method
class Pizza:
    """Base pizza for the Abstract Factory example.

    Subclasses set `name` and an `ingredientsFactory`; `prepare()` then pulls
    dough/sauce/cheese from that factory. Not an ABC on purpose: subclasses
    must inherit the concrete print-based method behavior.

    Fix vs. original: the first `prepare` definition (decorated with
    @abstractmethod on a non-ABC class) was dead code — the later duplicate
    `def prepare` silently replaced it at class-creation time. The dead
    definition and its commented-out body are removed; behavior is unchanged.
    """
    def __init__(self):
        self.name = None
        self.dough = None
        self.sauce = None
        self.veggies = []
        self.cheese = None
        self.pepperoni = None
        self.clam = None
        self.toppings = []
        self.ingredientsFactory = None
    def prepare(self):
        """Assemble the pizza from its ingredient factory (must be set first)."""
        print(f'Preparing {self.name}')
        self.dough = self.ingredientsFactory.createDough()
        self.sauce = self.ingredientsFactory.createSauce()
        self.cheese = self.ingredientsFactory.createCheese()
    def bake(self):
        print('Bake for 25 min at 350C')
    def cut(self):
        # Note: 'diagonol' typo preserved — it is user-visible output.
        print('Cutting the pizza into diagonol slices')
    def box(self):
        print('Place pizza in official PizzaStore box')
    def setName(self, name):
        self.name = name
    def getName(self):
        return self.name
class CheesePizza(Pizza):
    """Cheese pizza; ingredients come from the supplied regional factory."""
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'cheese'
        self.ingredientsFactory = ingredientsFactory
class PepperoniPizza(Pizza):
    """Pepperoni pizza.

    `ingredientsFactory` is new and optional (default None keeps the original
    no-argument signature working) so this class matches its siblings, which
    all accept a factory.
    """
    def __init__(self, ingredientsFactory=None):
        super().__init__()
        self.name = 'pepperoni'
        self.ingredientsFactory = ingredientsFactory
class ClamPizza(Pizza):
    """Clam pizza; ingredients come from the supplied regional factory."""
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'clam'
        self.ingredientsFactory = ingredientsFactory
class VeggiePizza(Pizza):
    """Veggie pizza.

    `ingredientsFactory` is new and optional, for consistency with the other
    pizza classes (backward compatible: default None).
    """
    def __init__(self, ingredientsFactory=None):
        super().__init__()
        self.name = 'veggie'
        self.ingredientsFactory = ingredientsFactory
# New-York-style variants: thin crust, marinara, grated reggiano.
class NYStyleCheesePizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        # NOTE(review): `type` is only set on this one class — confirm whether
        # the other styles should carry it too.
        self.type = 'NYStyleCheesePizza'
        self.name = 'NY style sauce and cheese pizza'
        self.dough = 'thin crust dough'
        self.sauce = 'marina sauce'
        self.toppings = ['grated regiano cheese']
        self.ingredientsFactory = ingredientsFactory
class NYStylePepperoniPizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'NYStylePepperoni'
        self.ingredientsFactory = ingredientsFactory
class NYStyleClamPizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'NYStyleClamPizza'
        self.ingredientsFactory = ingredientsFactory
class NYStyleVeggiePizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'NYStyleVeggie'
        self.ingredientsFactory = ingredientsFactory
# Chicago-style variants: deep dish, plum-tomato sauce, mozzarella.
class ChicagoStyleCheesePizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'chiciago style deep dish cheese pizza'
        self.dough = 'extra thick crust dough'
        self.sauce = 'plum tomato sauce'
        self.toppings = ['shredded mozzarella cheese']
        self.ingredientsFactory = ingredientsFactory
class ChicagoStylePepperoniPizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'ChicagoStylePepperoni'
        self.ingredientsFactory = ingredientsFactory
class ChicagoStyleClamPizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'ChicagoStyleClamPizza'
        self.ingredientsFactory = ingredientsFactory
class ChicagoStyleVeggiePizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'ChicagoStyleVeggie'
        self.ingredientsFactory = ingredientsFactory
# California-style variants.
class CaliforniaStyleCheesePizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'CaliforniaStyleCheesePizza'
        self.dough = 'extra thick crust dough'
        self.sauce = 'plum tomato sauce'
        # NOTE(review): 'pieapples' is probably a typo for 'pineapples' — it is
        # a runtime string, so left unchanged here.
        self.toppings = ['pieapples']
        self.ingredientsFactory = ingredientsFactory
class CaliforniaStylePepperoniPizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'CaliforniaStylePepperoni'
        self.ingredientsFactory = ingredientsFactory
class CaliforniaStyleClamPizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'CaliforniaStyleClamPizza'
        self.ingredientsFactory = ingredientsFactory
class CaliforniaStyleVeggiePizza(Pizza):
    def __init__(self, ingredientsFactory):
        super().__init__()
        self.name = 'CaliforniaStyleVeggie'
        self.ingredientsFactory = ingredientsFactory
| StarcoderdataPython |
1725155 | <gh_stars>1-10
# Copyright (c) 2021. <NAME>
# Copyright (c) 2021. University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from DataObjects.Architecture.ClassFlatArchitecture import FlatArchitecture
from DataObjects.FlowDataTypes.ClassBaseAccess import BaseAccess
from DataObjects.ClassMultiDict import MultiDict
from DataObjects.Transitions.ClassTransitionv2 import Transition_v2
from Backend.Murphi.BaseConfig import BaseConfig
from Backend.Murphi.MurphiModular.MurphiTokens import MurphiTokens
from Backend.Common.TemplateHandler.TemplateHandler import TemplateHandler
from Backend.Murphi.MurphiTemp.TemplateHandler.MurphiTemplates import MurphiTemplates
from Parser.NetworkxParser.ClassProtoParserBase import ProtoParserBase
from Debug.Monitor.ClassDebug import Debug
class GenMurphiAccess(TemplateHandler):
    """Emits the Murphi code that tracks and serves CPU access permissions
    (load/store) per protocol state for the generated model checker input."""
    # PermFuncNames
    k_perm = "Perm_"
    f_clear_perm = "Clear_perm"
    f_set_perm = "Set_perm"
    f_exe_cpu_access = "Serve_CPU"
    f_exe_store = "Store"
    def __init__(self, arch: FlatArchitecture, config: BaseConfig):
        TemplateHandler.__init__(self)
        self.arch = arch
        self.config = config
        # A state permission is valid if a transition triggered by this access exists, that doesn't have an outgoing
        # edge
        self.state_permission_map: MultiDict = MultiDict()
        # Collect, per start state, every plain load/store guard whose
        # transition emits no message and has no cond/ncond operation.
        for transition in arch.get_architecture_transitions():
            if (isinstance(transition.guard, BaseAccess.Access)
                    and str(transition.guard) in BaseAccess.Access_str_list
                    and not transition.out_msg
                    and not ProtoParserBase.k_cond in [str(operation) for operation in transition.operations]
                    and not ProtoParserBase.k_ncond in [str(operation) for operation in transition.operations]):
                # Skip duplicates of the same (state, access) pair.
                if (transition.start_state not in self.state_permission_map or
                        str(transition.guard) not in self.state_permission_map[transition.start_state]):
                    self.state_permission_map[transition.start_state] = str(transition.guard)
    def gen_state_access_perm(self, transition: Transition_v2) -> str:
        """Return Murphi code that resets, then re-grants, the access
        permissions valid in the transition's final state."""
        # First reset state permission
        access_perm_str = self.f_clear_perm + "(" + MurphiTokens.v_adr + ", " + MurphiTokens.v_mach + ");"
        # Check if no accesses are defined for state
        if transition.final_state not in self.state_permission_map:
            return access_perm_str + self.nl
        # For access_permission defined in the state set multiset entry
        for access_perm in self.state_permission_map[transition.final_state]:
            access_perm_str += "  " + self.f_set_perm + "(" + access_perm + ", " \
                               + MurphiTokens.v_adr + ", " + MurphiTokens.v_mach + ");"
        # If litmus testing enabled call serve access function
        # Check if manual access is defined, if yes then don't replicate access, only single access per transition
        # allowed
        if (self.config.litmus_testing and
                not [op for op in transition.operations
                     if str(op) == ProtoParserBase.k_access
                     and str(op.getChildren()[0]) not in self.arch.event_network.event_issue]):
            access_perm_str += self.gen_serve_cpu_func()
        # If access permission tracking is enabled or litmus testing
        if (self.config.enable_read_write_execution and not self.config.litmus_testing
                and str(transition.guard) == BaseAccess.k_store
                and str(transition.guard) in self.state_permission_map[transition.final_state]):
            access_perm_str += self.gen_serve_access_func()
        return access_perm_str + self.nl
    def gen_tmp_access(self, access: BaseAccess.Access):
        """Grant a temporary load/store permission mid-transition; it is
        cleared again by the final-state reset in gen_state_access_perm."""
        Debug.perror("Access to be executed is not a base access (load/store): " + str(access),
                     str(access) in BaseAccess.Access_str_list)
        # Set the defined access permission and serve CPU if necessary
        access_perm_str = self.f_set_perm + "(" + str(access) + ", " \
                          + MurphiTokens.v_adr + ", " + MurphiTokens.v_mach + ");"
        # If litmus testing enabled call serve access function
        if self.config.litmus_testing:
            access_perm_str += self.gen_serve_cpu_func()
        # At the end of a transition the access clear function is called in self.gen_state_access_perm so any temporary
        # access permissions will be cleared
        return access_perm_str
    def gen_remote_event_serve(self, remote_event: str):
        """Instantiate the remote-event-serve template for a known event."""
        Debug.perror("Expected event, but passed object has different type: " + str(remote_event),
                     str(remote_event) in self.arch.event_network.event_issue)
        return self._stringReplKeys(self._openTemplate(MurphiTemplates.f_remote_event_serve_func),
                                    [
                                        str(remote_event),
                                        str(self.arch)
                                    ])
    def gen_serve_cpu_func(self):
        """Murphi call that serves a pending CPU access on the data variable."""
        return (self.nl + self.f_exe_cpu_access + "(" + MurphiTokens.v_cbe + "." + self.get_data_variable() + ", " +
                MurphiTokens.v_adr + ", " + MurphiTokens.v_mach + ");")
    def gen_serve_access_func(self):
        """Murphi call that performs a store on the data variable."""
        return (self.nl + self.f_exe_store + "(" + MurphiTokens.v_cbe + "." + self.get_data_variable() + ", " +
                MurphiTokens.v_adr + ");")
    def get_data_variable(self) -> str:
        """Return the machine's data-typed variable name; errors if none
        exists and warns (picking the first) if several do."""
        data_var_list = []
        # Identify data variable
        for variable in self.arch.machine.variables:
            if str(self.arch.machine.variables[variable]) == ProtoParserBase.t_data:
                data_var_list.append(variable)
        Debug.perror("No data variable detected", len(data_var_list))
        Debug.pwarning("Multiple variables data variables detected: " + str(data_var_list), len(data_var_list) > 1)
        return data_var_list[0]
| StarcoderdataPython |
15268 | <reponame>abijith-kp/Emolytics<gh_stars>0
from server import db, auth, emolytics
from server.models import Tweet
from classifier import create_classifier
from tweepy import Stream
from tweepy.streaming import StreamListener
from flask.ext.rq import job
import json
import random
from multiprocessing import Process
from sqlalchemy.exc import IntegrityError
def get_document(status):
    """Parse a raw tweet JSON string into {"tweet": text, "pos": [lat, lon]}.

    Tweets without usable place/bounding-box data get the [0.0, 0.0]
    sentinel position, which downstream consumers filter out.
    """
    status = json.loads(status)
    lat = 0.0
    lon = 0.0
    try:
        # GeoJSON bounding boxes store [lon, lat]; take the box's first corner.
        lon, lat = status["place"]["bounding_box"]["coordinates"][0][0]
    except (KeyError, TypeError, IndexError, ValueError):
        # Narrowed from a bare `except:` so only missing/malformed geo data
        # falls back to the sentinel; real bugs now surface.
        pass
    return {"tweet": status["text"], "pos": [lat, lon]}
class StdOutListener(StreamListener):
    """Tweepy stream listener that persists geo-tagged tweets to the DB.

    NOTE: this file is Python 2 (`except IntegrityError, ie` syntax).
    """
    def on_data(self, status):
        # Runs inside the Flask app context so SQLAlchemy has a session.
        with emolytics.app_context():
            try:
                doc = get_document(status)
                loc = doc["pos"]
                # [0, 0] is the "no location" sentinel from get_document.
                if loc != [0, 0]:
                    t = Tweet(doc['tweet'], loc[0], loc[1])
                    db.session.add(t)
                    db.session.commit()
            except IntegrityError, ie:
                # Duplicate tweet — safe to ignore.
                pass
            except Exception, e:
                # NOTE(review): silently swallows all other errors, including
                # failed commits; consider logging and db.session.rollback().
                pass
        # Returning True keeps the stream connection open.
        return True
    def on_error(self, error_code):
        # NOTE(review): errors (e.g. 420 rate limiting) are ignored entirely.
        pass
@job('emolytics')
def start_streaming(track=[""], locations=[-180,-90,180,90], languages=["en"]):
    """RQ job: open a Twitter filter stream and reconnect forever on failure.

    NOTE(review): mutable default arguments are shared across calls — fine
    here only because they are never mutated.
    """
    print "Starting streaming"
    l = StdOutListener()
    stream = Stream(auth, l)
    while True:
        try:
            # disconnect() first so a retry never stacks connections.
            stream.disconnect()
            stream.filter(track=track, locations=locations, languages=languages)
        except Exception, e:
            # Swallow and retry immediately; no backoff.
            pass
@job('emolytics')
def classify():
    """RQ job: poll for unclassified tweets and tag them with a sentiment color.

    0 -> "green" (positive), 1 -> "red" (negative); `flag` marks processed rows.
    """
    print "Starting classification"
    with emolytics.app_context():
        CLF = create_classifier()
        c = {0: "green", 1: "red"}
        while True:
            # Busy-polls with no sleep — NOTE(review): consider a delay.
            result = Tweet.query.filter((Tweet.flag == False)).all()
            try:
                for t in result:
                    r = CLF.predict(t.tweet.encode('utf-8'))
                    t.color = c[int(r)]
                db.session.commit()
            except IntegrityError, ie:
                pass
                # NOTE(review): this rollback is unreachable — it sits after
                # `pass` inside the except block? It is actually outside: the
                # original indentation places it after the except body, so it
                # runs every loop iteration — confirm intent.
                db.session.rollback()
            except Exception, e:
                pass
'''
def start_thread(track):
global process
if process != None and process.is_alive():
process.terminate()
process = Process(target=start_streaming, kwargs={"track": track})
process.start()
print "Started the thread"
def start_classification():
global clf_process
if clf_process != None and clf_process.is_alive():
clf_process.terminate()
clf_process = Process(target=classify)
clf_process.start()
print "Started classification"
'''
| StarcoderdataPython |
3314190 | <gh_stars>0
#!/usr/bin/python3
import asterisk.agi as agi
def main():
    """Asterisk AGI entry point: echo back digits the caller presses.

    Reads AGI environment variables from stdin, then loops: play the
    'vm-extension' prompt, wait for a DTMF digit, and say it back.
    Hangs up on any non-digit input.
    """
    agi_inst = agi.AGI()
    agi_inst.verbose("Printing available channel values")
    agi_inst.verbose(str(agi_inst.env))
    callerId = agi_inst.env['agi_callerid']
    agi_inst.verbose("call from %s" % callerId)
    while True:
        agi_inst.stream_file('vm-extension')
        # -1 waits indefinitely for a digit.
        result = agi_inst.wait_for_digit(-1)
        agi_inst.verbose("got digit %s" % result)
        if result.isdigit():
            agi_inst.say_number(result)
        else:
            agi_inst.verbose("bye!")
            agi_inst.hangup()
            agi_inst.exit()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
190331 | <gh_stars>0
from .idol import *
from .music import *
from .tweet import *
from .calender import *
from .live import *
from .setlist import *
| StarcoderdataPython |
197114 | # @lc app=leetcode id=637 lang=python3
#
# [637] Average of Levels in Binary Tree
#
# https://leetcode.com/problems/average-of-levels-in-binary-tree/description/
#
# algorithms
# Easy (67.09%)
# Likes: 2409
# Dislikes: 215
# Total Accepted: 223.8K
# Total Submissions: 332.7K
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# Given the root of a binary tree, return the average value of the nodes on
# each level in the form of an array. Answers within 10^-5 of the actual answer
# will be accepted.
#
# Example 1:
#
#
# Input: root = [3,9,20,null,null,15,7]
# Output: [3.00000,14.50000,11.00000]
# Explanation: The average value of nodes on level 0 is 3, on level 1 is 14.5,
# and on level 2 is 11.
# Hence return [3, 14.5, 11].
#
#
# Example 2:
#
#
# Input: root = [3,9,20,15,7]
# Output: [3.00000,14.50000,11.00000]
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [1, 10^4].
# -2^31 <= Node.val <= 2^31 - 1
#
#
#
# @lc tags=tree
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 广度优先遍历二叉树。
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def averageOfLevels(self, root: Optional[TreeNode]) -> List[float]:
        """Return the mean node value of each tree level, top to bottom.

        Level-order traversal: keep the current level as a list, average it,
        then build the next level from all non-null children.
        """
        averages = []
        level = [root] if root else []
        while level:
            averages.append(sum(node.val for node in level) / len(level))
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return averages
# @lc main=start
# Manual check harness: builds trees from level-order lists via the project's
# `listToTreeNode` helper (from `imports`) and prints expected vs. actual.
if __name__ == '__main__':
    print('Example 1:')
    print('Input : ')
    print('root = [3,9,20,null,null,15,7]')
    print('Exception :')
    print('[3.00000,14.50000,11.00000]')
    print('Output :')
    print(
        str(Solution().averageOfLevels(
            listToTreeNode([3, 9, 20, None, None, 15, 7]))))
    print()
    print('Example 2:')
    print('Input : ')
    print('root = [3,9,20,15,7]')
    print('Exception :')
    print('[3.00000,14.50000,11.00000]')
    print('Output :')
    print(str(Solution().averageOfLevels(listToTreeNode([3, 9, 20, 15, 7]))))
    print()
    pass
1708943 | from flask_restx import Namespace, Resource, fields
from .utils.decorator import save_request, token_required
from .utils.db_manager import put_doi
from .user_ns import user_response
# REST namespace for administrating per-user DOI quotas.
api = Namespace('admin_doi',
                description='Update the number of available DOIs per user')
# Request schema: {"num": <int>}.
# NOTE(review): flask_restx expects lowercase `required=True`; the capitalized
# `Required=True` is likely ignored — confirm.
number_payload = api.model('number_payload', {
    'num': fields.Integer(Required=True)})
@api.route('/<string:user>/<string:number_of_dois>')
@api.response(201, 'Updated')
@api.response(401, 'Provide a valid Token')
@api.response(403, 'Not available DOIs')
@api.response(503, 'Error connection with the DB')
class GetPostDOI(Resource):
    # NOTE(review): `number_of_dois` arrives as a string from the URL;
    # put_doi is assumed to coerce it — confirm.
    @api.doc(security='apikey')
    @api.marshal_with(user_response, code=200, skip_none=True)
    @token_required
    @save_request
    def put(self, user, number_of_dois):
        """
        Update the number of available DOIs per user.
        """
        return put_doi(user, number_of_dois)
| StarcoderdataPython |
3364062 | <reponame>RafayAK/CodingPrep
"""
This problem was asked by Amazon.
Given an array of numbers, find the maximum sum of any contiguous subarray of the array.
For example, given the array [34, -50, 42, 14, -5, 86], the maximum sum would be 137,
since we would take elements 42, 14, -5, and 86.
Given the array [-5, -1, -8, -9], the maximum sum would be 0, since we would not take any elements.
Do this in O(N) time.
"""
def find_max_sum(arr):
    """Return the maximum sum of any contiguous subarray of `arr`.

    Taking no elements is allowed, so the result is never negative: for an
    all-negative (or empty) input the answer is 0.

    Replaces the original exponential two-sided recursion (which also
    recursed forever on an empty list) with Kadane's algorithm: O(n) time,
    O(1) space, identical results for every non-empty input.
    """
    best = 0
    running = 0
    for num in arr:
        # Extend the current subarray, or drop it once its sum goes negative —
        # a negative prefix can never help a later subarray.
        running = max(0, running + num)
        best = max(best, running)
    return best
def find_max_sum_optimized(arr):
    """Kadane's algorithm with a zero floor.

    Returns the maximum contiguous-subarray sum of `arr`; if the array is
    empty or every element is negative, the best move is to take no elements,
    so the result is 0.
    """
    if not arr or max(arr) < 0:
        # Nothing worth taking: the empty subarray (sum 0) wins.
        return 0
    best = running = arr[0]
    for value in arr[1:]:
        # Either extend the running subarray or restart it at `value`.
        extended = running + value
        running = value if extended < value else extended
        if running > best:
            best = running
    return best
if __name__ == '__main__':
    # Demo: both sample arrays should print 137 and 0 respectively.
    # print(find_max_sum([34, -50, 42, 14, -5, 86]))
    # print(find_max_sum([-5, -1, -8, -9]))
    print(find_max_sum_optimized([34, -50, 42, 14, -5, 86]))
    print(find_max_sum_optimized([-5, -1, -8, -9]))
121221 | <filename>dataAnalysis/GetDataForAnalysis.py
import lidar
import time
import pickle
# Capture 30 seconds of YdLidar G4 scans at several chunk sizes and pickle
# the timed samples for offline analysis.
# NOTE: `raw_input` makes this a Python 2 script despite the print() calls.
chunk_sizes = [3000,4000,6000]
storage = {}
port = raw_input("Enter port name which lidar is connected:") #windows
time.sleep(5)
for size in chunk_sizes:
    Obj = lidar.YdLidarG4(port,size)
    if(Obj.Connect()):
        print(Obj.GetDeviceInfo())
        gen = Obj.StartScanning()
        t = time.time()
        storage.update({size:[]})
        # Record (scan, elapsed-seconds) pairs for 30 seconds.
        while (time.time() - t) < 30:
            storage[size].append((next(gen),time.time()-t))
        Obj.StopScanning()
        Obj.Disconnect()
    else:
        print("Error connecting to device")
    # Settle time between chunk-size runs.
    time.sleep(5)
# NOTE(review): prefer `with open(...)` so the file closes on error too.
f=open("data.pkl",'wb')
pickle.dump(storage,f)
f.close()
| StarcoderdataPython |
4838653 | from typing import Optional
import requests
class PytweetException(Exception):
    """Root of the pytweet exception hierarchy.

    Every library error derives from this class, so callers can catch any
    pytweet failure with a single ``except PytweetException`` clause. The
    message (possibly ``None``) is stored on ``self.message`` and also passed
    to :class:`Exception` so ``str(exc)`` renders it.

    .. versionadded:: 1.2.0
    """
    def __init__(self, message: str = None):
        self.message = message
        super().__init__(self.message)
class APIException(PytweetException):
    """:class:`PytweetException`: Raised when the API reports an error even
    though the request itself returned HTTP status 200.

    .. versionadded:: 1.2.0
    """
    def __init__(
        self,
        response: Optional[requests.models.Response] = None,
        message: str = "No Error Message Provided",
    ):
        # Keep the raw response for callers that need headers/body details.
        self.res = response
        self.message = message
        super().__init__(f"API Return an Exception: {self.message}")
class HTTPException(PytweetException):
    """:class:`PytweetException`: Raised whenever a request returns an HTTP
    status code above 200. Stores the response, its decoded JSON body, and
    the caller-supplied message.

    .. versionadded:: 1.2.0
    """
    def __init__(
        self,
        response: Optional[requests.models.Response] = None,
        message: str = None,
    ):
        self.res = response
        self.json = response.json() if response else None
        self.message = message
        # Fix: `response` defaults to None, but the original f-string read
        # `self.res.status_code` unconditionally and raised AttributeError.
        code = self.res.status_code if self.res else None
        super().__init__(f"Request Return an Exception (status code: {code}): {self.message}")
    @property
    def status_code(self) -> Optional[int]:
        """HTTP status code of the failing response, or None when absent."""
        if not self.res:
            return None
        return self.res.status_code
class BadRequests(HTTPException):
    """:class:`HTTPException`: Raised when a request returns status code 400.

    .. versionadded:: 1.2.0
    """
    def __init__(
        self,
        response: Optional[requests.models.Response] = None,
        message: Optional[str] = None,
    ):
        # Fix: the original indexed `response.json().get("errors")[0]`
        # unconditionally, crashing when `response` is None (its default) or
        # when the payload carries no "errors" list.
        errors = (response.json().get("errors") or [{}]) if response else [{}]
        msg = errors[0].get("message") if not message else message
        detail = errors[0].get("detail")
        # NOTE(review): the "Not Found!" fallback looks copy-pasted from
        # NotFound; "Bad Request" is probably intended — confirm before changing.
        super().__init__(response, msg if msg else detail if detail else "Not Found!")
class Unauthorized(HTTPException):
    """:class:`HTTPException`: Raised when the credentials you passed are
    invalid and a request returns status code 401.

    NOTE(review): unlike its siblings, `response` has no default here, and a
    None response would crash on `response.json()` — confirm intent.

    .. versionadded:: 1.0.0
    """
    def __init__(self, response, message: str = None):
        msg = None
        detail = None
        # Twitter returns either an "errors" list or a flat "detail" field.
        if response.json().get("errors"):
            msg = response.json().get("errors")[0].get("message") if not message else message
            detail = response.json().get("errors")[0].get("detail")
        else:
            detail = response.json().get("detail")
        super().__init__(
            response,
            msg if msg else detail if detail else "Unauthorize to do that action!",
        )
class Forbidden(HTTPException):
    """:class:`HTTPException`: Raised when a request returns status code 403.

    .. versionadded:: 1.2.0
    """
    def __init__(
        self,
        response: Optional[requests.models.Response] = None,
        message: Optional[str] = None,
    ):
        msg = None
        detail = None
        # Twitter returns either an "errors" list or a flat "detail" field.
        # NOTE(review): `response=None` would still crash on response.json().
        if response.json().get("errors"):
            msg = response.json().get("errors")[0].get("message") if not message else message
            detail = response.json().get("errors")[0].get("detail")
        else:
            detail = response.json().get("detail")
        super().__init__(
            response,
            msg if msg else detail if detail != "Forbidden" else "Forbidden to do that action.",
        )
class NotFound(HTTPException):
    """:class:`HTTPException`: Raised when a request returns status code 404.

    NOTE(review): assumes the 404 payload always carries an "errors" list;
    `[0]` would raise TypeError otherwise (and `response=None` would crash).

    .. versionadded:: 1.2.0
    """
    def __init__(
        self,
        response: Optional[requests.models.Response] = None,
        message: Optional[str] = None,
    ):
        msg = response.json().get("errors")[0].get("message") if not message else message
        detail = response.json().get("errors")[0].get("detail")
        super().__init__(response, msg if msg else detail if detail else "Not Found!")
class TooManyRequests(HTTPException):
    """:class:`HTTPException`: Raised when the rate limit is exceeded and a
    request returns status code 429. Carries no extra behavior of its own.

    .. versionadded:: 1.1.0
    """
    pass
class NotFoundError(APIException):
    """:class:`APIException`: Usually returned when a specific Tweet or User
    does not exist.

    NOTE(review): body is identical to :class:`NotFound` but rooted at
    APIException instead of HTTPException — confirm both are really needed.

    .. versionadded:: 1.0.0
    """
    def __init__(
        self,
        response: Optional[requests.models.Response] = None,
        message: Optional[str] = None,
    ):
        msg = response.json().get("errors")[0].get("message") if not message else message
        detail = response.json().get("errors")[0].get("detail")
        super().__init__(response, msg if msg else detail if detail else "Not Found!")
| StarcoderdataPython |
3302403 | from train import train_model
from data_loader import load
from examples.NIPS.MNIST.mnist import MNIST_Net, neural_predicate
import torch
from network import Network
from model import Model
from optimizer import Optimizer
# DeepProbLog multi-digit MNIST training script: wires the MNIST CNN into a
# ProbLog program and trains for one epoch, evaluating on 100 test queries.
train_queries = load('train.txt')
test_queries = load('test.txt')[:100]
def test(model):
    """Evaluation callback passed to train_model: prints and returns accuracy."""
    acc = model.accuracy(test_queries, test=True)
    print('Accuracy: ', acc)
    return [('accuracy', acc)]
with open('multi_digit.pl') as f:
    problog_string = f.read()
network = MNIST_Net()
net = Network(network, 'mnist_net', neural_predicate)
net.optimizer = torch.optim.Adam(network.parameters(), lr=0.001)
model = Model(problog_string, [net], caching=False)
optimizer = Optimizer(model, 2)
# Baseline accuracy before any training.
test(model)
train_model(model, train_queries, 1, optimizer, test_iter=1000, test=test, snapshot_iter=10000)
| StarcoderdataPython |
1725475 | import shutil
import os
import json
import logging
import sys
from docker import APIClient
from fairing.builders.dockerfile import DockerFile
from fairing.builders.container_image_builder import ContainerImageBuilder
from fairing.utils import get_image_full
logger = logging.getLogger('fairing')
class DockerBuilder(ContainerImageBuilder):
    """Builds (and optionally pushes) a container image with the local
    Docker daemon via the low-level docker-py APIClient."""
    def __init__(self):
        # Lazily created on first build/publish call.
        self.docker_client = None
        self.dockerfile = DockerFile()
    def execute(self, repository, image_name, image_tag, base_image, dockerfile, publish, env):
        """Write the Dockerfile, build the image, and push it if requested."""
        full_image_name = get_image_full(repository, image_name, image_tag)
        self.dockerfile.write(env, dockerfile=dockerfile, base_image=base_image)
        self.build(full_image_name)
        if publish:
            self.publish(full_image_name)
    def build(self, img, path='.'):
        """Build `img` from the build context at `path`, streaming daemon output."""
        # NOTE(review): logger.warn is deprecated in favor of logger.warning.
        logger.warn('Building docker image {}...'.format(img))
        if self.docker_client is None:
            self.docker_client = APIClient(version='auto')
        bld = self.docker_client.build(
            path=path,
            tag=img,
            encoding='utf-8'
        )
        for line in bld:
            self._process_stream(line)
    def publish(self, img):
        """Push `img` to its registry, streaming daemon output."""
        logger.warn('Publishing image {}...'.format(img))
        if self.docker_client is None:
            self.docker_client = APIClient(version='auto')
        # TODO: do we need to set tag?
        for line in self.docker_client.push(img, stream=True):
            self._process_stream(line)
    def _process_stream(self, line):
        """Decode one chunk of the daemon's JSON-lines stream and log it.

        Raises if the daemon reports an 'error' entry (failed build/push).
        """
        raw = line.decode('utf-8').strip()
        lns = raw.split('\n')
        for ln in lns:
            # try to decode json
            try:
                ljson = json.loads(ln)
                if ljson.get('error'):
                    msg = str(ljson.get('error', ljson))
                    logger.error('Build failed: ' + msg)
                    raise Exception('Image build failed: ' + msg)
                else:
                    if ljson.get('stream'):
                        msg = 'Build output: {}'.format(
                            ljson['stream'].strip())
                    elif ljson.get('status'):
                        msg = 'Push output: {} {}'.format(
                            ljson['status'],
                            ljson.get('progress')
                        )
                    elif ljson.get('aux'):
                        msg = 'Push finished: {}'.format(ljson.get('aux'))
                    else:
                        msg = str(ljson)
                    logger.info(msg)
            except json.JSONDecodeError:
                # Non-JSON noise in the stream is logged and skipped.
                logger.warning('JSON decode error: {}'.format(ln))
| StarcoderdataPython |
1649622 | <gh_stars>0
import os
import discord
from dotenv import load_dotenv
from discord.ext import commands
import information
load_dotenv(dotenv_path='.env')
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = commands.Bot(command_prefix='!')
@client.event
async def on_ready():
for guild in client.guilds:
if guild.name == GUILD:
break
print(
f'{client.user} is connected to the following guild:\n'
f'{guild.name}(id: {guild.id})'
)
@client.command(name='find', help='Gives an overview of the course requested')
async def find(ctx, course):
response = information.course_info(course)
await ctx.send(response)
@client.command(name='prereq', help='Gives prerequisites of course requested')
async def prereq(ctx, course):
response = information.course_prereq(course)
await ctx.send(response)
@client.command(name='coreq', help='Gives corequisites of course requested')
async def prereq(ctx, course):
response = information.course_coreq(course)
await ctx.send(response)
@client.command(name='name', help='Gives name of course requested')
async def name(ctx, course):
response = information.course_name(course)
await ctx.send(response)
@client.command(name='description', help='Gives description of course requested')
async def description(ctx, course):
    """Send the description of *course* back to the invoking channel."""
    await ctx.send(information.course_descrip(course))
@client.command(name='breadth', help='Gives breadth requirements of course requested')
async def breadth(ctx, course):
    """Send the breadth requirements of *course* back to the invoking channel."""
    await ctx.send(information.course_breadth(course))
@client.command(name='exclusions', help='Gives exclusions of course requested')
async def exclusion(ctx, course):
    """Send the exclusions of *course* back to the invoking channel."""
    await ctx.send(information.course_exclu(course))
# Start the bot's event loop; blocks until the process is stopped.
client.run(TOKEN)
| StarcoderdataPython |
3314151 | """
Hash = (s[1]*a**(n-1) + s[2]*a**(n-2)...s[n-1]*a+s[n]) mod m
"""
def polynomial_hash(base, module, string):
    """Return (s[0]*base**(n-1) + s[1]*base**(n-2) + ... + s[n-1]) mod module.

    Uses Horner's rule and reduces modulo *module* at every step, so the
    intermediate values stay small instead of growing to base**len(string).
    """
    my_hash = 0
    for ch in string:
        my_hash = (my_hash * base + ord(ch)) % module
    return my_hash
if __name__ == '__main__':
    # Input format: line 1 = base a, line 2 = modulus m, line 3 = the string s.
    with open('input.txt') as file:
        a = int(file.readline())
        m = int(file.readline())
        # BUG FIX: readline() keeps the trailing newline, which would otherwise
        # be hashed as part of the string.
        s = file.readline().rstrip('\n')
    print(polynomial_hash(a, m, s))
| StarcoderdataPython |
159202 | import numpy as np
import cv2
from PIL import Image
# Haar-cascade face detector; paths are relative to the working directory.
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_default.xml')
image = cv2.imread('jeantest.JPG')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# scaleFactor=1.1, minNeighbors=5: OpenCV's usual defaults for frontal faces.
faces = face_cascade.detectMultiScale(gray, 1.1, 5)

for i, (x, y, w, h) in enumerate(faces):
    print(x, y, w, h)
    roi_color = image[y:y+h, x:x+w]
    # BUG FIX: every face used to be written to the same 'nuevaimg.png', so
    # only the last detection survived — index the filename per face.
    cv2.imwrite('nuevaimg_{}.png'.format(i), roi_color)

# The original called cv2.waitKey and discarded the comparison result; with no
# window ever shown that was a no-op, so it has been removed.
cv2.destroyAllWindows()
1656772 | <reponame>Gwandalff/SelfAdaptableWASM
#
# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# mx build-suite definition for GraalWasm (the WebAssembly engine in GraalVM).
# Consumed by the `mx` build tool; describes projects, their dependencies and
# the distributions (jars/resources) built from them.
suite = {

    # --- Suite metadata ---------------------------------------------------
    "mxversion" : "5.249.5",
    "name" : "wasm",
    "groupId" : "org.graalvm.wasm",

    "version" : "20.1.0",
    "versionConflictResolution" : "latest",

    "url" : "http://graalvm.org/",

    "developer" : {
        "name" : "Truffle and <NAME>",
        "email" : "<EMAIL>",
        "organization" : "Oracle Corporation",
        "organizationUrl" : "http://www.graalvm.org/",
    },

    "scm" : {
        "url" : "https://github.com/oracle/graal",
        "read" : "https://github.com/oracle/graal.git",
        "write" : "<EMAIL>:oracle/graal.git",
    },

    "defaultLicense" : "UPL",

    # --- Imported suites (Truffle API pulled from a snapshot repo) --------
    "imports" : {
        "suites" : [
            {
                "name" : "truffle",
                "subdir" : True,
                "urls": [
                    {"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
                ],
            },
        ],
    },

    # --- Source projects --------------------------------------------------
    "projects" : {
        # Core interpreter.
        "org.graalvm.wasm" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [
                "truffle:TRUFFLE_API",
                "sdk:GRAAL_SDK",
            ],
            "checkstyleVersion" : "8.8",
            "javaCompliance" : "1.8+",
            "annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
            "workingSets" : "WebAssembly",
            "license" : "UPL",
        },

        # Command-line launcher.
        "org.graalvm.wasm.launcher" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [
                "sdk:LAUNCHER_COMMON",
            ],
            "checkstyle" : "org.graalvm.wasm",
            "javaCompliance" : "1.8+",
            "license" : "UPL",
        },

        # Shared test/bench utilities.
        "org.graalvm.wasm.utils" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [
                "org.graalvm.wasm",
                "truffle:TRUFFLE_API",
            ],
            "checkstyle" : "org.graalvm.wasm",
            "javaCompliance" : "1.8+",
            "annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
            "workingSets" : "WebAssembly",
            "license" : "BSD-new",
            "testProject" : True,
        },

        "org.graalvm.wasm.test" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [
                "org.graalvm.wasm",
                "org.graalvm.wasm.utils",
                "truffle:TRUFFLE_TCK",
                "mx:JUNIT",
            ],
            "checkstyle" : "org.graalvm.wasm",
            "javaCompliance" : "1.8+",
            "annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
            "workingSets" : "WebAssembly",
            "license" : "BSD-new",
            "testProject" : True,
        },

        # Test cases compiled from source files (custom mx project class);
        # not built by default.
        "org.graalvm.wasm.testcases" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [],
            "class" : "GraalWasmSourceFileProject",
            "checkstyle" : "org.graalvm.wasm",
            "workingSets" : "WebAssembly",
            "testProject" : True,
            "defaultBuild" : False,
        },

        "org.graalvm.wasm.testcases.test" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [
                "org.graalvm.wasm.test",
                "mx:JUNIT",
            ],
            "checkstyle" : "org.graalvm.wasm",
            "javaCompliance" : "1.8+",
            "workingSets" : "WebAssembly",
            "testProject" : True,
            "defaultBuild" : False,
        },

        # Benchmark sources (JMH-based); not built by default.
        "org.graalvm.wasm.benchcases" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [],
            "class" : "GraalWasmSourceFileProject",
            "checkstyle" : "org.graalvm.wasm",
            "includeset" : "bench",
            "workingSets" : "WebAssembly",
            "testProject" : True,
            "defaultBuild" : False,
        },

        "org.graalvm.wasm.benchcases.bench" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [
                "org.graalvm.wasm.benchmark",
                "mx:JMH_1_21",
            ],
            "checkstyle" : "org.graalvm.wasm",
            "javaCompliance" : "1.8",
            "annotationProcessors" : ["mx:JMH_1_21"],
            "workingSets" : "WebAssembly",
            "testProject" : True,
            "defaultBuild" : False,
        },

        "org.graalvm.wasm.benchmark" : {
            "subDir" : "src",
            "sourceDirs" : ["src"],
            "dependencies" : [
                "org.graalvm.wasm",
                "org.graalvm.wasm.utils",
                "mx:JMH_1_21",
            ],
            "javaCompliance" : "1.8+",
            "annotationProcessors" : ["mx:JMH_1_21"],
            "testProject" : True,
        },
    },

    # --- IDE resource mappings -------------------------------------------
    "externalProjects": {
        "resource.org.graalvm.wasm.testcases": {
            "type": "web",
            "path": "src/org.graalvm.wasm.testcases",
            "source": [
                "src",
            ],
        },

        "resource.org.graalvm.wasm.benchcases": {
            "type": "web",
            "path": "src/org.graalvm.wasm.benchcases",
            "source": [
                "src",
            ],
        },
    },

    # --- Distributions (build outputs) ------------------------------------
    "distributions" : {
        "WASM" : {
            "subDir" : "src",
            "dependencies" : [
                "org.graalvm.wasm",
            ],
            "distDependencies" : [
                "truffle:TRUFFLE_API",
                "sdk:GRAAL_SDK",
            ],
            "description" : "GraalWasm, an engine for the WebAssembly language in GraalVM.",
            "allowsJavadocWarnings": True,
            "license" : "UPL",
            "maven" : False,
        },

        "WASM_LAUNCHER" : {
            "subDir" : "src",
            "dependencies" : [
                "org.graalvm.wasm.launcher",
            ],
            "distDependencies" : [
                "sdk:LAUNCHER_COMMON",
            ],
            "license" : "UPL",
            "maven" : False,
        },

        "WASM_TESTS" : {
            "dependencies" : [
                "org.graalvm.wasm.test",
                "org.graalvm.wasm.utils",
            ],
            "exclude" : [
                "mx:JUNIT",
            ],
            "distDependencies" : [
                "truffle:TRUFFLE_API",
                "truffle:TRUFFLE_TCK",
                "WASM",
            ],
            "maven" : False,
        },

        "WASM_TESTCASES" : {
            "description" : "Tests compiled from the source code.",
            "dependencies" : [
                "org.graalvm.wasm.testcases",
                "org.graalvm.wasm.testcases.test",
            ],
            "exclude" : [
                "mx:JUNIT",
            ],
            "distDependencies" : [
                "WASM_TESTS",
            ],
            "defaultBuild" : False,
            "maven" : False,
            "testDistribution" : True,
        },

        "WASM_BENCHMARKS" : {
            "subDir" : "src",
            "dependencies" : [
                "org.graalvm.wasm.benchmark",
                "mx:JMH_1_21",
            ],
            "distDependencies" : [
                "truffle:TRUFFLE_API",
                "truffle:TRUFFLE_TCK",
                "WASM",
                "WASM_TESTS",
            ],
            "maven" : False,
            "testDistribution" : True,
        },

        "WASM_BENCHMARKCASES" : {
            "description" : "Benchmarks compiled from the source code.",
            "dependencies" : [
                "org.graalvm.wasm.benchcases",
                "org.graalvm.wasm.benchcases.bench",
                "mx:JMH_1_21",
            ],
            "distDependencies" : [
                "truffle:TRUFFLE_API",
                "truffle:TRUFFLE_TCK",
                "WASM",
                "WASM_TESTS",
            ],
            "overlaps" : [
                "WASM_BENCHMARKS",
            ],
            "defaultBuild" : False,
            "platformDependent" : True,
            "maven" : False,
            "testDistribution" : True,
        },

        # Native-image properties and license files bundled into GraalVM.
        "WASM_GRAALVM_SUPPORT": {
            "native": True,
            "platformDependent": False,
            "description": "Wasm support distribution for the GraalVM license files",
            "layout": {
                "./": "file:mx.wasm/native-image.properties",
                "LICENSE_WASM.txt": "file:LICENSE",
            },
            "maven": False,
        },
    }
}
| StarcoderdataPython |
1764986 | /*
wholesum = Sum((2**(m - 1) - 1)*(n + 1 - m*a)*(n + 1 - m*b), (m, 2, s)).doit()*2
vertical and horizontal : wholesum(1, 0, n, n, m)
cross : wholesum(1, 1, n, n, m)
other (gradient (a, b)) : 2*wholesum(a, b, n, floor(n/a), m)
*/
\\ Closed-form evaluation of the partial sum in the header comment:
\\   2 * Sum_{m=2..s} (2^(m-1) - 1) * (n + 1 - m*a) * (n + 1 - m*b)
\\ All powers of 2 are carried as Mod(2, p) intmods so intermediates stay
\\ reduced modulo p.
wholesum(a, b, n, s, p) = -2*Mod(2, p)^s*a*n*(s - 1) - 2*Mod(2, p)^s*a*(s - 1) - 2*Mod(2, p)^s*b*n*(s - 1) - 2*Mod(2, p)^s*b*(s - 1) + 2*Mod(2, p)^s - a*b*(2*s^3 + 3*s^2 + s - 6)/3 + 2*a*b*(Mod(2, p)^s*s^2 + 3*Mod(2, p)^s - Mod(2, p)^(s + 1)*s - 4) + a*n*(s^2 + s - 2) + a*(s^2 + s - 2) + b*n*(s^2 + s - 2) + b*(s^2 + s - 2) + 2*n^2*(Mod(2, p)^s - 2) + 2*n^2*(-s + 1) + 4*n*(Mod(2, p)^s - 2) - 4*n*(s - 1) - 2*s - 2

\\ Grid size and modulus for the final answer.
n = 111
p = 10^8
complement = 0
gaitou = 0   \\ NOTE(review): never used below — confirm it can be removed.

\\ Accumulate the contribution of every primitive gradient (a, b) with
\\ gcd(a, b) = 1, a > b, counting each direction twice.
{
for(a=1, n,
    for(b=1, a, if (gcd(a, b)==1,
        if (a == b || a < b || a*2 > n, next;);
        t = floor(n/a);
        complement += 2*wholesum(a, b, n, t, p);)));
}
/* vertical and horizontal and cross */
complement += wholesum(1, 0, n, n, p) + wholesum(1, 1, n, n, p)
/* empty set and sets with size 1 */
complement += 1 + (n+1)^2
/* final result */
result = Mod(2, p)^((n+1)^2) - complement
print(result)
| StarcoderdataPython |
4811114 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-06-15 17:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles two divergent leaf migrations of the 'data'
    # app (0050_auto_20180614_1917 and 0050_auto_20180612_1415) into a single
    # graph head.
    dependencies = [
        ('data', '0050_auto_20180614_1917'),
        ('data', '0050_auto_20180612_1415'),
    ]

    # No schema operations are needed; the merge itself resolves the conflict.
    operations = [
    ]
| StarcoderdataPython |
47036 | <filename>backend_getData/get_poptweets_topic.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Downloads all tweets from a given user.
Uses twitter.Api.GetUserTimeline to retrieve the last 3,200 tweets from a user.
Twitter doesn't allow retrieving more tweets than this through the API, so we get
as many as possible.
t.py should contain the imported variables.
"""
from __future__ import print_function
import json
import sys
sys.path.append('./lib')
import twitter
from t import ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET
# Get Parent Node
# Function: input the status_id of a tweet can put all the tweet between this tweet and the original tweet into the file
# retweet_timeline.json and it is able to return all the ids of the tweets we have travesed.
# Return: All the status_id as int list including the input status_id
def get_parent_status(api=None, status_id=None):
    """Walk the quote-tweet chain from *status_id* back to the original tweet.

    Every tweet on the chain (including the starting one) is appended as one
    JSON object per line to ./output/retweet_timeline.json.

    Returns:
        list[int]: ids of all traversed tweets, starting with *status_id*.
    """
    status_id_list = []
    tweet = api.GetStatus(status_id)
    # BUG FIX: use a context manager so the file is closed even if a later
    # API call raises (the original leaked the handle on error).
    with open('./output/retweet_timeline.json', "w+") as f:
        f.write(json.dumps(tweet._json))
        f.write('\n')
        status_id_list.append(int(status_id))
        # 'quoted_status_id_str' points at the tweet this one quoted.
        while 'quoted_status_id_str' in tweet._json:
            tweet = api.GetStatus(tweet._json['quoted_status_id_str'])
            f.write(json.dumps(tweet._json))
            f.write('\n')
            status_id_list.append(int(tweet._json['id']))
    return status_id_list
# Get branch information
# input the id array of the tweet
# Return all the retweet of every tweet in the input in the form of id dictionary.
def get_all_branch_status(api=None, status_id_list=None):
    """Map each status id to the ids of (up to 100 of) its retweets.

    BUG FIX: the original also called api.GetStatus(status_id) per id and
    discarded the result — a dead call that only burned rate limit.

    Returns:
        dict[str, list[int]]: status id (as string) -> retweet ids.
    """
    dict_id_relationship = {}
    for status_id in status_id_list:
        retweets = api.GetRetweets(str(status_id), count=100, trim_user=False)
        dict_id_relationship[str(status_id)] = [r._json['id'] for r in retweets]
    print(dict_id_relationship)
    return dict_id_relationship
# Working in progress -------------------
def _tweet_summary(status):
    """Project the fields we keep from a python-twitter status object."""
    data = status._json
    return {
        'tweet_id': data['id'],
        'created_at': data['created_at'],
        'retweet_count': data['retweet_count'],
        'favorite_count': data['favorite_count'],
        'user_profile_image_https': data['user']['profile_image_url_https'],
        'user_followers_count': data['user']['followers_count'],
        'user_name': data['user']['name'],
    }


def get_jsonfile(api=None, status_id_list=None):
    """Dump every tweet in *status_id_list*, with its retweets, to a JSON file.

    Writes a list of summaries (each with a 'retweet_list' of retweet
    summaries) to ./output/all_retweet.json.

    BUG FIX: the retweet entries were previously populated from the *parent*
    tweet's fields instead of each retweet's own fields.
    """
    res_json_objs = []
    for status_id in status_id_list:
        tweet = api.GetStatus(status_id)
        cur_json = _tweet_summary(tweet)
        retweets = api.GetRetweets(str(status_id), count=100, trim_user=False)
        cur_json['retweet_list'] = [_tweet_summary(r) for r in retweets]
        res_json_objs.append(cur_json)
    print("exe")
    with open('./output/all_retweet.json', "w+") as f:
        f.write(json.dumps(res_json_objs))
def get_tweets(api=None, screen_name=None):
    """Download as much of *screen_name*'s timeline as the API exposes (~3200 tweets).

    Pages backwards with max_id until a page is empty or makes no progress.

    Returns:
        list: python-twitter Status objects.
    """
    timeline = api.GetUserTimeline(screen_name=screen_name, count=200)
    earliest_tweet = min(timeline, key=lambda x: x.id).id
    print("getting tweets before:", earliest_tweet)

    while True:
        tweets = api.GetUserTimeline(
            screen_name=screen_name, max_id=earliest_tweet, count=200
        )
        # BUG FIX: check for an empty page *before* calling min(), which
        # raised ValueError on an empty sequence in the original ordering.
        if not tweets:
            break
        new_earliest = min(tweets, key=lambda x: x.id).id
        if new_earliest == earliest_tweet:
            break
        earliest_tweet = new_earliest
        print("getting tweets before:", earliest_tweet)
        timeline += tweets

    return timeline
# Get the most popular ten tweet related to one topic (str)
def get_pop(api=None, topic=None):
    """Return up to the ten most popular tweets matching *topic*.

    BUG FIX: the original assigned the search result to a local and returned
    None, so the function had no observable effect.
    """
    return api.GetSearch(term=topic, count=10, result_type='popular')
if __name__ == "__main__" and __package__ is None:
    # Credentials come from t.py; sleep_on_rate_limit makes the client block
    # and wait instead of raising when Twitter's rate limits are hit.
    api = twitter.Api(
        CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET, sleep_on_rate_limit=True
    )
    topic = sys.argv[1]
    print(topic)
    # pop = api.GetSearch(term=str(topic), count=int(10), result_type='popular')
    # with open('./output/pop.json', 'w+') as f:
    #     for tweet in pop:
    #         f.write(json.dumps(tweet._json))
    #         f.write('\n')

    # NOTE(review): sys.argv[1] is read as a "topic" above but passed below as
    # a status id — confirm which CLI argument is actually intended.
    # Change the id here !!!!!!!!!!
    status_id_list = get_parent_status(api, str(sys.argv[1]))
    get_all_branch_status(api=api, status_id_list=status_id_list)
    get_jsonfile(api=api, status_id_list=status_id_list)
# Code not used but for reference.
# # print (pop[0]._json['text'])
# id_str = pop[3]._json['id_str']
# print (pop[3]._json['text'])
# retweets = api.GetRetweets(id_str, count=100, trim_user=False)
# retweeters = api.GetRetweeters(id_str, cursor=True, count=1000, stringify_ids=False)
# # print (retweeters)
# with open('./retweet.json', 'w+') as f:
# for tweet in retweets:
# f.write(json.dumps(tweet._json))
# f.write('\n')
# print (len(retweeters))
# # for tweet in retweets:
# # print (tweet._json['retweet_count'])
| StarcoderdataPython |
1689470 | <gh_stars>1-10
import random
import string
import datetime
from math import log
def generate_timeseries(length, bounds=(0,1852255420), _type='timestamp',period=24*3600, swing=0, separator=','):
    """Return up to *length* evenly spaced time points.

    Args:
        length: maximum number of points to produce.
        bounds: (start, stop) range of UNIX timestamps to sample from.
        _type: 'timestamp' (ints), 'datetime' or 'date' (stringified,
            using the local timezone via datetime.fromtimestamp).
        period: spacing between consecutive points, in seconds.
        swing, separator: accepted for interface compatibility; unused here.

    Raises:
        ValueError: for an unrecognised *_type* (previously returned None
            silently).
    """
    # range objects support slicing, so no manual append-and-count loop is
    # needed; this produces exactly the same prefix as the original.
    column = list(range(*bounds, period)[:length])
    if _type == 'timestamp':
        return column
    if _type == 'datetime':
        return [str(datetime.datetime.fromtimestamp(ts)) for ts in column]
    if _type == 'date':
        return [str(datetime.datetime.fromtimestamp(ts).date()) for ts in column]
    raise ValueError('unknown _type: {!r}'.format(_type))
def rand_str(length=None):
    """Return a random unicode string of code points in U+0000–U+D7FE.

    Args:
        length: number of characters; when None a fresh random length in
            [4, 120) is drawn per call.

    BUG FIX: the old default ``length=random.randrange(4, 120)`` was
    evaluated once at import time, freezing a single length for every
    default-argument call.
    """
    if length is None:
        length = random.randrange(4, 120)
    # The upper bound 0xD7FF keeps the characters below the surrogate range.
    random_unicodes = [chr(random.randrange(0xD7FF)) for _ in range(length)]
    return u"".join(random_unicodes)
def rand_ascii_str(length=None, give_nulls=True, only_letters=False):
    """Return a random ASCII-letter string.

    When *length* is None a random length in [1, 120) is drawn. If
    *give_nulls* is set, any length divisible by 4 yields '' instead,
    sprinkling empty values into generated data.
    """
    # The non-letter pool is currently empty, so both branches reduce to
    # the ASCII letters.
    alphabet = string.ascii_letters if only_letters else '' + string.ascii_letters
    if length is None:
        length = random.randrange(1, 120)
    if length % 4 == 0 and give_nulls:
        return ''
    return ''.join(random.choice(alphabet) for _ in range(length))
def rand_int():
    """Return a uniform random integer in [-2**18, 2**18)."""
    return random.randint(-(1 << 18), (1 << 18) - 1)
def rand_numerical_cat():
    """Return a small random integer category in [-8, 8)."""
    return random.randint(-(1 << 3), (1 << 3) - 1)
def rand_float():
    """Return a random float with magnitude below 2**18."""
    magnitude = random.randrange(-(1 << 18), 1 << 18)
    return magnitude * random.random()
def generate_value_cols(types, length, separator=',', ts_period=48*3600):
    """Build one column per entry in *types*: a random header plus *length* values.

    Recognised type names dispatch to the random generators; anything else is
    forwarded to generate_timeseries as a timeseries type name.
    """
    generators = {
        'str': rand_str,
        'ascii': rand_ascii_str,
        'int': rand_int,
        'nr_category': rand_numerical_cat,
        'float': rand_float,
    }
    columns = []
    for t in types:
        # Header: short random ASCII identifier.
        column = [rand_ascii_str(random.randrange(8, 10), give_nulls=False, only_letters=True)]
        gen_fun = generators.get(t)
        if gen_fun is None:
            column.extend(generate_timeseries(length=length, _type=t, period=ts_period, separator=separator))
            columns.append(column)
            continue
        for _ in range(length):
            val = gen_fun()
            # @TODO Maybe escape the separator rather than replace
            if isinstance(val, str):
                val = val.replace(separator, '_').replace('\n', '_').replace('\r', '_')
            column.append(val)
        columns.append(column)
    return columns
# Ignore all but flaots and ints
# Adds up the log of all floats and ints
def generate_labels_1(columns, separator=','):
    """Build a label column: random header + per-row sum of log|cell|.

    Non-numeric cells and zeros (whose log is undefined) are skipped.
    """
    labels = []
    # Random ASCII header, mirroring the data columns' header row.
    labels.append(rand_ascii_str(random.randrange(14, 28), give_nulls=False, only_letters=True))
    for n in range(1, len(columns[-1])):
        value = 0
        for column in columns:
            try:
                value += log(abs(column[n]))
            except (TypeError, ValueError):
                # BUG FIX: previously a bare `except:` swallowed every
                # exception; only non-numeric cells (TypeError) and log(0)
                # (ValueError) are expected here.
                pass
        labels.append(value)
    return labels
def generate_labels_2(columns, separator=','):
    """Build a label column: alternating product/quotient across each row.

    String cells contribute their length; even-indexed columns multiply,
    odd-indexed columns divide. A division by zero resets the accumulator
    to 1 (preserving the original heuristic).
    """
    labels = []
    labels.append(rand_ascii_str(random.randrange(5, 11), give_nulls=False, only_letters=True))
    for n in range(1, len(columns[-1])):
        value = 1
        for i, column in enumerate(columns):
            cell = column[n]
            operand = len(cell) if isinstance(cell, str) else cell
            if i % 2 == 0:
                value = value * operand
            else:
                try:
                    value = value / operand
                except ZeroDivisionError:
                    # BUG FIX: previously a bare `except:`; only a zero
                    # operand is expected to fail here.
                    value = 1
        labels.append(value)
    return labels
def generate_labels_3(columns, separator=','):
    """Build a label column by copying one randomly chosen input column
    (its header is replaced with a fresh random one)."""
    header = rand_ascii_str(random.randrange(14, 18), give_nulls=False, only_letters=True)
    chosen = random.choice(columns)
    return [header] + chosen[1:]
def columns_to_file(columns, filename, separator=',', headers=None):
    """Write *columns* (parallel lists) as separator-delimited rows to *filename*.

    The file is truncated first; each row ends with CRLF. Row i is built from
    element i of every column.

    BUG FIXES: the original opened the file twice (once in 'w' just to
    truncate, then in 'a'), and built each row with a trailing separator that
    it stripped with rstrip(separator) — which also ate any separator
    characters legitimately ending the last value. str.join avoids both.
    """
    with open(filename, 'w', encoding='utf-8') as fp:
        if headers is not None:
            fp.write(separator.join(headers) + '\r\n')
        for i in range(len(columns[-1])):
            fp.write(separator.join(str(col[i]) for col in columns) + '\r\n')
| StarcoderdataPython |
3258942 | <reponame>xanthous-tech/rasa-chinese-paddlenlp<filename>rasa_paddlenlp/nlu/paddlenlp_registry.py
from paddlenlp.transformers import (
BertModel,
BertTokenizer,
XLNetModel,
XLNetTokenizer,
RobertaModel,
RobertaTokenizer,
)
# these seems to be useful still, keeping
from rasa.nlu.utils.hugging_face.transformers_pre_post_processors import (
bert_tokens_pre_processor,
gpt_tokens_pre_processor,
xlnet_tokens_pre_processor,
roberta_tokens_pre_processor,
bert_embeddings_post_processor,
gpt_embeddings_post_processor,
xlnet_embeddings_post_processor,
roberta_embeddings_post_processor,
bert_tokens_cleaner,
openaigpt_tokens_cleaner,
gpt2_tokens_cleaner,
xlnet_tokens_cleaner,
)
# Pre-processors that wrap a token sequence with each architecture's special
# tokens before encoding.
model_special_tokens_pre_processors = {
    "bert": bert_tokens_pre_processor,
    "gpt": gpt_tokens_pre_processor,
    "gpt2": gpt_tokens_pre_processor,
    "xlnet": xlnet_tokens_pre_processor,
    # "xlm": xlm_tokens_pre_processor,
    "distilbert": bert_tokens_pre_processor,
    "roberta": roberta_tokens_pre_processor,
}

# Cleaners that strip tokenizer artefacts from sub-word tokens.
model_tokens_cleaners = {
    "bert": bert_tokens_cleaner,
    "gpt": openaigpt_tokens_cleaner,
    "gpt2": gpt2_tokens_cleaner,
    "xlnet": xlnet_tokens_cleaner,
    # "xlm": xlm_tokens_pre_processor,
    "distilbert": bert_tokens_cleaner,  # uses the same as BERT
    "roberta": gpt2_tokens_cleaner,  # Uses the same as GPT2
}

# Post-processors applied to the embedding output per architecture.
model_embeddings_post_processors = {
    "bert": bert_embeddings_post_processor,
    "gpt": gpt_embeddings_post_processor,
    "gpt2": gpt_embeddings_post_processor,
    "xlnet": xlnet_embeddings_post_processor,
    # "xlm": xlm_embeddings_post_processor,
    "distilbert": bert_embeddings_post_processor,
    "roberta": roberta_embeddings_post_processor,
}

# PaddleNLP model/tokenizer classes actually importable here.
# NOTE(review): only bert/xlnet/roberta have entries, while the processor maps
# above list more architectures — confirm the extra keys are intentional.
model_class_dict = {
    "bert": BertModel,
    "xlnet": XLNetModel,
    "roberta": RobertaModel,
}

model_tokenizer_dict = {
    "bert": BertTokenizer,
    "xlnet": XLNetTokenizer,
    "roberta": RobertaTokenizer,
}

# Default Chinese pre-trained weight names per architecture.
model_weights_defaults = {
    "bert": "bert-wwm-ext-chinese",
    "xlnet": "chinese-xlnet-base",
    "roberta": "roberta-wwm-ext",
}
| StarcoderdataPython |
154500 | from classes import biblioteca
def menu():
    """Display the main menu (user-facing text in Portuguese) and return the
    option number typed by the user."""
    for line in ("\n1-Inserir livros", "2- Exibir livros", "3-sair "):
        print(line)
    return int(input("\ndigite a opcao: "))
def ler(biblioteca):
    """Prompt the user for one book's data and insert it into *biblioteca*.

    Note: the parameter shadows the imported class of the same name; it is
    expected to be an *instance* of that class.
    """
    # input() already returns str, so the original str() wrappers were redundant.
    titulo = input("\ndigite o titulo do livro: ")
    autor = input("digite o nome do autor: ")
    data = input("digite a data de publicação no formato (dia/mes/ano): ")
    preco = float(input("digite o preco alvo do livro: "))
    biblioteca.inserir_livros(titulo, autor, data, preco)
# `Biblioteca` (the instance) differs from the imported class `biblioteca`
# only by case — easy to confuse when reading.
Biblioteca = biblioteca()
quant = int(input("digite a quntidade de livros que voce deseja inserir: "))
aux = 1
# Read `quant` books from the user.
for i in range(0,quant):
    ler(Biblioteca)
# Keep listing the library until the user types 0.
while(aux != 0):
    Biblioteca.imprimir_livros()
    aux = int(input("\ndigite 1 se deseja continuar esta operacao ou digite 0 para sair: "))
| StarcoderdataPython |
3383798 | <gh_stars>0
"""Simple water flow example using ANUGA
Water flowing along a spiral wall and draining into a hole in the centre.
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
from math import acos, cos, sin, sqrt, pi
import anuga
import matplotlib.pyplot as plt
import numpy as np
#------------------------------------------------------------------------------
# Setup computational domain
#------------------------------------------------------------------------------
# Domain extents in metres and mesh resolution.
length = 10.
width = 10.
dx = dy = 0.02 # Resolution: Length of subdivisions on both axes
# Point the spiral wall winds around (offset left of the true centre).
center = (length/2 * 0.7, width/2)

# Create a domain with named boundaries "left", "right", "top" and "bottom"
domain = anuga.rectangular_cross_domain(int(length/dx), int(width/dy),
                                        len1=length, len2=width)

domain.set_name('spiral_wall') # Output name
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
# Define wall polygon - spiral wall
def wall_polygon():
    """Return the spiral wall footprint as a single closed polygon.

    The outer edge follows an Archimedean-style spiral around the module-level
    ``center``; the inner edge is the same curve pulled inwards by the wall
    width. Concatenating the outer points with the reversed inner points
    yields a sensible polygon outline.
    """
    n_points = 50
    cx, cy = center
    r_outer = 2
    wall_width = 0.2

    outer = []
    inner = []
    for i in range(1, n_points):
        theta = i * (2 + 0.3) * pi / n_points
        growth = theta * 0.5  # spiral expansion term
        x = r_outer * growth * cos(theta) + cx
        y = r_outer * growth * sin(theta) + cy
        outer.append((x, y))
        vx, vy = x - cx, y - cy
        distance = sqrt(vx**2 + vy**2)
        # Start the inner edge a few points in, once the spiral has opened up.
        if distance > 0 and i > 6:
            inner.append(((distance - wall_width) * vx / distance + cx,
                          (distance - wall_width) * vy / distance + cy))

    # Diagnostic plotting only (kept from the original; plt.show() stays off).
    plt.plot([p[0] for p in outer], [p[1] for p in outer], 'bo',
             [p[0] for p in inner], [p[1] for p in inner], 'g*')
    #plt.show()

    return outer + inner[::-1]  # Reverse inner points to make polygon sensible
def topography(x, y):
    """Elevation function for the mesh: a flat bed with the spiral wall raised.

    Args:
        x, y: 1-D arrays of point coordinates (as passed by set_quantity).

    Returns:
        Array of elevations: 0 everywhere, +1.0 inside the wall polygon.
    """
    P = wall_polygon()
    z = y * 0.0  # flat bed everywhere (same shape as the inputs)

    # Identify points inside the wall polygon.
    points = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1)), axis=1)
    indices = anuga.geometry.polygon.inside_polygon(points, P, closed=True, verbose=False)

    # Raise elevation for points in polygon (removed unused locals N and c).
    for i in indices:
        z[i] += 1.0

    return z
domain.set_quantity('elevation', topography) # Use function for elevation
domain.set_quantity('friction', 0.01) # Constant friction
# Dry start: the water surface coincides with the bed everywhere.
domain.set_quantity('stage', # Dry bed
                    expression='elevation')
#------------------------------------------------------------------------------
# Setup forcing functions
#------------------------------------------------------------------------------
# FIXME: Let's use the built in Inflow class from ANUGA
class Inflow:
    """Class Inflow - general 'rain and drain' forcing term.

    Useful for implementing flows in and out of the domain.

    Inflow(center, radius, flow)

    center [m]: Coordinates at center of flow point
    radius [m]: Size of circular area
    flow [m/s]: Rate of change of quantity over the specified area.
                This parameter can be either a constant or a function of time.
                Positive values indicate inflow,
                negative values indicate outflow.

    Examples

    Inflow((0.7, 0.4), 0.07, -0.2) # Constant drain at 0.2 m/s.
                                   # This corresponds to a flow of
                                   # 0.07**2*pi*0.2 = 0.00314 m^3/s

    Inflow((0.5, 0.5), 0.001, lambda t: min(4*t, 5)) # Tap turning up to
                                                     # a maximum inflow of
                                                     # 5 m/s over the
                                                     # specified area
    """

    def __init__(self,
                 center=None, radius=None,
                 flow=0.0,
                 quantity_name = 'stage'):

        if center is not None and radius is not None:
            assert len(center) == 2
        else:
            msg = 'Both center and radius must be specified'
            raise Exception(msg)

        self.center = center
        self.radius = radius
        self.flow = flow
        # NOTE(review): this binds the update array of the *module-level*
        # `domain`, not the domain later passed to __call__ — confirm both
        # are always the same object.
        self.quantity = domain.quantities[quantity_name].explicit_update

    def __call__(self, domain):
        # Determine indices in flow area
        # (computed lazily on first call and cached on the instance).
        if not hasattr(self, 'indices'):
            center = self.center
            radius = self.radius

            N = len(domain)
            self.indices = []
            coordinates = domain.get_centroid_coordinates()
            for k in range(N):
                x, y = coordinates[k,:] # Centroid
                # Keep triangles whose centroid lies inside the circle.
                if ((x - center[0])**2 + (y - center[1])**2) < radius**2:
                    self.indices.append(k)

        # Update inflow
        if callable(self.flow):
            # Time-varying flow: evaluate at the domain's current time.
            flow = self.flow(domain.get_time())
        else:
            flow = self.flow

        for k in self.indices:
            self.quantity[k] += flow
# Drain at the spiral centre; its flow stays 0 until switched on mid-run.
drain = Inflow(center=center, radius=0.2, flow=0.0) # Zero initially
domain.forcing_terms.append(drain)

# Constant source feeding water in near the domain edge.
source = Inflow(center=(9.4, 6.0), radius=0.2, flow=1.0)
domain.forcing_terms.append(source)

#------------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
#Bi = anuga.Dirichlet_boundary([0.4, 0, 0]) # Inflow
Br = anuga.Reflective_boundary(domain) # Solid reflective walls
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})

#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
for t in domain.evolve(yieldstep=0.2, finaltime=40):
    domain.print_timestepping_statistics()

    # Switch the central drain on once, 14 s into the simulation.
    if domain.get_time() >= 14 and drain.flow == 0.0:
        print('Turning drain on')
        drain.flow = -2.5
| StarcoderdataPython |
1745235 | from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.utils.http import urlquote
from .models import MetaTags
def seo_metatags_admin_redirect(request):
    """Redirect to the admin page for the MetaTags matching ``?url=``.

    Existing records go to their change page; unknown URLs go to the add
    form with the url field pre-filled.
    """
    url = request.GET.get('url', None)
    if not url:
        raise ValueError('No URL was provided in SEO redirect request.')
    try:
        metatags = MetaTags.objects.get(url=url)
    except MetaTags.DoesNotExist:
        return redirect(reverse('admin:seo_metatags_add') + '?url=' + urlquote(url))
    return redirect('admin:seo_metatags_change', metatags.id)
| StarcoderdataPython |
3278315 | <reponame>cardosoyuri/RossmannStoreSalesPrediction
import pickle
import pandas as pd
from flask import Flask, request, Response
from rossmann.Rossmann import Rossmann
# Load the trained model once at import time.
# NOTE(review): hardcoded absolute Windows path — consider a relative path or
# an environment variable so the service runs outside this machine.
model = pickle.load(open(r'C:\Users\prese\Desktop\Data Scince\Projetos\RossmannStoreSales\model\model_rossmann.pkl','rb'))

#Initialize API
app = Flask(__name__)
@app.route('/rossmann/predict', methods = ['POST'])
def rossmann_predict():
    """Score incoming JSON (one record or a list of records) with the
    Rossmann pipeline and return the prediction payload."""
    test_json = request.get_json()

    if not test_json:
        # Empty body: reply with an empty JSON payload.
        return Response('{}', status=200, mimetype = 'application/json')

    if isinstance(test_json, dict):
        # Unique example.
        test_raw = pd.DataFrame(test_json, index=[0])
    else:
        # Multiple examples: take the column names from the first record.
        test_raw = pd.DataFrame(test_json, columns = test_json[0].keys())

    # Run the full scoring pipeline: clean -> engineer -> prepare -> predict.
    pipeline = Rossmann()
    df1 = pipeline.data_cleaning( test_raw )
    df2 = pipeline.feature_engineering( df1 )
    df3 = pipeline.data_preparation( df2 )
    return pipeline.get_prediction( model, test_raw, df3 )
if __name__ == '__main__':
    # Development server, bound to localhost only.
    app.run('127.0.0.1')
3305307 | <gh_stars>0
"""
qrcomm-py is a Python implementation of a QR-code communication protocol.
"""
import qrcode
from PIL import Image
import secrets
from hmac import compare_digest
from Crypto.Cipher import AES, Salsa20, ChaCha20, XChaCha20
import hashlib
# Registry of supported hash algorithms: display name -> [constructor, id].
hashes = {
    "BLAKE2b": [hashlib.blake2b, 0]
}

# Registry of supported ciphers: display name -> [module, id].
# NOTE(review): Crypto.Cipher provides AES/Salsa20/ChaCha20, but XChaCha20 is
# not a separate PyCryptodome module (it is ChaCha20 with a 24-byte nonce) —
# confirm the import at the top of the file actually resolves.
ciphers = {
    "AES": [AES, 1],
    "Salsa20": [Salsa20, 2],
    "ChaCha20": [ChaCha20, 3],
    "XChaCha20": [XChaCha20, 4],
}

qr_max_bytes = 1273 # Maximum bytes supported by a v40 QR code
qr_data_bytes = 1024 # Number of bytes to encode.
# Number of bytes for the hash (64 bytes = 512 bits)
# When sending data the first (0th) frame must be a header frame.
# A header frame uses the frametype 0x0000
# A seed frame uses the frametype 0x0001
# A message frame uses the frametype 0x0002
# With the default options a single frame can contain 1024 data bytes,
# plus a 32-bit frametype (appended to the plaintext before encryption)
# plus a 512-bit HMAC hash of the ciphertext, with the encryption key and nonce as the MAC key,
# plus a 512-bit HMAC hash of the plaintext (including frametype) with the encryption key and nonce as the MAC key,
# plus a 128-bit frame index (unencrypted; each QR code represents 1 frame).
# the nonce is simply the frame index, XORd with an IV defined in a seed frame.
# The seed frame's IV is chosen randomly using a secure RNG.
# The data bytes are XORd with the IV before encryption.
# All encryption is done with a stream cipher or a block cipher in CTR mode.
# The default cipher is AES with a 256-bit key, and BLAKE2b as the hashing algorithm.
# Encryption algorithms may be cascaded. The algorithm(s) used are defined in the header frame.
# Hashing algorithms may not be cascaded, but can be chosen freely.
# So far only BLAKE2b is implemented, but that's trivial to fix.
# Hashes use KMAC for hashes that support it without compromising security, otherwise HMAC.
# I let hashlib decide which MAC construction to use, since this is a messaging library, not a crypto library.
# There can be multiple seed and header frames within data. The reason for this is to reinitialize the cryptography,
# if necesary. This allows encrypting unlimited amounts of data (as if 2**128 bits isn't enough).
# Seed frame reinitialization is only useful if a suitable TRNG is available.
# The seed frame contains a 1024-byte IV.
# The IV is sent the same as any other message, except within a seed frame (frametype 0x0002).
# If no seed frame is sent the IV defaults to a string of zero-bits.
# A seed frame is decrypted using the IV prior to receiving the seed frame. The first seed frame is decrypted
# with an IV being all zero-bits.
# Build a version-40 (largest) QR code with low error correction.
qr = qrcode.QRCode(
    version=40,
    error_correction=qrcode.constants.ERROR_CORRECT_L,
    box_size=10,
    border=4,
)
# NOTE(review): add_data() is called without a payload; as written this raises
# TypeError — the data to encode still needs to be supplied here.
qr.add_data()
qr.make(fit=True)

img = qr.make_image(fill_color="black", back_color="white")
img.save("qr.png")
class qrcomm:
    """Placeholder for the QR-code communication channel (not yet implemented)."""

    def qrcomm_init(self):
        """Initialise the channel; currently a no-op stub."""
        pass
def build_message(msg, key, crypto_options=0, hash_options=0):
    """Split `msg` into the QR frame sequence: seed frame, header frame, data frames.

    `msg` is a bytes object containing the message to send.
    `key` is an encryption key. If a password is used it must
    be expanded before using it here. PBKDF2 is recommended.
    `crypto_options` is a 32-bit int.
    `hash_options` is a 32-bit int.

    Returns the list of built frames.
    """
    # Fix: the docstring above was previously a stray module-level string placed
    # *before* this def (a no-op statement); it now documents the function.
    iv = b'\x00' * 1024  # the very first seed frame is decrypted with an all-zero IV
    frames = []
    # NOTE(review): parse_crypto / parse_hash / qr_data_bytes / build_seed_frame are
    # not defined in this chunk — presumably elsewhere in the file; confirm.
    crypto_alg = parse_crypto(crypto_options)
    hash_alg = parse_hash(hash_options)
    new_iv = secrets.token_bytes(1024)  # Seed frame IV
    frames += [build_seed_frame(new_iv, key, 0, iv, crypto_alg, hash_alg)]
    iv = new_iv  # Now that the seed frame has defined an IV we must use it
    frames += [build_header_frame(msg, key, 1, iv, crypto_alg, hash_alg)]
    for ix in range(0, len(msg), qr_data_bytes):
        # NOTE(review): the whole `msg` is passed instead of the
        # msg[ix:ix + qr_data_bytes] slice — looks like a bug; confirm once
        # build_frame is finished.
        frames += [build_frame(msg, key, 2, 2 + ix, iv, crypto_alg, hash_alg)]
    return frames  # fix: the frame list was built but never returned
def build_frame(msg, key, frametype, ix, iv, crypto_alg, hash_alg):
    """Assemble one frame: plaintext = msg || frametype(4 bytes), XORed with the IV.

    Encryption, the two HMACs and the frame index described in the file header
    were never implemented; see the TODO below.
    """
    plaintext = list(msg)  # msg length must equal 1024 bytes exactly
    plaintext += list(frametype.to_bytes(4, 'big'))
    for a in range(qr_data_bytes):  # NOTE(review): qr_data_bytes is not defined in this chunk
        plaintext[a] ^= iv[a]
    plaintext = bytes(plaintext)
    # TODO(review): the original ended with a dangling `crypto_alg.new(` call,
    # which made the whole module a SyntaxError. Implement the cipher, the two
    # HMACs and the 128-bit frame index here, then return the frame bytes.
    raise NotImplementedError("frame encryption is not implemented yet")
def build_header_frame(msg, key, ix, iv, crypto_alg, hash_alg):
    """Build the header frame — currently an unimplemented stub."""
    # a header frame contains the number of frames that will be sent.
    # The count must also include any seed frames to be sent.
    # If another header frame is to be sent it must include
    # every frame up to (and including) the next header frame.
    pass
def build_seed_frame(msg, key, ix, iv, crypto_alg, hash_alg):
    """Build a seed frame carrying a new 1024-byte IV — currently an unimplemented stub."""
    pass
| StarcoderdataPython |
3208732 | <reponame>Torolfr/hw05_final
from http import HTTPStatus
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from posts.models import Group, Post
User = get_user_model()
class PostsURLTests(TestCase):
    """URL availability, redirect and route-name checks for the posts app."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Shared fixtures: one group, one author and one post for every test.
        cls.group = Group.objects.create(
            title='Тестовый заголовок группы',
            slug='test-slug',
            description='Тестовое описание группы',
        )
        cls.user = User.objects.create_user(username='Testuser')
        cls.post = Post.objects.create(
            text='Тестовый текст',
            author=cls.user,
            group=cls.group,
        )

    def setUp(self):
        # A client logged in as the post author.
        self.authorized_client = Client()
        self.authorized_client.force_login(PostsURLTests.user)

    def test_post_url_exists_at_desired_location(self):
        """Проверка доступности адресов в posts.url."""
        username = PostsURLTests.user.username
        group_slug = PostsURLTests.group.slug
        post_id = PostsURLTests.post.id
        anonymous = self.client
        logged_in = self.authorized_client
        # (url, client that must receive HTTP 200)
        accessible_urls = (
            ('/', anonymous),
            (f'/group/{group_slug}/', anonymous),
            ('/new/', logged_in),
            ('/follow/', logged_in),
            (f'/{username}/{post_id}/', anonymous),
            (f'/{username}/{post_id}/edit/', logged_in),
            (f'/{username}/', anonymous),
        )
        for url, http_client in accessible_urls:
            with self.subTest(url=url):
                self.assertEqual(http_client.get(url).status_code, HTTPStatus.OK)

    def test_post_url_uses_correct_redirects(self):
        """Проверка redirect-ов для адресов posts.url."""
        # A second, non-author user to exercise "not the author" redirects.
        other_user = User.objects.create_user(username='Testuser2')
        reader = Client()
        reader.force_login(other_user)
        username = PostsURLTests.user.username
        post_id = PostsURLTests.post.id
        anonymous = self.client
        auth_login = reverse('login') + '?next='
        # (url, client, expected redirect target)
        redirect_cases = (
            ('/new/', anonymous,
             auth_login + reverse('new_post')),
            (f'/{username}/{post_id}/edit/', anonymous,
             auth_login + reverse('post_edit', args=(username, post_id))),
            (f'/{username}/{post_id}/edit/', reader,
             reverse('post', args=(username, post_id))),
            (f'/{username}/follow/', anonymous,
             auth_login + reverse('profile_follow', args=(username,))),
            (f'/{username}/follow/', reader,
             reverse('profile', args=(username,))),
            (f'/{username}/unfollow/', anonymous,
             auth_login + reverse('profile_unfollow', args=(username,))),
            (f'/{username}/{post_id}/comment/', anonymous,
             auth_login + reverse('add_comment', args=(username, post_id))),
        )
        for url, http_client, expected_redirect in redirect_cases:
            with self.subTest(url=url):
                response = http_client.get(url, follow=True)
                self.assertRedirects(response, expected_redirect)

    def test_post_url_uses_correct_name_path(self):
        """Проверка name path() для адресов posts.url."""
        username = PostsURLTests.user.username
        group_slug = PostsURLTests.group.slug
        post_id = PostsURLTests.post.id
        # Each hard-coded URL must equal its reverse()d route name.
        named_routes = (
            ('/', 'index', None),
            (f'/group/{group_slug}/', 'group_posts', (group_slug,)),
            ('/new/', 'new_post', None),
            ('/follow/', 'follow_index', None),
            (f'/{username}/{post_id}/', 'post', (username, post_id)),
            (f'/{username}/{post_id}/edit/', 'post_edit', (username, post_id)),
            (f'/{username}/{post_id}/comment/', 'add_comment',
             (username, post_id)),
            (f'/{username}/follow/', 'profile_follow', (username,)),
            (f'/{username}/unfollow/', 'profile_unfollow', (username,)),
            (f'/{username}/', 'profile', (username,)),
        )
        for url, route_name, args in named_routes:
            with self.subTest(url=url):
                self.assertEqual(url, reverse(route_name, args=args))

    def test_incorrect_url_return_404_error(self):
        """Страница /abraabra/abraabra/ возвращает 404 код ответа."""
        response = self.client.get('/abraabra/abraabra/')
        self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
| StarcoderdataPython |
1693091 | <reponame>ribuild/delphin_6_automation
__author__ = "<NAME>"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import matplotlib.pyplot as plt
import numpy as np
import os
import datetime
import matplotlib.dates as mdates
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Functions
def get_points(result: dict, geo: dict):
    """Build a point record {'cell', 'x', 'y'} for every cell index in `result`.

    x/y are taken from columns 1 and 2 of the cell's entry in
    geo['element_geometry'].
    """
    cell_geometry = geo['element_geometry']
    return [
        {'cell': idx, 'x': cell_geometry[idx][1], 'y': cell_geometry[idx][2]}
        for idx in result['indices']
    ]
def add_data_to_points(points: list, results: dict, result_name: str):
    """Attach each cell's result series (first 8760 samples dropped) to its point.

    Keys in results['result'] look like 'cell_<index>'; the matching point dict
    gains a `result_name` entry holding the numpy array of the remaining samples.
    """
    for cell_key, series in results['result'].items():
        cell_index = int(cell_key.split('_')[1])
        for point in points:
            if point['cell'] == cell_index:
                point[result_name] = np.array(series[8760:])
                break
# Application

# Plot colours: one per 2D probe position plus the two 1D reference runs.
colors = {'top': '#FBBA00', 'mid': '#B81A5D', 'bottom': '#79C6C0', '1d_brick': '#000000', '1d_mortar': '#BDCCD4'}
result_folder = r'U:\RIBuild\2D_1D\Results'
# Delphin project ids: 1D brick, 1D mortar and the 2D reference simulation.
projects = ['5ad5da522e2cb21a90397b85', '5ad5dac32e2cb21a90397b86', '5ad5e05d5d9460d762130f93']
files = ['Temperature profile [2].d6o', 'Relative humidity profile [2].d6o',
         'Moisture content profile [2].d6o', 'Moisture content integral [2].d6o']
parsed_dicts = {'brick_1d': {'temp': {}, 'rh': {}, 'm_content': {}, 'moisture': {}, 'geo': {}},
                'mortar_1d': {'temp': {}, 'rh': {}, 'm_content': {}, 'moisture': {}, 'geo': {}},
                '2d': {'temp': {}, 'rh': {}, 'm_content': {}, 'moisture': {}, 'geo': {}}, }
map_projects = {'5ad5da522e2cb21a90397b85': 'brick_1d', '5ad5dac32e2cb21a90397b86': 'mortar_1d',
                '5ad5e05d5d9460d762130f93': '2d'}
for project in projects:
    # Resolve the project id to its short key ('brick_1d' / 'mortar_1d' / '2d').
    for mp_key in map_projects.keys():
        if project == mp_key:
            key = map_projects[mp_key]
    folder = result_folder + f'/{project}/results'
    # The geometry file is the single .g6a file inside the results folder.
    geo_file = [file
                for file in os.listdir(folder)
                if file.endswith('.g6a')][0]
    parsed_dicts[key]['temp'], _ = delphin_parser.d6o_to_dict(folder, files[0])
    parsed_dicts[key]['rh'], _ = delphin_parser.d6o_to_dict(folder, files[1])
    parsed_dicts[key]['m_content'], _ = delphin_parser.d6o_to_dict(folder, files[2])
    parsed_dicts[key]['moisture'], _ = delphin_parser.d6o_to_dict(folder, files[3])
    parsed_dicts[key]['geo'] = delphin_parser.g6a_to_dict(folder, geo_file)
# Hour axes; [8760:] below drops the first 8760 samples — presumably the
# first simulated year used as spin-up (confirm).
x = np.linspace(0, len(parsed_dicts['brick_1d']['temp']['result']['cell_0'][8760:]),
                len(parsed_dicts['brick_1d']['temp']['result']['cell_0'][8760:]))
x_date = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=i)
          for i in range(len(parsed_dicts['brick_1d']['temp']['result']['cell_0'][8760:]))]
x_2d = np.linspace(0, len(parsed_dicts['2d']['temp']['result']['cell_66'][8760:]),
                   len(parsed_dicts['2d']['temp']['result']['cell_66'][8760:]))
x_date_2d = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=i)
             for i in range(len(parsed_dicts['2d']['temp']['result']['cell_66'][8760:]))]
# Brick 1D: probe points sorted by wall depth, then all four result series attached.
brick_1d = get_points(parsed_dicts['brick_1d']['temp'], parsed_dicts['brick_1d']['geo'])
brick_1d.sort(key=lambda point: point['x'])
add_data_to_points(brick_1d, parsed_dicts['brick_1d']['temp'], 'temperature')
add_data_to_points(brick_1d, parsed_dicts['brick_1d']['rh'], 'relative_humidity')
add_data_to_points(brick_1d, parsed_dicts['brick_1d']['m_content'], 'moisture_content')
add_data_to_points(brick_1d, parsed_dicts['brick_1d']['moisture'], 'moisture_integral')
# Mortar 1D
mortar_1d = get_points(parsed_dicts['mortar_1d']['temp'], parsed_dicts['mortar_1d']['geo'])
mortar_1d.sort(key=lambda point: point['x'])
add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['temp'], 'temperature')
add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['rh'], 'relative_humidity')
add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['m_content'], 'moisture_content')
add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['moisture'], 'moisture_integral')
# 2D: sorted by (x, y) so points come in bottom/mid/top triples per depth.
sim_2d = get_points(parsed_dicts['2d']['temp'], parsed_dicts['2d']['geo'])
sim_2d.sort(key=lambda point: (point['x'], point['y']))
add_data_to_points(sim_2d, parsed_dicts['2d']['temp'], 'temperature')
add_data_to_points(sim_2d, parsed_dicts['2d']['rh'], 'relative_humidity')
add_data_to_points(sim_2d, parsed_dicts['2d']['m_content'], 'moisture_content')
add_data_to_points(sim_2d, parsed_dicts['2d']['moisture'], 'moisture_integral')
# Plots
def plot_locations(quantity):
    """Plot `quantity` for every probed depth: 1D brick, 1D mortar and 2D bottom/mid/top.

    One figure per depth. The 1D point lists hold one point per depth, while the
    sorted 2D list holds bottom/mid/top triples, so 1D index i pairs with 2D
    indices 3*i .. 3*i + 2. Refactored from six copy-pasted figure blocks into a
    single loop (behaviour unchanged, figures created in the same order).
    """
    # (1D point index, first 2D point index of the bottom/mid/top triple)
    depth_pairs = [(0, 0), (1, 3), (2, 6), (3, 9), (4, 12), (5, 15)]
    for index_1d, base_2d in depth_pairs:
        plt.figure()
        plt.title(f"{quantity}\n1D-Location: {brick_1d[index_1d]['x']:.4f} and 2D-Location: {sim_2d[base_2d]['x']:.4f}")
        plt.plot(x_date, brick_1d[index_1d][quantity], color=colors['1d_brick'], label="1D Brick")
        plt.plot(x_date, mortar_1d[index_1d][quantity], color=colors['1d_mortar'], label="1D Mortar")
        # Bottom, mid and top probes of the 2D simulation at this depth.
        for offset, position in enumerate(('bottom', 'mid', 'top')):
            plt.plot(x_date_2d, sim_2d[base_2d + offset][quantity],
                     color=colors[position], label=f"2D {position.capitalize()}")
        plt.legend()
        plt.gcf().autofmt_xdate()
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
        plt.ylabel(f'{quantity}')
#plot_locations(quantity='temperature')
#plt.show()
#plot_locations(quantity='relative_humidity')
#plt.show()
#plot_locations(quantity='moisture_content')
#plt.show()
# Moisture Integral: total moisture of the 1D runs vs the scaled 2D run.
plt.figure()
plt.title('Moisture Integral')
plt.plot(x_date, brick_1d[0]['moisture_integral'], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[0]['moisture_integral'], color=colors['1d_mortar'], label=f"1D Mortar")
# NOTE(review): 7.351860020585208 appears to rescale the 2D integral onto the
# 1D reference (geometry/volume factor) — confirm its derivation.
plt.plot(x_date_2d, sim_2d[0]['moisture_integral']*7.351860020585208, color=colors['bottom'], label=f"2D")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('kg')
def abs_diff(x1, x2):
    """Signed difference x2 - x1 (despite the name, no absolute value is taken)."""
    difference = x2 - x1
    return difference
def rel_diff(x1, x2):
    """Relative difference of x1 with respect to x2, in percent: |x2 - x1| / |x2| * 100."""
    return 100.0 * abs(x2 - x1) / abs(x2)
# Absolute and relative differences between each 1D run and the rescaled 2D run,
# truncated to the (shorter) 2D series length.
brick_abs = abs_diff(brick_1d[0]['moisture_integral'][:len(sim_2d[0]['moisture_integral'])],
                     sim_2d[0]['moisture_integral']*7.351860020585208)
mortar_abs = abs_diff(mortar_1d[0]['moisture_integral'][:len(sim_2d[0]['moisture_integral'])],
                      sim_2d[0]['moisture_integral']*7.351860020585208)
brick_rel = rel_diff(brick_1d[0]['moisture_integral'][:len(sim_2d[0]['moisture_integral'])],
                     sim_2d[0]['moisture_integral']*7.351860020585208)
mortar_rel = rel_diff(mortar_1d[0]['moisture_integral'][:len(sim_2d[0]['moisture_integral'])],
                      sim_2d[0]['moisture_integral']*7.351860020585208)
# Moisture Integral
plt.figure()
plt.title('Moisture Integral - Absolute Difference')
plt.plot(x_date_2d, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date_2d, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('kg')
plt.figure()
plt.title('Moisture Integral - Relative Difference')
plt.plot(x_date_2d, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date_2d, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
# Summary statistics of the relative difference, printed to stdout.
print('Relative Difference:')
print()
print(f"25th PERCENTILE:\tBrick: {np.percentile(brick_rel, 25):.03f}\tMortar: {np.percentile(mortar_rel, 25):.03f}")
print(f"MEAN:\t\t\t\tBrick: {np.mean(brick_rel):.03f}\tMortar: {np.mean(mortar_rel):.03f}")
print(f"MEDIAN:\t\t\t\tBrick: {np.median(brick_rel):.03f}\tMortar: {np.median(mortar_rel):.03f}")
print(f"75th PERCENTILE:\tBrick: {np.percentile(brick_rel, 75):.03f}\tMortar: {np.percentile(mortar_rel, 75):.03f}")
print(f"STANDARD DEVIATION:\tBrick: {np.std(brick_rel):.03f}\tMortar: {np.std(mortar_rel):.03f}")
plt.show()
| StarcoderdataPython |
4837381 | from typing import Any, Dict, List, Optional
from ..._errors import ApifyApiError
from ..._utils import _catch_not_found_or_throw, _pluck_data_as_list, _snake_case_to_camel_case
from ..base import ResourceClient
class ScheduleClient(ResourceClient):
    """Sub-client for manipulating a single schedule."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize the ScheduleClient."""
        resource_path = kwargs.pop('resource_path', 'schedules')
        super().__init__(*args, resource_path=resource_path, **kwargs)

    def get(self) -> Optional[Dict]:
        """Fetch this schedule from the API.

        https://docs.apify.com/api/v2#/reference/schedules/schedule-object/get-schedule

        Returns:
            dict, optional: The retrieved schedule
        """
        return self._get()

    def update(
        self,
        *,
        cron_expression: Optional[str] = None,
        is_enabled: Optional[bool] = None,
        is_exclusive: Optional[bool] = None,
        name: Optional[str] = None,
        actions: Optional[List[Dict]] = None,
        description: Optional[str] = None,
        timezone: Optional[str] = None,
    ) -> Dict:
        """Update the schedule, sending only the fields that were supplied.

        https://docs.apify.com/api/v2#/reference/schedules/schedule-object/update-schedule

        Args:
            cron_expression (str, optional): The cron expression used by this schedule
            is_enabled (bool, optional): True if the schedule should be enabled
            is_exclusive (bool, optional): When set to true, don't start actor or actor task if it's still running from the previous schedule.
            name (str, optional): The name of the schedule to create.
            actions (list of dict, optional): Actors or tasks that should be run on this schedule. See the API documentation for exact structure.
            description (str, optional): Description of this schedule
            timezone (str, optional): Timezone in which your cron expression runs
                (TZ database name from https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)

        Returns:
            dict: The updated schedule
        """
        # Snapshot locals() *before* creating any other local, so only `self`
        # plus the keyword parameters are captured.
        supplied_arguments = dict(locals())
        payload = {}
        for field_name, field_value in supplied_arguments.items():
            if field_name == 'self' or field_value is None:
                continue
            payload[_snake_case_to_camel_case(field_name)] = field_value
        return self._update(payload)

    def delete(self) -> None:
        """Delete the schedule.

        https://docs.apify.com/api/v2#/reference/schedules/schedule-object/delete-schedule
        """
        self._delete()

    def get_log(self) -> Optional[List]:
        """Fetch the run log of this schedule, or None when it does not exist.

        https://docs.apify.com/api/v2#/reference/schedules/schedule-log/get-schedule-log

        Returns:
            list, optional: Retrieved log of the given schedule
        """
        try:
            response = self.http_client.call(
                url=self._url('log'),
                method='GET',
                params=self._params(),
            )
            return _pluck_data_as_list(response.json())
        except ApifyApiError as exc:
            # 404 is swallowed (schedule/log absent); anything else re-raises.
            _catch_not_found_or_throw(exc)
        return None
| StarcoderdataPython |
3394221 | <filename>favoriteloop.py
#!/usr/bin/python3.7
favorite_languages = {'jen': 'python', 'sarah': 'c', 'edward': 'ruby', 'phil': 'python'}

# Print each respondent's capitalized name in alphabetical order.
# fix: the original f-string contained an accidentally expanded try/except
# editor snippet (`{name.try: ... except ...}`), which made the whole file a
# SyntaxError; restored to printing the name.
for name in sorted(favorite_languages.keys()):
    print(f"\n{name.title()}")
4822077 | import random
from pathlib import Path
from typing import Tuple
import glob
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
from ganslate.utils.io import make_dataset_of_files
# Config imports
from dataclasses import dataclass
from ganslate import configs
from ganslate.data.utils.normalization import min_max_normalize
EXTENSIONS = ['.jpg', '.exr']
# Max allowed intenity of depthmap images. Specified in metres.
# This value is chosen by analyzing max values throughout the dataset.
UPPER_DEPTH_INTENSITY_LIMIT = 8.0
@dataclass
class ClearGraspTrainDatasetConfig(configs.base.BaseDatasetConfig):
    # Configuration for ClearGraspTrainDataset.
    load_size: Tuple[int, int] = (512, 256)  # target (width, height) every image is resized to
    paired: bool = True  # `True` for paired A-B.
    require_domain_B_rgb: bool = False  # Whether to fetch noisy RGB photo for domain B
require_domain_B_rgb: bool = False # Whether to fetch noisy RGB photo for domain B
class ClearGraspTrainDataset(Dataset):
    """
    Multimodality dataset containing RGB photos, surface normalmaps and depthmaps.
    Curated from Cleargrasp robot-vision dataset.
    The domain translation task is: RGB + Normalmap --> Depthmap
    """

    def __init__(self, conf):
        """Index the rgb/normal/depth folders and set up resizing and clipping ranges."""
        self.paired = conf[conf.mode].dataset.paired
        self.require_domain_B_rgb = conf[conf.mode].dataset.require_domain_B_rgb

        rgb_dir = Path(conf[conf.mode].dataset.root) / "rgb"
        normalmap_dir = Path(conf[conf.mode].dataset.root) / "normal"
        depthmap_dir = Path(conf[conf.mode].dataset.root) / "depth"

        self.image_paths = {'RGB': [], 'normalmap': [], 'depthmap': []}
        self.image_paths['RGB'] = make_dataset_of_files(rgb_dir, EXTENSIONS)
        self.image_paths['normalmap'] = make_dataset_of_files(normalmap_dir, EXTENSIONS)
        self.image_paths['depthmap'] = make_dataset_of_files(depthmap_dir, EXTENSIONS)
        self.dataset_size = len(self.image_paths['RGB'])

        # load_size is (width, height); torchvision's Resize wants (height, width).
        self.load_size = conf[conf.mode].dataset.load_size
        self.load_resize_transform = transforms.Resize(
            size=(self.load_size[1], self.load_size[0]), interpolation=transforms.InterpolationMode.BICUBIC
        )

        # Clipping ranges per modality (depth is capped at UPPER_DEPTH_INTENSITY_LIMIT metres).
        self.rgb_min, self.rgb_max = 0.0, 255.0
        self.normalmap_min, self.normalmap_max = -1.0, 1.0
        self.depthmap_min, self.depthmap_max = 0.0, UPPER_DEPTH_INTENSITY_LIMIT

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, index):
        # ------------
        # Fetch images
        index_A = index % self.dataset_size
        # Paired mode uses the same index for both domains; unpaired draws B at random.
        index_B = index_A if self.paired else random.randint(0, self.dataset_size - 1)
        # fix: removed the leftover debug override `index_A, index_B = 9, 1`, which
        # forced every sample to the same two images and disabled (un)paired sampling.
        image_path_A, image_path_B = {}, {}
        image_path_A['RGB'] = self.image_paths['RGB'][index_A]
        image_path_A['normalmap'] = self.image_paths['normalmap'][index_A]
        image_path_B['depthmap'] = self.image_paths['depthmap'][index_B]
        if self.require_domain_B_rgb:
            image_path_B['RGB'] = self.image_paths['RGB'][index_B]
        images_A, images_B = {}, {}
        images_A['RGB'] = read_rgb_to_tensor(image_path_A['RGB'])
        images_A['normalmap'] = read_normalmap_to_tensor(image_path_A['normalmap'])
        images_B['depthmap'] = read_depthmap_to_tensor(image_path_B['depthmap'])
        if self.require_domain_B_rgb:
            images_B['RGB'] = read_rgb_to_tensor(image_path_B['RGB'])

        # ------
        # Resize
        for k in images_A.keys():
            images_A[k] = self.load_resize_transform(images_A[k])
        for k in images_B.keys():
            images_B[k] = self.load_resize_transform(images_B[k])

        # ---------
        # Transform (currently a no-op; see apply_transforms)
        images_A, images_B = self.apply_transforms(images_A, images_B)

        # -------------
        # Normalization
        # Clip and then rescale all intensities to range [-1, 1].
        # Normalmap is already in this scale.
        images_A['RGB'] = clip_and_min_max_normalize(images_A['RGB'], self.rgb_min, self.rgb_max)
        images_A['normalmap'] = torch.clamp(images_A['normalmap'], self.normalmap_min, self.normalmap_max)
        images_B['depthmap'] = clip_and_min_max_normalize(images_B['depthmap'], self.depthmap_min, self.depthmap_max)
        if self.require_domain_B_rgb:
            images_B['RGB'] = clip_and_min_max_normalize(images_B['RGB'], self.rgb_min, self.rgb_max)

        # -------------------------
        # Add noise in domain-B RGB so B's photo is a degraded counterpart.
        if self.require_domain_B_rgb:
            images_B['RGB'] = images_B['RGB'] + torch.normal(mean=0, std=0.05, size=(self.load_size[1], self.load_size[0]))
            images_B['RGB'] = torch.clamp(images_B['RGB'], -1, 1)  # Clip to remove out-of-range overshoots

        # ---------------------
        # Construct sample dict
        # A and B need to have dims (C,D,H,W)
        A = torch.cat([images_A['RGB'], images_A['normalmap']], dim=0)
        if self.require_domain_B_rgb:
            B = torch.cat([images_B['RGB'], images_B['depthmap']], dim=0)
        else:
            B = images_B['depthmap']
        return {'A': A, 'B': B}

    def apply_transforms(self, images_A, images_B):
        """
        TODO: What transform to use for augmentation?
        Cannot naively apply random flip and crop, would mess up the normalmap and depthmap info, resp.
        Maybe flipping + changing normalmap colour mapping (by changing order of its RGB channels)
        """
        return images_A, images_B
return images_A, images_B
def read_rgb_to_tensor(path):
    """Load an RGB photo as a float32 tensor shaped (3, H, W).

    Uses cv2.imread for consistency with the normalmap and depthmap readers
    (cv2 returns BGR, so the channels are swapped to RGB first).
    """
    image_bgr = cv2.imread(str(path))
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    channels_first = image_rgb.transpose(2, 0, 1)  # (H,W,C) -> (C,H,W)
    return torch.tensor(channels_first, dtype=torch.float32)
def read_normalmap_to_tensor(path):
    """Load a surface-normal map (EXR) as a float32 tensor shaped (3, H, W)."""
    raw = cv2.imread(str(path), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    rgb = cv2.cvtColor(raw, cv2.COLOR_BGR2RGB)
    channels_first = rgb.transpose(2, 0, 1)  # (H,W,C) -> (C,H,W)
    return torch.tensor(channels_first, dtype=torch.float32)
def read_depthmap_to_tensor(path):
    """Load a depthmap (EXR) as a float32 tensor shaped (1, H, W)."""
    depth = cv2.imread(str(path), cv2.IMREAD_ANYDEPTH)
    with_channel = depth[np.newaxis, ...]  # (H,W) -> (1,H,W)
    return torch.tensor(with_channel, dtype=torch.float32)
def clip_and_min_max_normalize(tensor, min_value, max_value):
    """Clamp `tensor` into [min_value, max_value], then min-max normalize it."""
    clamped = torch.clamp(tensor, min=min_value, max=max_value)
    return min_max_normalize(clamped, min_value, max_value)
| StarcoderdataPython |
4808687 | import sqlite3 as sl
import pandas as pd
# Small sqlite3 + pandas demo: create a USER table, join it with a SKILL
# table built from a DataFrame, and query the result.
con = sl.connect('my-test.db')

con.execute("""
    CREATE TABLE USER (
        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
        name TEXT,
        age INTEGER
    );
""")

sql = 'INSERT INTO USER (id, name, age) values(?, ?, ?)'
data = [
    (1, 'Alice', 21),
    (2, 'Bob', 22),
    (3, 'Chris', 23)
]
con.executemany(sql, data)
con.commit()  # fix: the inserts were never committed, leaving the DB file empty on close

data = con.execute("SELECT * FROM USER WHERE age <= 22")
for row in data:
    print(row)

df_skill = pd.DataFrame({
    'user_id': [1,1,2,2,3,3,3],
    'skill': ['Network Security', 'Algorithm Development', 'Network Security', 'Java', 'Python', 'Data Science', 'Machine Learning']
})
df_skill.to_sql('SKILL', con)

df = pd.read_sql('''
    SELECT s.user_id, u.name, u.age, s.skill
    FROM USER u LEFT JOIN SKILL s ON u.id = s.user_id
''', con)
df.to_sql('USER_SKILL', con)

# fix: the variable below was previously misspelled with a Cyrillic 'в'
# ('вata'), so the following loop iterated the already-exhausted cursor from
# the earlier SELECT and printed nothing.
data = con.execute("SELECT * FROM USER WHERE age <= 22")
for row in data:
    print(row)

data = con.execute("SELECT * FROM USER_SKILL WHERE age <= 22")
for row in data:
    print(row)

data = con.execute("SELECT * FROM USER_SKILL")
for row in data:
    print(row)
| StarcoderdataPython |
47043 | <filename>src/lib/parsers/parseretinac.py
#!/usr/bin/python
# parseretinac.py
#
# By <NAME> <EMAIL> | <EMAIL>
# Copyright 2011 Intru-Shun.ca Inc.
# v0.09
# 16 October 2011
#
# The current version of these scripts are at: http://dshield.handers.org/adebeaupre/ossams-parser.tgz
#
# Parses retina community version XML output
# http://eeye.com
#
# This file is part of the ossams-parser.
#
# The ossams-parser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ossams-parser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ossams-parser. If not, see <http://www.gnu.org/licenses/>.
#
# parseretina function
def parseretinac(time, os, root, filetoread, db, dbconnection, projectname, projectid, separator):
# Check to see if the document root is 'scanJob', exit if it is not
if root.tag:
if root.tag != "scanJob":
print filetoread, "is not a retina XML report file"
return
retinafile = filetoread.split(separator)
file = retinafile[-1]
filetime = time.ctime(os.path.getmtime(filetoread))
timenow = time.ctime()
db.execute("""
INSERT INTO tooloutput (toolname, filename, OSSAMSVersion, filedate, inputtimestamp, projectname, projectid)
VALUES
('retina', '%s', 0.09, '%s', '%s', '%s', '%s')
""" % (file, filetime, timenow, projectname, projectid)
)
tooloutputnumber = int(db.lastrowid)
print "Processed retina report number:", tooloutputnumber
hostattribs = ['ip', 'netBIOSName', 'netBIOSDomain', 'dnsName', 'mac', 'os']
auditattribs = ['rthID', 'cve', 'cce', 'name', 'description', 'date', 'risk', 'pciLevel', 'cvssScore', 'fixInformation', 'exploit']
hosts = root.findall('hosts/host')
for host in hosts:
hostvalues = {'ip': " ", 'netBIOSName': " ", 'netBIOSDomain': " ", 'dnsName': " ", 'mac': " ", 'os': " "}
auditvalues = {'rthID': " ", 'cve': " ", 'cce': " ", 'name': " ", 'description': " ", 'date': " ", 'risk': " ", 'pciLevel': " ",
'cvssScore': " ", 'fixInformation': " ", 'exploit': " "}
refs = ['cve', 'cce', 'cvssScore', 'pciLevel']
for value in hostattribs:
node = host.find(value)
if node.text:
hostvalues[value] = node.text
db.execute("""
INSERT INTO hosts (tooloutputnumber, ipv4, macaddress, hostname, recon, hostcriticality, hostos)
VALUES
(%s, '%s', '%s', '%s', 1, 0, '%s')
""" % (tooloutputnumber, hostvalues['ip'], hostvalues['mac'], hostvalues['dnsName'], hostvalues['os'])
)
hostnumber = int(db.lastrowid)
print "Processed host:", hostnumber, "IP:", hostvalues['ip']
audits = host.findall('audit')
for audit in audits:
for value in auditattribs:
node = audit.find(value)
if node.text:
auditvalues[value] = node.text
description = auditvalues['description']
encodeddescription = description.encode('utf-8','ignore')
db.execute("""
INSERT INTO vulnerabilities (tooloutputnumber, hostnumber, vulnerabilityid, vulnerabilityname, vulnerabilityrisk,
vulnerabilitydescription, vulnerabilityvalidation, vulnerabilitysolution)
VALUES
('%s', '%s', '%s', '%s', '%s', '%s', 0, '%s')
""" % (tooloutputnumber, hostnumber, auditvalues['rthID'], auditvalues['name'], auditvalues['risk'],
dbconnection.escape_string(encodeddescription), dbconnection.escape_string(auditvalues['fixInformation'])
)
)
vulnnumber = int(db.lastrowid)
for ref in refs:
refvalue = audit.find(ref)
if refvalue.text:
db.execute("""
INSERT INTO refs (tooloutputnumber, hostnumber, vulnerabilitynumber, referencetype, referencevalue )
VALUES
('%s', '%s', '%s', '%s', '%s')
""" % (tooloutputnumber, hostnumber, vulnnumber, refvalue.tag, refvalue.text)
)
return
| StarcoderdataPython |
3240351 | <filename>master/master.py<gh_stars>1-10
import sys
import socket
import threading
import logging
import json
import time
import random
import os
# Docker requires loopback address to be 0.0.0.0 instead of localhost.
# 'localhost' is chosen if run manually without docker.
JOB_REQUESTS_HOST = os.getenv("LOOPBACK_ADDRESS", "localhost")
JOB_REQUESTS_PORT = 5000
WORKER_RESPONSES_HOST = os.getenv("LOOPBACK_ADDRESS", "localhost")
WORKER_RESPONSES_PORT = 5001
WORKER_ACCEPT_JOBS_HOST = os.getenv("LOOPBACK_ADDRESS", "localhost")
# NOTE(review): not referenced in this chunk — presumably a sentinel a worker
# sends when all of a job's mappers have completed; confirm against the worker.
ALL_MAPPERS_COMPLETED_CODE = -1
# Guards the shared scheduling state (each worker's free_slots) across threads.
thread_lock = threading.Lock()
# Fixed seed so RANDOM-scheduling runs are reproducible.
random.seed(3)
def read_args():
    """Parse CLI arguments: a config-file path and a scheduling-algorithm name.

    Returns:
        (config, scheduling_algo): the JSON config as a dict and the algorithm
        string (e.g. "RANDOM", "RR", "LL"). Exits with status 1 on bad usage.
    """
    if len(sys.argv) != 3:
        print("Usage: python master.py /path/to/config <scheduling-algorithm>")
        # fix: use sys.exit — the bare exit() builtin comes from the site module
        # and may be absent (e.g. under `python -S` or when frozen).
        sys.exit(1)
    config_file = sys.argv[1]
    scheduling_algo = sys.argv[2]
    with open(config_file, "r") as f:
        config = json.loads(f.read())
    return config, scheduling_algo
def init_logging(scheduling_algo):
    """Route root logging to a per-algorithm file, truncating any previous log."""
    log_path = f"../logs/master_{scheduling_algo}.log"
    logging.basicConfig(
        filename=log_path,
        filemode="w",
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
    )
    # Suppress DEBUG records while keeping the handler itself configured at DEBUG.
    logging.disable(logging.DEBUG)
def preprocess_workers(workers):
    """Prepare each worker and index them by id.

    For every worker: initialise free_slots from its slot count, open a
    reusable TCP listening socket on its configured port, and store both on
    the worker dict. Returns {worker_id: worker}.
    """
    workers_by_id = {}
    for worker in workers:
        worker["free_slots"] = worker["slots"]
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind((WORKER_ACCEPT_JOBS_HOST, int(worker["port"])))
        listener.listen(50)
        worker["socket"] = listener
        workers_by_id[worker["worker_id"]] = worker
    return workers_by_id
def send_task_to_worker(worker, job_id, task):
    """Accept one connection on the worker's listening socket and send it the task.

    The payload is a JSON object with job_id, task_id and duration; the
    connection is closed immediately after sending.
    """
    connection, _address = worker["socket"].accept()
    connection.settimeout(5)
    payload = json.dumps({
        "job_id": job_id,
        "task_id": task["task_id"],
        "duration": task["duration"],
    })
    connection.send(payload.encode())
    connection.close()
    logging.info(f"started task {task['task_id']} of job {job_id} on worker {worker['worker_id']}")
def listen_for_jobs(workers, scheduling_algo, jobs):
    """Accept incoming job requests and schedule their map tasks.

    Runs forever (intended to live in its own thread). Each client
    connection delivers one JSON job request containing "job_id",
    "map_tasks" and "reduce_tasks". Map tasks are dispatched here; the
    reduce tasks are dispatched later by listen_to_workers() once every
    map task of the job has completed.

    Args:
        workers: worker_id -> worker dict; "free_slots" is shared mutable
            state guarded by the module-level thread_lock.
        scheduling_algo: "RANDOM", "RR" (round-robin) or "LL" (least-loaded).
        jobs: shared job_id -> job dict, populated here and consumed by
            listen_to_workers().
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as job_request_socket:
        job_request_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        job_request_socket.bind((JOB_REQUESTS_HOST, JOB_REQUESTS_PORT))
        job_request_socket.listen(50)
        selected_worker_index = 0  # round-robin cursor (only used for "RR")
        all_worker_ids = list(workers.keys())
        while True:
            client_socket, address = job_request_socket.accept()
            client_socket.settimeout(5)
            # NOTE(review): assumes the whole request fits in one 2048-byte
            # recv — larger payloads would be truncated; confirm request sizes.
            job_request = json.loads(client_socket.recv(2048).decode())
            job_request["unfinished_map_tasks"] = len(job_request["map_tasks"])
            jobs[job_request["job_id"]] = job_request
            logging.info(f"started job {job_request['job_id']}")
            for task in job_request["map_tasks"]:
                assigned = False
                # Busy-wait (with back-off sleeps) until a worker has a free slot.
                while not assigned:
                    thread_lock.acquire()
                    if scheduling_algo == "RANDOM":
                        # NOTE(review): assumes worker ids are the integers
                        # 1..len(workers) — confirm against the config file.
                        selected_worker_id = random.randint(1, len(workers))
                    elif scheduling_algo == "RR":
                        selected_worker_id = all_worker_ids[selected_worker_index]
                    elif scheduling_algo == "LL":
                        # Least-loaded: the worker with the most free slots.
                        selected_worker_id = max(workers, key=lambda worker: workers[worker]["free_slots"])
                    if workers[selected_worker_id]["free_slots"] > 0:
                        # Lock is held across the dispatch so the slot count and
                        # the send stay consistent with listen_to_workers().
                        send_task_to_worker(workers[selected_worker_id], job_request["job_id"], task)
                        workers[selected_worker_id]["free_slots"] -= 1
                        logging.debug(
                            f'worker {selected_worker_id} has {workers[selected_worker_id]["free_slots"]} free slots'
                        )
                        thread_lock.release()
                        assigned = True
                    else:
                        thread_lock.release()
                        if scheduling_algo == "LL":
                            # Every worker is saturated: back off longer.
                            logging.debug(f"all workers have filled slots")
                            time.sleep(1)
                        else:
                            logging.debug(f"all slots of worker {selected_worker_id} are full")
                            time.sleep(0.1)
                    # Advance the round-robin cursor on every attempt.
                    selected_worker_index = (selected_worker_index + 1) % len(workers)
            client_socket.close()
def finish_task_from_worker(workers, server_worker_socket, jobs):
    """Receive one task-completion message and update shared bookkeeping.

    Blocks until a worker connects with a completion report, then frees
    the worker's slot and, if the finished task was a map task (task ids
    containing "M"), decrements the job's unfinished-map-task count.
    """
    conn, _addr = server_worker_socket.accept()
    conn.settimeout(5)
    completed = json.loads(conn.recv(2048).decode())
    worker_id = completed["worker_id"]
    job_id = completed["job_id"]
    task_id = completed["task_id"]
    logging.info(
        f"task {task_id} of job {job_id} on worker {worker_id} has finished executing"
    )
    with thread_lock:
        workers[worker_id]["free_slots"] += 1
        logging.debug(
            f'worker {worker_id} has {workers[worker_id]["free_slots"]} free slots'
        )
    if "M" in task_id:
        jobs[job_id]["unfinished_map_tasks"] -= 1
        logging.debug(
            f"job {job_id} has {jobs[job_id]['unfinished_map_tasks']} remaining map tasks"
        )
    conn.close()
def listen_to_workers(workers, scheduling_algo, jobs):
    """Collect task completions and schedule reduce tasks.

    Runs forever (intended to live in its own thread). After each
    completion message it scans all known jobs; once a job's map tasks
    have all finished, its reduce tasks are dispatched with the same
    scheduling policy used by listen_for_jobs().

    Args:
        workers: worker_id -> worker dict (shared, thread_lock-protected).
        scheduling_algo: "RANDOM", "RR" (round-robin) or "LL" (least-loaded).
        jobs: shared job_id -> job dict produced by listen_for_jobs().
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_worker_socket:
        server_worker_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_worker_socket.bind((WORKER_RESPONSES_HOST, WORKER_RESPONSES_PORT))
        server_worker_socket.listen(50)
        selected_worker_index = 0  # round-robin cursor (only used for "RR")
        all_worker_ids = list(workers.keys())
        while True:
            finish_task_from_worker(workers, server_worker_socket, jobs)
            # list() snapshots the keys: jobs may grow concurrently while
            # listen_for_jobs() accepts new requests in the other thread.
            for job_id in list(jobs.keys()):
                if jobs[job_id]["unfinished_map_tasks"] == 0:
                    for task in jobs[job_id]["reduce_tasks"]:
                        assigned = False
                        # Busy-wait until a worker has a free slot.
                        while not assigned:
                            thread_lock.acquire()
                            if scheduling_algo == "RANDOM":
                                # NOTE(review): assumes worker ids are the integers
                                # 1..len(workers) — confirm against the config file.
                                selected_worker_id = random.randint(1, len(workers))
                            elif scheduling_algo == "RR":
                                selected_worker_id = all_worker_ids[selected_worker_index]
                            elif scheduling_algo == "LL":
                                # Least-loaded: the worker with the most free slots.
                                selected_worker_id = max(workers, key=lambda worker: workers[worker]["free_slots"])
                            if workers[selected_worker_id]["free_slots"] > 0:
                                send_task_to_worker(workers[selected_worker_id], job_id, task)
                                workers[selected_worker_id]["free_slots"] -= 1
                                logging.debug(
                                    f'worker {selected_worker_id} has {workers[selected_worker_id]["free_slots"]} free slots'
                                )
                                thread_lock.release()
                                assigned = True
                            else:
                                thread_lock.release()
                                if scheduling_algo == "LL":
                                    logging.debug(f"all workers have filled slots")
                                    time.sleep(1)
                                else:
                                    logging.debug(f"all slots of worker {selected_worker_id} are full")
                                    time.sleep(0.1)
                                # Drain a completion while waiting so slots can free up.
                                finish_task_from_worker(workers, server_worker_socket, jobs)
                            # Advance the round-robin cursor on every attempt.
                            selected_worker_index = (selected_worker_index + 1) % len(workers)
                    # Sentinel (-1) marks this job's reducers as dispatched so
                    # they are not scheduled again on the next scan.
                    jobs[job_id]["unfinished_map_tasks"] = ALL_MAPPERS_COMPLETED_CODE
def main():
    """Entry point: parse args, set up workers, and start both listeners.

    Spawns one thread accepting job submissions (map scheduling) and one
    collecting worker completions (reduce scheduling); both share the
    workers and jobs dicts.
    """
    config, scheduling_algo = read_args()
    init_logging(scheduling_algo)
    workers = preprocess_workers(config["workers"])
    jobs = {}
    for listener in (listen_for_jobs, listen_to_workers):
        threading.Thread(target=listener, args=[workers, scheduling_algo, jobs]).start()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
179755 | #
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
from ipaddress import IPv4Address, IPv6Address
from typing import List
from fastapi import HTTPException
from starlette.status import HTTP_400_BAD_REQUEST
from app.db.repositories.base import BaseRepository
from app.models.experiment import ExperimentCreate, ExperimentInDB, ExperimentUpdate
from app.services.authentication import MoonlandingUser
COLUMNS = "id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at"
CREATE_EXPERIMENT_QUERY = """
INSERT INTO experiments (organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key)
VALUES (:organization_name, :model_name, :creator, :coordinator_ip, :coordinator_port, :auth_server_public_key, :auth_server_private_key)
RETURNING id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at;
"""
GET_EXPERIMENT_BY_ID_QUERY = """
SELECT id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at
FROM experiments
WHERE id = :id;
"""
GET_EXPERIMENT_BY_ORGANIZATON_AND_MODEL_NAME_QUERY = """
SELECT id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at
FROM experiments
WHERE model_name = :model_name
AND organization_name = :organization_name;
"""
LIST_ALL_USER_EXPERIMENTS_QUERY = """
SELECT id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at
FROM experiments
WHERE creator = :creator;
"""
UPDATE_EXPERIMENT_BY_ID_QUERY = """
UPDATE experiments
SET organization_name = :organization_name,
model_name = :model_name,
coordinator_ip = :coordinator_ip,
coordinator_port = :coordinator_port,
creator = :creator
WHERE id = :id
RETURNING id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at;
"""
DELETE_EXPERIMENT_BY_ID_QUERY = """
DELETE FROM experiments
WHERE id = :id
RETURNING id;
"""
class ExperimentsRepository(BaseRepository):
    """All database actions associated with the Experiment resource."""

    @staticmethod
    def _stringify_coordinator_ip(values):
        """Replace an ipaddress object under "coordinator_ip" with its string
        form, in place (the DB driver cannot bind IPv4Address/IPv6Address
        objects). Returns the same dict for call-site convenience.
        """
        ip = values.get("coordinator_ip")
        if isinstance(ip, (IPv4Address, IPv6Address)):
            values["coordinator_ip"] = str(ip)
        return values

    async def create_experiment(
        self, *, new_experiment: ExperimentCreate, requesting_user: MoonlandingUser
    ) -> ExperimentInDB:
        """Insert a new experiment owned by *requesting_user* and return it."""
        values = self._stringify_coordinator_ip(
            {**new_experiment.dict(), "creator": requesting_user.username}
        )
        experiment = await self.db.fetch_one(query=CREATE_EXPERIMENT_QUERY, values=values)
        return ExperimentInDB(**experiment)

    async def get_experiment_by_organization_and_model_name(
        self, *, organization_name: str, model_name: str
    ) -> ExperimentInDB:
        """Look up one experiment by its (organization, model) pair.

        Returns None when no such experiment exists.
        """
        experiment = await self.db.fetch_one(
            query=GET_EXPERIMENT_BY_ORGANIZATON_AND_MODEL_NAME_QUERY,
            values={"organization_name": organization_name, "model_name": model_name},
        )
        if not experiment:
            return None
        return ExperimentInDB(**experiment)

    async def get_experiment_by_id(self, *, id: int) -> ExperimentInDB:
        """Look up one experiment by primary key; None if absent."""
        experiment = await self.db.fetch_one(query=GET_EXPERIMENT_BY_ID_QUERY, values={"id": id})
        if not experiment:
            return None
        return ExperimentInDB(**experiment)

    async def list_all_user_experiments(self, requesting_user: MoonlandingUser) -> List[ExperimentInDB]:
        """Return every experiment created by *requesting_user*."""
        experiment_records = await self.db.fetch_all(
            query=LIST_ALL_USER_EXPERIMENTS_QUERY, values={"creator": requesting_user.username}
        )
        return [ExperimentInDB(**exp) for exp in experiment_records]

    async def update_experiment_by_id(self, *, id_exp: int, experiment_update: ExperimentUpdate) -> ExperimentInDB:
        """Apply a partial update to an experiment.

        Returns the updated row, or None if the experiment does not exist.

        Raises:
            HTTPException: 400 when the database rejects the new values.
        """
        experiment = await self.get_experiment_by_id(id=id_exp)
        if not experiment:
            return None
        # Merge only the fields the caller actually supplied over the
        # current row, then drop the columns the UPDATE query never sets.
        experiment_update_params = experiment.copy(update=experiment_update.dict(exclude_unset=True))
        values = experiment_update_params.dict(
            exclude={
                "auth_server_public_key",
                "auth_server_private_key",
                "created_at",
                "updated_at",
            }
        )
        self._stringify_coordinator_ip(values)
        try:
            updated_experiment = await self.db.fetch_one(query=UPDATE_EXPERIMENT_BY_ID_QUERY, values=values)
        except Exception as e:
            # Chain the driver error instead of print()-ing it so the root
            # cause is preserved in tracebacks/logs.
            raise HTTPException(status_code=HTTP_400_BAD_REQUEST, detail="Invalid update params.") from e
        return ExperimentInDB(**updated_experiment)

    async def delete_experiment_by_id(self, *, id: int) -> int:
        """Delete an experiment by id; returns its id, or None if absent."""
        experiment = await self.get_experiment_by_id(id=id)
        if not experiment:
            return None
        deleted_id = await self.db.execute(query=DELETE_EXPERIMENT_BY_ID_QUERY, values={"id": id})
        return deleted_id
| StarcoderdataPython |
1750680 | import json
from logging.config import dictConfig
from typing import List, Dict
from allennlp.models import load_archive
from allennlp.predictors import Predictor
from fever.api.web_server import fever_web_api
from fever.evidence.retrieval_methods.retrieval_method import RetrievalMethod
import os
import logging
from fever.evidence.retrieval_methods.top_docs import TopNDocsTopNSents
from fever.reader import FEVERDocumentDatabase
def predict_single(predictor, retrieval_method, instance):
    """Run evidence retrieval and claim verification for one claim.

    Retrieves candidate evidence sentences for the claim, feeds claim +
    evidence to the AllenNLP predictor, and decodes the highest-scoring
    label.

    Returns:
        dict with "predicted_label" and "predicted_evidence".
    """
    claim = instance["claim"]
    evidence = retrieval_method.get_sentences_for_claim(claim)
    allen_instance = predictor._json_to_instance(
        {"claim": claim, "predicted_sentences": evidence}
    )
    prediction = predictor.predict_instance(allen_instance)
    logits = prediction["label_logits"]
    best_index = logits.index(max(logits))
    label = predictor._model.vocab.get_token_from_index(best_index, namespace="labels")
    return {"predicted_label": label, "predicted_evidence": evidence}
def make_api():
    """Wire up the FEVER demo web API.

    Loads configuration from the JSON file at $CONFIG_PATH (default:
    configs/predict_docker.json), builds the document database and
    sentence-retrieval component, loads the pretrained AllenNLP verifier,
    and returns the web app produced by fever_web_api().
    """
    logger = logging.getLogger()
    dictConfig({
        'version': 1,
        'formatters': {'default': {
            'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
        }},
        'handlers': {'wsgi': {
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stderr',
            'formatter': 'default'
        }},
        'root': {
            'level': 'INFO',
            'handlers': ['wsgi']
        },
        # Fix: named-logger configuration must sit under 'loggers'; as a
        # top-level key (as before) dictConfig silently ignored it.
        'loggers': {
            'allennlp': {
                'level': 'INFO',
                'handlers': ['wsgi'],
                # Root already has the same handler; avoid double logging.
                'propagate': False,
            },
        },
    })

    logger.info("My sample FEVER application")
    # Fix: close the config file instead of leaking the handle.
    with open(os.getenv("CONFIG_PATH", "configs/predict_docker.json")) as config_file:
        config = json.load(config_file)

    # Create document retrieval model
    logger.info("Load FEVER Document database from {0}".format(config["database"]))
    db = FEVERDocumentDatabase(config["database"])

    logger.info("Load DrQA Document retrieval index from {0}".format(config['index']))
    retrieval_method = RetrievalMethod.by_name("top_docs")(db,
                                                           config["index"],
                                                           config["n_docs"],
                                                           config["n_sents"])

    # Load the pre-trained predictor and model from the .tar.gz in the config file.
    # Override the database location for our model as this now comes from a read-only volume
    logger.info("Load Model from {0}".format(config['model']))
    archive = load_archive(config["model"],
                           cuda_device=config["cuda_device"],
                           overrides='{"dataset_reader":{"database":"' + config["database"] + '" }}')

    predictor = Predictor.from_archive(archive, predictor_name="fever")

    def baseline_predict(instances):
        """Batch entry point handed to the web server: one prediction per instance."""
        return [predict_single(predictor, retrieval_method, instance) for instance in instances]

    return fever_web_api(baseline_predict)
| StarcoderdataPython |
1655316 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-05 18:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11.16).

    Converts Job.limit and JobTemplate.limit to TextField so host-limit
    expressions of arbitrary length fit.
    NOTE(review): the prior field type is not visible here — presumably a
    length-capped CharField; confirm against earlier migrations. Avoid
    hand-editing generated migrations.
    """

    dependencies = [
        ('main', '0057_v350_remove_become_method_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='limit',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='limit',
            field=models.TextField(blank=True, default=''),
        ),
    ]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.