id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3352412 | """
Author : <NAME>
"""
| StarcoderdataPython |
3384502 | <reponame>ShamanthNyk/wcep-mds-dataset<filename>experiments/baselines.py
import utils
import random
import collections
import numpy as np
import networkx as nx
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import MiniBatchKMeans
from summarizer import Summarizer
warnings.filterwarnings('ignore', category=RuntimeWarning)
random.seed(24)
class RandomBaseline(Summarizer):
    """Baseline that builds a summary from uniformly random sentences.

    Sentences are shuffled, then greedily added while the summary stays
    within ``max_len`` and each sentence's token count lies in
    [min_sent_tokens, max_sent_tokens].
    """

    def summarize(self,
                  articles,
                  max_len=40,
                  len_type='words',
                  in_titles=False,
                  out_titles=False,
                  min_sent_tokens=7,
                  max_sent_tokens=40):
        articles = self._preprocess(articles)
        candidates = [sent for article in articles for sent in article.sents]
        if in_titles == False or out_titles == False:
            candidates = [sent for sent in candidates if not sent.is_title]
        candidates = self._deduplicate(candidates)
        lengths = [self._sent_len(sent, len_type) for sent in candidates]

        order = list(range(len(candidates)))
        random.shuffle(order)

        picked = []
        total_len = 0
        for idx in order:
            proposed_len = total_len + lengths[idx]
            if proposed_len <= max_len:
                n_tokens = len(candidates[idx].words)
                if not (min_sent_tokens <= n_tokens <= max_sent_tokens):
                    continue
                picked.append(idx)
                total_len = proposed_len
            if total_len >= max_len:
                break

        return ' '.join(candidates[idx].text for idx in picked)
class RandomLead(Summarizer):
    """Baseline that returns the leading sentences of one random article.

    Articles are tried in random order; the first article that yields at
    least one admissible sentence provides the whole summary.
    """

    def summarize(self,
                  articles,
                  max_len=40,
                  len_type='words',
                  in_titles=False,
                  out_titles=False,
                  min_sent_tokens=7,
                  max_sent_tokens=40):
        order = list(range(len(articles)))
        random.shuffle(order)

        summary = ''
        for idx in order:
            article = self._preprocess([articles[idx]])[0]
            sents = article.sents
            if in_titles == False or out_titles == False:
                sents = [sent for sent in sents if not sent.is_title]

            chosen_texts = []
            total_len = 0
            for sent in sents:
                proposed_len = total_len + self._sent_len(sent, len_type)
                if proposed_len <= max_len:
                    n_tokens = len(sent.words)
                    if not (min_sent_tokens <= n_tokens <= max_sent_tokens):
                        continue
                    chosen_texts.append(sent.text)
                    total_len = proposed_len
                else:
                    # First sentence that would overflow ends this article.
                    break

            if chosen_texts:
                summary = ' '.join(chosen_texts)
                break

        return summary
class TextRankSummarizer(Summarizer):
    """TextRank baseline: PageRank over a TF-IDF cosine-similarity graph.

    Sentences are ranked by PageRank score and greedily added to the summary,
    skipping redundant or badly sized sentences, until ``max_len`` is reached.
    """

    def __init__(self, max_redundancy=0.5):
        # Maximum allowed similarity between a candidate and the sentences
        # already selected (checked via Summarizer._is_redundant).
        self.max_redundancy = max_redundancy

    def _compute_page_rank(self, S):
        """Returns PageRank scores (list of float) for similarity matrix S."""
        graph = nx.from_numpy_matrix(S)
        rank_by_node = nx.pagerank(graph, weight='weight')
        return [rank_by_node[node] for node in range(S.shape[0])]

    def summarize(self,
                  articles,
                  max_len=40,
                  len_type='words',
                  in_titles=False,
                  out_titles=False,
                  min_sent_tokens=7,
                  max_sent_tokens=40):
        articles = self._preprocess(articles)
        sents = [sent for article in articles for sent in article.sents]
        if in_titles == False:
            sents = [sent for sent in sents if not sent.is_title]
        sents = self._deduplicate(sents)
        lengths = [self._sent_len(sent, len_type) for sent in sents]
        texts = [sent.text for sent in sents]

        vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
        similarity_matrix = cosine_similarity(vectorizer.fit_transform(texts))
        scores = self._compute_page_rank(similarity_matrix)

        ranking = sorted(enumerate(scores), key=lambda pair: pair[1],
                         reverse=True)
        if not out_titles:
            ranking = [(i, score) for (i, score) in ranking
                       if not sents[i].is_title]

        chosen = []
        total_len = 0
        for i, _ in ranking:
            proposed_len = total_len + lengths[i]
            if proposed_len > max_len:
                continue
            if self._is_redundant(sents, chosen, i, self.max_redundancy):
                continue
            n_tokens = len(sents[i].words)
            if not (min_sent_tokens <= n_tokens <= max_sent_tokens):
                continue
            chosen.append(i)
            total_len = proposed_len

        return ' '.join(sents[i].text for i in chosen)
class CentroidSummarizer(Summarizer):
    """Ranks sentences by cosine similarity to the TF-IDF centroid.

    All sentences are embedded with TF-IDF; each sentence is scored by its
    similarity to the mean (centroid) vector, and high-scoring,
    non-redundant sentences are greedily added until ``max_len`` is reached.
    """

    def __init__(self, max_redundancy=0.5):
        # Maximum allowed similarity between a candidate and the sentences
        # already selected (checked via Summarizer._is_redundant).
        self.max_redundancy = max_redundancy

    def summarize(self,
                  articles,
                  max_len=40,
                  len_type='words',
                  in_titles=False,
                  out_titles=False,
                  min_sent_tokens=7,
                  max_sent_tokens=40):
        """Returns a summary string (may be empty if no sentence is usable)."""
        articles = self._preprocess(articles)
        sents = [s for a in articles for s in a.sents]
        if in_titles == False:
            sents = [s for s in sents if not s.is_title]
        sents = self._deduplicate(sents)
        sent_lens = [self._sent_len(s, len_type) for s in sents]
        raw_sents = [s.text for s in sents]
        vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
        try:
            X = vectorizer.fit_transform(raw_sents)
        except ValueError:
            # TfidfVectorizer raises ValueError when the vocabulary is empty
            # (no sentences at all, or sentences made only of stop words).
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed to the actual expected failure.
            return ''
        centroid = X.mean(0)
        scores = cosine_similarity(X, centroid)
        scored = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)
        if not out_titles:
            scored = [(i, score) for (i, score) in scored
                      if not sents[i].is_title]
        current_len = 0
        selected = []
        for i, _ in scored:
            new_len = current_len + sent_lens[i]
            if new_len <= max_len:
                if self._is_redundant(
                        sents, selected, i, self.max_redundancy):
                    continue
                if not (min_sent_tokens <= len(
                        sents[i].words) <= max_sent_tokens):
                    continue
                selected.append(i)
                current_len = new_len
        summary_sents = [sents[i].text for i in selected]
        return ' '.join(summary_sents)
class SubmodularSummarizer(Summarizer):
    """
    Selects a combination of sentences as a summary by greedily optimizing
    a submodular function, in this case two functions representing
    coverage and diversity of the sentence combination.
    """
    def __init__(self, a=5, div_weight=6, cluster_factor=0.2):
        # a: scales the per-sentence coverage saturation threshold
        # (alpha = a / n_sentences in `optimize`).
        self.a = a
        # div_weight: weight of the diversity term relative to coverage.
        self.div_weight = div_weight
        # cluster_factor: fraction of sentences used as the number of
        # KMeans clusters in `cluster_sentences`.
        self.cluster_factor = cluster_factor
    def cluster_sentences(self, X):
        """Clusters TF-IDF sentence vectors.

        Returns a dict mapping sentence index -> cluster label.
        """
        n = X.shape[0]
        n_clusters = round(self.cluster_factor * n)
        if n_clusters <= 1 or n <= 2:
            # Too few sentences to cluster meaningfully: one shared label.
            return dict((i, 1) for i in range(n))
        clusterer = MiniBatchKMeans(
            n_clusters=n_clusters,
            init_size=3 * n_clusters
        )
        labels = clusterer.fit_predict(X)
        i_to_label = dict((i, l) for i, l in enumerate(labels))
        return i_to_label
    def compute_summary_coverage(self,
                                 alpha,
                                 summary_indices,
                                 sent_coverages,
                                 pairwise_sims):
        """Coverage term of the objective.

        Sums, over all sentences i, the similarity of i to the candidate
        summary, saturated at alpha * (generic coverage of i) so no single
        sentence can dominate the score.
        """
        cov = 0
        for i, i_generic_cov in enumerate(sent_coverages):
            i_summary_cov = sum([pairwise_sims[i, j] for j in summary_indices])
            i_cov = min(i_summary_cov, alpha * i_generic_cov)
            cov += i_cov
        return cov
    def compute_summary_diversity(self,
                                  summary_indices,
                                  ix_to_label,
                                  avg_sent_sims):
        """Diversity term of the objective.

        Groups selected sentences by cluster; the sqrt per cluster gives
        diminishing returns for repeatedly drawing from the same cluster.
        """
        cluster_to_ixs = collections.defaultdict(list)
        for i in summary_indices:
            l = ix_to_label[i]
            cluster_to_ixs[l].append(i)
        div = 0
        for l, l_indices in cluster_to_ixs.items():
            cluster_score = sum([avg_sent_sims[i] for i in l_indices])
            cluster_score = np.sqrt(cluster_score)
            div += cluster_score
        return div
    def optimize(self,
                 sents,
                 max_len,
                 len_type,
                 ix_to_label,
                 pairwise_sims,
                 sent_coverages,
                 avg_sent_sims,
                 out_titles,
                 min_sent_tokens,
                 max_sent_tokens):
        """Greedy maximization of coverage + div_weight * diversity.

        Repeatedly adds the sentence whose inclusion yields the highest
        objective score, subject to the length budget, then returns the
        best-scoring prefix of the greedy sequence (list of indices).
        """
        alpha = self.a / len(sents)
        sent_lens = [self._sent_len(s, len_type) for s in sents]
        current_len = 0
        remaining = set(range(len(sents)))
        # Pre-filter: drop sentences with bad token counts, and titles if
        # titles are not allowed in the output.
        for i, s in enumerate(sents):
            bad_length = not (min_sent_tokens <= len(sents[i].words)
                              <= max_sent_tokens)
            if bad_length:
                remaining.remove(i)
            elif out_titles == False and s.is_title:
                remaining.remove(i)
        selected = []
        scored_selections = []
        while current_len < max_len and len(remaining) > 0:
            scored = []
            for i in remaining:
                new_len = current_len + sent_lens[i]
                if new_len <= max_len:
                    summary_indices = selected + [i]
                    cov = self.compute_summary_coverage(
                        alpha, summary_indices, sent_coverages, pairwise_sims)
                    div = self.compute_summary_diversity(
                        summary_indices, ix_to_label, avg_sent_sims)
                    score = cov + self.div_weight * div
                    scored.append((i, score))
            if len(scored) == 0:
                break
            scored.sort(key=lambda x: x[1], reverse=True)
            best_idx, best_score = scored[0]
            scored_selections.append((selected + [best_idx], best_score))
            current_len += sent_lens[best_idx]
            selected.append(best_idx)
            remaining.remove(best_idx)
        scored_selections.sort(key=lambda x: x[1], reverse=True)
        # NOTE(review): raises IndexError if no sentence ever fits the
        # budget (scored_selections empty) -- presumably never happens with
        # real input; confirm upstream guarantees.
        best_selection = scored_selections[0][0]
        return best_selection
    def summarize(self,
                  articles,
                  max_len=40,
                  len_type='words',
                  in_titles=False,
                  out_titles=False,
                  min_sent_tokens=7,
                  max_sent_tokens=40):
        """Builds the summary string via `optimize` over TF-IDF vectors."""
        articles = self._preprocess(articles)
        sents = [s for a in articles for s in a.sents]
        if in_titles == False:
            sents = [s for s in sents if not s.is_title]
        sents = self._deduplicate(sents)
        raw_sents = [s.text for s in sents]
        vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
        X = vectorizer.fit_transform(raw_sents)
        ix_to_label = self.cluster_sentences(X)
        pairwise_sims = cosine_similarity(X)
        # Generic coverage of a sentence = its total similarity to all
        # sentences; average similarity is used by the diversity term.
        sent_coverages = pairwise_sims.sum(0)
        avg_sent_sims = sent_coverages / len(sents)
        selected = self.optimize(
            sents, max_len, len_type, ix_to_label,
            pairwise_sims, sent_coverages, avg_sent_sims,
            out_titles, min_sent_tokens, max_sent_tokens
        )
        summary = [sents[i].text for i in selected]
        return ' '.join(summary)
| StarcoderdataPython |
3303184 | <filename>pyxadapterlib/pyxadapterlib/xroadclient.py
"""
Base class of a X-road SOAP client
Author: <NAME>
"""
import string
from random import Random
import os
import httplib2
import socket
from datetime import datetime
import re
import stat
from lxml import etree
from lxml.builder import ElementMaker
import logging
log = logging.getLogger(__name__)
from .xutils import (
NS,
E,
SoapFault,
get_text,
get_int,
get_boolean,
outer_xml,
tree_to_dict,
make_log_day_path,
)
from . import attachment
# X-road protocol version identifiers (selects header/namespace layout).
XVER4 = 4  # X-road message protocol 4.0
XVER3 = 3  # legacy X-road 3.1 protocol
class XroadClient(object):
    """
    Base class of an X-road SOAP client.

    Builds doc/literal SOAP requests for X-road protocol 3.1 or 4.0, posts
    them (with optional MIME attachments) to a security server over HTTP,
    and decodes the response envelope into a dict. Subclasses are expected
    to set `producer` and `namespace`.
    """
    # NOTE(review): `settings`, `_caller` and `_service` are mutable
    # class-level defaults; instances that never reassign them share one
    # dict -- confirm this is intended.
    security_server = None  # security server IP (may be with :port)
    security_server_uri = '/cgi-bin/consumer_proxy'
    userId = None  # user ID value in SOAP header
    handler = None  # view handler
    producer = None  # data provider ID (used in configuration)
    namespace = None  # data provider's namespace
    settings = {}  # configuration settings
    xver = XVER4  # X-road protocol version
    _consumer = None  # X-road 3.1 <consumer> header value
    _producer = None  # X-road 3.1 <producer> header value
    _caller = {}  # X-road 4.0 <client> header values
    _service = {}  # X-road 4.0 <service> header values
    xml_response = None  # response XML
    def __init__(self, handler=None, security_server=None, userId=None, settings=None):
        """
        Parameters:
        handler - view handler (to obtain settings from)
        security_server - HOST or HOST:PORT
        userId - user ID with country prefix (ISO2)
        settings - config settings dict; if missing, will be obtained from handler
        """
        if handler:
            self.handler = handler
            if not settings:
                # NOTE(review): looks like a Pyramid-style request registry
                # (`handler.request.registry.settings`) -- confirm.
                settings = handler.request.registry.settings
        if settings:
            self.settings = settings
            db = self.producer
            self._caller = self._get_client_data(db)
            self._service = self._get_server_data(db)
            self._consumer = self._get_setting('consumer', db)
            self._producer = self._get_setting('producer', db) or db
            self.security_server = self._get_setting('security_server', db)
            self.security_server_uri = self._get_setting('security_server_uri', db) or \
                self.security_server_uri
            # NOTE(review): key/cert are stored but never used by
            # `_send_http` (kwargs stays empty) -- client TLS auth appears
            # unfinished; confirm.
            self.key = self._get_setting('key', db)
            self.cert = self._get_setting('cert', db)
            # NOTE(review): log_dir (and key/cert) are only set on this
            # branch; constructing without settings leaves them undefined
            # and `_trace_msg` would raise AttributeError -- confirm.
            self.log_dir = self._get_setting('log_dir', db)
        else:
            self.security_server = security_server
        self.userId = userId
    def _get_client_data(self, db):
        # Consumer's (client's) identifiers for the X-road 4.0 header.
        return dict(
            xRoadInstance = self._get_setting('client.xRoadInstance', db),
            memberClass = self._get_setting('client.memberClass', db),
            memberCode = self._get_setting('client.memberCode', db),
            subsystemCode = self._get_setting('client.subsystemCode', db),
        )
    def _get_server_data(self, db):
        # Provider's (service's) identifiers for the X-road 4.0 header;
        # xRoadInstance falls back to the client's instance.
        return dict(
            xRoadInstance = self._get_db_setting('xRoadInstance', db) or self._caller['xRoadInstance'],
            memberClass = self._get_db_setting('memberClass', db),
            memberCode = self._get_db_setting('memberCode', db),
            subsystemCode = self._get_db_setting('subsystemCode', db),
        )
    def _get_db_setting(self, key, db):
        # Provider-specific setting, keyed as "<producer>.xroad.<key>".
        return self.settings.get('%s.xroad.%s' % (db, key))
    def _get_setting(self, key, db):
        # Provider-specific setting with fallback to global "xroad.<key>".
        return self._get_db_setting(key, db) or self.settings.get('xroad.%s' % key)
    def allowedMethods(self):
        "Ask list of permitted services"
        items = []
        if self.xver == XVER3:
            list_path = ['/response/service']
            res = self.call('allowedMethods', E.allowedMethods(), None, list_path=list_path)
            items = res['response'].get('service') or []
        elif self.xver == XVER4:
            list_path = ['/response/service']
            res = self.call('allowedMethods', E.allowedMethods(), 'v1', list_path=list_path)
            items = res['response'].get('service') or []
        return items
    # NOTE(review): mutable default arguments (attachments=[], list_path=[])
    # are shared across calls; safe only while they are never mutated here
    # -- confirm and consider None defaults.
    def call(self, service_name, params, service_version='v1', attachments=[], list_path=[]):
        """
        Call X-road service
        - service_name - short name of service
        - params - input parameters as XML object
        Returns the response converted to a dict, or raises SoapFault.
        """
        self.xml_response = ''
        self.response_attachments = []
        if service_name == 'allowedMethods':
            # meta service belongs to X-road namespace
            ns = self.xver == XVER3 and NS.XROAD3 or NS.XROAD4
        else:
            # data service belongs to data provider's own namespace
            ns = self.namespace
        # generate SOAP envelope
        xml_request = self._gen_envelope(service_name, params, service_version, ns)
        try:
            # execute call
            xml_response = self.send_xml(service_name, xml_request, attachments, ns)
            # create XML object for response envelope and find Body element
            root = etree.fromstring(xml_response.encode('utf-8'))
            body = root.find(NS._SOAPENV+'Body')
            if body is not None:
                # detect SOAP fault message
                response = body.find('*')
                if response.tag == NS._SOAPENV+'Fault':
                    try:
                        detail = response.find('detail').find('message').text
                    except:
                        # <detail>/<message> is optional in a Fault.
                        detail = None
                    raise SoapFault(response.find('faultcode').text,
                                    response.find('faultstring').text,
                                    detail)
                if response is not None:
                    # convert XML to dict
                    return tree_to_dict(response, '', list_path)
        except SoapFault as e:
            # Log the full request/response pair, then re-raise with a
            # caller-friendly message.
            msg = 'X-road SOAP fault'
            buf = '%s (%s, %s)' % (msg, self.producer, service_name) +\
                '\n' + e.faultstring +\
                '\nSecurity server: %s' % (self.security_server) +\
                '\n\n' + self.xml_response +\
                '\n\nInput:\n' + xml_request
            log.error(buf)
            raise SoapFault(None, 'X-road service failed (%s: %s)' % (self.producer, e.faultstring))
        except httplib2.ServerNotFoundError as e:
            msg = 'X-road server not found'
            buf = '%s\nSecurity server: %s' % (msg, self.security_server)
            buf += '\n' + str(e)
            log.error(buf)
            raise SoapFault(None, msg)
        except socket.error as e:
            msg = 'No access to X-road'
            buf = '%s (socket.error)\nSecurity server: %s' % (msg, self.security_server)
            log.error(buf)
            raise SoapFault(None, msg)
    def send_xml(self, service_name, xml, attachments=[], namespace=None):
        "Send input message to security server and receive output message"
        args = {}
        prot = 'http'
        # SOAP server URL (at security server)
        url = '%s://%s%s' % (prot, self.security_server, self.security_server_uri)
        self.xml_request = xml
        self._trace_msg(service_name, 'in.xml', xml)
        # compose HTTP message
        payload, headers, body = attachment.encode_soap(self.xml_request, attachments)
        # send message
        response = self._send_http(url, body, headers)
        # decode envelope and attachments
        self.xml_response, self.response_attachments = attachment.decode(response)
        self._trace_msg(service_name, 'out.xml', self.xml_response)
        #log.debug('REQUEST:\n%s\nRESPONSE:\n%s\n' % (self.xml_request, self.xml_response))
        return self.xml_response
    def _send_http(self, url, xml, headers):
        "Send message over HTTP"
        kwargs = {}
        response, response_body = httplib2.Http().request(
            url, "POST", body=xml, headers=headers, **kwargs)
        # reconstruct whole message for MIME parsing later
        buf = ''
        for key, value in response.items():
            buf += '%s: %s\r\n' % (key, value)
        response = buf + '\r\n' + response_body.decode('utf-8')
        return response
    def _gen_envelope(self, service_name, params, service_version, namespace):
        "Compose SOAP envelope"
        # params is SOAP doc/literal wrapper element and must be named by name of the service
        params.tag = '{%s}%s' % (namespace, service_name)
        nsmap = {'soap': NS.SOAP11,
                 'soapenc': NS.SOAPENC,
                 'xsi': NS.XSI,
                 'xsd': NS.XSD,
                 'a': namespace
                 }
        if self.xver == XVER3:
            nsmap['xrd'] = NS.XROAD3
        else:
            nsmap['xrd'] = NS.XROAD4
            nsmap['id'] = NS.XROAD4ID
        e = ElementMaker(namespace=NS.SOAP11, nsmap=nsmap)
        header = self._gen_header(service_name, service_version)
        envelope = e.Envelope(header, e.Body(params))
        return outer_xml(envelope, True)
    def _gen_header(self, service_name, service_version):
        "Compose SOAP header (layout depends on protocol version)"
        if self.xver == XVER3:
            soap = ElementMaker(namespace=NS.SOAP11)
            xrd = ElementMaker(namespace=NS.XROAD3)
            # X-road 3.1: service addressed as "producer.service.version".
            service = '%s.%s.%s' % (self._producer, service_name, service_version)
            header = soap.Header(xrd.consumer(self._consumer),
                                 xrd.producer(self._producer),
                                 xrd.service(service),
                                 xrd.id(self._gen_nonce()),
                                 xrd.userId(self.userId)
                                 )
        elif self.xver == XVER4:
            soap = ElementMaker(namespace=NS.SOAP11)
            xrd = ElementMaker(namespace=NS.XROAD4)
            xid = ElementMaker(namespace=NS.XROAD4ID)
            # X-road 4.0: structured <client> and <service> identifiers.
            c = self._caller
            client = xrd.client(xid.xRoadInstance(c['xRoadInstance']),
                                xid.memberClass(c['memberClass']),
                                xid.memberCode(c['memberCode']),
                                xid.subsystemCode(c['subsystemCode']))
            client.set('{%s}objectType' % NS.XROAD4ID, 'SUBSYSTEM')
            s = self._service
            service = xrd.service(xid.xRoadInstance(s['xRoadInstance']),
                                  xid.memberClass(s['memberClass']),
                                  xid.memberCode(s['memberCode']),
                                  xid.subsystemCode(s['subsystemCode']),
                                  xid.serviceCode(service_name),
                                  xid.serviceVersion(service_version))
            service.set('{%s}objectType' % NS.XROAD4ID, 'SERVICE')
            header = soap.Header(client,
                                 service,
                                 xrd.userId(self.userId),
                                 xrd.id(self._gen_nonce()),
                                 xrd.protocolVersion('4.0'))
        return header
    def _gen_nonce(self):
        "Generate unique id for service call"
        # NOTE(review): random.Random is not cryptographically secure; for a
        # request correlation id that is presumably fine -- confirm no
        # security property is expected of this id.
        return ''.join(Random().sample(string.ascii_letters+string.digits, 32))
    def _trace_msg(self, method, ext, data):
        "Log input and output messages"
        if self.log_dir:
            prefix = make_log_day_path(self.log_dir)
            fn = '%s.%s.%s.%s' % (prefix, self.producer, method, ext)
            with open(fn, 'w') as file:
                file.write(data)
            # Owner read/write; group/other read-only.
            os.chmod(fn, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
| StarcoderdataPython |
148500 | """Makes event-attribution schematics for 2019 tornado-prediction paper."""
import numpy
import pandas
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pyplot
from descartes import PolygonPatch
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import storm_plotting
from gewittergefahr.plotting import imagemagick_utils
# Column names used in the synthetic storm-object and tornado tables.
TORNADIC_FLAG_COLUMN = 'is_tornadic'
SPECIAL_FLAG_COLUMN = 'is_main_tornadic_link'
POLYGON_COLUMN = 'polygon_object_xy_metres'
TORNADO_TIME_COLUMN = 'valid_time_unix_sec'
TORNADO_X_COLUMN = 'x_coord_metres'
TORNADO_Y_COLUMN = 'y_coord_metres'
# Vertex coordinates (closed rings, centred on the origin) for the shapes
# used to draw storm outlines; scaled/translated per storm object later.
SQUARE_X_COORDS = 2 * numpy.array([-1, -1, 1, 1, -1], dtype=float)
SQUARE_Y_COORDS = numpy.array([-1, 1, 1, -1, -1], dtype=float)
THIS_NUM = numpy.sqrt(3) / 2
HEXAGON_X_COORDS = 2 * numpy.array([1, 0.5, -0.5, -1, -0.5, 0.5, 1])
HEXAGON_Y_COORDS = numpy.array([
    0, -THIS_NUM, -THIS_NUM, 0, THIS_NUM, THIS_NUM, 0
])
THIS_NUM = numpy.sqrt(2) / 2
OCTAGON_X_COORDS = 2 * numpy.array([
    1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0, THIS_NUM, 1
])
OCTAGON_Y_COORDS = numpy.array([
    0, THIS_NUM, 1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0
])
# Colours as RGB triples in [0, 1].
TRACK_COLOUR = numpy.full(3, 0.)
MIDPOINT_COLOUR = numpy.full(3, 152. / 255)
TORNADIC_STORM_COLOUR = numpy.array([117, 112, 179], dtype=float) / 255
NON_TORNADIC_STORM_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
NON_INTERP_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
INTERP_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
# Font sizes and global matplotlib rc settings.
DEFAULT_FONT_SIZE = 40
SMALL_LEGEND_FONT_SIZE = 30
TEXT_OFFSET = 0.25
pyplot.rc('font', size=DEFAULT_FONT_SIZE)
pyplot.rc('axes', titlesize=DEFAULT_FONT_SIZE)
pyplot.rc('axes', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('xtick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('ytick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('legend', fontsize=DEFAULT_FONT_SIZE)
pyplot.rc('figure', titlesize=DEFAULT_FONT_SIZE)
# Line/marker/figure appearance settings.
TRACK_WIDTH = 4
POLYGON_OPACITY = 0.5
DEFAULT_MARKER_TYPE = 'o'
DEFAULT_MARKER_SIZE = 24
DEFAULT_MARKER_EDGE_WIDTH = 4
TORNADIC_STORM_MARKER_TYPE = 'v'
TORNADIC_STORM_MARKER_SIZE = 48
TORNADIC_STORM_MARKER_EDGE_WIDTH = 0
TORNADO_MARKER_TYPE = 'v'
TORNADO_MARKER_SIZE = 48
TORNADO_MARKER_EDGE_WIDTH = 0
FIGURE_WIDTH_INCHES = 15
FIGURE_HEIGHT_INCHES = 15
FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)
OUTPUT_DIR_NAME = (
    '/localdata/ryan.lagerquist/eager/prediction_paper_2019/attribution_schemas'
)
def _get_data_for_interp_with_split():
    """Creates synthetic data for interpolation with a storm split.

    :return: storm_object_table: pandas DataFrame with one row per storm
        object and the following columns.
    storm_object_table.primary_id_string: Primary storm ID.
    storm_object_table.secondary_id_string: Secondary storm ID.
    storm_object_table.valid_time_unix_sec: Valid time.
    storm_object_table.centroid_x_metres: x-coordinate of centroid.
    storm_object_table.centroid_y_metres: y-coordinate of centroid.
    storm_object_table.polygon_object_xy_metres: Storm outline (instance of
        `shapely.geometry.Polygon`).
    storm_object_table.first_prev_secondary_id_string: Secondary ID of first
        predecessor ("" if no predecessors).
    storm_object_table.second_prev_secondary_id_string: Secondary ID of second
        predecessor ("" if only one predecessor).
    storm_object_table.first_next_secondary_id_string: Secondary ID of first
        successor ("" if no successors).
    storm_object_table.second_next_secondary_id_string: Secondary ID of second
        successor ("" if no successors).
    :return: tornado_table: pandas DataFrame with columns
        `valid_time_unix_sec`, `x_coord_metres`, `y_coord_metres`.
    """
    primary_id_strings = ['foo'] * 5
    secondary_id_strings = ['A', 'A', 'A', 'B', 'C']
    valid_times_unix_sec = numpy.array([5, 10, 15, 20, 20], dtype=int)
    centroid_x_coords = numpy.array([2, 7, 12, 17, 17], dtype=float)
    centroid_y_coords = numpy.array([5, 5, 5, 8, 2], dtype=float)
    first_prev_sec_id_strings = ['', 'A', 'A', 'A', 'A']
    second_prev_sec_id_strings = ['', '', '', '', '']
    first_next_sec_id_strings = ['A', 'A', 'B', '', '']
    second_next_sec_id_strings = ['', '', 'C', '', '']

    # Draw each storm object with a shape keyed to its secondary ID.
    polygon_objects_xy = []
    for this_id, this_x, this_y in zip(
            secondary_id_strings, centroid_x_coords, centroid_y_coords):
        if this_id == 'B':
            vertex_x_coords = OCTAGON_X_COORDS
            vertex_y_coords = OCTAGON_Y_COORDS
        elif this_id == 'C':
            vertex_x_coords = HEXAGON_X_COORDS
            vertex_y_coords = HEXAGON_Y_COORDS
        else:
            vertex_x_coords = SQUARE_X_COORDS
            vertex_y_coords = SQUARE_Y_COORDS

        polygon_objects_xy.append(polygons.vertex_arrays_to_polygon_object(
            exterior_x_coords=this_x + vertex_x_coords / 2,
            exterior_y_coords=this_y + vertex_y_coords / 2
        ))

    storm_object_table = pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings,
        POLYGON_COLUMN: polygon_objects_xy
    })

    tornado_table = pandas.DataFrame.from_dict({
        TORNADO_TIME_COLUMN: numpy.array([18], dtype=int),
        TORNADO_X_COLUMN: numpy.array([15.]),
        TORNADO_Y_COLUMN: numpy.array([3.2])
    })

    return storm_object_table, tornado_table
def _get_data_for_interp_with_merger():
    """Creates synthetic data for interpolation with a storm merger.

    :return: storm_object_table: See doc for `_get_data_for_interp_with_split`.
    :return: tornado_table: Same.
    """
    primary_id_strings = ['foo'] * 6
    secondary_id_strings = ['A', 'B', 'A', 'B', 'C', 'C']
    valid_times_unix_sec = numpy.array([5, 5, 10, 10, 15, 20], dtype=int)
    centroid_x_coords = numpy.array([2, 2, 7, 7, 12, 17], dtype=float)
    centroid_y_coords = numpy.array([8, 2, 8, 2, 5, 5], dtype=float)
    first_prev_sec_id_strings = ['', '', 'A', 'B', 'A', 'C']
    second_prev_sec_id_strings = ['', '', '', '', 'B', '']
    first_next_sec_id_strings = ['A', 'B', 'C', 'C', 'C', '']
    second_next_sec_id_strings = ['', '', '', '', '', '']

    # Draw each storm object with a shape keyed to its secondary ID.
    polygon_objects_xy = []
    for this_id, this_x, this_y in zip(
            secondary_id_strings, centroid_x_coords, centroid_y_coords):
        if this_id == 'A':
            vertex_x_coords = OCTAGON_X_COORDS
            vertex_y_coords = OCTAGON_Y_COORDS
        elif this_id == 'B':
            vertex_x_coords = HEXAGON_X_COORDS
            vertex_y_coords = HEXAGON_Y_COORDS
        else:
            vertex_x_coords = SQUARE_X_COORDS
            vertex_y_coords = SQUARE_Y_COORDS

        polygon_objects_xy.append(polygons.vertex_arrays_to_polygon_object(
            exterior_x_coords=this_x + vertex_x_coords / 2,
            exterior_y_coords=this_y + vertex_y_coords / 2
        ))

    storm_object_table = pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings,
        POLYGON_COLUMN: polygon_objects_xy
    })

    tornado_table = pandas.DataFrame.from_dict({
        TORNADO_TIME_COLUMN: numpy.array([12], dtype=int),
        TORNADO_X_COLUMN: numpy.array([9.]),
        TORNADO_Y_COLUMN: numpy.array([3.2])
    })

    return storm_object_table, tornado_table
def _get_track1_for_simple_pred():
    """Creates synthetic data (first track) for simple predecessors.

    :return: storm_object_table: Same as the table produced by
        `_get_data_for_interp_with_split`, minus "polygon_object_xy_metres"
        and plus two boolean columns:
    storm_object_table.is_tornadic: True if the storm object is linked to a
        tornado.
    storm_object_table.is_main_tornadic_link: True if the storm object is
        the main one linked to a tornado, rather than being linked only as a
        predecessor or successor.
    """
    table_dict = {
        tracking_utils.PRIMARY_ID_COLUMN: ['foo'] * 10,
        tracking_utils.SECONDARY_ID_COLUMN:
            ['X', 'Y', 'X', 'Y', 'X', 'Y', 'Z', 'Z', 'Z', 'Z'],
        tracking_utils.VALID_TIME_COLUMN:
            numpy.array([5, 5, 10, 10, 15, 15, 20, 25, 30, 35], dtype=int),
        tracking_utils.CENTROID_X_COLUMN:
            numpy.array([2, 2, 7, 7, 12, 12, 17, 22, 27, 32], dtype=float),
        tracking_utils.CENTROID_Y_COLUMN:
            numpy.array([8, 2, 8, 2, 8, 2, 5, 5, 5, 5], dtype=float),
        TORNADIC_FLAG_COLUMN:
            numpy.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0], dtype=bool),
        SPECIAL_FLAG_COLUMN:
            numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype=bool),
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            ['', '', 'X', 'Y', 'X', 'Y', 'X', 'Z', 'Z', 'Z'],
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            ['', '', '', '', '', '', 'Y', '', '', ''],
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            ['X', 'Y', 'X', 'Y', 'Z', 'Z', 'Z', 'Z', 'Z', ''],
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            ['', '', '', '', '', '', '', '', '', '']
    }

    return pandas.DataFrame.from_dict(table_dict)
def _get_track2_for_simple_pred():
    """Creates synthetic data for simple predecessors.

    :return: storm_object_table: See doc for `_get_track1_for_simple_pred`.
    """
    primary_id_strings = ['bar'] * 17
    secondary_id_strings = [
        'A', 'A', 'A', 'B', 'C', 'B', 'C', 'B', 'C',
        'D', 'E', 'D', 'E', 'D', 'E', 'D', 'E'
    ]
    valid_times_unix_sec = numpy.array(
        [5, 10, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 40, 45, 45, 50, 50],
        dtype=int
    )
    centroid_x_coords = numpy.array(
        [2, 6, 10, 14, 14, 18, 18, 22, 22, 26, 26, 30, 30, 34, 34, 38, 38],
        dtype=float
    )
    centroid_y_coords = numpy.array(
        [10, 10, 10, 13, 7, 13, 7, 13, 7, 10, 4, 10, 4, 10, 4, 10, 4],
        dtype=float
    )
    # One flag per storm object (same order as the arrays above).
    tornadic_flags = numpy.array(
        [0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], dtype=bool
    )
    main_tornadic_flags = numpy.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype=bool
    )
    # Predecessor/successor links by secondary ID ("" = no link).
    first_prev_sec_id_strings = [
        '', 'A', 'A', 'A', 'A', 'B', 'C', 'B', 'C',
        'C', 'C', 'D', 'E', 'D', 'E', 'D', 'E'
    ]
    second_prev_sec_id_strings = [
        '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''
    ]
    first_next_sec_id_strings = [
        'A', 'A', 'B', 'B', 'C', 'B', 'C', '', 'D',
        'D', 'E', 'D', 'E', 'D', 'E', '', ''
    ]
    second_next_sec_id_strings = [
        '', '', 'C', '', '', '', '', '', 'E',
        '', '', '', '', '', '', '', ''
    ]
    return pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        TORNADIC_FLAG_COLUMN: tornadic_flags,
        SPECIAL_FLAG_COLUMN: main_tornadic_flags,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings
    })
def _get_track_for_simple_succ():
    """Creates synthetic data for simple successors.

    :return: storm_object_table: See doc for `_get_track1_for_simple_pred`.
    """
    primary_id_strings = ['moo'] * 21
    secondary_id_strings = [
        'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'C', 'D', 'C', 'D', 'C', 'D',
        'E', 'E', 'E', 'F', 'G', 'F', 'G'
    ]
    valid_times_unix_sec = numpy.array([
        5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, 50, 55,
        55, 60, 60
    ], dtype=int)
    centroid_x_coords = numpy.array([
        5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, 50, 55,
        55, 60, 60
    ], dtype=float)
    centroid_y_coords = numpy.array(
        [8, 2, 8, 2, 8, 2, 8, 2, 11, 5, 11, 5, 11, 5, 8, 8, 8, 11, 5, 11, 5],
        dtype=float
    )
    # One flag per storm object (same order as the arrays above).
    tornadic_flags = numpy.array(
        [0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0],
        dtype=bool
    )
    main_tornadic_flags = numpy.array(
        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        dtype=bool
    )
    # Predecessor/successor links by secondary ID ("" = no link).
    first_prev_sec_id_strings = [
        '', '', 'A', 'B', 'A', 'B', 'A', 'B',
        '', 'A', 'C', 'D', 'C', 'D',
        'C', 'E', 'E',
        'E', 'E', 'F', 'G'
    ]
    second_prev_sec_id_strings = [
        '', '', '', '', '', '', '', '',
        '', 'B', '', '', '', '',
        'D', '', '',
        '', '', '', ''
    ]
    first_next_sec_id_strings = [
        'A', 'B', 'A', 'B', 'A', 'B', 'D', 'D',
        'C', 'D', 'C', 'D', 'E', 'E',
        'E', 'E', 'F',
        'F', 'G', '', ''
    ]
    second_next_sec_id_strings = [
        '', '', '', '', '', '', '', '',
        '', '', '', '', '', '',
        '', '', 'G',
        '', '', '', ''
    ]
    return pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        TORNADIC_FLAG_COLUMN: tornadic_flags,
        SPECIAL_FLAG_COLUMN: main_tornadic_flags,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings
    })
def _plot_interp_two_times(storm_object_table, tornado_table, legend_font_size,
                           legend_position_string):
    """Plots interpolation for one pair of times.

    Draws the actual storm objects (track, outlines, centroids, IDs), then the
    interpolated storm position/outline at the tornado time, the interpolation
    line, and the tornado itself.  In these schematics the x-coordinate doubles
    as the valid time, so the x-axis is labelled in minutes.

    :param storm_object_table: See doc for `_get_interp_data_for_split`.
    :param tornado_table: Same.
    :param legend_font_size: Font size in legend.
    :param legend_position_string: Legend position.
    :return: figure_object: Figure handle (instance of
        `matplotlib.figure.Figure`).
    :return: axes_object: Axes handle (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    """
    centroid_x_coords = (
        storm_object_table[tracking_utils.CENTROID_X_COLUMN].values
    )
    centroid_y_coords = (
        storm_object_table[tracking_utils.CENTROID_Y_COLUMN].values
    )
    # Schematic convention: x-coordinate == valid time in minutes.
    storm_times_minutes = (
        storm_object_table[tracking_utils.VALID_TIME_COLUMN].values
    ).astype(float)
    secondary_id_strings = (
        storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values
    )
    # The track-plotting routine expects lat/long columns, so alias the
    # abstract x/y coordinates into those columns.
    storm_object_table = storm_object_table.assign(**{
        tracking_utils.CENTROID_LONGITUDE_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_LATITUDE_COLUMN: centroid_y_coords
    })
    figure_object, axes_object, basemap_object = (
        plotting_utils.create_equidist_cylindrical_map(
            min_latitude_deg=numpy.min(centroid_y_coords),
            max_latitude_deg=numpy.max(centroid_y_coords),
            min_longitude_deg=numpy.min(centroid_x_coords),
            max_longitude_deg=numpy.max(centroid_x_coords)
        )
    )
    storm_plotting.plot_storm_tracks(
        storm_object_table=storm_object_table, axes_object=axes_object,
        basemap_object=basemap_object, colour_map_object=None,
        constant_colour=TRACK_COLOUR, line_width=TRACK_WIDTH,
        start_marker_type=None, end_marker_type=None)
    num_storm_objects = len(storm_object_table.index)
    legend_handles = []
    legend_strings = []
    # Shade the outline of every actual storm object.
    for i in range(num_storm_objects):
        this_patch_object = PolygonPatch(
            storm_object_table[POLYGON_COLUMN].values[i],
            lw=0, ec=NON_INTERP_COLOUR, fc=NON_INTERP_COLOUR,
            alpha=POLYGON_OPACITY)
        axes_object.add_patch(this_patch_object)
    # One marker series for all actual centroids (single legend entry).
    this_handle = axes_object.plot(
        storm_object_table[tracking_utils.CENTROID_X_COLUMN].values,
        storm_object_table[tracking_utils.CENTROID_Y_COLUMN].values,
        linestyle='None', marker=DEFAULT_MARKER_TYPE,
        markersize=DEFAULT_MARKER_SIZE, markerfacecolor=NON_INTERP_COLOUR,
        markeredgecolor=NON_INTERP_COLOUR,
        markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.append(this_handle)
    legend_strings.append('Actual storm')
    # Label each object with its secondary ID, just below the centroid.
    for i in range(num_storm_objects):
        axes_object.text(
            centroid_x_coords[i], centroid_y_coords[i] - TEXT_OFFSET,
            secondary_id_strings[i], color=TRACK_COLOUR,
            fontsize=DEFAULT_FONT_SIZE, fontweight='bold',
            horizontalalignment='center', verticalalignment='top')
    # Find the storm times bracketing the tornado time (assumes the tornado
    # falls strictly between two storm times).
    tornado_time_minutes = tornado_table[TORNADO_TIME_COLUMN].values[0]
    previous_time_minutes = numpy.max(
        storm_times_minutes[storm_times_minutes < tornado_time_minutes]
    )
    next_time_minutes = numpy.min(
        storm_times_minutes[storm_times_minutes > tornado_time_minutes]
    )
    previous_object_indices = numpy.where(
        storm_times_minutes == previous_time_minutes
    )[0]
    next_object_indices = numpy.where(
        storm_times_minutes == next_time_minutes
    )[0]
    previous_x_coord = numpy.mean(centroid_x_coords[previous_object_indices])
    previous_y_coord = numpy.mean(centroid_y_coords[previous_object_indices])
    next_x_coord = numpy.mean(centroid_x_coords[next_object_indices])
    next_y_coord = numpy.mean(centroid_y_coords[next_object_indices])
    if len(next_object_indices) == 1:
        # Merger case: two objects at the previous time, one at the next
        # time.  The "midpoint" is the mean location of the two predecessors
        # (code below indexes previous_object_indices[1], so exactly two
        # predecessors are assumed here).
        midpoint_x_coord = previous_x_coord
        midpoint_y_coord = previous_y_coord
        midpoint_label_string = 'Midpoint of {0:s} and {1:s}'.format(
            secondary_id_strings[previous_object_indices[0]],
            secondary_id_strings[previous_object_indices[1]]
        )
        line_x_coords = numpy.array([midpoint_x_coord, next_x_coord])
        line_y_coords = numpy.array([midpoint_y_coord, next_y_coord])
    else:
        # Split case: one predecessor, two successors; the midpoint is the
        # mean location of the two successors.
        midpoint_x_coord = next_x_coord
        midpoint_y_coord = next_y_coord
        midpoint_label_string = 'Midpoint of {0:s} and {1:s}'.format(
            secondary_id_strings[next_object_indices[0]],
            secondary_id_strings[next_object_indices[1]]
        )
        line_x_coords = numpy.array([previous_x_coord, midpoint_x_coord])
        line_y_coords = numpy.array([previous_y_coord, midpoint_y_coord])
    this_handle = axes_object.plot(
        midpoint_x_coord, midpoint_y_coord, linestyle='None',
        marker=DEFAULT_MARKER_TYPE, markersize=DEFAULT_MARKER_SIZE,
        markerfacecolor=MIDPOINT_COLOUR, markeredgecolor=MIDPOINT_COLOUR,
        markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.append(this_handle)
    legend_strings.append(midpoint_label_string)
    # Linear time interpolation between the bracketing mean positions.
    this_ratio = (
        (tornado_time_minutes - previous_time_minutes) /
        (next_time_minutes - previous_time_minutes)
    )
    interp_x_coord = previous_x_coord + (
        this_ratio * (next_x_coord - previous_x_coord)
    )
    interp_y_coord = previous_y_coord + (
        this_ratio * (next_y_coord - previous_y_coord)
    )
    # The interpolated outline is the outline of the single-object side
    # (the merged successor, or the lone predecessor), translated to the
    # interpolated centroid.
    if len(next_object_indices) == 1:
        x_offset = interp_x_coord - next_x_coord
        y_offset = interp_y_coord - next_y_coord
        interp_polygon_object_xy = storm_object_table[POLYGON_COLUMN].values[
            next_object_indices[0]
        ]
    else:
        x_offset = interp_x_coord - previous_x_coord
        y_offset = interp_y_coord - previous_y_coord
        interp_polygon_object_xy = storm_object_table[POLYGON_COLUMN].values[
            previous_object_indices[0]
        ]
    interp_polygon_object_xy = polygons.vertex_arrays_to_polygon_object(
        exterior_x_coords=(
            x_offset + numpy.array(interp_polygon_object_xy.exterior.xy[0])
        ),
        exterior_y_coords=(
            y_offset + numpy.array(interp_polygon_object_xy.exterior.xy[1])
        )
    )
    this_patch_object = PolygonPatch(
        interp_polygon_object_xy, lw=0, ec=INTERP_COLOUR, fc=INTERP_COLOUR,
        alpha=POLYGON_OPACITY)
    axes_object.add_patch(this_patch_object)
    this_handle = axes_object.plot(
        interp_x_coord, interp_y_coord, linestyle='None',
        marker=DEFAULT_MARKER_TYPE, markersize=DEFAULT_MARKER_SIZE,
        markerfacecolor=INTERP_COLOUR, markeredgecolor=INTERP_COLOUR,
        markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.append(this_handle)
    legend_strings.append('Interpolated storm')
    # Dashed interpolation line; inserted before the last legend entry so it
    # appears above "Interpolated storm" in the legend.
    this_handle = axes_object.plot(
        line_x_coords, line_y_coords,
        linestyle='dashed', color=MIDPOINT_COLOUR, linewidth=4
    )[0]
    legend_handles.insert(-1, this_handle)
    legend_strings.insert(-1, 'Interpolation line')
    this_handle = axes_object.plot(
        tornado_table[TORNADO_X_COLUMN].values[0],
        tornado_table[TORNADO_Y_COLUMN].values[0], linestyle='None',
        marker=TORNADO_MARKER_TYPE, markersize=TORNADO_MARKER_SIZE,
        markerfacecolor=INTERP_COLOUR, markeredgecolor=INTERP_COLOUR,
        markeredgewidth=TORNADO_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.insert(1, this_handle)
    this_string = 'Tornado (at {0:d} min)'.format(
        int(numpy.round(tornado_time_minutes))
    )
    legend_strings.insert(1, this_string)
    # x-ticks show time in minutes (x-coordinate == time in these data).
    x_tick_values, unique_indices = numpy.unique(
        centroid_x_coords, return_index=True)
    x_tick_labels = [
        '{0:d}'.format(int(numpy.round(storm_times_minutes[i])))
        for i in unique_indices
    ]
    axes_object.set_xticks(x_tick_values)
    axes_object.set_xticklabels(x_tick_labels)
    axes_object.set_xlabel('Storm time (minutes)')
    axes_object.set_yticks([], [])
    axes_object.legend(
        legend_handles, legend_strings, fontsize=legend_font_size,
        loc=legend_position_string)
    return figure_object, axes_object
def _plot_attribution_one_track(storm_object_table, plot_legend, plot_x_ticks,
                                legend_font_size=None, legend_location=None):
    """Plots tornado attribution for one storm track.

    Each storm object is drawn as one of three marker styles: the object
    initially linked to the tornado (special marker), other objects linked to
    the tornado via attribution (filled marker), and objects not linked
    (hollow marker).

    :param storm_object_table: pandas DataFrame created by
        `_get_track1_for_simple_pred`, `_get_track2_for_simple_pred`, or
        `_get_track_for_simple_succ`.
    :param plot_legend: Boolean flag.
    :param plot_x_ticks: Boolean flag.
    :param legend_font_size: Font size in legend (used only if
        `plot_legend == True`).
    :param legend_location: Legend location (used only if
        `plot_legend == True`).
    :return: figure_object: See doc for `_plot_interp_two_times`.
    :return: axes_object: Same.
    """
    centroid_x_coords = storm_object_table[
        tracking_utils.CENTROID_X_COLUMN].values
    centroid_y_coords = storm_object_table[
        tracking_utils.CENTROID_Y_COLUMN].values
    secondary_id_strings = storm_object_table[
        tracking_utils.SECONDARY_ID_COLUMN].values
    # Alias abstract x/y coordinates into the lat/long columns expected by
    # the track-plotting routine.
    storm_object_table = storm_object_table.assign(**{
        tracking_utils.CENTROID_LONGITUDE_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_LATITUDE_COLUMN: centroid_y_coords
    })
    figure_object, axes_object, basemap_object = (
        plotting_utils.create_equidist_cylindrical_map(
            min_latitude_deg=numpy.min(centroid_y_coords),
            max_latitude_deg=numpy.max(centroid_y_coords),
            min_longitude_deg=numpy.min(centroid_x_coords),
            max_longitude_deg=numpy.max(centroid_x_coords)
        )
    )
    storm_plotting.plot_storm_tracks(
        storm_object_table=storm_object_table, axes_object=axes_object,
        basemap_object=basemap_object, colour_map_object=None,
        constant_colour=TRACK_COLOUR, line_width=TRACK_WIDTH,
        start_marker_type=None, end_marker_type=None)
    tornadic_flags = storm_object_table[TORNADIC_FLAG_COLUMN].values
    main_tornadic_flags = storm_object_table[SPECIAL_FLAG_COLUMN].values
    # Fixed legend slots: [0] initially-linked, [1] also-linked,
    # [2] not-linked.  NOTE(review): if any category is absent from the
    # track, its slot stays None and is passed to legend() as-is.
    legend_handles = [None] * 3
    legend_strings = [None] * 3
    for i in range(len(centroid_x_coords)):
        if main_tornadic_flags[i]:
            # The object to which the tornado was initially linked.
            this_handle = axes_object.plot(
                centroid_x_coords[i], centroid_y_coords[i], linestyle='None',
                marker=TORNADIC_STORM_MARKER_TYPE,
                markersize=TORNADIC_STORM_MARKER_SIZE,
                markerfacecolor=TORNADIC_STORM_COLOUR,
                markeredgecolor=TORNADIC_STORM_COLOUR,
                markeredgewidth=TORNADIC_STORM_MARKER_EDGE_WIDTH
            )[0]
            legend_handles[0] = this_handle
            legend_strings[0] = 'Object initially linked\nto tornado'
            axes_object.text(
                centroid_x_coords[i], centroid_y_coords[i] - TEXT_OFFSET,
                secondary_id_strings[i], color=TORNADIC_STORM_COLOUR,
                fontsize=DEFAULT_FONT_SIZE, fontweight='bold',
                horizontalalignment='center', verticalalignment='top')
        else:
            # Linked objects are filled; unlinked objects are hollow.
            if tornadic_flags[i]:
                this_edge_colour = TORNADIC_STORM_COLOUR
                this_face_colour = TORNADIC_STORM_COLOUR
            else:
                this_edge_colour = NON_TORNADIC_STORM_COLOUR
                this_face_colour = 'white'
            this_handle = axes_object.plot(
                centroid_x_coords[i], centroid_y_coords[i], linestyle='None',
                marker=DEFAULT_MARKER_TYPE, markersize=DEFAULT_MARKER_SIZE,
                markerfacecolor=this_face_colour,
                markeredgecolor=this_edge_colour,
                markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
            )[0]
            # Capture the first handle of each category for the legend.
            if tornadic_flags[i] and legend_handles[1] is None:
                legend_handles[1] = this_handle
                legend_strings[1] = 'Also linked to tornado'
            if not tornadic_flags[i] and legend_handles[2] is None:
                legend_handles[2] = this_handle
                legend_strings[2] = 'Not linked to tornado'
            axes_object.text(
                centroid_x_coords[i], centroid_y_coords[i] - TEXT_OFFSET,
                secondary_id_strings[i], color=this_edge_colour,
                fontsize=DEFAULT_FONT_SIZE, fontweight='bold',
                horizontalalignment='center', verticalalignment='top')
    if plot_x_ticks:
        # x-coordinate doubles as valid time, so label ticks in minutes.
        storm_times_minutes = storm_object_table[
            tracking_utils.VALID_TIME_COLUMN].values
        x_tick_values, unique_indices = numpy.unique(
            centroid_x_coords, return_index=True)
        x_tick_labels = [
            '{0:d}'.format(int(numpy.round(storm_times_minutes[i])))
            for i in unique_indices
        ]
        axes_object.set_xticks(x_tick_values)
        axes_object.set_xticklabels(x_tick_labels)
        axes_object.set_xlabel('Storm time (minutes)')
    else:
        axes_object.set_xticks([], [])
        axes_object.set_xlabel(r'Time $\longrightarrow$')
    axes_object.set_yticks([], [])
    # Extra space below the track so ID labels are not clipped.
    y_min, y_max = axes_object.get_ylim()
    axes_object.set_ylim([y_min - 0.25, y_max])
    if plot_legend:
        axes_object.legend(
            legend_handles, legend_strings, fontsize=legend_font_size,
            loc=legend_location)
    return figure_object, axes_object
def _run():
    """Makes event-attribution schematics for 2019 tornado-prediction paper.

    This is effectively the main method.  Each schematic is saved twice: once
    as a standalone image (with title, no panel letter) and once as a
    lettered panel; the panels are finally concatenated into one figure.
    """
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=OUTPUT_DIR_NAME)

    # Interpolation with merger.  Call the data-builder once and reuse the
    # result (previously it was called twice, rebuilding identical tables).
    these_tables = _get_data_for_interp_with_merger()
    figure_object, axes_object = _plot_interp_two_times(
        storm_object_table=these_tables[0],
        tornado_table=these_tables[1],
        legend_font_size=SMALL_LEGEND_FONT_SIZE,
        legend_position_string='upper right'
    )
    axes_object.set_title('Interpolation with merger')
    this_file_name = '{0:s}/interp_with_merger_standalone.jpg'.format(
        OUTPUT_DIR_NAME)
    print('Saving figure to: "{0:s}"...'.format(this_file_name))
    figure_object.savefig(
        this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )

    plotting_utils.label_axes(axes_object=axes_object, label_string='(a)')
    panel_file_names = ['{0:s}/interp_with_merger.jpg'.format(OUTPUT_DIR_NAME)]
    print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
    figure_object.savefig(
        panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)

    # Interpolation with split.
    these_tables = _get_data_for_interp_with_split()
    figure_object, axes_object = _plot_interp_two_times(
        storm_object_table=these_tables[0],
        tornado_table=these_tables[1],
        legend_font_size=DEFAULT_FONT_SIZE,
        legend_position_string='upper left'
    )
    axes_object.set_title('Interpolation with split')
    this_file_name = '{0:s}/interp_with_split_standalone.jpg'.format(
        OUTPUT_DIR_NAME)
    print('Saving figure to: "{0:s}"...'.format(this_file_name))
    figure_object.savefig(
        this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )

    plotting_utils.label_axes(axes_object=axes_object, label_string='(b)')
    panel_file_names.append(
        '{0:s}/interp_with_split.jpg'.format(OUTPUT_DIR_NAME)
    )
    print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
    figure_object.savefig(
        panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)

    # Simple successors.
    figure_object, axes_object = _plot_attribution_one_track(
        storm_object_table=_get_track_for_simple_succ(),
        plot_legend=True, plot_x_ticks=True,
        legend_font_size=SMALL_LEGEND_FONT_SIZE, legend_location='lower right'
    )
    this_file_name = '{0:s}/simple_successors_standalone.jpg'.format(
        OUTPUT_DIR_NAME)
    print('Saving figure to: "{0:s}"...'.format(this_file_name))
    figure_object.savefig(
        this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )

    plotting_utils.label_axes(axes_object=axes_object, label_string='(c)')
    axes_object.set_title('Linking to simple successors')
    panel_file_names.append(
        '{0:s}/simple_successors.jpg'.format(OUTPUT_DIR_NAME)
    )
    print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
    figure_object.savefig(
        panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)

    # Simple predecessors, example 1.
    figure_object, axes_object = _plot_attribution_one_track(
        storm_object_table=_get_track1_for_simple_pred(),
        plot_legend=True, plot_x_ticks=False,
        legend_font_size=DEFAULT_FONT_SIZE, legend_location=(0.28, 0.1)
    )
    axes_object.set_title('Simple predecessors, example 1')
    this_file_name = '{0:s}/simple_predecessors_track1_standalone.jpg'.format(
        OUTPUT_DIR_NAME)
    print('Saving figure to: "{0:s}"...'.format(this_file_name))
    figure_object.savefig(
        this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )

    plotting_utils.label_axes(axes_object=axes_object, label_string='(d)')
    axes_object.set_title('Linking to simple predecessors, example 1')
    panel_file_names.append(
        '{0:s}/simple_predecessors_track1.jpg'.format(OUTPUT_DIR_NAME)
    )
    print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
    figure_object.savefig(
        panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)

    # Simple predecessors, example 2.
    figure_object, axes_object = _plot_attribution_one_track(
        storm_object_table=_get_track2_for_simple_pred(),
        plot_legend=False, plot_x_ticks=False
    )
    axes_object.set_title('Simple predecessors, example 2')
    this_file_name = '{0:s}/simple_predecessors_track2_standalone.jpg'.format(
        OUTPUT_DIR_NAME)
    print('Saving figure to: "{0:s}"...'.format(this_file_name))
    figure_object.savefig(
        this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )

    plotting_utils.label_axes(axes_object=axes_object, label_string='(e)')
    axes_object.set_title('Linking to simple predecessors, example 2')
    panel_file_names.append(
        '{0:s}/simple_predecessors_track2.jpg'.format(OUTPUT_DIR_NAME)
    )
    print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
    figure_object.savefig(
        panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)

    # Concatenate all panels into one figure.
    concat_file_name = '{0:s}/attribution_schemas.jpg'.format(OUTPUT_DIR_NAME)
    print('Concatenating panels to: "{0:s}"...'.format(concat_file_name))
    imagemagick_utils.concatenate_images(
        input_file_names=panel_file_names, output_file_name=concat_file_name,
        num_panel_rows=2, num_panel_columns=3)
    imagemagick_utils.resize_image(
        input_file_name=concat_file_name, output_file_name=concat_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX)
# Script entry point: build all attribution schematics.
if __name__ == '__main__':
    _run()
| StarcoderdataPython |
126431 | from .fc_ping import FCPing
def setup(bot):
    """Discord.py extension hook: register the FCPing cog on *bot*."""
    cog = FCPing(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
80415 | <reponame>42jaylonw/rrc_2021_three_wolves
import pickle
class EpisodeData:
    """Per-episode log: fixed goals plus growing joint/tip trajectories."""

    def __init__(self, joint_goal, tip_goal):
        # Goals are fixed for the episode; the trajectory lists grow
        # one sample at a time via append().
        self.joint_goal = joint_goal
        self.tip_goal = tip_goal
        self.joint_positions = []
        self.tip_positions = []
        self.timestamps = []

    def append(self, joint_pos, tip_pos, timestamp):
        """Record one trajectory sample (joint pose, tip pose, time)."""
        for sequence, sample in ((self.joint_positions, joint_pos),
                                 (self.tip_positions, tip_pos),
                                 (self.timestamps, timestamp)):
            sequence.append(sample)
class DataLogger:
    """Accumulates per-episode env data and dumps it to a pickle file.

    Episodes are stored as plain dicts so that loading the pickle has no
    dependency on this module's classes.
    """

    def __init__(self):
        self.episodes = []
        self._curr = None

    def new_episode(self, joint_goal, tip_goal):
        """Finalize the current episode (if any) and start a new one."""
        if self._curr:
            # convert to dict for saving so loading has no dependencies
            self.episodes.append(self._curr.__dict__)
        self._curr = EpisodeData(joint_goal, tip_goal)

    def append(self, joint_pos, tip_pos, timestamp):
        """Record one sample into the current episode."""
        self._curr.append(joint_pos, tip_pos, timestamp)

    def store(self, filename):
        """Dump all episodes (including the in-progress one) to *filename*.

        Bug fix: the in-progress episode was previously dropped unless
        new_episode() happened to be called again before store().  The
        internal state is not mutated, so logging can continue afterwards.
        """
        episodes = list(self.episodes)
        if self._curr:
            episodes.append(self._curr.__dict__)
        with open(filename, "wb") as file_handle:
            pickle.dump(episodes, file_handle)
| StarcoderdataPython |
3258401 | <reponame>jsjaskaran/createblockchain<gh_stars>0
# Create a Blockchain
# importing libraries
import datetime
import hashlib
import json
from flask import Flask, jsonify
# Part 1 - Building a Blockchain
class Blockchain:
    """A minimal in-memory blockchain with a simple proof-of-work scheme.

    The proof-of-work puzzle: find ``p`` such that
    ``sha256(str(p**2 - prev_p**2))`` starts with four zero hex digits.
    """

    def __init__(self):
        self.chain = []
        # Genesis block: fixed proof 1, previous hash '0'.
        self.create_block(proof=1, prev_hash='0')

    def create_block(self, proof, prev_hash):
        """Append a new block to the chain and return it."""
        block = {
            'index': len(self.chain) + 1,
            'timestamp': str(datetime.datetime.now()),
            'proof': proof,
            'previous_hash': prev_hash,
        }
        self.chain.append(block)
        return block

    def get_previous_block(self):
        """Return the newest block on the chain."""
        return self.chain[-1]

    def proof_of_work(self, prev_proof):
        """Search (by trial and error, starting at 1) for a valid proof."""
        candidate = 1
        while True:
            digest = hashlib.sha256(
                str(candidate ** 2 - prev_proof ** 2).encode()
            ).hexdigest()
            if digest[:4] == '0000':
                return candidate
            candidate += 1

    def hash(self, block):
        """Return the SHA-256 hex digest of the block's sorted-key JSON."""
        payload = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(payload).hexdigest()

    def is_chain_valid(self, chain):
        """Check every link: stored previous_hash and proof-of-work."""
        for previous_block, block in zip(chain, chain[1:]):
            if block['previous_hash'] != self.hash(previous_block):
                return False
            digest = hashlib.sha256(
                str(block['proof'] ** 2 - previous_block['proof'] ** 2).encode()
            ).hexdigest()
            if digest[:4] != '0000':
                return False
        return True
# Part 2: Mining Blockchain
# Create the Flask web app that exposes the chain over HTTP.
app = Flask(__name__)
# Single in-memory chain shared by all request handlers below.
blockchain = Blockchain() # instance
# Mining block
@app.route('/mineblock', methods=['GET'])
def mine_block():
    """Mine one block (solve proof-of-work, append) and report its details."""
    previous_block = blockchain.get_previous_block()
    proof = blockchain.proof_of_work(previous_block['proof'])
    block = blockchain.create_block(proof, blockchain.hash(previous_block))
    response = {
        'message': 'Congratulations, you just mined a block!',
        'index': block['index'],
        'timestamp': block['timestamp'],
        'previous_hash': block['previous_hash'],
    }
    return jsonify(response), 200
@app.route('/getchain', methods=['GET'])
def get_blockchain():
    """Return the full chain and its current length."""
    response = {
        'chain': blockchain.chain,
        'length': len(blockchain.chain),
        'message': 'The current blockchain',
    }
    return jsonify(response), 200
# check if blockchain is valid
@app.route('/checkvalid', methods=['GET'])
def check_if_valid():
    """Report whether the stored chain passes validation."""
    if blockchain.is_chain_valid(blockchain.chain):
        message = 'The blockchain is valid.'
    else:
        message = 'We have a problem, the blockchain is not valid.'
    return jsonify({'message': message}), 200
# Running the app.  Bug fix: a dataset-extraction marker was fused onto this
# line, making it a syntax error.  Binding to 0.0.0.0 exposes the server on
# all network interfaces.
app.run(host = '0.0.0.0', port = 5000)
1683780 | <filename>client/paddleflow/pipeline/dsl/io_types/artifact.py
#!/usr/bin/env python3
"""
Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Artifact: the input/output file of step
from paddleflow.pipeline.dsl.utils.util import validate_string_by_regex
from paddleflow.pipeline.dsl.utils.consts import VARIBLE_NAME_REGEX
from paddleflow.pipeline.dsl.utils.consts import PipelineDSLError
from paddleflow.common.exception.paddleflow_sdk_exception import PaddleFlowSDKException
class Artifact(object):
    """ Artifact: the input/output file/directory of a step. """

    def __init__(self):
        """ Create a new, unbound instance of Artifact.

        Both fields are filled in later by set_base_info(); until then the
        artifact cannot be compiled into a template.
        """
        self.__step = None
        self.__name = None

    def set_base_info(self, name: str, step):
        """ set the step that this artifact instance belongs to and its name

        Args:
            step (Step): the step that this artifact instance belongs to
            name (str): the name of the artifact

        Raises:
            PaddleFlowSDKException: if the name is illegal

        .. note:: This is not used directly by the users
        """
        self.__step = step
        if not validate_string_by_regex(name, VARIBLE_NAME_REGEX):
            raise PaddleFlowSDKException(PipelineDSLError,
                    f"the name of Artifact[{name}] for step[{step.name}]is illegal, " + \
                    f"the regex used for validation is {VARIBLE_NAME_REGEX}")
        self.__name = name

    def compile(self):
        """ trans to template when a downstream step references it at compile stage

        Returns:
            A string with the template of this artifact

        Raises:
            PaddleFlowSDKException: if it cannot be translated to a template
        """
        if self.__step is None or self.__name is None:
            raise PaddleFlowSDKException(PipelineDSLError,
                    "when trans Artifact to template, it's step and name cannot be None")
        return "{{" + f"{self.__step.name}.{self.__name}" + "}}"

    @property
    def step(self):
        """ get the step of it

        Returns:
            A step instance which it belongs to
        """
        return self.__step

    @property
    def name(self):
        """ get the name of it

        Returns:
            A string which describes the name of it
        """
        return self.__name

    def __deepcopy__(self, memo):
        """ support copy.deepcopy """
        art = Artifact()
        # Bug fix: register the copy in *memo* before any recursion, as the
        # deepcopy protocol requires (handles reference cycles correctly).
        memo[id(self)] = art
        if self.name:
            art.set_base_info(name=self.name, step=self.step)
        return art

    def __eq__(self, other):
        """ support ==

        Bug fix: returns NotImplemented for non-Artifact operands instead of
        raising AttributeError on ``other.name``.
        """
        if not isinstance(other, Artifact):
            return NotImplemented
        return self.name == other.name and self.step == other.step
| StarcoderdataPython |
136900 | <filename>haul3/haul/platforms/webos/__init__.py<gh_stars>1-10
__all__ = [
'builder_webos',
] | StarcoderdataPython |
import pygame

# Initialize pygame and start playback of the local MP3 file.
pygame.init()
pygame.mixer.music.load('E.mp3')
pygame.mixer.music.play()

# Bug fix: pygame.event.wait() returns on the *first* event (or blocks
# indefinitely in a windowless script) rather than waiting for the music to
# finish.  Poll the mixer instead until playback ends.
while pygame.mixer.music.get_busy():
    pygame.time.wait(100)

print('boa musica ne?')
78441 | from django import template
from django.template.loader import get_template
register = template.Library()
@register.simple_tag(takes_context=True)
def activity_item(context, item):
    """Render the per-model/field partial for an activity item.

    Returns an empty string when no matching partial template exists, so
    unknown activity types are silently skipped.
    """
    path = f"barriers/activity/partials/{item.model}/{item.field}.html"
    try:
        partial = get_template(path)
    except template.TemplateDoesNotExist:
        return ""
    return partial.render(context.flatten())
| StarcoderdataPython |
3258647 | from django.urls import path
from .views import ServiceCreate, ServiceList, ServiceDetail, ServiceUpdate, ServiceDelete, getService, \
acceptRequest, declineRequest, addRequest, addFeedback, checkCredits
# REST routes for the service app.  Route names are referenced elsewhere in
# the project via Django's reverse()/url lookups.
urlpatterns = [
    path('create/', ServiceCreate.as_view(), name='create-service'),
    path('', ServiceList.as_view()),
    path('<int:pk>/', ServiceDetail.as_view(), name='retrieve-service'),
    path('update/<int:pk>/', ServiceUpdate.as_view(), name='update-service'),
    path('delete/<int:pk>/', ServiceDelete.as_view(), name='delete-service'),
    path('<int:pk>/set/', getService, name='service-servicelist'),
    path('accept/<int:service>/<int:requestmaker>/', acceptRequest, name='service-acceptrequest'),
    path('decline/<int:service>/<int:requestmaker>/', declineRequest, name='service-declinerequest'),
    path('request/<int:service>/<int:requestmaker>/', addRequest, name='service-addrequest'),
    path('feedback/<int:service>/<int:feedback>/', addFeedback, name='service-addfeedbak'),
    path('checkcredits/<int:user>/', checkCredits, name='service-checkcredits'),
]
| StarcoderdataPython |
3248249 | from penaltymodel.maxgap.generation import *
from penaltymodel.maxgap.interface import *
from penaltymodel.maxgap.package_info import *
| StarcoderdataPython |
3363578 | <filename>examples/acados_template/python/soft_constraints/generate_c_code.py
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import *
import acados_template as at
from export_ode_model import *
import numpy as np
import scipy.linalg
from ctypes import *
# Selects how the soft control bound is expressed in the OCP.
# NOTE(review): the FORMULATION == 2 path below references an undefined name
# (xmax) and currently raises NameError -- confirm the intended bound.
FORMULATION = 2 # 0 for linear soft bounds,
                 # 1 for equivalent nonlinear soft constraint
                 # 2 for equivalent nonlinear soft constraint +
                 # terminal soft state constraint
def export_nonlinear_constraint():
    """Build the acados path constraint h(x, u) = u.

    Expresses the control bound as a (soft-constrainable) nonlinear
    constraint on the single input, for FORMULATION 1/2.
    """
    # State symbols of the cartpole model: cart position/velocity and pole
    # angle/angular rate.
    cart_pos = SX.sym('x1')
    pole_angle = SX.sym('theta')
    cart_vel = SX.sym('v1')
    angle_rate = SX.sym('dtheta')
    states = vertcat(cart_pos, cart_vel, pole_angle, angle_rate)

    # Single control input: horizontal force on the cart.
    force = SX.sym('F')
    controls = vertcat(force)

    constraint = acados_constraint()
    constraint.expr = controls
    constraint.x = states
    constraint.u = controls
    constraint.nc = 1
    constraint.name = 'nl_con'
    return constraint
def export_terminal_nonlinear_constraint():
    """Build the acados terminal constraint h_e(x) = x1 (cart position).

    Used by FORMULATION 2 to softly bound the terminal cart position.
    """
    # State symbols of the cartpole model.
    cart_pos = SX.sym('x1')
    pole_angle = SX.sym('theta')
    cart_vel = SX.sym('v1')
    angle_rate = SX.sym('dtheta')
    states = vertcat(cart_pos, cart_vel, pole_angle, angle_rate)

    # Control symbol (unused by the expression, but part of the signature).
    force = SX.sym('F')
    controls = vertcat(force)

    constraint = acados_constraint()
    constraint.expr = cart_pos
    constraint.x = states
    constraint.u = controls
    constraint.nc = 1
    constraint.name = 'nl_state_con'
    return constraint
# create render arguments
ocp = acados_ocp_nlp()
# export model
model = export_ode_model()
# set model_name
ocp.model = model
# Prediction horizon [s] and dimensions: 4 states, 1 control, N shooting nodes.
Tf = 2.0
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
N = 50
# set ocp_nlp_dimensions
nlp_dims = ocp.dims
nlp_dims.nx = nx
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.nu = model.u.size()[0]
nlp_dims.ns = nu
nlp_dims.N = N
# Constraint dimensions depend on how the soft control bound is formulated:
# nonlinear-constraint slots (nh/nsh) vs. plain input bounds (nbu/nsbu);
# FORMULATION 2 additionally reserves a soft terminal constraint (nh_e/nsh_e).
if FORMULATION == 1:
    nlp_dims.nh = model.u.size()[0]
    nlp_dims.nsh = model.u.size()[0]
    nlp_dims.nbu = 0
    nlp_dims.nsbu = 0
elif FORMULATION == 0:
    nlp_dims.nh = 0
    nlp_dims.nsh = 0
    nlp_dims.nbu = model.u.size()[0]
    nlp_dims.nsbu = model.u.size()[0]
elif FORMULATION == 2:
    nlp_dims.ns_e = 1
    nlp_dims.nh = model.u.size()[0]
    nlp_dims.nsh = model.u.size()[0]
    nlp_dims.nh_e = 1
    nlp_dims.nsh_e = 1
    nlp_dims.nbu = 0
    nlp_dims.nsbu = 0
# set weighting matrices
# Least-squares tracking cost: W = blkdiag(Q, R) over y = [x; u].
nlp_cost = ocp.cost
Q = np.eye(4)
Q[0,0] = 1e0
Q[1,1] = 1e2
Q[2,2] = 1e-3
Q[3,3] = 1e-2
R = np.eye(1)
R[0,0] = 1e0
nlp_cost.W = scipy.linalg.block_diag(Q, R)
# Output selection matrices: y = Vx*x + Vu*u (identity on states, last row
# selects the control).
Vx = np.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
Vx[2,2] = 1.0
Vx[3,3] = 1.0
nlp_cost.Vx = Vx
Vu = np.zeros((ny, nu))
Vu[4,0] = 1.0
nlp_cost.Vu = Vu
nlp_cost.W_e = Q
Vx_e = np.zeros((ny_e, nx))
Vx_e[0,0] = 1.0
Vx_e[1,1] = 1.0
Vx_e[2,2] = 1.0
Vx_e[3,3] = 1.0
nlp_cost.Vx_e = Vx_e
# References are zero: regulate the system to the origin.
nlp_cost.yref = np.zeros((ny, ))
nlp_cost.yref_e = np.zeros((ny_e, ))
# Slack penalties: linear weights (zl/zu) only; quadratic weights are zero.
# Terminal slacks are penalized 10x harder than path slacks.
nlp_cost.zl = 500*np.ones((1, ))
nlp_cost.Zl = 0*np.ones((1, 1))
nlp_cost.zu = 500*np.ones((1, ))
nlp_cost.Zu = 0*np.ones((1, 1))
nlp_cost.zl_e = 5000*np.ones((1, ))
nlp_cost.Zl_e = 0*np.ones((1, 1))
nlp_cost.zu_e = 5000*np.ones((1, ))
nlp_cost.Zu_e = 0*np.ones((1, 1))
# setting bounds
# Force limit on the cart; initial state has the pole pointing down (~pi).
Fmax = 2.0
nlp_con = ocp.constraints
nlp_con.x0 = np.array([0.0, 3.14, 0.0, 0.0])
con_h = export_nonlinear_constraint()
con_h_e = export_terminal_nonlinear_constraint()
# Soft bounds: slack ranges (lsh/ush or lsbu/usbu) start at zero; idx* maps
# the slacks to the constraint entries.
if FORMULATION == 1:
    nlp_con.lh = np.array([-Fmax])
    nlp_con.uh = np.array([+Fmax])
    nlp_con.lsh = 0*np.array([-Fmax])
    nlp_con.ush = 0*np.array([+Fmax])
    nlp_con.idxsh = np.array([0])
elif FORMULATION == 0:
    nlp_con.lbu = np.array([-Fmax])
    nlp_con.ubu = np.array([+Fmax])
    nlp_con.lsbu = 0*np.array([-Fmax])
    nlp_con.usbu = 0*np.array([+Fmax])
    nlp_con.idxbu = np.array([0])
    nlp_con.idxsbu = np.array([0])
elif FORMULATION == 2:
    nlp_con.lh = np.array([-Fmax])
    nlp_con.uh = np.array([+Fmax])
    nlp_con.lsh = 0*np.array([-Fmax])
    nlp_con.ush = 0*np.array([+Fmax])
    nlp_con.idxsh = np.array([0])
    # NOTE(review): xmax is never defined in this file, so these two lines
    # raise NameError when FORMULATION == 2 (the active setting).  Define
    # the intended terminal cart-position bound before use.
    nlp_con.lh_e = np.array([-xmax])
    nlp_con.uh_e = np.array([+xmax])
    nlp_con.lsh_e = 0*np.array([-Fmax])
    nlp_con.ush_e = 0*np.array([+Fmax])
    nlp_con.idxsh_e = np.array([0])
# set QP solver
ocp.solver_config.qp_solver = 'PARTIAL_CONDENSING_HPIPM'
ocp.solver_config.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_config.integrator_type = 'ERK'
# set prediction horizon
ocp.solver_config.tf = Tf
ocp.solver_config.nlp_solver_type = 'SQP'
# set header path
ocp.acados_include_path = '/usr/local/include'
ocp.acados_lib_path = '/usr/local/lib'
# json_layout = acados_ocp2json_layout(ocp)
# with open('acados_layout.json', 'w') as f:
#     json.dump(json_layout, f, default=np_array_to_list)
# exit()
# Attach the nonlinear-constraint definitions that the chosen formulation
# needs, then generate the solver from the JSON description.
if FORMULATION == 1:
    ocp.con_h = con_h
    acados_solver = generate_solver(ocp, json_file = 'acados_ocp.json')
elif FORMULATION == 0:
    acados_solver = generate_solver(ocp, json_file = 'acados_ocp.json')
elif FORMULATION == 2:
    ocp.con_h = con_h
    ocp.con_h_e = con_h_e
    acados_solver = generate_solver(ocp, json_file = 'acados_ocp.json')
# Closed-loop simulation: at each step solve the OCP, record the first
# state/input, then shift the initial condition to the predicted next state.
Nsim = 100
simX = np.ndarray((Nsim, nx))
simU = np.ndarray((Nsim, nu))
for i in range(Nsim):
    status = acados_solver.solve()  # NOTE: nonzero status (solver failure) is not checked

    # get solution
    x0 = acados_solver.get(0, "x")
    u0 = acados_solver.get(0, "u")

    for j in range(nx):
        simX[i,j] = x0[j]
    for j in range(nu):
        simU[i,j] = u0[j]

    # update initial condition
    x0 = acados_solver.get(1, "x")
    acados_solver.set(0, "lbx", x0)
    acados_solver.set(0, "ubx", x0)

# plot results
import matplotlib
import matplotlib.pyplot as plt

# Bug fix: the time axis previously spanned a single sampling interval
# (Tf/N); the closed-loop run covers Nsim steps of length Tf/N.
t = np.linspace(0.0, Nsim * Tf / N, Nsim)

# Bug fix: the original mixed subplot grids (2x1 for the first two panels,
# then 3x1 for the third), so the last panel overlapped the second, and it
# re-plotted theta (simX[:, 1]) under the label 'x'.  Use a consistent
# 3-row grid and plot the cart position (simX[:, 0]) in the last panel.
plt.subplot(3, 1, 1)
plt.step(t, simU, color='r')
plt.title('closed-loop simulation')
plt.ylabel('u')
plt.xlabel('t')
plt.grid(True)

plt.subplot(3, 1, 2)
plt.plot(t, simX[:,1])
plt.ylabel('theta')
plt.xlabel('t')
plt.grid(True)

plt.subplot(3, 1, 3)
plt.plot(t, simX[:,0])
plt.ylabel('x')
plt.xlabel('t')
plt.grid(True)

plt.show()
| StarcoderdataPython |
1692998 | <reponame>annetrose/xparty<filename>server/view/tags/library.py
from server.view import custom_templates
from google.appengine.ext.webapp import template
import os
register = template.create_template_register()
@register.filter
def init_activity_types_js(tmp=None):
    """Render the JS snippet that registers all available activity types.

    Activity types are discovered by scanning the custom templates folder
    for files named ``student_<type>.html``.
    NOTE: implemented as a filter (filters require a dummy argument) because
    an inclusion_tag was only visible from one of the two template dirs.
    """
    template_dir = os.path.dirname(custom_templates.__file__)
    type_names = [
        fname.replace("student_", "").replace(".html", "")
        for fname in os.listdir(template_dir)
        if fname.startswith("student_") and fname.endswith(".html")
    ]
    activity_types = [
        {'type': name, 'description': name.replace("_", " ").title()}
        for name in type_names
    ]
    tag_file = os.path.join(os.path.dirname(__file__), 'activity_type_js.html')
    return template.render(tag_file, {'activity_types': activity_types})
@register.filter
def init_activities_js(activities):
    """Render the activity-initialization JS for a list of activities."""
    template_path = os.path.join(os.path.dirname(__file__), 'activity_js.html')
    return template.render(template_path, {'activities': activities, 'list': True})
@register.filter
def init_activity_js(activity):
    """Render the activity-initialization JS for a single activity."""
    template_path = os.path.join(os.path.dirname(__file__), 'activity_js.html')
    # The template always iterates a list, so wrap the single activity.
    return template.render(template_path, {'activities': [activity], 'list': False})
@register.filter
def init_students_js(students):
    """Render the student-initialization JS for a list of students."""
    template_path = os.path.join(os.path.dirname(__file__), 'student_js.html')
    return template.render(template_path, {'students': students, 'list': True})
@register.filter
def init_student_js(student):
    """Render the student-initialization JS for a single student."""
    template_path = os.path.join(os.path.dirname(__file__), 'student_js.html')
    # The template always iterates a list, so wrap the single student.
    return template.render(template_path, {'students': [student], 'list': False})
@register.filter
def init_task_histories_js(histories):
    """Render the task-history JS bootstrap for a list of histories."""
    template_path = os.path.join(os.path.dirname(__file__), 'task_history_js.html')
    return template.render(template_path, {'task_histories': histories})
157997 | <reponame>devkral/spkbspider<filename>spkcspider/apps/spider/urls.py
from django.contrib.auth.decorators import login_required
from django.urls import path
from .views import (
OwnerTokenManagement, ComponentCreate, ComponentIndex,
ComponentPublicIndex, ComponentUpdate, ConfirmTokenUpdate, ContentAccess,
ContentAdd, ContentIndex, EntityMassDeletion, RequestTokenUpdate,
TokenDeletionRequest, TokenRenewal, TravelProtectionManagement
)
app_name = "spider_base"
# components plural: most components url retrieve multiple items
# one "component"-url for single retrievals is confusing
urlpatterns = [
    path(
        'components/export/',
        login_required(ComponentIndex.as_view(scope="export")),
        name='ucomponent-export'
    ),
    # FIX: route previously read 'components/slug:user>/export/' (missing
    # '<'), so the slug converter was treated as literal text and the
    # per-user export URL could never match.
    path(
        'components/<slug:user>/export/',
        login_required(ComponentIndex.as_view(scope="export")),
        name='ucomponent-export'
    ),
    path(
        'components/list/',
        login_required(ComponentIndex.as_view()),
        name='ucomponent-list'
    ),
    path(
        'components/list/<slug:user>/',
        login_required(ComponentIndex.as_view()),
        name='ucomponent-list'
    ),
    # path(
    #     'ucs/create/<slug:user>/',
    #     ComponentCreate.as_view(),
    #     name='ucomponent-add'
    # ),
    path(
        'components/add/',
        login_required(ComponentCreate.as_view()),
        name='ucomponent-add'
    ),
    path(
        'components/<str:token>/update/',
        login_required(ComponentUpdate.as_view()),
        name='ucomponent-update'
    ),
    path(
        'components/<str:token>/delete/',
        EntityMassDeletion.as_view(),
        name='entity-delete'
    ),
    path(
        'components/<str:token>/list/',
        ContentIndex.as_view(),
        name='ucontent-list'
    ),
    path(
        'components/<str:token>/export/',
        ContentIndex.as_view(scope="export"),
        name='ucontent-export'
    ),
    path(
        'components/<str:token>/add/<slug:type>/',
        ContentAdd.as_view(),
        name='ucontent-add'
    ),
    path(
        'content/<str:token>/<slug:access>/',
        ContentAccess.as_view(),
        name='ucontent-access'
    ),
    path(
        'token/<str:token>/delete/',
        OwnerTokenManagement.as_view(scope="delete"),
        name='token-owner-delete'
    ),
    path(
        'token/<str:token>/share/',
        OwnerTokenManagement.as_view(scope="share"),
        name='token-owner-share'
    ),
    path(
        'token/confirm-update-request/',
        ConfirmTokenUpdate.as_view(),
        name='token-confirm-update-request'
    ),
    path(
        'token/update-request/',
        RequestTokenUpdate.as_view(),
        name='token-update-request'
    ),
    path(
        'token/delete-request/',
        TokenDeletionRequest.as_view(),
        name='token-delete-request'
    ),
    path(
        'token/renew/',
        TokenRenewal.as_view(),
        name='token-renew'
    ),
    path(
        'travelprotection/',
        TravelProtectionManagement.as_view(),
        name='travelprotection-manage'
    ),
    path(
        'components/',
        ComponentPublicIndex.as_view(
            is_home=False
        ),
        name='ucomponent-listpublic'
    ),
]
| StarcoderdataPython |
import sys
import json

# Echo the command-line arguments plus a fixed multi-line greeting as a
# single JSON document on stdout. (The original exercise comment suggested
# misspelling "sys" to provoke a NameError on purpose.)
send_message_back = dict(
    arguments=sys.argv[1:],
    message="""Hello,
This is my message.
To the world""",
)
print(json.dumps(send_message_back))
| StarcoderdataPython |
1738254 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2020 <NAME>
"""Post processing functions for Zemax import
.. Created on Mon Aug 10 18:17:55 2020
.. codeauthor: <NAME>
"""
import numpy as np
import rayoptics.seq.medium as mdm
import rayoptics.elem.profiles as profiles
import rayoptics.elem.surface as surface
from rayoptics.optical.model_enums import DecenterType as dec
def apply_fct_to_sm(opt_model, fct, start=None, stop=None, step=None):
    """Apply *fct* over seq_model.ifcs indices (reverse order by default).

    *fct(opt_model, cur)* is called once per index; the number of truthy
    results is returned. Defaults iterate from the last interface down to
    (but excluding) index 0.
    """
    interfaces = opt_model.seq_model.ifcs
    first = len(interfaces) - 1 if start is None else start
    last = 0 if stop is None else stop
    stride = -1 if step is None else step
    # Count how many applications reported a change.
    return sum(1 for cur in range(first, last, stride) if fct(opt_model, cur))
def convert_to_bend(opt_model, cur):
    """Scan the zemax import for tilted mirrors and convert to BEND types.

    If the reflecting interface at index *cur* is sandwiched between two
    COORDBRK (coordinate break) interfaces with identical Euler angles,
    fold the tilt into the mirror itself as a BEND decenter and delete the
    two coordinate breaks from the sequential model.

    Returns True if a conversion was performed, False otherwise.
    """
    sm = opt_model.seq_model
    ifc = sm.ifcs[cur]
    if ifc.interact_mode == 'reflect':
        # Neighbouring interfaces before and after the mirror.
        ifc_p = sm.ifcs[cur-1]
        ifc_f = sm.ifcs[cur+1]
        if (ifc_p.z_type == 'COORDBRK' and ifc_f.z_type == 'COORDBRK'):
            # Identical tilt before and after -> classic fold-mirror setup.
            if np.array_equal(ifc_f.decenter.euler, ifc_p.decenter.euler):
                ifc.decenter = ifc_p.decenter
                ifc.decenter.dtype = dec.BEND
                # Remove the following break first so *cur-1* stays valid.
                sm.remove(cur+1, prev=True)
                sm.remove(cur-1)
                return True
    return False
def convert_to_dar(opt_model, cur):
    """Scan the zemax import for tilted surfs and convert to DAR types.

    If the interface at *cur* lies between two COORDBRK interfaces whose
    decenters and Euler angles cancel exactly (decenter-and-return pattern),
    fold the transform into the surface as a DAR decenter and delete both
    coordinate breaks.

    Returns True if a conversion was performed, False otherwise.
    """
    sm = opt_model.seq_model
    if cur < len(sm.ifcs)-1:
        ifc = sm.ifcs[cur]
        ifc_p = sm.ifcs[cur-1]
        ifc_f = sm.ifcs[cur+1]
        if (ifc_p.z_type == 'COORDBRK' and ifc_f.z_type == 'COORDBRK'):
            # Sum the transforms; a decenter-and-return pair cancels to zero.
            acum_dec = ifc_f.decenter.dec + ifc_p.decenter.dec
            acum_euler = ifc_f.decenter.euler + ifc_p.decenter.euler
            if np.all(acum_dec == 0) and np.all(acum_euler == 0):
                ifc.decenter = ifc_p.decenter
                ifc.decenter.dtype = dec.DAR
                # Remove the following break first so *cur-1* stays valid.
                sm.remove(cur+1, prev=True)
                sm.remove(cur-1)
                return True
    return False
def collapse_coordbrk(opt_model, cur):
    """Attempt to apply the cur COORDBRK to an adjacent real interface.

    REV (return) breaks are folded into the preceding interface, all other
    breaks into the following one — but only when that interface has no
    decenter of its own yet. The coordinate break is then removed.

    Returns True if the break was collapsed, False otherwise.
    """
    sm = opt_model.seq_model
    ifc_cb = sm.ifcs[cur]
    if ifc_cb.z_type == 'COORDBRK':
        if ifc_cb.decenter.dtype == dec.REV:
            # Return-transform: attach to the previous interface.
            ifc = sm.ifcs[cur-1]
            prev = True
        else:
            ifc = sm.ifcs[cur+1]
            prev = False
        if ifc.decenter is not None:
            # Target already carries a decenter; don't clobber it.
            return False
        else:
            ifc.decenter = ifc_cb.decenter
            sm.remove(cur, prev=prev)
            return True
    return False
def remove_null_sg(opt_model, cur):
    """Remove sg with planar profile and an adjacent zero thickness air gap.

    A "null" surface (planar, undecentered, transmitting) contributes
    nothing optically when it borders a zero-thickness air gap; drop the
    surface together with that gap.

    Returns True if the surface/gap pair was removed, False otherwise.
    """
    sm = opt_model.seq_model
    ifc = sm.ifcs[cur]
    if is_null_ifc(ifc):
        prev = None
        # Whether a gap exists after (cur_gap) / before (prev_gap) *cur*.
        cur_gap = False if len(sm.gaps)-1 < cur else True
        prev_gap = True if 0 < cur else False
        if cur_gap and is_null_gap(sm.gaps[cur]):
            prev = False
        elif prev_gap and is_null_gap(sm.gaps[cur-1]):
            prev = True
        if prev is not None:
            sm.remove(cur, prev=prev)
            return True
    return False
def is_null_ifc(ifc):
    """Return True for a plain planar, undecentered, transmitting Surface."""
    # Short-circuits left to right, so attribute access is only attempted
    # on genuine Surface instances with a spherical profile.
    return (
        isinstance(ifc, surface.Surface)
        and isinstance(ifc.profile, profiles.Spherical)
        and ifc.profile.cv == 0
        and ifc.decenter is None
        and ifc.interact_mode == 'transmit'
    )
def is_null_gap(gap):
    """Return True for a zero-thickness gap filled with air."""
    return gap.thi == 0 and isinstance(gap.medium, mdm.Air)
| StarcoderdataPython |
1611030 |
class AAShadow:
    """Fluent value object describing a drop shadow.

    Each ``*Set`` method assigns one attribute and returns ``self`` so the
    calls can be chained, e.g. ``AAShadow().colorSet("#000").widthSet(2)``.
    """

    color: str      # shadow color (string form, e.g. hex or name)
    offsetX: float  # horizontal offset
    offsetY: float  # vertical offset
    opacity: float  # shadow opacity
    width: float    # shadow (blur) width

    def colorSet(self, prop: str) -> "AAShadow":
        """Set the shadow color and return self for chaining."""
        self.color = prop
        return self

    def offsetXSet(self, prop: float) -> "AAShadow":
        """Set the horizontal offset and return self for chaining."""
        self.offsetX = prop
        return self

    def offsetYSet(self, prop: float) -> "AAShadow":
        """Set the vertical offset and return self for chaining."""
        self.offsetY = prop
        return self

    def opacitySet(self, prop: float) -> "AAShadow":
        """Set the opacity and return self for chaining."""
        self.opacity = prop
        return self

    def widthSet(self, prop: float) -> "AAShadow":
        """Set the shadow width and return self for chaining."""
        self.width = prop
        return self
| StarcoderdataPython |
1727754 | import argparse
import os
# Cluster configuration: the remote home directory is derived from the
# student id, and `servers` lists the SSH host aliases to fan out to.
student_number = 1155114481
home_dir = "/home/" + str(student_number) + "/"
servers = ['proj5', 'proj6', 'proj7', 'proj8', 'proj9', 'proj10']
# Base ssh invocation; host-key checking is disabled so first-time
# connections to the lab machines don't prompt interactively.
ssh_cmd = (
    "ssh "
    "-o StrictHostKeyChecking=no "
)
# [clone]:
# python3 batch_ops.py -o clone -u https://github.com/RickAi/minips.git
#
# [pull]:
# python3 batch_ops.py -o pull
#
# [build]:
# python3 batch_ops.py -o build -d minips
if __name__ == '__main__':
    # Fan one git/build operation out to every server over ssh.
    parser = argparse.ArgumentParser(description='Batch Ops Tool')
    parser.add_argument('-o', choices=['clone', 'pull', 'build'], default="pull", help='The operation, e.g. clone, fetch, build')
    parser.add_argument('-u', nargs="?", default=" ", help='The remote repo url the git operation will execute')
    parser.add_argument('-d', nargs="?", default="", help='The dir that will be built')
    args = parser.parse_args()
    op = args.o
    url = args.u
    build_dir = args.d
    # 'build' is the only operation that needs a target directory.
    if build_dir == "" and op == 'build':
        parser.print_help()
        exit(1)
    for server_name in servers:
        # Assemble: ssh <host> "cd <home>; <operation>" &
        cmd = ssh_cmd + server_name + " "
        cmd += "\""
        cmd += "cd " + home_dir + ";"
        if op == 'build':
            cmd += "cd " + build_dir + ";"
            cmd += "mkdir debug;"
            # NOTE(review): a 'debug' directory is created but never entered,
            # so cmake runs in <build_dir> with '..' as the source dir —
            # confirm whether "cd debug;" was intended before the cmake call.
            cmd += "cmake -DCMAKE_BUILD_TYPE=Debug ..;"
        else :
            cmd += "git " + op + " " + url
        # Trailing '&' backgrounds each ssh so all servers run in parallel.
        cmd += "\" &"
        print(cmd)
        os.system(cmd)
| StarcoderdataPython |
3394077 | <filename>misc/log.py
import argparse
import os
from datetime import datetime
from dateutil import tz
log_dir = None
reg_log_dir = None
LOG_FOUT = None
inited = False
def setup_log(args):
    """Create the run's log directory and open the training log file.

    Builds a directory name from the --config and --model_config basenames,
    creates ./tf_logs/<name>, repoints the ./tf_logs/latest symlink at it,
    and opens log_train.txt plus a 'registration' subdirectory. Idempotent:
    a second call is a no-op thanks to the `inited` guard. All results are
    published through module globals (LOG_FOUT, log_dir, reg_log_dir, ...).
    """
    global LOG_FOUT, log_dir, inited, start_time, reg_log_dir
    if inited:
        return
    inited = True
    # e.g. "configs/config_baseline_x.yaml" -> "cb_x"
    config = args.config.split('/')[-1].split('.')[0].replace('config_baseline', 'cb')
    model_config = args.model_config.split('/')[-1].split('.')[0]
    tz_sh = tz.gettz('Asia/Shanghai')
    now = datetime.now(tz=tz_sh)
    if (not os.path.exists("./tf_logs")):
        os.mkdir("./tf_logs")
    # dir = '{}-{}-{}'.format(config, model_config, now.strftime("%m%d-%H%M%S"))
    dir = '{}-{}'.format(config, model_config)
    log_dir = os.path.join("./tf_logs", dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # Replace the 'latest' symlink so it always points at the newest run.
    os.system('rm -r {}'.format(os.path.join("./tf_logs", 'latest')))
    os.system("cd tf_logs && ln -s {} {} && cd ..".format(dir, "latest"))
    start_time = now
    LOG_FOUT = open(os.path.join(log_dir, 'log_train.txt'), 'w')
    log_string('log dir: {}'.format(log_dir))
    reg_log_dir = os.path.join(log_dir, "registration")
    if not os.path.exists(reg_log_dir):
        os.makedirs(reg_log_dir)
def log_string(out_str, end='\n'):
    """Append *out_str* (plus *end*) to the log file and echo it to stdout."""
    LOG_FOUT.write(out_str + end)
    LOG_FOUT.flush()
    print(out_str, end=end, flush=True)
def log_silent(out_str, end='\n'):
    """Append *out_str* (plus *end*) to the log file without echoing."""
    LOG_FOUT.write(out_str + end)
    LOG_FOUT.flush()
# Module import side effect: parse known CLI flags and initialize logging
# immediately, so any importer of this module gets a configured logger.
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str)
parser.add_argument('--model_config', type=str)
parser.add_argument('--debug', dest='debug', action='store_true')
parser.set_defaults(debug=False)
parser.add_argument('--checkpoint', type=str, required=False, help='Trained model weights', default="")
parser.add_argument('--weights')
parser.add_argument('--log', action='store_true')
parser.add_argument('--visualize', dest='visualize', action='store_true')
parser.set_defaults(visualize=False)
# parse_known_args ignores flags owned by other modules instead of erroring.
args = parser.parse_known_args()[0]
setup_log(args)
def is_last_run_end(last_run_file):
    """Return True if any line of *last_run_file* records an 'end' marker."""
    with open(last_run_file) as fh:
        return any('end' in line for line in fh)
# Derive a per-GPU "last run" bookkeeping file name (lastrun_<dev>.log).
# NOTE(review): the conventional environment variable is
# CUDA_VISIBLE_DEVICES (plural); confirm this singular lookup is intended.
cuda_dev = os.environ.get('CUDA_VISIBLE_DEVICE')
if cuda_dev is None:
    cuda_dev = '0'
last_run = 'lastrun_{}'.format(cuda_dev)
last_run_file = last_run + '.log'
last_run_id = 1
# while os.path.exists(last_run_file) and not is_last_run_end(last_run_file):
# last_run_file = last_run + str(last_run_id) + '.log'
# last_run_id += 1
# with open(last_run_file, 'w') as f:
# f.write(f'start:{start_time.strftime("%m%d-%H%M%S")}\n')
# f.write(f'log_dir:{log_dir}\n')
# for k,v in vars(args).items():
# f.write(f'{k}:{v}\n')
# @atexit.register
# def end_last_run():
# tz_sh = tz.gettz('Asia/Shanghai')
# now = datetime.now(tz=tz_sh)
# with open(last_run_file, 'a') as f:
# f.write(f'end:{now.strftime("%m%d-%H%M%S")}\n')
| StarcoderdataPython |
1779206 | <reponame>Saad-Shaikh/COVID19-Count-Notifier
#!/usr/bin/env python3
import credentials as cred
import details as det
import utils
import smtplib
import schedule
import time
def send_via_email():
    """
    Send an email to every person in the email list.

    Fetches the current COVID-19 counts once, then, for each recipient in
    ``details.email_list``, formats a message for that recipient's states
    and cities and sends it via Gmail SMTP (STARTTLS on port 587) using the
    credentials from the ``credentials`` module.
    """
    count_list = utils.get_count_list(det.states_and_cities, det.state_url, det.district_url)
    for entry in det.email_list:
        # A fresh SMTP connection is opened (and closed) per recipient.
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.starttls()
        s.login(cred.login_email, cred.login_pass)
        message = utils.form_message_for_email(count_list, entry.get('states'), entry.get('cities'))
        # The "Subject:" prefix embeds the mail subject into the raw body.
        s.sendmail(cred.login_email, entry.get('email'), "Subject: Daily COVID19 Count\n\n" + message)
        print("Email sent to " + entry.get('email'))
        s.quit()
    print()
# To send mails daily at a fixed time instead of once per invocation,
# enable this scheduler loop and comment out the __main__ block below.
# schedule.every().day.at("04:30").do(send_via_email)
# while(1):
#     schedule.run_pending()
#     time.sleep(1)
if __name__ == '__main__':
    # One-shot mode: send the notification batch immediately and exit.
    send_via_email()
| StarcoderdataPython |
1672754 | <filename>Graphics/MessageBox.py
from tkinter import *
from tkinter import messagebox
root = Tk()
root.title("Buttons")
root.geometry("300x300")
root.iconbitmap("assets/favicon.ico")
def info_box():
messagebox.showinfo("Info Box","This is an Info Box") # First Arg is title, Second Arg is the info.
def error_box():
messagebox.showerror("Error Box","This is an Error Box") #showerror displays error box
def warning_box():
messagebox.showwarning("Warning Box","This is a Warning Box") #showwarning displays warning box
info_button = Button(root,text="Info Box",command=info_box)
info_button.grid(column=0,row=0)
error_button = Button(root,text="Error Box",command=error_box)
error_button.grid(column=1,row=0)
warning_button = Button(root,text="Warning Box",command=warning_box)
warning_button.grid(column=2,row=0)
root.mainloop()
# Demo 2: question-style message boxes that return the user's answer.
# Runs only after the first window is closed; note this second root never
# enters mainloop(), so the dialogs appear without a running event loop.
root = Tk()
root.title("Buttons")
root.geometry("300x300")
root.iconbitmap("assets/favicon.ico")
# askyesno returns True for Yes, False for No.
answer = messagebox.askyesno("Yes or No Box","Do you like Blue?")
if(answer):
    print("\nYou like blue!!")
else:
    print("\nOh, guess not everyone likes blue.")
# askokcancel/askretrycancel return True/False; askyesnocancel may also
# return None when the user picks Cancel.
temp = messagebox.askokcancel("Okay,Cancel Box","This is another message box!")
temp = messagebox.askretrycancel("Retry, Cancel Box","What do you want to do?")
temp = messagebox.askyesnocancel("Yes,No,Cancel Box","Do you like PYTHON!????")
if(temp):
    print("\nNice")
else:
    print("\nOh, guess not everyone likes Python xD")
| StarcoderdataPython |
1715480 | import enum
class MailTag(enum.Enum):
    """Tags identifying the purpose of an outgoing mail."""
    SUBSCRIBE = "subscribe"
    VERIFY = "verify"
    INVOICE = "invoice"
class FileType(enum.IntEnum):
    """Kinds of stored documents."""
    INVOICE = 0
    LETTER = 1
class FileStatus(enum.IntEnum):
    """Lifecycle state of a stored file."""
    VALID = 0
    DEPRECATED = 1
    INVALID = 2
class FileVerificationStatus(enum.IntEnum):
    """Outcome of a file verification attempt."""
    SUCCESS = 0
    FAILED = 1
| StarcoderdataPython |
3240553 | <reponame>svetasmirnova/mysqlcookbook<filename>recipes/tblmgmt/uniq_name.py
#!/usr/bin/python3
# uniq_name.py: show how to use PID to create table name
# The '#@ ...' marker pairs delimit snippets extracted by the book's
# tooling; keep them as-is.
#@ _GENERATE_NAME_WITH_PID_1_
import os
#@ _GENERATE_NAME_WITH_PID_1_
# Current process id — unique among concurrently running processes, which
# makes it a convenient suffix for a temporary table name.
pid = os.getpid()
print("PID: %s" % pid)
#@ _GENERATE_NAME_WITH_PID_2_
tbl_name = "tmp_tbl_%d" % os.getpid()
#@ _GENERATE_NAME_WITH_PID_2_
print("Table name: %s" % tbl_name)
| StarcoderdataPython |
1613215 | <reponame>greck2908/gamification-engine<gh_stars>100-1000
import sys
import os
import json
from gengine.base.settings import get_settings
from gengine.base.util import lstrip_word
from pyramid.settings import asbool
def includeme(config):
    """Pyramid includeme hook: serve the built admin JS assets statically."""
    config.add_static_view(name='admin/jsstatic', path='gengine:app/jsscripts/build/static')
def get_jsmain():
    """Return the URL of the admin UI's main JavaScript bundle.

    When the ``load_from_webpack_dev_server`` setting is truthy, the bundle
    is served live by the local webpack dev server; otherwise the hashed
    build filename is resolved from webpack's asset-manifest.json.
    (An unreachable trailing ``return None`` was removed — both branches
    already return.)
    """
    debug = asbool(get_settings().get("load_from_webpack_dev_server", False))
    if debug:
        return "http://localhost:3000/static/js/bundle.js"
    modpath = os.path.dirname(sys.modules[__name__].__file__)
    buildpath = os.path.join(modpath, "build")
    with open(os.path.join(buildpath, "asset-manifest.json"), "r") as f:
        manifest = json.load(f)
    # Manifest paths start with "static/"; strip it since the static view
    # is mounted at /admin/jsstatic/ directly above that folder.
    return "/admin/jsstatic/" + lstrip_word(manifest["main.js"], "static/")
def get_cssmain():
    """Return the URL of the admin UI's main CSS bundle.

    Mirrors :func:`get_jsmain`: serves from the webpack dev server in debug
    mode, otherwise resolves the hashed filename from asset-manifest.json.
    (An unreachable trailing ``return None`` was removed — both branches
    already return.)
    """
    debug = asbool(get_settings().get("load_from_webpack_dev_server", False))
    if debug:
        return "http://localhost:3000/static/css/bundle.css"
    modpath = os.path.dirname(sys.modules[__name__].__file__)
    buildpath = os.path.join(modpath, "build")
    with open(os.path.join(buildpath, "asset-manifest.json"), "r") as f:
        manifest = json.load(f)
    # Manifest paths start with "static/"; strip it since the static view
    # is mounted at /admin/jsstatic/ directly above that folder.
    return "/admin/jsstatic/" + lstrip_word(manifest["main.css"], "static/")
| StarcoderdataPython |
1624120 | <reponame>polde-live/python-mich-3<gh_stars>0
import socket
import re
# Python 2 script (raw_input, print statements, str sent over the socket):
# prompt for a URL, extract its host, and dump the raw HTTP response.
conStr = raw_input('URL to be searched:')
# Pull "host" out of "http://host/..."; findall returns a (possibly empty)
# list, so a URL without that shape raises IndexError below -> "Bad URL".
host = re.findall('http://(.+?)/', conStr)
try:
    mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    mysock.connect((host[0], 80))
    # Hand-rolled HTTP/1.0 request; note the full URL is used as the path.
    mysock.send('GET %s HTTP/1.0\n\n' % conStr)
    while True:
        data = mysock.recv(512)
        if (len(data) < 1):
            break
        print data
    mysock.close()
except:
    # Any failure (bad regex match, DNS, connection) is reported the same.
    print 'Bad URL'
| StarcoderdataPython |
1690460 | <reponame>gorshok-py/TerminalTelegramBOT<filename>cmdwin.py
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
from subprocess import check_output
# Bot wiring: '<PASSWORD>' is a placeholder where the real token was
# scrubbed before publishing.
bot = Bot(token='<PASSWORD>')
dp = Dispatcher(bot)
# Telegram chat id of the single user allowed to run commands.
user_id = 157191657
@dp.message_handler(content_types="text")
async def process_start_command(message: types.Message):
    """Execute the incoming text as a shell command and reply with its output."""
    # Only the hard-coded owner chat may run commands.
    if (user_id == message.chat.id):
        comand = message.text
        try:
            # SECURITY: runs arbitrary text with shell=True; acceptable only
            # because access is restricted to the single chat id above.
            # Output is decoded as cp866 (the Windows Russian console codepage).
            await bot.send_message(message.chat.id, check_output(comand, shell=True).decode('cp866'))
        except:
            # Any failure (bad command, nonzero exit, decode error) is
            # reported as "Некорректная команда" ("invalid command").
            await bot.send_message(message.chat.id, "Некорректная команда")
if __name__ == '__main__':
    # Start long-polling for Telegram updates; blocks until interrupted.
    executor.start_polling(dp)
1655263 | <filename>AEME/AEME.py
# Class to generate Autoencoder Meta Embeddings (AEME)
# File: AEME.py
# Author: <NAME>
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, RandomSampler, DataLoader
from sklearn.preprocessing import LabelEncoder
import time
import numpy as np
import gc
from DAE import DAE
from CAE import CAE
from AAE import AAE
class AEME():
""" Class to implement Autoencoder for generating Meta-Embeddings """
    def __init__(self, mode="CAE", input_dim=300, latent_dim=100, activation="leaky_relu", lambda1=1, lambda2=1, lambda3=1, lambda4=1, lambda5=1, lambda6=1):
        """ Constructor to initialize autoencoder parameters.
        @param mode (string): type of Autoencoder to build: Decoupled (DAE), Concatenated (CAE), or Averaged (AAE). Default: "CAE".
        @param input_dim (int): dimensionality of each source embedding. Default: 300.
        @param latent_dim (int): latent dimension for each autoencoder. Default: 100.
        @param activation (string): "leaky_relu" or "paramaterized_leaky_relu"; any other value falls back to plain ReLU.
        @param lambda1 (int): loss weight for part 1. Default: 1.
        @param lambda2 (int): loss weight for part 2. Default: 1.
        @param lambda3 (int): loss weight for part 3. Default: 1.
        @param lambda4 (int): loss weight for part 4 (DAE only). Default: 1.
        @param lambda5 (int): loss weight for part 5 (DAE only). Default: 1.
        @param lambda6 (int): loss weight for part 6 (DAE only). Default: 1.
        """
        # Maps vocabulary words <-> integer ids for TensorDataset storage.
        self.label_encoder = LabelEncoder()
        # Prefer the first CUDA device when available.
        if torch.cuda.is_available():
            self.device = torch.device("cuda:0")
            print("GPU : ", torch.cuda.get_device_name(0))
        else:
            self.device = torch.device("cpu")
            print("CPU on")
        self.mode = mode
        self.encoder = None
        # Resolve the activation name to a torch module instance.
        if activation == "leaky_relu":
            activation = nn.LeakyReLU()
        elif activation == "paramaterized_leaky_relu":
            activation = nn.PReLU()
        else:
            activation = nn.ReLU()
        # Instantiate the requested autoencoder variant; note an unknown
        # mode leaves self.ae unset and fails later at train() time.
        if mode == "DAE":
            self.ae = DAE(input_dim, latent_dim, activation, lambda1, lambda2, lambda3, lambda4, lambda5, lambda6)
        elif mode == "CAE":
            self.ae = CAE(input_dim, latent_dim, activation, lambda1, lambda2, lambda3)
        elif mode == "AAE":
            self.ae = AAE(input_dim, latent_dim, activation, lambda1, lambda2, lambda3)
def add_noise(self, data, masking_noise_factor):
"""Function to add mask noise to data.
@param data (np.array): data to add noise to.
@param masking_noise_factor (float): Percentage of noise to add to the data.
@return data (np.array): noise added data.
"""
data_size, feature_size = data.shape
for i in range(data_size):
mask_noise = np.random.randint(0, feature_size, int(feature_size * masking_noise_factor))
for m in mask_noise:
data[i][m] = 0
return data
    def prepare_input(self, vocab, x_train1, x_train2, x_train3, batch_size=128, masking_noise_factor=0.05):
        """ Build a shuffled DataLoader of (noisy inputs, clean targets, word ids).
        @param vocab (list): intersection vocabulary, one word per row of the x_train tensors.
        @param x_train1 (torch.Tensor): source-embedding matrix 1.
        @param x_train2 (torch.Tensor): source-embedding matrix 2.
        @param x_train3 (torch.Tensor): source-embedding matrix 3.
        @param batch_size (int): batch size for the DataLoader. Default: 128.
        @param masking_noise_factor (float): fraction of features masked per row. Default: 0.05 (5%).
        @return: torch DataLoader yielding 7-tuples (noisy1, noisy2, noisy3, clean1, clean2, clean3, word_ids).
        """
        # Words are stored as integer ids so they can live in a TensorDataset.
        vocab = torch.as_tensor(self.label_encoder.fit_transform(vocab), device=self.device)
        # NOTE(review): add_noise must not mutate its argument in place —
        # otherwise the "clean" targets below alias the noisy inputs and the
        # denoising objective degenerates. Verify add_noise copies its input.
        x_train1_noisy = self.add_noise(x_train1, masking_noise_factor)
        x_train2_noisy = self.add_noise(x_train2, masking_noise_factor)
        x_train3_noisy = self.add_noise(x_train3, masking_noise_factor)
        tensor_dataset = torch.utils.data.TensorDataset(x_train1_noisy,
                                                        x_train2_noisy,
                                                        x_train3_noisy,
                                                        x_train1,
                                                        x_train2,
                                                        x_train3,
                                                        vocab)
        # Drop local references and reclaim GPU memory before training; the
        # TensorDataset keeps its own references to the tensors.
        del x_train1_noisy
        del x_train2_noisy
        del x_train3_noisy
        del x_train1
        del x_train2
        del x_train3
        del vocab
        gc.collect()
        torch.cuda.empty_cache()
        return torch.utils.data.DataLoader(dataset=tensor_dataset,
                                           sampler=torch.utils.data.RandomSampler(tensor_dataset),
                                           batch_size=batch_size)
    def train(self, tensor_dataset, epochs=500, checkpoint_path=""):
        """ Train the autoencoder with Adam (lr=0.001).
        @param tensor_dataset (DataLoader): batches as produced by prepare_input().
        @param epochs (int): number of training epochs. Default: 500.
        @param checkpoint_path (string): filename prefix for saved checkpoints. Default: "".
        DAE mode additionally feeds the bottleneck into the loss; the other
        modes use only the decoder outputs.
        """
        self.ae.train()
        self.ae.to(self.device)
        optimizer = torch.optim.Adam(self.ae.parameters(), lr=0.001)
        training_loss = []
        if self.mode == "DAE":
            for step in range(1, epochs+1):
                start = time.time()
                epoch_loss = 0.0
                for batch_data in tensor_dataset:
                    optimizer.zero_grad()
                    # Batch layout: 3 noisy inputs, 3 clean targets, word ids (unused here).
                    x_train1_noisy, x_train2_noisy, x_train3_noisy, x_train1, x_train2, x_train3, _ = tuple(t.to(self.device) for t in batch_data)
                    output, bottleneck = self.ae(x_train1_noisy, x_train2_noisy, x_train3_noisy)
                    # DAE loss also regularizes the bottleneck representations.
                    loss = self.ae.loss([output, bottleneck], [x_train1, x_train2, x_train3])
                    loss.backward()
                    epoch_loss = epoch_loss + loss.item()
                    optimizer.step()
                epoch_loss = epoch_loss/len(tensor_dataset)
                training_loss.append(epoch_loss)
                end = time.time()
                print("\nEpoch: {} of {} ----> loss: {:.5f}\t ETA: {:.2f} s".format(step, epochs, epoch_loss, (end-start)))
                # Checkpoint when loss improved over the previous epoch.
                # NOTE(review): training_loss[-1] is the current epoch, so
                # [-2] is the previous one; the len > 2 guard means saving
                # can only start from epoch 3 — confirm that's intended.
                if len(training_loss) > 2:
                    if epoch_loss < training_loss[-2]:
                        model_checkpoint = checkpoint_path + "_epoch_{}_loss_{:.5f}.pt".format(step, epoch_loss)
                        torch.save(self.ae.state_dict(), model_checkpoint)
        else:
            for step in range(1, epochs+1):
                start = time.time()
                epoch_loss = 0.0
                for batch_data in tensor_dataset:
                    optimizer.zero_grad()
                    x_train1_noisy, x_train2_noisy, x_train3_noisy, x_train1, x_train2, x_train3, _ = tuple(t.to(self.device) for t in batch_data)
                    # CAE/AAE: the bottleneck is not part of the loss.
                    output, _ = self.ae(x_train1_noisy, x_train2_noisy, x_train3_noisy)
                    loss = self.ae.loss(output, [x_train1, x_train2, x_train3])
                    loss.backward()
                    epoch_loss = epoch_loss + loss.item()
                    optimizer.step()
                epoch_loss = epoch_loss/len(tensor_dataset)
                training_loss.append(epoch_loss)
                end = time.time()
                print("\nEpoch: {} of {} ----> loss: {:5f}\t ETA: {:.2f} s".format(step, epochs, epoch_loss, (end-start)))
                if len(training_loss) > 2:
                    if epoch_loss < training_loss[-2]:
                        model_checkpoint = checkpoint_path + "_epoch_{}_loss_{:.5f}.pt".format(step, epoch_loss)
                        torch.save(self.ae.state_dict(), model_checkpoint)
    def predict(self, tensor_dataset, model_checkpoint):
        """ Generate meta-embeddings from the trained encoder's bottleneck.
        @param tensor_dataset (DataLoader): batches as produced by prepare_input().
        @param model_checkpoint (string): path to saved model weights (state_dict).
        @return embedding_dict (dict): word -> bottleneck embedding tensor.
        """
        self.ae.load_state_dict(torch.load(model_checkpoint))
        self.ae.eval()
        self.ae.to(self.device)
        embedding_dict = dict()
        for batch_data in tensor_dataset:
            # Only the clean inputs and the word ids are needed at inference.
            _, _, _, x_train1, x_train2, x_train3, words = tuple(t.to(self.device) for t in batch_data)
            # Map integer ids back to the original vocabulary words.
            words = self.label_encoder.inverse_transform(words.to('cpu')).tolist()
            with torch.no_grad():
                _, bottleneck = self.ae(x_train1, x_train2, x_train3)
                # Split the batch into per-word (1, latent_dim) slices.
                bottleneck = torch.split(bottleneck, 1, dim=0)
                for word, vec in list(zip(words, bottleneck)):
                    embedding_dict[word] = vec[0]
            # Free batch tensors eagerly to keep GPU memory bounded.
            del batch_data
            del x_train1
            del x_train2
            del x_train3
            del words
            del bottleneck
            gc.collect()
            torch.cuda.empty_cache()
        return embedding_dict
| StarcoderdataPython |
62397 | <reponame>Aneesh540/python-projects
"""How to extract data from Fraction class(is is not callable) but
__mul__ method is defined
>>> Fractions(1,2)*Fractions(3,4)
>>> Fractions(3,8)
>>> print(Fraction(6,8))
>>> 3/4
>>> t=Fraction(1,2)
>>> t.numerator
>>> 1
>>> t.denominator
>>> 2
"""
from fractions import Fraction
from functools import reduce
def product(fracs):
    """Return the product of *fracs* as a (numerator, denominator) tuple."""
    # reduce folds the list with Fraction.__mul__, yielding one Fraction
    # (already in lowest terms, as Fraction normalizes on construction).
    total = reduce(lambda left, right: left * right, fracs)
    return total.numerator, total.denominator
if __name__ == '__main__':
    # Input format: first line = number of fractions; each following line
    # holds "numerator denominator".
    fracs = []
    for _ in range(int(input())):
        fracs.append(Fraction(*map(int, input().split())))
    # NOTE(review): debug output — would break a judge expecting only the
    # final result line.
    print(fracs)
    result = product(fracs)
    # Star-unpacking prints "numerator denominator" on one line,
    # e.g. print(*(1, 2, 3, 4)) -> "1 2 3 4".
    print(*result)
| StarcoderdataPython |
3355710 | <reponame>DrEricEbert/fpga101-workshop<filename>tutorials/11-Computer/rom.py
import binascii
import sys
def split_every(n, s):
    """Split sequence *s* into consecutive chunks of length *n*.

    The final chunk may be shorter when ``len(s)`` is not a multiple of
    *n*. Works on any sliceable sequence (str, bytes, list, ...).
    FIX: uses ``range`` instead of Python-2-only ``xrange`` so the script
    also runs under Python 3.
    """
    return [s[i:i + n] for i in range(0, len(s), n)]
# Dump the file given on the command line as one hex byte pair per line
# (the format consumed by Verilog's $readmemh, per the enclosing tutorial).
filename = sys.argv[1]
with open(filename, 'rb') as f:
    content = f.read()
# FIX: on Python 3 hexlify() returns bytes, so printing each chunk showed
# a bytes repr like b'de'; decoding yields plain "de" (and is a no-op
# semantically on Python 2). Also avoids shadowing the builtin `list`.
hex_pairs = split_every(2, binascii.hexlify(content).decode('ascii'))
for pair in hex_pairs:
    print(pair)
| StarcoderdataPython |
1696929 | <reponame>prashantramnani/nn_likelihoods<gh_stars>1-10
#from .de_crossover_mcmc_parallel import DifferentialEvolutionCrossover
from .de_mcmc_one_core import DifferentialEvolutionSequential
#from .de_mcmc_parallel import DifferentialEvolutionParallel
#from .mh_mcmc_parallel import MetropolisHastingsParallel
#from .mh_mcmc_adaptive import MetropolisHastingsAdaptive
from .mh_componentwise import MetropolisHastingsComponentwise
#from .particle_filter_naive import ParticleFilterSequential
#from .particle_filter_parallel import ParticleFilterParallel
from .slice_sampler import SliceSampler
#from .importance import ImportanceSampler
#from .diagnostics import diagnostics
| StarcoderdataPython |
3303840 | <filename>o365spray/core/handlers/enumerator.py<gh_stars>100-1000
#!/usr/bin/env python3
"""
Based on: https://bitbucket.org/grimhacker/office365userenum/
https://github.com/Raikia/UhOh365
https://github.com/nyxgeek/onedrive_user_enum/blob/master/onedrive_enum.py
https://github.com/gremwell/o365enum/blob/master/o365enum.py
https://github.com/Gerenios/AADInternals/blob/master/KillChain_utils.ps1#L112
"""
import re
import time
import string
import random
import logging
import urllib3
import asyncio
import concurrent.futures
import concurrent.futures.thread
from uuid import uuid4
from typing import List, Dict, Union
from functools import partial
from requests.auth import HTTPBasicAuth
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from o365spray.core.handlers.base import BaseHandler # type: ignore
from o365spray.core import ( # type: ignore
Defaults,
DefaultFiles,
Helper,
ThreadWriter,
text_colors,
)
class Enumerator(BaseHandler):
"""Perform user enumeration against Microsoft O365."""
HELPER = Helper() # Helper functions
VALID_ACCOUNTS = [] # Valid accounts storage
    def __init__(
        self,
        loop: asyncio.unix_events._UnixSelectorEventLoop,
        module: str = "office",
        domain: str = None,
        output_dir: str = None,
        timeout: int = 25,
        proxy: Union[str, Dict[str, str]] = None,
        workers: int = 5,
        writer: bool = True,
        sleep: int = 0,
        jitter: int = 0,
        *args,
        **kwargs,
    ):
        """Initialize an Enumerator instance.

        Note:
            All arguments, besides loop, are optional so that the Enumerator
            instance can be used to re-run the run() method multiple times
            against multiple domains/user lists without requiring a new instance
            or class level var modifications.

        Arguments:
            <required>
            loop: asyncio event loop
            <optional>
            module: enumeration module to run
            domain: domain to enumerate users against
            output_dir: directory to write results to
            timeout: http request timeout (seconds)
            proxy: http request proxy (url string or requests-style dict)
            workers: thread pool worker rate
            writer: toggle writing to output files
            sleep: throttle http requests (seconds between requests)
            jitter: randomize throttle
        Raises:
            ValueError: if no output directory provided when output writing
            is enabled
        """
        # Registry of enumeration modules; disabled techniques stay listed
        # (mapped to None) so get_modules() still reports their names.
        self._modules = {
            "autodiscover": None, # self._autodiscover, # DISABLED
            "activesync": None, # self._activesync, # DISABLED
            "onedrive": self._onedrive,
            "office": self._office,
            "oauth2": self._oauth2,
        }
        if writer and not output_dir:
            raise ValueError("Missing 1 required argument: 'output_dir'")
        # If proxy server provided, build HTTP proxies object for
        # requests lib
        if isinstance(proxy, str):
            proxy = {"http": proxy, "https": proxy}
        self.loop = loop
        self.module = module
        self.domain = domain
        self.timeout = timeout
        self.proxies = proxy
        self.sleep = sleep
        self.jitter = jitter
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=workers)
        # Internal exit handler
        self.exit = False
        # Initialize writers
        self.writer = writer
        if self.writer:
            self.found_idp = False  # Init bool for IDP accounts
            self.idp_writer = ThreadWriter(DefaultFiles.ENUM_IDP, output_dir)
            self.valid_writer = ThreadWriter(DefaultFiles.ENUM_FILE, output_dir)
            self.tested_writer = ThreadWriter(DefaultFiles.ENUM_TESTED, output_dir)
    def shutdown(self, key: bool = False):
        """Custom method to handle exiting multi-threaded tasking.

        Prints a summary of the output files, defeats the default
        ThreadPoolExecutor atexit behavior so an interrupt doesn't block on
        in-flight work, and closes the result writers.

        Arguments:
            key: identify if we are shutting down normally or via a
            caught signal (CTRL-C)
        """
        msg = "\n\n[ ! ] CTRL-C caught." if key else "\n"
        if self.writer:
            msg += f"\n[ * ] Valid accounts can be found at: '{self.valid_writer.output_file}'"
            if self.found_idp:
                msg += f"\n[ * ] Accounts in different Identity Providers can be found at: '{self.idp_writer.output_file}'"
            msg += f"\n[ * ] All enumerated accounts can be found at: '{self.tested_writer.output_file}'\n"
        print(Defaults.ERASE_LINE, end="\r")
        logging.info(msg)
        # https://stackoverflow.com/a/48351410
        # https://gist.github.com/yeraydiazdiaz/b8c059c6dcfaf3255c65806de39175a7
        # Unregister _python_exit while using asyncio
        # Shutdown ThreadPoolExecutor and do not wait for current work
        import atexit
        atexit.unregister(concurrent.futures.thread._python_exit)
        self.executor.shutdown = lambda wait: None
        # Close the open file handles
        if self.writer:
            self.idp_writer.close()
            self.valid_writer.close()
            self.tested_writer.close()
    def get_modules(self):
        """Return the available enumeration module names.

        Note: this is a dict_keys view (not a list); it supports iteration
        and membership tests, and includes disabled modules mapped to None.
        """
        return self._modules.keys()
# =============================
# == -- ActiveSync MODULE -- ==
# =============================
def _activesync(self, domain: str, user: str, password: str = "<PASSWORD>"):
    """Enumerate users on Microsoft using Microsoft Server ActiveSync

    Original enumeration via: https://bitbucket.org/grimhacker/office365userenum/

    Arguments:
        <required>
        domain: domain to enumerate against
        user: username for enumeration request
        <optional>
        password: password for enumeration request

    Raises:
        NotImplementedError
    """
    # Module disabled: everything below the raise is intentionally
    # unreachable and kept only for reference.
    raise NotImplementedError("This method is not currently implemented.")
    try:
        # Grab external headers from defaults.py and add special header
        # for ActiveSync
        # NOTE(review): this aliases (and would mutate) the shared
        # Defaults.HTTP_HEADERS dict rather than copying it -- confirm
        # intent before re-enabling this module.
        headers = Defaults.HTTP_HEADERS
        headers["MS-ASProtocolVersion"] = "14.0"
        # Build email if not already built
        email = self.HELPER.check_email(user, domain)
        # Write the tested user (record the original -> email mapping
        # when the username had to be expanded)
        tested = f"{user} -> {email}" if user != email else email
        if self.writer:
            self.tested_writer.write(tested)
        time.sleep(0.250)
        auth = HTTPBasicAuth(email, password)
        url = "https://outlook.office365.com/Microsoft-Server-ActiveSync"
        response = self._send_request(
            "options",
            url,
            auth=auth,
            headers=headers,
            proxies=self.proxies,
            timeout=self.timeout,
            sleep=self.sleep,
            jitter=self.jitter,
        )
        status = response.status_code
        if status == 200:
            if self.writer:
                self.valid_writer.write(email)
            self.VALID_ACCOUNTS.append(email)
            logging.info(f"[{text_colors.green}VALID{text_colors.reset}] {email}")
        # Note: After the new MS updates, it appears that invalid users return a
        # 403 Forbidden while valid users appear to respond with a
        # 401 Unauthorized with a WWW-Authenticate response header that
        # indicates Basic Auth negotiation was started
        elif status == 401 and "WWW-Authenticate" in response.headers.keys():
            if self.writer:
                self.valid_writer.write(email)
            self.VALID_ACCOUNTS.append(email)
            logging.info(f"[{text_colors.green}VALID{text_colors.reset}] {email}")
        # Note: Since invalid user's are now identified via 403 responses, we can
        # just default all 403/404/etc. as invalid users
        else:
            print(
                f"[{text_colors.red}INVALID{text_colors.reset}] "
                f"{email}{' '*10}",
                end="\r",
            )
    except Exception as e:
        # Swallow per-user failures so one bad request does not stop the run
        logging.debug(e)
        pass
# ===========================
# == -- OneDrive MODULE -- ==
# ===========================
def _onedrive(self, domain: str, user: str, password: str = "<PASSWORD>"):
    """Enumerate users on Microsoft by probing OneDrive personal sites.

    https://github.com/nyxgeek/onedrive_user_enum/blob/master/onedrive_enum.py
    https://www.trustedsec.com/blog/achieving-passive-user-enumeration-with-onedrive/

    Arguments:
        <required>
        domain: domain to enumerate against
        user: username for enumeration request
        <optional>
        password: unused; kept for a uniform module signature

    Raises:
        Exception: generic handler so we can successfully fail without
          crashing the run
    """
    try:
        # Work with the bare username; strip any email domain portion
        handle = user.split("@")[0]
        # Record what was tested (and how the input was normalized)
        if self.writer:
            label = handle if handle == user else f"{user} -> {handle}"
            self.tested_writer.write(label)
        time.sleep(0.250)
        # TODO: Continue testing to find the best method
        # of constructing this data from the provided
        # domain
        # Normalize the domain: drop any scheme and path component
        host = re.sub("^https?://", "", domain).split("/")[0]
        labels = host.split(".")
        # Assume the first label is the tenant name
        # (e.g. tenant.onmicrosoft.com or tenant.com)
        tenant = labels[0]
        # SharePoint personal URLs replace dots with underscores
        domain_part = "_".join(labels)
        user_part = handle.replace(".", "_")
        url = (
            f"https://{tenant}-my.sharepoint.com/personal/"
            f"{user_part}_{domain_part}/_layouts/15/onedrive.aspx"
        )
        response = self._send_request(
            "get",
            url,
            proxies=self.proxies,
            timeout=self.timeout,
            sleep=self.sleep,
            jitter=self.jitter,
        )
        # Valid browser User-Agents appear to receive a 302 redirect for
        # valid accounts; 401/403 also indicate the personal site exists
        if response.status_code in (302, 401, 403):
            if self.writer:
                self.valid_writer.write(handle)
            self.VALID_ACCOUNTS.append(handle)
            logging.info(f"[{text_colors.green}VALID{text_colors.reset}] {handle}")
        else:
            # 404 means no such personal site; treat everything else as
            # invalid/unknown as well
            print(
                f"[{text_colors.red}INVALID{text_colors.reset}] {handle}{' ' * 10}",
                end="\r",
            )
    except Exception as e:
        logging.debug(e)
        pass
# =============================
# == -- Office.com MODULE -- ==
# =============================
def _pre_office(self):
    """
    Pre-handling of Office.com enumeration

    Collect and build the correct header and parameter data to perform
    user enumeration against office.com

    https://github.com/gremwell/o365enum/blob/master/o365enum.py
    """
    # Request the base domain to collect the `client_id` (appId)
    response = self._send_request(
        "get",
        "https://www.office.com",
        proxies=self.proxies,
        timeout=self.timeout,
        sleep=self.sleep,
        jitter=self.jitter,
    )
    client_id = re.findall(b'"appId":"([^"]*)"', response.content)
    # Request the /login page and follow redirects to collect the following params:
    # `hpgid`, `hpgact`, `hpgrequestid`
    response = self._send_request(
        "get",
        "https://www.office.com/login?es=Click&ru=/&msafed=0",
        proxies=self.proxies,
        timeout=self.timeout,
        allow_redirects=True,
        sleep=self.sleep,
        jitter=self.jitter,
    )
    hpgid = re.findall(b'hpgid":([0-9]+),', response.content)
    hpgact = re.findall(b'hpgact":([0-9]+),', response.content)
    hpgrequestid = response.headers["x-ms-request-id"]
    # FIX: copy the default external headers instead of aliasing them --
    # the previous code assigned the shared Defaults.HTTP_HEADERS dict
    # directly, so every office-specific header added below leaked into
    # the requests made by all other enumeration/spray modules.
    self.office_headers = dict(Defaults.HTTP_HEADERS)
    # Update headers
    self.office_headers["Referer"] = response.url
    self.office_headers["hpgrequestid"] = hpgrequestid
    self.office_headers["client-request-id"] = client_id[0]
    self.office_headers["hpgid"] = hpgid[0]
    self.office_headers["hpgact"] = hpgact[0]
    self.office_headers["Accept"] = "application/json"
    self.office_headers["Origin"] = "https://login.microsoftonline.com"
    # Build random canary token (248 chars of the URL-safe base64 alphabet)
    self.office_headers["canary"] = "".join(
        random.choice(
            string.ascii_uppercase + string.ascii_lowercase + string.digits + "-_"
        )
        for _ in range(248)
    )
    # fmt: off
    # Build the Office request data; `sCtx` carries the login session
    # context scraped from the login page
    self.office_data = {
        "originalRequest": re.findall(
            b'"sCtx":"([^"]*)"',
            response.content,
        )[0].decode("utf-8"),
        "isOtherIdpSupported": True,
        "isRemoteNGCSupported": True,
        "isAccessPassSupported": True,
        "checkPhones": False,
        "isCookieBannerShown": False,
        "isFidoSupported": False,
        "forceotclogin": False,
        "isExternalFederationDisallowed": False,
        "isRemoteConnectSupported": False,
        "isSignup": False,
        "federationFlags": 0,
    }
    # fmt: on
def _office(self, domain: str, user: str, password: str = "<PASSWORD>"):
    """Enumerate users on Microsoft using Office.com

    https://github.com/gremwell/o365enum/blob/master/o365enum.py

    Arguments:
        <required>
        domain: domain to enumerate against
        user: username for enumeration request
        <optional>
        password: password for enumeration request (not referenced by
          this module's request)

    Raises:
        Exception: generic handler so we can successfully fail without
          crashing the run
    """
    try:
        # Grab prebuilt office headers (built once by _pre_office)
        headers = self.office_headers
        # Build email if not already built
        email = self.HELPER.check_email(user, domain)
        # Write the tested user
        tested = f"{user} -> {email}" if user != email else email
        if self.writer:
            self.tested_writer.write(tested)
        time.sleep(0.250)
        # NOTE(review): this aliases self.office_data, so `username` is
        # mutated across calls; harmless since every call overwrites it.
        data = self.office_data
        data["username"] = email
        url = "https://login.microsoftonline.com/common/GetCredentialType?mkt=en-US"
        response = self._send_request(
            "post",
            url,
            json=data,
            headers=headers,
            proxies=self.proxies,
            timeout=self.timeout,
            sleep=self.sleep,
            jitter=self.jitter,
        )
        status = response.status_code
        body = response.json()
        if status == 200:
            # This enumeration is only valid if the user has DesktopSSO
            # enabled
            # https://github.com/Gerenios/AADInternals/blob/master/KillChain_utils.ps1#L93
            if "DesktopSsoEnabled" in body["EstsProperties"]:
                is_desktop_sso = body["EstsProperties"]["DesktopSsoEnabled"]
                if not is_desktop_sso:
                    logging.info(f"Desktop SSO disabled. Shutting down...")
                    self.exit = True
                    return self.shutdown()
            # Check if the requests are being throttled and shutdown
            # if so
            is_request_throttled = int(body["ThrottleStatus"])
            if is_request_throttled == 1:
                logging.info(f"Requests are being throttled. Shutting down...")
                self.exit = True
                return self.shutdown()
            if_exists_result = int(body["IfExistsResult"])
            # It appears that both 0 and 6 response codes indicate a valid user
            # whereas 5 indicates the use of a different identity provider -- let's
            # account for that.
            # https://www.redsiege.com/blog/2020/03/user-enumeration-part-2-microsoft-office-365/
            # https://warroom.rsmus.com/enumerating-emails-via-office-com/
            if if_exists_result in [0, 6]:
                if self.writer:
                    self.valid_writer.write(email)
                self.VALID_ACCOUNTS.append(email)
                logging.info(
                    f"[{text_colors.green}VALID{text_colors.reset}] {email}"
                )
            # This will not be added to our list of valid users as we want to avoid
            # hitting personal accounts
            elif if_exists_result == 5:
                if self.writer:
                    # The flag records that at least one external-IDP
                    # account was seen; it drives the shutdown summary.
                    # NOTE(review): original indentation was ambiguous --
                    # confirm the write is meant for every IDP account.
                    if not self.found_idp:
                        self.found_idp = True
                    self.idp_writer.write(email)
                logging.info(
                    f"[{text_colors.yellow}DIFFIDP{text_colors.reset}] {email}"
                )
                logging.debug(f"{email} -> Different Identity Provider")
            else:
                print(
                    f"[{text_colors.red}INVALID{text_colors.reset}] "
                    f"{email}{' '*10}",
                    end="\r",
                )
        else:
            print(
                f"[{text_colors.red}INVALID{text_colors.reset}] "
                f"{email}{' '*10}",
                end="\r",
            )
    except Exception as e:
        # Swallow per-user failures so one bad request does not stop the run
        logging.debug(e)
        pass
# ===========================================
# == -- Autodiscover MODULE -- DISABLED -- ==
# ===========================================
def _autodiscover(self, domain: str, user: str, password: str = "<PASSWORD>"):
    """Enumerate users on Microsoft using Microsoft Autodiscover

    Note: This method is dead based on recent MS updates. I am leaving this
    code here in case a new method of enumeration is identified via
    Autodiscover.

    Note: There may be a potential path of enumeration using Autodiscover by
    identifying responses that show 'Locked' based on the AAADSTS code
    (this appears to happen as a default response code to an invalid
    authentication attempt), but this would require an authentication attempt
    for each user.

    https://github.com/Raikia/UhOh365

    Raises:
        NotImplementedError
    """
    # Module disabled: everything below the raise is intentionally
    # unreachable and kept only for reference.
    raise NotImplementedError("This method is not currently implemented.")
    try:
        # NOTE(review): aliases (and would mutate) the shared
        # Defaults.HTTP_HEADERS dict -- confirm before re-enabling.
        headers = Defaults.HTTP_HEADERS
        headers[
            "User-Agent"
        ] = "Microsoft Office/16.0 (Windows NT 10.0; Microsoft Outlook 16.0.12026; Pro)"
        email = self.HELPER.check_email(user, domain)
        if self.writer:
            self.tested_writer.write(email)
        time.sleep(0.250)
        url = f"https://outlook.office365.com/autodiscover/autodiscover.json/v1.0/{email}?Protocol=Autodiscoverv1"
        response = self._send_request(
            "get",
            url,
            headers=headers,
            proxies=self.proxies,
            timeout=self.timeout,
            sleep=self.sleep,
            jitter=self.jitter,
        )
        status = response.status_code
        body = response.text
        # "X-MailboxGuid" in response.headers.keys()
        # This appears to not be a required header for valid accounts
        if status == 200:
            if self.writer:
                self.valid_writer.write(email)
            self.VALID_ACCOUNTS.append(email)
            logging.info(f"[{text_colors.green}VALID{text_colors.reset}] {email}")
        elif status == 302:
            # A redirect somewhere other than outlook.office365.com is
            # treated as a valid account
            if "outlook.office365.com" not in body:
                if self.writer:
                    self.valid_writer.write(email)
                self.VALID_ACCOUNTS.append(email)
                logging.info(
                    f"[{text_colors.green}VALID{text_colors.reset}] {email}"
                )
            else:
                print(
                    f"[{text_colors.red}INVALID{text_colors.reset}] "
                    f"{email}{' '*10}",
                    end="\r",
                )
        else:
            print(
                f"[{text_colors.red}INVALID{text_colors.reset}] "
                f"{email}{' '*10}",
                end="\r",
            )
    except Exception as e:
        # Swallow per-user failures so one bad request does not stop the run
        logging.debug(e)
        pass
# =========================
# == -- oAuth2 MODULE -- ==
# =========================
def _oauth2(self, domain: str, user: str, password: str = "<PASSWORD>"):
    """Enumerate users via Microsoft's oAuth2 endpoint

    https://github.com/Gerenios/AADInternals/blob/master/KillChain_utils.ps1#L112

    Arguments:
        <required>
        domain: domain to enumerate against
        user: username for enumeration request
        <optional>
        password: password sent with the token request

    Raises:
        Exception: generic handler so we can successfully fail without
          crashing the run
    """
    try:
        # FIX: copy the default external headers instead of aliasing them --
        # the previous code mutated the shared Defaults.HTTP_HEADERS dict,
        # leaking the Content-Type override into every other module.
        headers = dict(Defaults.HTTP_HEADERS)
        headers["Content-Type"] = "application/x-www-form-urlencoded"
        # Build email if not already built
        email = self.HELPER.check_email(user, domain)
        # Write the tested user
        tested = f"{user} -> {email}" if user != email else email
        if self.writer:
            self.tested_writer.write(tested)
        time.sleep(0.250)
        # A random GUID doubles as both the resource and the client id so
        # the request does not correspond to any known application
        random_guid = str(uuid4())
        data = {
            "resource": random_guid,
            "client_id": random_guid,
            "grant_type": "password",
            "username": email,
            "password": password,
            "scope": "openid",
        }
        url = "https://login.microsoftonline.com/common/oauth2/token"
        response = self._send_request(
            "post",
            url,
            data=data,
            headers=headers,
            proxies=self.proxies,
            timeout=self.timeout,
            sleep=self.sleep,
            jitter=self.jitter,
        )
        status = response.status_code
        body = response.json()
        # Normalize any AADSTS error codes in the response
        if "error_codes" in body:
            error_codes = [f"AADSTS{code}" for code in body["error_codes"]]
        else:
            error_codes = None
        # Default to valid if 200 or 302
        if status in (200, 302):
            if self.writer:
                self.valid_writer.write(email)
            self.VALID_ACCOUNTS.append(email)
            logging.info(f"[{text_colors.green}VALID{text_colors.reset}] {email}")
        elif error_codes:
            # AADSTS50034 (user not found) is an invalid user
            if "AADSTS50034" in error_codes:
                print(
                    f"[{text_colors.red}INVALID{text_colors.reset}] "
                    f"{email}{' '*10}",
                    end="\r",
                )
            # Any other error code implies the user exists
            else:
                if self.writer:
                    self.valid_writer.write(email)
                self.VALID_ACCOUNTS.append(email)
                logging.info(
                    f"[{text_colors.green}VALID{text_colors.reset}] {email}"
                )
        # Unknown response -> invalid user
        else:
            print(
                f"[{text_colors.red}INVALID{text_colors.reset}] "
                f"{email}{' '*10}",
                end="\r",
            )
    except Exception as e:
        # Swallow per-user failures so one bad request does not stop the run
        logging.debug(e)
        pass
async def run(
    self,
    userlist: List[str],
    password: str = "<PASSWORD>",
    domain: str = None,
    module: str = None,
):
    """Asynchronously send HTTP requests to enumerate a list of users.

    This method's params override the class' level of params.

    Publicly accessible class method. To implement and run
    this method from another script:
    ```
    from o365spray.core import Enumerator
    loop = asyncio.get_event_loop()
    e = Enumerator(loop, writer=False)
    loop.run_until_complete(
        e.run(
            userlist,
            password,
            domain,
            module,
        )
    )
    loop.run_until_complete()
    loop.close()
    list_of_valid_users = e.VALID_ACCOUNTS
    ```

    Arguments:
        <required>
        userlist: list of users to enumerate
        <optional>
        password: password for modules that perform authentication
        domain: domain to enumerate users against
        module: enumeration module to run

    Raises:
        ValueError: if provided domain is empty/None or module does not
          exist
        NotImplementedError: if the selected module is registered but
          not implemented
    """
    # Per-call params override the instance-level defaults
    domain = domain or self.domain
    if not domain:
        raise ValueError(f"Invalid domain for user enumeration: '{domain}'")
    module = module or self.module
    if module not in self._modules:
        raise ValueError(f"Invalid user enumeration module name: '{module}'")
    # Handle NotImplementedError exception handling here to avoid async
    # weirdness or relying on return_when of the asyncio.wait() method
    # since we want to pass through generic exceptions on run
    module_f = self._modules[module]
    if module_f is None:
        raise NotImplementedError("This module is not currently implemented.")
    # Build office module header/param data
    if module == "office":
        self._pre_office()
    # Fan the per-user work out across the thread pool
    blocking_tasks = [
        self.loop.run_in_executor(
            self.executor,
            partial(
                module_f,
                domain=domain,
                user=user,
                password=password,
            ),
        )
        for user in userlist
    ]
    if blocking_tasks:
        await asyncio.wait(blocking_tasks)
| StarcoderdataPython |
124630 | import json
import logging
import csv
from datetime import date
from StringIO import StringIO
from zipfile import ZipFile
from django import forms
from django.core import mail
from django.core.urlresolvers import reverse
from django.utils.unittest import skip
from vumi.message import TransportUserMessage
import go.base.utils
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
from go.conversation.templatetags import conversation_tags
from go.conversation.view_definition import (
ConversationViewDefinitionBase, EditConversationView)
from go.conversation.tasks import export_conversation_messages_unsorted
from go.vumitools.api import VumiApiCommand
from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
from go.vumitools.conversation.utils import ConversationWrapper
from go.vumitools.tests.helpers import GoMessageHelper
from go.dashboard.dashboard import DashboardLayout, DashboardParseError
from go.dashboard import client as dashboard_client
from go.dashboard.tests.utils import FakeDiamondashApiClient
class EnabledAction(ConversationAction):
    """A conversation action that is never disabled."""

    action_name = 'enabled'
    action_display_name = 'Enabled Operation'

    def check_disabled(self):
        """Always available, so there is no disabled-reason to report."""
        return None

    def perform_action(self, action_data):
        """Succeed without doing any work."""
        return None
class DisabledAction(ConversationAction):
    """A conversation action that is permanently disabled."""

    action_name = 'disabled'
    action_display_name = 'Disabled Operation'

    def check_disabled(self):
        """Report the reason this action cannot be performed."""
        return "This action is disabled."

    def perform_action(self, action_data):
        """Guard: reaching this in a test is a failure."""
        raise Exception("This action should never be performed.")
class DummyConversationDefinition(ConversationDefinitionBase):
    """Minimal conversation definition with no actions or extra config."""
    conversation_type = 'dummy'
    conversation_display_name = 'Dummy Conversation'
class ActionConversationDefinition(ConversationDefinitionBase):
    """Conversation definition exposing one enabled and one disabled action."""
    conversation_type = 'with_actions'
    conversation_display_name = 'Conversation With Actions'
    actions = (EnabledAction, DisabledAction)
class EndpointConversationDefinition(ConversationDefinitionBase):
    """Conversation definition declaring an additional static endpoint."""
    conversation_type = u'extra_endpoints'
    conversation_display_name = u'Extra Endpoints'
    extra_static_endpoints = (u'extra',)
class SimpleEditForm(forms.Form):
    """Single-field form used by the editable conversation fixtures."""
    simple_field = forms.CharField()
class SimpleEditView(EditConversationView):
    """Edit view with a single unnamed (top-level) form group."""
    edit_forms = (
        (None, SimpleEditForm),
    )
class SimpleEditConversationDefinition(ConversationDefinitionBase):
    """Definition paired with SimpleEditViewDefinition for edit-view tests."""
    conversation_type = 'simple_edit'
    conversation_display_name = 'Simple Editable Conversation'
class SimpleEditViewDefinition(ConversationViewDefinitionBase):
    """View definition wiring in the single-form edit view."""
    edit_view = SimpleEditView
class ComplexEditView(EditConversationView):
    """Edit view with two named form groups ('foo' and 'bar')."""
    edit_forms = (
        ('foo', SimpleEditForm),
        ('bar', SimpleEditForm),
    )
class ComplexEditConversationDefinition(ConversationDefinitionBase):
    """Definition paired with ComplexEditViewDefinition for edit-view tests."""
    conversation_type = 'complex_edit'
    conversation_display_name = 'Complex Editable Conversation'
class ComplexEditViewDefinition(ConversationViewDefinitionBase):
    """View definition wiring in the two-group edit view."""
    edit_view = ComplexEditView
class DefaultConfigConversationDefinition(ConversationDefinitionBase):
    """Conversation definition whose initial config mirrors name/description."""

    conversation_type = 'default_config'
    conversation_display_name = 'Default Config Conversation'

    def get_default_config(self, name, description):
        """Seed the conversation config from the creation form fields."""
        return dict(name=name, description=description)
# Registry of the fake conversation types used by these tests: maps each
# conversation_type string to its (definition class, view definition class).
DUMMY_CONVERSATION_DEFS = {
    'dummy': (
        DummyConversationDefinition, ConversationViewDefinitionBase),
    'with_actions': (
        ActionConversationDefinition, ConversationViewDefinitionBase),
    'extra_endpoints': (
        EndpointConversationDefinition, ConversationViewDefinitionBase),
    'simple_edit': (
        SimpleEditConversationDefinition, SimpleEditViewDefinition),
    'complex_edit': (
        ComplexEditConversationDefinition, ComplexEditViewDefinition),
    'default_config': (
        DefaultConfigConversationDefinition, ConversationViewDefinitionBase),
}
# VUMI_INSTALLED_APPS-style settings derived from the fake conversation
# definitions above; keys are the namespaced app names ('gotest.<type>').
# (Rewritten as a dict comprehension instead of dict() over a list of pairs.)
DUMMY_CONVERSATION_SETTINGS = {
    'gotest.' + app: {
        'namespace': app,
        'display_name': defs[0].conversation_display_name,
    }
    for app, defs in DUMMY_CONVERSATION_DEFS.items()
}
class FakeConversationPackage(object):
    """Pretends to be a package containing modules and classes for an app.

    The real package exposes `definition` and `view_definition` submodules;
    here both attributes point back at this instance, which carries the
    expected `ConversationDefinition`/`ConversationViewDefinition` classes
    itself.
    """
    def __init__(self, conversation_type):
        # Stand in for the `definition` / `view_definition` submodules
        self.definition = self
        self.view_definition = self
        def_cls, vdef_cls = DUMMY_CONVERSATION_DEFS[conversation_type]
        self.ConversationDefinition = def_cls
        self.ConversationViewDefinition = vdef_cls
class BaseConversationViewTestCase(GoDjangoTestCase):
    """Common setup for conversation view tests.

    Patches the conversation-package lookup so the dummy definitions above
    are used instead of real conversation apps.
    """
    def setUp(self):
        self.vumi_helper = self.add_helper(
            DjangoVumiApiHelper(), setup_vumi_api=False)
        # Replace the real package lookup with our fake package factory
        self.monkey_patch(
            go.base.utils, 'get_conversation_pkg', self._get_conversation_pkg)
        self.vumi_helper.patch_config(
            VUMI_INSTALLED_APPS=DUMMY_CONVERSATION_SETTINGS)
        self.vumi_helper.setup_vumi_api()
        self.user_helper = self.vumi_helper.make_django_user()
        self.client = self.vumi_helper.get_client()

    def _get_conversation_pkg(self, conversation_type, from_list=()):
        """Test stub for `go.base.utils.get_conversation_pkg()`"""
        return FakeConversationPackage(conversation_type)

    def get_view_url(self, conv, view):
        """Return the URL for `view` on the given conversation."""
        view_def = go.base.utils.get_conversation_view_definition(
            conv.conversation_type)
        return view_def.get_view_url(view, conversation_key=conv.key)

    def get_new_view_url(self):
        """Return the URL of the 'new conversation' view."""
        return reverse('conversations:new_conversation')

    def get_action_view_url(self, conv, action_name):
        """Return the URL for a named action on the given conversation."""
        return reverse('conversations:conversation_action', kwargs={
            'conversation_key': conv.key, 'action_name': action_name})

    def get_api_commands_sent(self):
        """Return the API commands dispatched during the test so far."""
        return go.base.utils.connection.get_commands()
class TestConversationsDashboardView(BaseConversationViewTestCase):
    """Tests for the conversation index (dashboard) view."""
    def test_index(self):
        """Display all conversations"""
        response = self.client.get(reverse('conversations:index'))
        self.assertNotContains(response, u'myconv')
        myconv = self.user_helper.create_conversation(u'dummy', name=u'myconv')
        response = self.client.get(reverse('conversations:index'))
        self.assertContains(response, u'myconv')
        self.assertContains(response, u'Dummy Conversation')
        self.assertContains(response, self.get_view_url(myconv, 'show'))
        self.assertContains(response, self.get_view_url(
            myconv, 'message_list'))
        self.assertContains(response, self.get_view_url(myconv, 'reports'))

    def test_index_search(self):
        """Filter conversations based on query string"""
        conv = self.user_helper.create_conversation(u'dummy')
        response = self.client.get(reverse('conversations:index'))
        self.assertContains(response, conv.name)
        response = self.client.get(reverse('conversations:index'), {
            'query': 'something that does not exist in the fixtures'})
        self.assertNotContains(response, conv.name)

    def test_index_search_on_type(self):
        """Filter conversations by conversation type."""
        conv = self.user_helper.create_conversation(u'dummy')
        self.user_helper.add_app_permission(u'gotest.dummy')
        self.user_helper.add_app_permission(u'gotest.with_actions')

        def search(conversation_type):
            return self.client.get(reverse('conversations:index'), {
                'query': conv.name,
                'conversation_type': conversation_type,
            })

        self.assertContains(search('dummy'), conv.key)
        self.assertNotContains(search('with_actions'), conv.key)

    def test_index_search_on_status(self):
        """Filter conversations by draft/running/finished status."""
        conv = self.user_helper.create_conversation(u'dummy')

        def search(conversation_status):
            return self.client.get(reverse('conversations:index'), {
                'query': conv.name,
                'conversation_status': conversation_status,
            })

        # it should be draft
        self.assertContains(search('draft'), conv.key)
        self.assertNotContains(search('running'), conv.key)
        self.assertNotContains(search('finished'), conv.key)
        # Set the status to `running'
        conv = self.user_helper.get_conversation(conv.key)
        conv.set_status_started()
        conv.save()
        self.assertNotContains(search('draft'), conv.key)
        self.assertContains(search('running'), conv.key)
        self.assertNotContains(search('finished'), conv.key)
        # Set the status to `stopped' again
        conv = self.user_helper.get_conversation(conv.key)
        conv.set_status_stopped()
        conv.save()
        self.assertContains(search('draft'), conv.key)
        self.assertNotContains(search('running'), conv.key)
        self.assertNotContains(search('finished'), conv.key)
        # Archive it
        conv.archive_conversation()
        self.assertNotContains(search('draft'), conv.key)
        self.assertNotContains(search('running'), conv.key)
        self.assertContains(search('finished'), conv.key)

    def test_pagination(self):
        """13 conversations should spill one onto the second page."""
        # All 13 conversations share the same default name, so the name
        # appears once per listed conversation.
        for i in range(13):
            conv = self.user_helper.create_conversation(u'dummy')
        response = self.client.get(reverse('conversations:index'))
        # CONVERSATIONS_PER_PAGE = 12
        self.assertContains(response, conv.name, count=12)
        response = self.client.get(reverse('conversations:index'), {'p': 2})
        self.assertContains(response, conv.name, count=1)

    def test_pagination_with_query_and_type(self):
        """Pagination links should not appear when filters narrow the set."""
        self.user_helper.add_app_permission(u'gotest.dummy')
        self.user_helper.add_app_permission(u'gotest.with_actions')
        for i in range(13):
            conv = self.user_helper.create_conversation(u'dummy')
        response = self.client.get(reverse('conversations:index'), {
            'query': conv.name,
            'p': 2,
            'conversation_type': 'dummy',
            'conversation_status': 'draft',
        })
        self.assertNotContains(response, '?p=2')
class TestNewConversationView(BaseConversationViewTestCase):
    """Tests for the 'new conversation' form view."""
    def test_get_new_conversation(self):
        """Only permitted conversation types are offered in the form."""
        self.user_helper.add_app_permission(u'gotest.dummy')
        response = self.client.get(reverse('conversations:new_conversation'))
        self.assertContains(response, 'Conversation name')
        self.assertContains(response, 'kind of conversation')
        self.assertContains(response, 'dummy')
        self.assertNotContains(response, 'with_actions')

    def test_post_new_conversation(self):
        """Posting the form creates a conversation and redirects to it."""
        self.user_helper.add_app_permission(u'gotest.dummy')
        conv_data = {
            'name': 'new conv',
            'conversation_type': 'dummy',
        }
        response = self.client.post(
            reverse('conversations:new_conversation'), conv_data)
        [conv] = self.user_helper.user_api.active_conversations()
        show_url = reverse('conversations:conversation', kwargs={
            'conversation_key': conv.key, 'path_suffix': ''})
        self.assertRedirects(response, show_url)
        self.assertEqual(conv.name, 'new conv')
        self.assertEqual(conv.conversation_type, 'dummy')

    def test_post_new_conversation_extra_endpoints(self):
        """Extra static endpoints from the definition land on the conversation."""
        self.user_helper.add_app_permission(u'gotest.extra_endpoints')
        conv_data = {
            'name': 'new conv',
            'conversation_type': 'extra_endpoints',
        }
        response = self.client.post(reverse('conversations:new_conversation'),
                                    conv_data)
        [conv] = self.user_helper.user_api.active_conversations()
        show_url = reverse('conversations:conversation', kwargs={
            'conversation_key': conv.key, 'path_suffix': ''})
        self.assertRedirects(response, show_url)
        self.assertEqual(conv.name, 'new conv')
        self.assertEqual(conv.conversation_type, 'extra_endpoints')
        self.assertEqual(list(conv.extra_endpoints), [u'extra'])

    def test_post_new_conversation_default_config(self):
        """The definition's default config seeds the new conversation."""
        self.user_helper.add_app_permission(u'gotest.default_config')
        conv_data = {
            'name': 'new conv',
            'description': 'a new conversation',
            'conversation_type': 'default_config',
        }
        self.client.post(reverse('conversations:new_conversation'), conv_data)
        [conv] = self.user_helper.user_api.active_conversations()
        self.assertEqual(conv.config, {
            'name': 'new conv',
            'description': 'a new conversation'
        })
class TestConversationViews(BaseConversationViewTestCase):
def setUp(self):
    super(TestConversationViews, self).setUp()
    # Helper for creating inbound/outbound messages on conversations
    self.msg_helper = self.add_helper(
        GoMessageHelper(vumi_helper=self.vumi_helper))
def test_show_no_content_block(self):
    """A non-editable conversation shows no Content block or edit link."""
    conv = self.user_helper.create_conversation(u'dummy')
    show_url = self.get_view_url(conv, 'show')
    response = self.client.get(show_url)
    self.assertEqual(response.status_code, 200)
    self.assertNotContains(response, 'Content')
    self.assertNotContains(response, show_url + 'edit/')
def test_show_editable(self):
    """An editable conversation shows the Content block and edit link."""
    conv = self.user_helper.create_conversation(u'simple_edit')
    response = self.client.get(self.get_view_url(conv, 'show'))
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'Content')
    self.assertContains(response, self.get_view_url(conv, 'edit'))
def test_edit_simple(self):
    """A single-group edit form round-trips into conversation config."""
    conv = self.user_helper.create_conversation(u'simple_edit')
    self.assertEqual(conv.config, {})
    response = self.client.get(self.get_view_url(conv, 'edit'))
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'simple_field')
    self.assertNotContains(response, 'field value')
    response = self.client.post(self.get_view_url(conv, 'edit'), {
        'simple_field': ['field value'],
    })
    self.assertRedirects(response, self.get_view_url(conv, 'show'))
    conv = self.user_helper.get_conversation(conv.key)
    self.assertEqual(conv.config, {'simple_field': 'field value'})
    response = self.client.get(self.get_view_url(conv, 'edit'))
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'simple_field')
    self.assertContains(response, 'field value')
def test_edit_complex(self):
    """A multi-group edit form stores each group under its own config key."""
    conv = self.user_helper.create_conversation(u'complex_edit')
    self.assertEqual(conv.config, {})
    response = self.client.get(self.get_view_url(conv, 'edit'))
    self.assertEqual(response.status_code, 200)
    # Fields are prefixed with their group name
    self.assertContains(response, 'foo-simple_field')
    self.assertContains(response, 'bar-simple_field')
    self.assertNotContains(response, 'field value 1')
    self.assertNotContains(response, 'field value 2')
    response = self.client.post(self.get_view_url(conv, 'edit'), {
        'foo-simple_field': ['field value 1'],
        'bar-simple_field': ['field value 2'],
    })
    self.assertRedirects(response, self.get_view_url(conv, 'show'))
    conv = self.user_helper.get_conversation(conv.key)
    self.assertEqual(conv.config, {
        'foo': {'simple_field': 'field value 1'},
        'bar': {'simple_field': 'field value 2'},
    })
    response = self.client.get(self.get_view_url(conv, 'edit'))
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'foo-simple_field')
    self.assertContains(response, 'bar-simple_field')
    self.assertContains(response, 'field value 1')
    self.assertContains(response, 'field value 2')
def test_edit_conversation_details(self):
    """The edit_detail view updates name and description."""
    conv = self.user_helper.create_conversation(
        u'dummy', name=u'test', description=u'test')
    response = self.client.post(
        reverse('conversations:conversation', kwargs={
            'conversation_key': conv.key, 'path_suffix': 'edit_detail/',
        }), {
            'name': 'foo',
            'description': 'bar',
        })
    show_url = reverse('conversations:conversation', kwargs={
        'conversation_key': conv.key, 'path_suffix': ''})
    self.assertRedirects(response, show_url)
    reloaded_conv = self.user_helper.get_conversation(conv.key)
    self.assertEqual(reloaded_conv.name, 'foo')
    self.assertEqual(reloaded_conv.description, 'bar')
def test_conversation_contact_group_listing(self):
    """Only groups attached to the conversation are listed on its page."""
    conv = self.user_helper.create_conversation(
        u'dummy', name=u'test', description=u'test')
    contact_store = self.user_helper.user_api.contact_store
    group1 = contact_store.new_group(u'Contact Group 1')
    group2 = contact_store.new_group(u'Contact Group 2')
    conv.add_group(group1)
    conv.save()
    show_url = reverse('conversations:conversation', kwargs={
        'conversation_key': conv.key, 'path_suffix': ''})
    resp = self.client.get(show_url)
    self.assertContains(resp, group1.name)
    self.assertNotContains(resp, group2.name)
def test_conversation_render_contact_group_edit(self):
    """The edit_groups view exposes model data for all groups, flagging
    which ones are already in the conversation."""
    conv = self.user_helper.create_conversation(
        u'dummy', name=u'test', description=u'test')
    contact_store = self.user_helper.user_api.contact_store
    group1 = contact_store.new_group(u'Contact Group 1')
    group2 = contact_store.new_group(u'Contact Group 2')
    conv.add_group(group1)
    conv.save()
    groups_url = reverse('conversations:conversation', kwargs={
        'conversation_key': conv.key,
        'path_suffix': 'edit_groups/'
    })
    response = self.client.get(groups_url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(json.loads(response.context['model_data']), {
        'key': conv.key,
        'urls': {
            'show': reverse(
                'conversations:conversation', kwargs={
                    'conversation_key': conv.key,
                    'path_suffix': ''
                })
        },
        'groups': [{
            'key': group2.key,
            'name': u'Contact Group 2',
            'inConversation': False,
            'urls': {
                'show': reverse(
                    'contacts:group',
                    kwargs={'group_key': group2.key}),
            },
        }, {
            'key': group1.key,
            'name': u'Contact Group 1',
            'inConversation': True,
            'urls': {
                'show': reverse(
                    'contacts:group',
                    kwargs={'group_key': group1.key}),
            },
        }]
    })
def test_conversation_contact_group_assignment(self):
    """PUTting a JSON group list to edit_groups succeeds."""
    conv = self.user_helper.create_conversation(
        u'dummy', name=u'test', description=u'test')
    contact_store = self.user_helper.user_api.contact_store
    contact_store.new_group(u'Contact Group 1')
    group2 = contact_store.new_group(u'Contact Group 2')
    group3 = contact_store.new_group(u'Contact Group 3')
    groups_url = reverse('conversations:conversation', kwargs={
        'conversation_key': conv.key, 'path_suffix': 'edit_groups/'})
    resp = self.client.put(
        groups_url,
        content_type='application/json',
        data=json.dumps({
            'key': conv.key,
            'groups': [
                {'key': group2.key},
                {'key': group3.key}]
        }))
    self.assertEqual(resp.status_code, 200)
    def test_start(self):
        """Starting a conversation redirects, flashes a message and sends
        a 'start' API command to the application worker."""
        conv = self.user_helper.create_conversation(u'dummy')
        response = self.client.post(
            self.get_view_url(conv, 'start'), follow=True)
        self.assertRedirects(response, self.get_view_url(conv, 'show'))
        [msg] = response.context['messages']
        self.assertEqual(str(msg), "Dummy Conversation started")
        # Reload to observe the state change persisted by the view.
        conv = self.user_helper.get_conversation(conv.key)
        self.assertTrue(conv.starting())
        [start_cmd] = self.get_api_commands_sent()
        self.assertEqual(start_cmd, VumiApiCommand.command(
            '%s_application' % (conv.conversation_type,), 'start',
            user_account_key=conv.user_account.key, conversation_key=conv.key))
    def test_stop(self):
        """Stopping a running conversation redirects and moves it into the
        'stopping' state."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.post(
            self.get_view_url(conv, 'stop'), follow=True)
        self.assertRedirects(response, self.get_view_url(conv, 'show'))
        [msg] = response.context['messages']
        self.assertEqual(str(msg), "Dummy Conversation stopped")
        # Reload to observe the state change persisted by the view.
        conv = self.user_helper.get_conversation(conv.key)
        self.assertTrue(conv.stopping())
    def test_aggregates(self):
        """The aggregates view returns per-day CSV counts, filtered by
        message direction."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        # Inbound only
        self.msg_helper.add_inbound_to_conv(
            conv, 5, start_date=date(2012, 1, 1), time_multiplier=12)
        # Inbound and outbound
        msgs = self.msg_helper.add_inbound_to_conv(
            conv, 5, start_date=date(2013, 1, 1), time_multiplier=12)
        self.msg_helper.add_replies_to_conv(conv, msgs)
        response = self.client.get(
            self.get_view_url(conv, 'aggregates'), {'direction': 'inbound'})
        # 5 messages spaced 12 hours apart, counting back from the start
        # date, land on three consecutive days (1 + 2 + 2).
        self.assertEqual(response.content, '\r\n'.join([
            '2011-12-30,1',
            '2011-12-31,2',
            '2012-01-01,2',
            '2012-12-30,1',
            '2012-12-31,2',
            '2013-01-01,2',
            '',  # csv ends with a blank line
        ]))
        response = self.client.get(
            self.get_view_url(conv, 'aggregates'), {'direction': 'outbound'})
        self.assertEqual(response.content, '\r\n'.join([
            '2012-12-30,1',
            '2012-12-31,2',
            '2013-01-01,2',
            '',  # csv ends with a blank line
        ]))
    def test_export_csv_messages(self):
        """Exporting messages emails the owner a zipped CSV of all
        inbound and outbound messages."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        msgs = self.msg_helper.add_inbound_to_conv(
            conv, 5, start_date=date(2012, 1, 1), time_multiplier=12)
        self.msg_helper.add_replies_to_conv(conv, msgs)
        response = self.client.post(self.get_view_url(conv, 'export_messages'))
        self.assertRedirects(response, self.get_view_url(conv, 'message_list'))
        [email] = mail.outbox
        self.assertEqual(
            email.recipients(), [self.user_helper.get_django_user().email])
        self.assertTrue(conv.name in email.subject)
        self.assertTrue(conv.name in email.body)
        [(file_name, zipcontent, mime_type)] = email.attachments
        self.assertEqual(file_name, 'messages-export.zip')
        zipfile = ZipFile(StringIO(zipcontent), 'r')
        content = zipfile.open('messages-export.csv', 'r').read()
        # 1 header, 5 sent, 5 received, 1 trailing newline == 12
        self.assertEqual(12, len(content.split('\n')))
        self.assertEqual(mime_type, 'application/zip')
    def test_download_json_messages_inbound(self):
        """A GET export defaults to inbound JSON, served via nginx
        X-Accel-Redirect with buffering disabled."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get(self.get_view_url(conv, 'export_messages'))
        self.assertEqual(
            response['X-Accel-Redirect'],
            '/message_store_exporter/%s/inbound.json' % (conv.batch.key,))
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename=%s-inbound.json' % (conv.key,))
        self.assertEqual(response['X-Accel-Buffering'], 'no')
    def test_download_csv_messages_inbound(self):
        """``format=csv`` switches the inbound export redirect to CSV."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get("%s?format=csv" % (
            self.get_view_url(conv, 'export_messages')))
        self.assertEqual(
            response['X-Accel-Redirect'],
            '/message_store_exporter/%s/inbound.csv' % (conv.batch.key,))
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename=%s-inbound.csv' % (conv.key,))
        self.assertEqual(response['X-Accel-Buffering'], 'no')
    def test_download_json_messages_outbound(self):
        """``direction=outbound`` switches the JSON export redirect to the
        outbound message store."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get('%s?direction=outbound' % (
            self.get_view_url(conv, 'export_messages'),))
        self.assertEqual(
            response['X-Accel-Redirect'],
            '/message_store_exporter/%s/outbound.json' % (conv.batch.key,))
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename=%s-outbound.json' % (conv.key,))
        self.assertEqual(response['X-Accel-Buffering'], 'no')
    def test_download_csv_messages_outbound(self):
        """``direction=outbound&format=csv`` combines both export options."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get('%s?direction=outbound&format=csv' % (
            self.get_view_url(conv, 'export_messages'),))
        self.assertEqual(
            response['X-Accel-Redirect'],
            '/message_store_exporter/%s/outbound.csv' % (conv.batch.key,))
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename=%s-outbound.csv' % (conv.key,))
        self.assertEqual(response['X-Accel-Buffering'], 'no')
    def test_download_messages_unknown_direction_404(self):
        """An unrecognised ``direction`` value yields a 404."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get('%s?direction=unknown&format=json' % (
            self.get_view_url(conv, 'export_messages'),))
        self.assertEqual(response.status_code, 404)
    def test_download_messages_unknown_format_404(self):
        """An unrecognised ``format`` value yields a 404."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get('%s?direction=outbound&format=unknown' % (
            self.get_view_url(conv, 'export_messages'),))
        self.assertEqual(response.status_code, 404)
    def test_message_list_pagination(self):
        """The message list paginates at 20 messages per page."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        # Create 21 inbound messages, since we have 20 messages per page it
        # should give us 2 pages
        self.msg_helper.add_inbound_to_conv(conv, 21)
        response = self.client.get(self.get_view_url(conv, 'message_list'))
        # Check pagination
        # Ordinarily we'd have 60 references to a contact, which by default
        # display the from_addr if a contact cannot be found. (Each block has 3
        # references, one in the table listing, 2 in the reply-to modal div.)
        # We have no channels connected to this conversation, however, so we
        # only have 20 in this test.
        self.assertContains(response, 'from-', 20)
        # We should have 2 links to page two, one for the actual page link
        # and one for the 'Next' page link
        self.assertContains(response, '&p=2', 2)
        # There should only be 1 link to the current page
        self.assertContains(response, '&p=1', 1)
        # There should not be a link to the previous page since we are not
        # the first page.
        self.assertContains(response, '&p=0', 0)
    def test_message_list_statistics(self):
        """The message list renders ack/nack/delivery statistics with
        percentages relative to the 10 sent messages."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        msgs = self.msg_helper.add_inbound_to_conv(conv, 10)
        replies = self.msg_helper.add_replies_to_conv(conv, msgs)
        # Events are attached to overlapping slices of the 10 replies:
        # 4 acks, 5 nacks, then 2/3/4 delivery reports per status.
        for msg in replies[:4]:
            self.msg_helper.make_stored_ack(conv, msg)
        for msg in replies[4:9]:
            self.msg_helper.make_stored_nack(conv, msg)
        for msg in replies[:2]:
            self.msg_helper.make_stored_delivery_report(
                conv, msg, delivery_status='delivered')
        for msg in replies[2:5]:
            self.msg_helper.make_stored_delivery_report(
                conv, msg, delivery_status='pending')
        for msg in replies[5:9]:
            self.msg_helper.make_stored_delivery_report(
                conv, msg, delivery_status='failed')
        response = self.client.get(self.get_view_url(conv, 'message_list'))
        self.assertContains(
            response,
            '<tr><th>Total sent</th><td colspan="2">10</td></tr>',
            html=True)
        self.assertContains(
            response, '<tr><th>Accepted</th><td>4</td><td>40%</td></tr>',
            html=True)
        self.assertContains(
            response, '<tr><th>Rejected</th><td>5</td><td>50%</td></tr>',
            html=True)
        self.assertContains(
            response, '<tr><th>Delivered</th><td>2</td><td>20%</td></tr>',
            html=True)
        self.assertContains(
            response, '<tr><th>Pending</th><td>3</td><td>30%</td></tr>',
            html=True)
        self.assertContains(
            response, '<tr><th>Failed</th><td>4</td><td>40%</td></tr>',
            html=True)
def test_message_list_inbound_uniques_display(self):
conv = self.user_helper.create_conversation(u'dummy', started=True)
msgs = self.msg_helper.add_inbound_to_conv(conv, 10)
response = self.client.get(self.get_view_url(conv, 'message_list'))
self.assertContains(
response, 'Messages from 10 unique people')
    def test_message_list_inbound_download_links_display(self):
        """The inbound message list links to JSON and CSV exports."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get(self.get_view_url(conv, 'message_list'))
        inbound_json_url = ('%s?direction=inbound&format=json' %
                            self.get_view_url(conv, 'export_messages'))
        self.assertContains(response, 'href="%s"' % inbound_json_url)
        self.assertContains(response, "Download received messages as JSON")
        inbound_csv_url = ('%s?direction=inbound&format=csv' %
                           self.get_view_url(conv, 'export_messages'))
        self.assertContains(response, 'href="%s"' % inbound_csv_url)
        self.assertContains(response, "Download received messages as CSV")
def test_message_list_outbound_uniques_display(self):
conv = self.user_helper.create_conversation(u'dummy', started=True)
msgs = self.msg_helper.add_inbound_to_conv(conv, 10)
replies = self.msg_helper.add_replies_to_conv(conv, msgs)
response = self.client.get(
self.get_view_url(conv, 'message_list'), {
'direction': 'outbound'
})
self.assertContains(
response, 'Messages to 10 unique people')
    def test_message_list_outbound_download_links_display(self):
        """The outbound message list links to JSON and CSV exports."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        response = self.client.get(
            self.get_view_url(conv, 'message_list'), {
                'direction': 'outbound'
            })
        outbound_json_url = ('%s?direction=outbound&format=json' %
                             self.get_view_url(conv, 'export_messages'))
        self.assertContains(response, 'href="%s"' % outbound_json_url)
        self.assertContains(response, "Download sent messages as JSON")
        outbound_csv_url = ('%s?direction=outbound&format=csv' %
                            self.get_view_url(conv, 'export_messages'))
        self.assertContains(response, 'href="%s"' % outbound_csv_url)
        self.assertContains(response, "Download sent messages as CSV")
    def test_message_list_no_sensitive_msgs(self):
        """Messages flagged ``sensitive`` in go metadata are hidden from
        both the inbound and outbound listings."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)

        def make_stored_msgs(go_metadata={}):
            # Store one inbound and one outbound message sharing the same
            # address so both listings are affected symmetrically.
            self.msg_helper.make_stored_inbound(
                conv, "hi", from_addr='from-me',
                helper_metadata={'go': go_metadata})
            self.msg_helper.make_stored_outbound(
                conv, "hi", to_addr='from-me',
                helper_metadata={'go': go_metadata})

        def assert_messages(count):
            # Assert the number of visible messages in each direction.
            r_in = self.client.get(
                self.get_view_url(conv, 'message_list'),
                {'direction': 'inbound'})
            self.assertContains(r_in, 'from-me', count)
            r_out = self.client.get(
                self.get_view_url(conv, 'message_list'),
                {'direction': 'outbound'})
            self.assertContains(r_out, 'from-me', count)

        assert_messages(0)
        make_stored_msgs()
        assert_messages(1)
        # A sensitive message must not increase the visible count.
        make_stored_msgs({'sensitive': True})
        assert_messages(1)
        make_stored_msgs({'sensitive': False})
        assert_messages(2)
    def test_message_list_with_bad_transport_type_inbound(self):
        """Inbound messages with an unsupported transport_type still render,
        marked '(unsupported)'."""
        # inbound messages could have an unsupported transport_type
        # if the transport sent something we don't yet support
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        self.msg_helper.make_stored_inbound(
            conv, "hi", transport_type="bad horse", from_addr='from-me')
        r_in = self.client.get(
            self.get_view_url(conv, 'message_list'),
            {'direction': 'inbound'})
        self.assertContains(r_in, 'from-me', 1)
        self.assertContains(r_in, 'bad horse (unsupported)', 1)
    def test_message_list_with_bad_transport_type_outbound(self):
        """Outbound messages with an unsupported transport_type still render,
        marked '(unsupported)'."""
        # unsent message don't have their transport type set to something
        # that a contact can be created for
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        self.msg_helper.make_stored_outbound(
            conv, "hi", transport_type="bad horse", to_addr='from-me')
        r_out = self.client.get(
            self.get_view_url(conv, 'message_list'),
            {'direction': 'outbound'})
        self.assertContains(r_out, 'from-me', 1)
        self.assertContains(r_out, 'bad horse (unsupported)', 1)
    def test_reply_on_inbound_messages_only(self):
        """Reply controls appear on the inbound listing only, never on the
        outbound one."""
        # Fake the routing setup.
        self.monkey_patch(
            ConversationWrapper, 'has_channel_supporting_generic_sends',
            lambda s: True)
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        [msg_in] = self.msg_helper.add_inbound_to_conv(conv, 1)
        [msg_out] = self.msg_helper.add_replies_to_conv(conv, [msg_in])
        response = self.client.get(
            self.get_view_url(conv, 'message_list'), {'direction': 'inbound'})
        self.assertContains(response, 'Reply')
        self.assertContains(response, 'href="#reply-%s"' % (
            msg_in['message_id'],))
        response = self.client.get(
            self.get_view_url(conv, 'message_list'), {'direction': 'outbound'})
        self.assertNotContains(response, 'Reply')
    def test_no_reply_with_no_generic_send_channels(self):
        """Without a channel supporting generic sends no reply controls
        are rendered at all."""
        # We have no routing hooked up and hence no channels supporting generic
        # sends.
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        self.msg_helper.add_inbound_to_conv(conv, 1)
        response = self.client.get(
            self.get_view_url(conv, 'message_list'), {'direction': 'inbound'})
        self.assertNotContains(response, 'Reply')
    def test_send_one_off_reply(self):
        """Posting a one-off reply dispatches a 'send_message' command
        addressed to the original sender, ignoring the posted to_addr."""
        conv = self.user_helper.create_conversation(u'dummy', started=True)
        self.msg_helper.add_inbound_to_conv(conv, 1)
        [msg] = conv.received_messages_in_cache()
        response = self.client.post(self.get_view_url(conv, 'message_list'), {
            'in_reply_to': msg['message_id'],
            'content': 'foo',
            'to_addr': 'should be ignored',
            '_send_one_off_reply': True,
        })
        self.assertRedirects(response, self.get_view_url(conv, 'message_list'))
        [reply_to_cmd] = self.get_api_commands_sent()
        self.assertEqual(reply_to_cmd['worker_name'], 'dummy_application')
        self.assertEqual(reply_to_cmd['command'], 'send_message')
        self.assertEqual(
            reply_to_cmd['args'], [conv.user_account.key, conv.key])
        # The reply must go to the original sender's address, not the
        # to_addr supplied in the form.
        self.assertEqual(reply_to_cmd['kwargs']['command_data'], {
            'batch_id': conv.batch.key,
            'conversation_key': conv.key,
            'content': 'foo',
            'to_addr': msg['from_addr'],
            'msg_options': {'in_reply_to': msg['message_id']},
        })
class TestConversationTemplateTags(BaseConversationViewTestCase):
    """Tests for the conversation_screen / conversation_action template
    tags that build conversation-related URLs."""

    def _assert_cs_url(self, suffix, conv, view_name=None):
        # Assert that the conversation_screen tag produces the expected URL.
        expected = '/conversations/%s/%s' % (conv.key, suffix)
        if view_name is None:
            result = conversation_tags.conversation_screen(conv)
        else:
            result = conversation_tags.conversation_screen(conv, view_name)
        self.assertEqual(expected, result)
    def test_conversation_screen_tag(self):
        """conversation_screen builds URLs for known views and raises for
        views the conversation type does not support."""
        conv = self.user_helper.create_conversation(u'dummy')
        self._assert_cs_url('', conv)
        self._assert_cs_url('', conv, 'show')
        self._assert_cs_url('edit_detail/', conv, 'edit_detail')
        self._assert_cs_url('start/', conv, 'start')
        # The dummy conversation isn't editable.
        self.assertRaises(Exception, self._assert_cs_url, '', conv, 'edit')
    def _assert_ca_url(self, suffix, conv, action_name):
        # Assert that the conversation_action tag produces the expected URL.
        expected = '/conversations/%s/action/%s' % (conv.key, suffix)
        result = conversation_tags.conversation_action(conv, action_name)
        self.assertEqual(expected, result)
    def test_conversation_action_tag(self):
        """conversation_action builds an action URL from the action name."""
        conv = self.user_helper.create_conversation(u'with_actions')
        self._assert_ca_url('enabled', conv, 'enabled')
        self._assert_ca_url('disabled', conv, 'disabled')
        # The conversation_action tag currently just builds a URL without
        # regard to the existence of the action.
        self._assert_ca_url('foo', conv, 'foo')
    @skip("TODO")
    def test_get_contact_for_message(self):
        raise NotImplementedError("TODO")
    @skip("TODO")
    def test_get_reply_form_for_message(self):
        raise NotImplementedError("TODO")
class TestConversationReportsView(BaseConversationViewTestCase):
    """Tests for the diamondash-backed conversation reports view,
    including its error handling paths."""

    def setUp(self):
        super(TestConversationReportsView, self).setUp()
        self.diamondash_api = FakeDiamondashApiClient()
        # Capture errors logged by the view definition instead of letting
        # them reach the real logging handlers.
        self.error_log = []
        logger = logging.getLogger('go.conversation.view_definition')
        def log_error(e, exc_info):
            exc_type, exc_value, exc_traceback = exc_info
            self.assertEqual(e, exc_value)
            self.error_log.append(unicode(e))
        self.monkey_patch(logger, 'error', log_error)
        self.monkey_patch(
            dashboard_client,
            'get_diamondash_api',
            lambda: self.diamondash_api)
    def test_get_dashboard(self):
        """A successful sync pushes a per-conversation dashboard and exposes
        the diamondash response as dashboard_config."""
        self.diamondash_api.set_response({'happy': 'dashboard'})
        conv = self.user_helper.create_conversation(u'dummy')
        response = self.client.get(self.get_view_url(conv, 'reports'))
        [dd_request] = self.diamondash_api.get_requests()
        raw_dashboard = dd_request['data']
        self.assertEqual(
            raw_dashboard['name'],
            "go.conversations.%s" % conv.key)
        self.assertTrue('widgets' in raw_dashboard)
        self.assertEqual(
            json.loads(response.context['dashboard_config']),
            {'happy': 'dashboard'})
    def test_get_dashboard_for_sync_error_handling(self):
        """A diamondash sync failure is logged and dashboard_config is None."""
        self.diamondash_api.set_error_response(400, ':(')
        conv = self.user_helper.create_conversation(u'dummy')
        response = self.client.get(self.get_view_url(conv, 'reports'))
        self.assertEqual(
            self.error_log,
            ['Dashboard sync failed: '
             '400: {"message": ":(", "success": false}'])
        self.assertEqual(response.context['dashboard_config'], None)
    def test_get_dashboard_for_parse_error_handling(self):
        """A dashboard parse error is logged and dashboard_config is None."""
        conv = self.user_helper.create_conversation(u'dummy')
        def bad_add_entity(*a, **kw):
            raise DashboardParseError(':(')
        self.monkey_patch(DashboardLayout, 'add_entity', bad_add_entity)
        response = self.client.get(self.get_view_url(conv, 'reports'))
        self.assertEqual(self.error_log, [':('])
        self.assertEqual(response.context['dashboard_config'], None)
class TestConversationTasks(GoDjangoTestCase):
    """Tests for the background task that exports a conversation's
    messages as an emailed zipped CSV."""

    def setUp(self):
        self.vumi_helper = self.add_helper(
            DjangoVumiApiHelper())
        self.user_helper = self.vumi_helper.make_django_user()
        self.msg_helper = self.add_helper(
            GoMessageHelper(vumi_helper=self.vumi_helper))
    def create_conversation(self, name=u'dummy', reply_count=5,
                            time_multiplier=12,
                            start_date=date(2013, 1, 1)):
        """Create a conversation, optionally pre-populated with
        ``reply_count`` inbound messages and matching replies."""
        conv = self.user_helper.create_conversation(name)
        if reply_count:
            inbound_msgs = self.msg_helper.add_inbound_to_conv(
                conv, reply_count, start_date=start_date,
                time_multiplier=time_multiplier)
            self.msg_helper.add_replies_to_conv(conv, inbound_msgs)
        return conv
    def get_attachment(self, email, file_name):
        """Return the named email attachment as a file-like object, or
        None if no attachment matches."""
        for attachment in email.attachments:
            fn, attachment_content, mime_type = attachment
            if fn == file_name:
                return StringIO(attachment_content)
    def get_zipfile_attachment(
            self, email, attachment_file_name, zipfile_file_name):
        """Open ``zipfile_file_name`` inside the named zip attachment."""
        attachment = self.get_attachment(email, attachment_file_name)
        zipfile = ZipFile(attachment, 'r')
        return zipfile.open(zipfile_file_name, 'r')
    def test_export_conversation_messages_unsorted(self):
        """The export contains every inbound and outbound message id."""
        conv = self.create_conversation()
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        self.assertEqual(
            email.recipients(), [self.user_helper.get_django_user().email])
        self.assertTrue(conv.name in email.subject)
        self.assertTrue(conv.name in email.body)
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        message_ids = [row['message_id'] for row in reader]
        self.assertEqual(
            set(message_ids),
            set(conv.inbound_keys() + conv.outbound_keys()))
    def test_export_conversation_message_session_events(self):
        """Session events survive the round trip into the CSV export."""
        conv = self.create_conversation(reply_count=0)
        msg = self.msg_helper.make_stored_inbound(
            conv, "inbound", from_addr='from-1',
            session_event=TransportUserMessage.SESSION_NEW)
        reply = self.msg_helper.make_reply(
            msg, "reply", session_event=TransportUserMessage.SESSION_CLOSE)
        self.msg_helper.store_outbound(conv, reply)
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        events = [row['session_event'] for row in reader]
        self.assertEqual(
            set(events),
            set([TransportUserMessage.SESSION_NEW,
                 TransportUserMessage.SESSION_CLOSE]))
    def test_export_conversation_message_transport_types(self):
        """Transport types survive the round trip into the CSV export."""
        conv = self.create_conversation(reply_count=0)
        # SMS message
        self.msg_helper.make_stored_inbound(
            conv, "inbound", from_addr='from-1', transport_type='sms')
        # USSD message
        self.msg_helper.make_stored_inbound(
            conv, "inbound", from_addr='from-1', transport_type='ussd')
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        events = [row['transport_type'] for row in reader]
        self.assertEqual(
            set(events),
            set(['sms', 'ussd']))
    def test_export_conversation_message_directions(self):
        """Both message directions are labelled in the CSV export."""
        conv = self.create_conversation()
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        directions = [row['direction'] for row in reader]
        self.assertEqual(
            set(directions),
            set(['inbound', 'outbound']))
    def test_export_conversation_delivery_status(self):
        """Delivery report statuses appear in the CSV export."""
        conv = self.create_conversation(reply_count=0)
        msg = self.msg_helper.make_stored_outbound(
            conv, "outbound", to_addr='from-1')
        self.msg_helper.make_stored_delivery_report(msg=msg, conv=conv)
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        delivery_statuses = [row['delivery_status'] for row in reader]
        self.assertEqual(set(delivery_statuses), set(['delivered']))
    def test_export_conversation_ack(self):
        """An ack event is exported as network_handover_status 'ack'."""
        conv = self.create_conversation(reply_count=0)
        msg = self.msg_helper.make_stored_outbound(
            conv, "outbound", to_addr='from-1')
        self.msg_helper.make_stored_ack(msg=msg, conv=conv)
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        [row] = list(reader)
        self.assertEqual(row['network_handover_status'], 'ack')
    def test_export_conversation_nack(self):
        """A nack event is exported with its status and reason."""
        conv = self.create_conversation(reply_count=0)
        msg = self.msg_helper.make_stored_outbound(
            conv, "outbound", to_addr='from-1')
        self.msg_helper.make_stored_nack(msg=msg, conv=conv, nack_reason='foo')
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        [row] = list(reader)
        self.assertEqual(row['network_handover_status'], 'nack')
        self.assertEqual(row['network_handover_reason'], 'foo')
    def test_export_conversation_endpoints(self):
        """Routing endpoints are exported for both directions."""
        conv = self.create_conversation(reply_count=0)
        msg = self.msg_helper.make_outbound(
            "outbound", conv=conv, to_addr='from-1')
        msg.set_routing_endpoint('foo')
        self.msg_helper.store_outbound(conv, msg)
        # Fixed: the inbound message was previously constructed with
        # make_outbound() by copy-paste; build it as an inbound message.
        msg = self.msg_helper.make_inbound(
            "inbound", conv=conv, from_addr='from-1')
        msg.set_routing_endpoint('bar')
        self.msg_helper.store_inbound(conv, msg)
        export_conversation_messages_unsorted(conv.user_account.key, conv.key)
        [email] = mail.outbox
        fp = self.get_zipfile_attachment(
            email, 'messages-export.zip', 'messages-export.csv')
        reader = csv.DictReader(fp)
        [row1, row2] = list(reader)
        self.assertEqual(row1['direction'], 'inbound')
        self.assertEqual(row1['endpoint'], 'bar')
        self.assertEqual(row2['direction'], 'outbound')
        self.assertEqual(row2['endpoint'], 'foo')
| StarcoderdataPython |
3304450 | <filename>zerowka/zestaw2/zad2.py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load the price data and print the mean price per product type.
# NOTE(review): column names are Polish ('Rodzaje towarów' = product types,
# 'Wartość' = value, 'Rok' = year).
data = pd.read_excel('ceny2.xlsx')
print(data.groupby(['Rodzaje towarów'])['Wartość'].mean())
lata = data.Rok.unique()
nazwy = data['Rodzaje towarów'].unique()
# Price series for the first two product types (assumed: rice and flour,
# matching the plot labels -- confirm against the spreadsheet ordering).
ryz_wartosci = data[data['Rodzaje towarów'] == nazwy[0]]['Wartość']
maka_wartosci = data[data['Rodzaje towarów'] == nazwy[1]]['Wartość']
plt.plot(lata, ryz_wartosci, label = 'Ryż', color = 'violet')
plt.plot(lata, maka_wartosci, label = 'Mąka', color = 'navy')
plt.title('Zmiana cen produktów na przestrzeni lat')
plt.xlabel('Lata')
plt.ylabel('Cena w zł/kg')
# Student-id annotation placed inside the axes.
plt.annotate('21376969', xy=(2010,2))
plt.legend()
plt.savefig('zad2.jpg')
| StarcoderdataPython |
3242734 | <gh_stars>10-100
import pytest
import numpy as np
from copulae.special.clausen import clausen
from numpy.testing import assert_almost_equal
# Each case supplies input(s) x, expected value(s) exp and the number of
# decimal places dp to which the result must agree.
@pytest.mark.parametrize('x, exp, dp', [
    (np.arange(-2, 4.1, 0.4), [-0.727146050863279,
                               -0.905633219234944,
                               -1.00538981376486,
                               -0.98564887439532,
                               -0.767405894042677,
                               0,
                               0.767405894042678,
                               0.98564887439532,
                               1.00538981376486,
                               0.905633219234944,
                               0.727146050863279,
                               0.496799302036956,
                               0.23510832439665,
                               -0.0404765846184049,
                               -0.313708773770116,
                               -0.56814394442987
                               ], 6),
    (0.25, 0.596790672033802, 6),
    (1e-16, 3.78413614879047e-15, 20)
])
def test_clausen(x, exp, dp):
    """Clausen function matches reference values (scalar and vectorised)."""
    assert_almost_equal(clausen(x), exp, dp)
| StarcoderdataPython |
3334784 | <reponame>hspsh/pythonhacking-flask
import time
import pytest
from flask import url_for
def test_add_car(client):
    """The some_json endpoint returns two fields including a current
    epoch timestamp.

    NOTE(review): the name 'test_add_car' does not match what is tested;
    kept to avoid changing test discovery, but consider renaming.
    """
    resp = client.get(url_for('some_json'))
    assert resp.status_code == 200
    assert len(resp.json) == 2
    epoch = int(resp.json['epoch_time'])
    # Allow 1 second of slack between the request and this check.
    assert time.time() == pytest.approx(epoch, abs=1)
| StarcoderdataPython |
3356650 | #!/usr/bin/env python3
#
# consumption.py
"""
Class to represent consumption data.
"""
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# stdlib
from datetime import datetime
# 3rd party
import attr
from attr_utils.serialise import serde
from domdf_python_tools.doctools import prettify_docstrings
# this package
from octo_api.utils import add_repr, from_iso_zulu
__all__ = ["Consumption"]
@serde
@add_repr
@prettify_docstrings
@attr.s(slots=True, frozen=True, repr=False)
class Consumption:
	"""
	Represents the consumption for a given period of time.

	Instances are immutable (frozen) and serialisable via ``serde``;
	interval fields are parsed from ISO-8601 'Zulu' timestamps.
	"""

	#: The consumption.
	consumption: float = attr.ib()

	#: The start of the time period.
	interval_start: datetime = attr.ib(converter=from_iso_zulu)

	#: The end of the time period.
	interval_end: datetime = attr.ib(converter=from_iso_zulu)
| StarcoderdataPython |
3370950 | <reponame>vohoaiviet/PyReID<gh_stars>10-100
__author__ = 'luigolas'
import os
from package.image import Image
from package.utilities import ImagesNotFoundError, NotADirectoryError
class ImageSet(object):
    """A dataset of person re-identification images rooted at a folder.

    Collects all image files under ``folder_name`` and holds parallel
    train/test containers (files, images, masks, regions, maps, features)
    that other pipeline stages populate.
    """

    def __init__(self, folder_name, name_ids=2):
        # name_ids: how many trailing path components form the set name.
        self.path = ImageSet._valid_directory(folder_name)
        self.name = "_".join(self.path.split("/")[-name_ids:])
        # name = "_".join(name)
        self.files = self._read_all_files()
        self.dataset_len = len(self.files)
        if self.dataset_len == 0:
            raise ImagesNotFoundError("At folder " + self.path)
        self.files_train = []
        self.files_test = []
        self.images_train = []
        self.images_test = []
        self.masks_train = []
        self.masks_test = []
        self.regions_train = []
        self.regions_test = []
        self.maps_train = []
        self.maps_test = []
        self.fe_train = []
        self.fe_test = []
    def _read_all_files(self):
        # Recursively collect valid image paths, sorted for determinism.
        files = []
        for path, subdirs, files_order_list in os.walk(self.path):
            for filename in files_order_list:
                if ImageSet._valid_format(filename):
                    f = os.path.join(path, filename)
                    files.append(f)
        return sorted(files)
    def load_images(self):
        """Load train/test images from disk.

        If no train/test split has been made yet, all files are treated
        as the test partition.
        """
        self.images_train = []
        self.images_test = []
        if not self.files_test:  # If not initialized
            self.files_test = self.files
            self.files_train = []
        for imname in self.files_train:
            self.images_train.append(Image.from_filename(imname))
        for imname in self.files_test:
            self.images_test.append(Image.from_filename(imname))
    @staticmethod
    def _valid_format(name):
        # Accept jpg/png/bmp files; skip generated mask/filtered artefacts.
        # NOTE(review): the extension check uses substring matching, so a
        # name like 'a.jpg.bak' would also pass -- confirm intended.
        return ((".jpg" in name) or (".png" in name) or (
            ".bmp" in name)) and "MASK" not in name and "FILTERED" not in name
    @staticmethod
    def _valid_directory(folder_name):
        # Normalise the path (strip trailing slash) and validate it exists.
        if not os.path.isdir(folder_name):
            raise NotADirectoryError("Not a valid directory path: " + folder_name)
        if folder_name[-1] == '/':
            folder_name = folder_name[:-1]
        return folder_name
    def unload(self):
        """Release loaded data to free memory."""
        self.images_train = None
        self.images_train = None
        self.masks_train = None
        self.masks_test = None
        self.files = None
        self.files_train = None
        self.files_test = None
| StarcoderdataPython |
40645 | import django
# Now this is ugly.
# The django.db.backend.features that exist changes per version and per db :/
# Map the running Django version to the connection-feature flag(s) that
# indicate sufficient JSON support for this backend.
# NOTE(review): if the Django version matches none of these branches,
# has_sufficient_json_support is never defined and importers will hit a
# NameError -- confirm whether an explicit error is preferable.
if django.VERSION[:2] == (2, 2):
    has_sufficient_json_support = ('has_jsonb_agg',)
if django.VERSION[:2] == (3, 2):
    # This version of EasyDMP is not using Django's native JSONField
    # implementation, but the deprecated postgres-specific field. When no
    # longer supporting 2.2 this can be "has_native_json_field"
    has_sufficient_json_support = ('is_postgresql_10',)
if django.VERSION[:2] == (4, 0):
    has_sufficient_json_support = ('has_native_json_field',)
| StarcoderdataPython |
3357770 | <filename>tests/library/codelibnode_test.py
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.data import Array
from dace.properties import Property, make_properties
from dace.libraries.standard.nodes import CodeLibraryNode
from dace.codegen.targets.cpp import cpp_offset_expr
import numpy as np
from typing import Dict
@make_properties
class MyNode(CodeLibraryNode):
    """Code library node that copies its input to its output, adding a
    configurable constant to each element."""

    value_to_add = Property(dtype=int,
                            default=5,
                            desc="Value to add in custom code")
    def __init__(self):
        super().__init__(input_names=['inp'], output_names=['out'])
    def generate_code(self, inputs: Dict[str, Array], outputs: Dict[str,
                                                                    Array]):
        """Emit C++ nested loops computing out[i...] = inp[i...] + value."""
        assert len(inputs) == 1
        assert len(outputs) == 1
        inarr = inputs['inp']
        outarr = outputs['out']
        assert len(inarr.shape) == len(outarr.shape)

        # Construct for loops
        code = ''
        for dim, shp in enumerate(inarr.shape):
            code += f'for (int i{dim} = 0; i{dim} < {shp}; ++i{dim}) {{\n'

        # Construct index expressions from each array's strides.
        output_expr = ' + '.join(f'i{dim} * {stride}'
                                 for dim, stride in enumerate(outarr.strides))
        input_expr = ' + '.join(f'i{dim} * {stride}'
                                for dim, stride in enumerate(inarr.strides))
        code += \
            f'out[{output_expr}] = inp[{input_expr}] + {self.value_to_add};\n'

        # End for loops
        for dim in range(len(inarr.shape)):
            code += '}\n'

        return code
@make_properties
class MyNode2(CodeLibraryNode):
    """Code library node that copies its input to its output, multiplying
    each element by a configurable constant.

    NOTE(review): structurally a near-duplicate of MyNode (only the
    operator and property differ) -- presumably intentional for the test.
    """

    value_to_mul = Property(dtype=int,
                            default=2,
                            desc="Value to mul in custom code")
    def __init__(self):
        super().__init__(input_names=['inp'], output_names=['out'])
    def generate_code(self, inputs: Dict[str, Array], outputs: Dict[str,
                                                                    Array]):
        """Emit C++ nested loops computing out[i...] = inp[i...] * value."""
        assert len(inputs) == 1
        assert len(outputs) == 1
        inarr = inputs['inp']
        outarr = outputs['out']
        assert len(inarr.shape) == len(outarr.shape)

        # Construct for loops
        code = ''
        for dim, shp in enumerate(inarr.shape):
            code += f'for (int i{dim} = 0; i{dim} < {shp}; ++i{dim}) {{\n'

        # Construct index expressions from each array's strides.
        output_expr = ' + '.join(f'i{dim} * {stride}'
                                 for dim, stride in enumerate(outarr.strides))
        input_expr = ' + '.join(f'i{dim} * {stride}'
                                for dim, stride in enumerate(inarr.strides))
        code += \
            f'out[{output_expr}] = inp[{input_expr}] * {self.value_to_mul};\n'

        # End for loops
        for dim in range(len(inarr.shape)):
            code += '}\n'

        return code
def test_custom_code_node():
    """Build an SDFG chaining MyNode and MyNode2 and verify the custom
    generated code, including after changing node properties."""
    # Construct graph
    sdfg = dace.SDFG('custom_code')
    sdfg.add_array('A', [20, 30], dace.float64)
    sdfg.add_array('B', [20, 30], dace.float64)
    sdfg.add_array('C', [20, 30], dace.float64)
    state = sdfg.add_state()
    a = state.add_read('A')
    node = MyNode()
    b = state.add_access('B')
    node2 = MyNode2()
    c = state.add_write('C')
    # A[10:20, 10:30] -> (+) -> B[0:10, 0:20] -> (*) -> C[0:10, 0:20]
    state.add_edge(a, None, node, 'inp',
                   dace.Memlet.simple('A', '10:20, 10:30'))
    state.add_edge(node, 'out', b, None, dace.Memlet.simple('B', '0:10, 0:20'))
    state.add_edge(b, None, node2, 'inp', dace.Memlet.simple('B', '0:10, 0:20'))
    state.add_edge(node2, 'out', c, None, dace.Memlet.simple('C', '0:10, 0:20'))

    # Run graph with default node value
    A = np.random.rand(20, 30)
    B = np.random.rand(20, 30)
    C = np.random.rand(20, 30)
    sdfg(A=A, B=B, C=C)
    assert np.allclose(B[0:10, 0:20], A[10:20, 10:30] + 5)
    assert np.allclose(B[0:10, 0:20] * 2, C[0:10, 0:20])

    # Try again with a different value
    node.value_to_add = 7
    node2.value_to_mul = 3
    sdfg(A=A, B=B, C=C)
    assert np.allclose(B[0:10, 0:20], A[10:20, 10:30] + 7)
    assert np.allclose(B[0:10, 0:20] * 3, C[0:10, 0:20])

if __name__ == '__main__':
    test_custom_code_node()
| StarcoderdataPython |
3222505 | # Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from ..contexts.project import build_validate_parser as build_parser
from ..contexts.project import get_validate_sensitive_args as get_sensitive_args
__all__ = [
'build_parser',
'get_sensitive_args',
]
| StarcoderdataPython |
100689 | # -*- coding: utf8 -*-
import os
import unittest
import das # pylint: disable=import-error
class TestCase(unittest.TestCase):
    """Round-trip compatibility tests for the ``das`` schema library.

    A fixture written with a legacy field name (``oldField``) must be read
    back, exposed, and re-serialized under the new name (``newField``).
    """

    # Populated by setUpClass:
    TestDir = None     # directory containing this test and its fixtures
    InputFile = None   # fixture written with the old schema
    OutputFile = None  # scratch output, removed after every test

    @classmethod
    def setUpClass(cls):
        cls.TestDir = os.path.abspath(os.path.dirname(__file__))
        cls.InputFile = cls.TestDir + "/old.compat"
        cls.OutputFile = cls.TestDir + "/out.compat"
        # das resolves schema files through this environment variable.
        os.environ["DAS_SCHEMA_PATH"] = cls.TestDir

    def setUp(self):
        self.addCleanup(self.cleanUp)

    def tearDown(self):
        pass

    def cleanUp(self):
        # Remove the scratch output so individual tests stay independent.
        if os.path.isfile(self.OutputFile):
            os.remove(self.OutputFile)

    @classmethod
    def tearDownClass(cls):
        del(os.environ["DAS_SCHEMA_PATH"])

    # Test functions
    def testSet(self):
        # Assigning through the legacy field name must still be accepted.
        r = das.make_default("compatibility.SomeType")
        r.oldField = ["hello", "world"]

    def testOld(self):
        # Reading an old-schema file exposes the value under the new name.
        r = das.read(self.InputFile)
        self.assertEqual(r.newField == ["hello", "world"], True)

    def testNew(self):
        # Writing back must serialize under the new field name.
        r = das.read(self.InputFile)
        das.write(r, self.OutputFile)
        with open(self.OutputFile, "r") as f:
            # NOTE(review): eval() on file contents -- acceptable only
            # because the fixture is trusted, locally-written test data.
            d = eval(f.read())
        self.assertEqual(d["newField"] == ["hello", "world"], True)
| StarcoderdataPython |
42333 | <gh_stars>100-1000
import consus
# Smoke-test every Client constructor signature: no arguments, host as bytes
# and str, separate host/port, "host:port" strings, and comma-separated
# address lists.  Each client must open and commit one empty transaction.
# Bug fix: the original called c1.begin_transaction() for t2..t9, so clients
# c2..c9 were constructed but never actually exercised.
c1 = consus.Client()
t1 = c1.begin_transaction()
t1.commit()

c2 = consus.Client(b'127.0.0.1')
t2 = c2.begin_transaction()
t2.commit()

c3 = consus.Client('127.0.0.1')
t3 = c3.begin_transaction()
t3.commit()

c4 = consus.Client(b'127.0.0.1', 1982)
t4 = c4.begin_transaction()
t4.commit()

c5 = consus.Client('127.0.0.1', 1982)
t5 = c5.begin_transaction()
t5.commit()

c6 = consus.Client(b'127.0.0.1:1982')
t6 = c6.begin_transaction()
t6.commit()

c7 = consus.Client('127.0.0.1:1982')
t7 = c7.begin_transaction()
t7.commit()

c8 = consus.Client(b'[::]:1982,127.0.0.1:1982')
t8 = c8.begin_transaction()
t8.commit()

c9 = consus.Client('[::]:1982,127.0.0.1:1982')
t9 = c9.begin_transaction()
t9.commit()
| StarcoderdataPython |
1731386 | <filename>modules/log.py
from datetime import datetime
def log(function):
    """Decorator that appends a line to ./Logs/log.txt each time the
    decorated function is called, then returns the function's result.

    Bug fixes relative to the original:
    - the original returned ``wrapper()`` (calling it at decoration time),
      so logging happened once at import and the wrapped call was never
      intercepted;
    - ``wrapper`` returned the function object instead of invoking it;
    - the ./Logs directory is now created if missing, so the append cannot
      fail with FileNotFoundError.
    """
    def wrapper(*args, **kwargs):
        import os
        os.makedirs('./Logs', exist_ok=True)
        with open('./Logs/log.txt', 'a') as log_file:
            log_file.writelines(
                f'Function {function.__name__} initialized at'
                f' {datetime.now()} \n'
            )
        # Forward the call transparently to the wrapped function.
        return function(*args, **kwargs)
    return wrapper
1709741 | <reponame>dapqa/dapqa-fast-mf<filename>dfmf/model/_SVD.py
import numpy as np
from numba import jit, types
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_X_y, check_array
@jit(
    types.Tuple((
        types.Array(types.float64, 2, 'C'),
        types.Array(types.float64, 2, 'C'),
        types.Array(types.float64, 1, 'C'),
        types.Array(types.float64, 1, 'C'),
        types.float64
    ))(
        types.Array(types.int64, 2, 'C'),
        types.Array(types.float64, 1, 'C'),
        types.Array(types.int64, 1, 'C'),
        types.Array(types.int64, 1, 'C'),
        types.int64,
        types.int64,
        types.float64,
        types.float64
    ),
    locals={
        'mean_rating': types.float64,
        'user_factors': types.Array(types.float64, 2, 'C'),
        'item_factors': types.Array(types.float64, 2, 'C'),
        'user_biases': types.Array(types.float64, 1, 'C'),
        'item_biases': types.Array(types.float64, 1, 'C'),
        'row': types.Array(types.int64, 1, 'C'),
        'original_row': types.Array(types.int64, 1, 'C'),
        'r_ui': types.int64,
        'u_idx': types.int64,
        'i_idx': types.int64,
        'b_u': types.float64,
        'b_i': types.float64,
        'p_u': types.Array(types.float64, 1, 'C'),
        'q_i': types.Array(types.float64, 1, 'C'),
        'r_ui_pred': types.float64,
        'e_ui': types.float64,
    },
    nopython=True,
    fastmath=True,
    cache=True,
)
def _fit_svd(
        X_train_widx,
        y_train,
        user_ids_uq,
        item_ids_uq,
        n_factors,
        n_epochs,
        learning_rate,
        reg
):
    """SGD training loop for biased matrix factorization (Funk-style SVD).

    Fits r_ui ~= mean_rating + b_u + b_i + p_u . q_i over ``n_epochs``
    passes, with learning rate ``learning_rate`` and L2 penalty ``reg``.
    ``X_train_widx`` holds dense (user_index, item_index) pairs.

    Returns (user_factors, item_factors, user_biases, item_biases,
    mean_rating).

    NOTE(review): the jit ``locals`` declare ``r_ui`` as int64 while
    ``y_train`` is float64, so non-integer ratings are truncated before
    the error is computed -- confirm that is intended.
    """
    mean_rating = np.mean(y_train)
    # Factors start as small gaussian noise; biases start at zero.
    user_factors = np.random.normal(size=(len(user_ids_uq), n_factors), loc=0, scale=.1)
    item_factors = np.random.normal(size=(len(item_ids_uq), n_factors), loc=0, scale=.1)
    user_biases = np.zeros(len(user_ids_uq))
    item_biases = np.zeros(len(item_ids_uq))

    # b_u/p_u (resp. b_i/q_i) act as a one-slot cache of the current user's
    # (item's) parameters.  A slot is flushed back to its array only when the
    # row's index changes, which saves array traffic when consecutive rows
    # share a user or item and remains correct for any row order.
    b_u = user_biases[0]
    b_i = item_biases[0]
    p_u = user_factors[0]
    q_i = item_factors[0]
    prev_u_idx = 0
    prev_i_idx = 0
    for epoch_number in range(n_epochs):
        for i in range(len(X_train_widx)):
            r_ui = y_train[i]
            row = X_train_widx[i]
            u_idx, i_idx = row[0], row[1]
            # Reading/Writing variables
            if prev_u_idx != u_idx:
                user_biases[prev_u_idx] = b_u
                user_factors[prev_u_idx] = p_u
                b_u = user_biases[u_idx]
                p_u = user_factors[u_idx]
                prev_u_idx = u_idx
            if prev_i_idx != i_idx:
                item_biases[prev_i_idx] = b_i
                item_factors[prev_i_idx] = q_i
                b_i = item_biases[i_idx]
                q_i = item_factors[i_idx]
                prev_i_idx = i_idx

            # Calculating the prediction and its error
            r_ui_pred = mean_rating + b_u + b_i
            for j in range(n_factors):
                r_ui_pred = r_ui_pred + p_u[j] * q_i[j]
            e_ui = r_ui - r_ui_pred

            # Updating biases (standard SGD step with L2 regularization)
            b_u += learning_rate * (e_ui - reg * b_u)
            b_i += learning_rate * (e_ui - reg * b_i)

            # Updating factors; pre-update values are captured so the p and q
            # updates both use the same (old) counterpart.
            for j in range(n_factors):
                p_u_j = p_u[j]
                q_i_j = q_i[j]
                p_u[j] = p_u_j + learning_rate * (e_ui * q_i_j - reg * p_u_j)
                q_i[j] = q_i_j + learning_rate * (e_ui * p_u_j - reg * q_i_j)

    # Biases and factors are not updated at the last iteration, so here the manual last update
    user_biases[prev_u_idx] = b_u
    user_factors[prev_u_idx] = p_u
    item_biases[prev_i_idx] = b_i
    item_factors[prev_i_idx] = q_i
    return user_factors, item_factors, user_biases, item_biases, mean_rating
@jit(
    types.Array(types.float64, 1, 'C')(
        types.Array(types.int64, 2, 'C'),
        types.Array(types.int64, 2, 'C'),
        types.Array(types.int64, 1, 'C'),
        types.Array(types.int64, 1, 'C'),
        types.Array(types.float64, 2, 'C'),
        types.Array(types.float64, 2, 'C'),
        types.Array(types.float64, 1, 'C'),
        types.Array(types.float64, 1, 'C'),
        types.float64
    ),
    nopython=True,
    fastmath=True,
    cache=True,
)
def _predict_svd(
        X_test_widx,
        X_test,
        user_ids_uq,
        item_ids_uq,
        user_factors,
        item_factors,
        user_biases,
        item_biases,
        mean_rating
):
    """Predict r_ui = mean_rating + b_u + b_i + p_u . q_i for each test row.

    ``X_test_widx`` holds searchsorted positions into the sorted unique id
    arrays; ``X_test`` holds the raw (user_id, item_id) pairs.  Rows whose
    position is out of range, or whose stored id differs from the queried id
    (i.e. a user/item unseen at fit time), keep the global ``mean_rating``.
    """
    # Pre-fill with the global mean so unseen pairs need no extra handling.
    y_pred = np.full((len(X_test_widx)), mean_rating)
    n_factors = len(user_factors[0])
    for i in range(len(X_test_widx)):
        row = X_test_widx[i]
        original_row = X_test[i]
        u_idx, i_idx = row[0], row[1]
        # searchsorted returns an insertion point for unknown ids; detect
        # that by comparing the stored id with the queried one.
        if u_idx < 0 or u_idx >= len(user_ids_uq) or user_ids_uq[u_idx] != original_row[0] \
                or i_idx < 0 or i_idx >= len(item_ids_uq) or item_ids_uq[i_idx] != original_row[1]:
            continue
        b_u = user_biases[u_idx]
        b_i = item_biases[i_idx]
        p_u = user_factors[u_idx, :]
        q_i = item_factors[i_idx, :]
        r_ui_pred = mean_rating + b_u + b_i
        for j in range(n_factors):
            r_ui_pred = r_ui_pred + p_u[j] * q_i[j]
        y_pred[i] = r_ui_pred
    return y_pred
class SVD(BaseEstimator, RegressorMixin):
    """Biased matrix-factorization rating predictor (Funk-style SVD).

    ``X`` is an (n_samples, 2) integer array of ``[user_id, item_id]``
    pairs and ``y`` the corresponding ratings.  Training and prediction
    are delegated to the numba-compiled module kernels ``_fit_svd`` and
    ``_predict_svd``.

    Fix: ``fit`` now returns ``self`` per the sklearn estimator contract
    (enables ``SVD().fit(X, y).predict(X2)`` and pipeline use); callers
    that ignored the previous ``None`` return are unaffected.
    """

    def __init__(self,
                 n_factors=100,
                 reg=0.02,
                 learning_rate=0.005,
                 n_epochs=20
                 ) -> None:
        super().__init__()
        # TODO Predefined ids may be usable
        self.user_ids_uq = None  # sorted unique user ids seen during fit
        self.item_ids_uq = None  # sorted unique item ids seen during fit
        self.n_factors = n_factors
        self.reg = reg
        self.learning_rate = learning_rate
        self.n_epochs = n_epochs
        self.user_factors = None
        self.item_factors = None
        self.user_biases = None
        self.item_biases = None
        self.mean_rating = 0

    def fit(self, X, y):
        """Learn factors and biases from (user, item) pairs and ratings.

        :return: ``self`` (sklearn convention).
        """
        X, y = check_X_y(X, y)

        if self.user_ids_uq is None:
            self.user_ids_uq = np.unique(X[:, 0])
        if self.item_ids_uq is None:
            self.item_ids_uq = np.unique(X[:, 1])

        # Map raw ids onto dense indices via binary search on sorted uniques.
        X_widx = np.c_[
            np.searchsorted(self.user_ids_uq, X[:, 0]).T,
            np.searchsorted(self.item_ids_uq, X[:, 1]).T
        ]

        self.user_factors, self.item_factors, self.user_biases, self.item_biases, self.mean_rating = _fit_svd(
            X_train_widx=X_widx.astype('int64'),
            y_train=y.astype('float64'),
            user_ids_uq=self.user_ids_uq.astype('int64'),
            item_ids_uq=self.item_ids_uq.astype('int64'),
            n_factors=self.n_factors,
            n_epochs=self.n_epochs,
            learning_rate=self.learning_rate,
            reg=self.reg
        )
        return self

    def predict(self, X):
        """Predict ratings; unseen users/items get the global mean rating."""
        X = check_array(X)

        X_widx = np.c_[
            np.searchsorted(self.user_ids_uq, X[:, 0]).T,
            np.searchsorted(self.item_ids_uq, X[:, 1]).T
        ]

        return _predict_svd(
            X_test_widx=X_widx.astype('int64'),
            X_test=X.astype('int64'),
            user_ids_uq=self.user_ids_uq.astype('int64'),
            item_ids_uq=self.item_ids_uq.astype('int64'),
            user_factors=self.user_factors,
            item_factors=self.item_factors,
            user_biases=self.user_biases,
            item_biases=self.item_biases,
            mean_rating=self.mean_rating
        )
| StarcoderdataPython |
1730848 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dummy volatile module
"""
import time
import json
class DummyVolatileStore(object):
    """File-backed stand-in for a volatile (memcached-like) key/value store.

    All state lives in one JSON file (``_path``), shaped as
    ``{'s': {key: value}, 't': {key: expiry_epoch_seconds}}``; every
    operation re-reads and, when mutating, rewrites the whole file.
    Values under keys containing ``'ovs_primarykeys_'`` have their first
    element converted set<->list around (de)serialization, since sets are
    not JSON-serializable.

    Fix: file handles are now managed with ``with`` so a failing
    ``json.loads``/``write`` can no longer leak an open handle.
    """

    _path = '/tmp/dummyvolatile.json'
    _storage = {}  # kept for interface compatibility; unused here
    _timeout = {}  # kept for interface compatibility; unused here

    @staticmethod
    def clean():
        """Empty the store by deleting the backing file (ignore if absent)."""
        import os
        try:
            os.remove(DummyVolatileStore._path)
        except OSError:
            pass

    def _read(self):
        """Load the backing file; return a fresh empty structure if absent."""
        try:
            with open(self._path, 'r') as f:
                data = json.loads(f.read())
        except IOError:
            data = {'t': {}, 's': {}}
        return data

    def get(self, key, default=None):
        """Return the value for ``key`` if present and not expired."""
        data = self._read()
        if key in data['t'] and data['t'][key] > time.time():
            value = data['s'].get(key)
            if 'ovs_primarykeys_' in key:
                # Primary-key lists are stored as JSON lists; restore the set.
                value[0] = set(value[0])
            return value
        return default

    def set(self, key, value, timeout=99999999):
        """Store ``value`` under ``key``, expiring ``timeout`` seconds from now.

        NOTE: for primary-key entries this mutates the caller's ``value``
        in place (set -> list), matching the original behavior.
        """
        if 'ovs_primarykeys_' in key:
            value[0] = list(value[0])
        data = self._read()
        data['s'][key] = value
        data['t'][key] = time.time() + timeout
        self._save(data)

    def add(self, key, value, timeout=99999999):
        """Store ``value`` only when ``key`` is absent; True when stored."""
        data = self._read()
        if key not in data['s']:
            self.set(key, value, timeout)
            return True
        else:
            return False

    def delete(self, key):
        """Remove ``key`` and its expiry, if present."""
        data = self._read()
        if key in data['s']:
            del data['s'][key]
            del data['t'][key]
            self._save(data)

    def incr(self, key, delta=1):
        """Add ``delta`` to an existing value; True on success, False if absent."""
        data = self._read()
        if key in data['s']:
            data['s'][key] += delta
            self._save(data)
            return True
        return False

    def _save(self, data):
        """Rewrite the backing file with ``data`` serialized as pretty JSON."""
        rawdata = json.dumps(data, sort_keys=True, indent=2)
        with open(self._path, 'w+') as f:
            f.write(rawdata)
| StarcoderdataPython |
20468 | <filename>pydocteur/actions.py
import json
import logging
import os
import random
import time
from functools import lru_cache
from github import Github
from github import PullRequest
from pydocteur.github_api import get_commit_message_for_merge
from pydocteur.github_api import get_trad_team_members
from pydocteur.pr_status import is_already_greeted
from pydocteur.pr_status import is_first_time_contributor
from pydocteur.settings import GH_TOKEN
from pydocteur.settings import REPOSITORY_NAME
from pydocteur.settings import VERSION
logger = logging.getLogger("pydocteur")
COMMENT_BODIES_FILEPATH = os.path.join(os.path.dirname(__file__), "../comment_bodies.json")
END_OF_BODY = """
---
<details>
<summary>Disclaimer</summary>
Je suis un robot fait par l'équipe de [l'AFPy et de Traduction](https://github.com/AFPy/PyDocTeur/graphs/contributors)
sur leur temps libre. Je risque de dire des bétises. Ne me blâmez pas, blamez les développeurs.
[Code source](https://github.com/afpy/pydocteur)
I'm a bot made by the [Translation and AFPy teams](https://github.com/AFPy/PyDocTeur/graphs/contributors) on their free
time. I might say or do dumb things sometimes. Don't blame me, blame the developer !
[Source code](https://github.com/afpy/pydocteur)
(state: {state})
`PyDocTeur {version}`
</details>
"""
def replace_body_variables(pr: PullRequest, body: str):
    """Substitute @$AUTHOR, @$REVIEWERS and @$MERGEABLE_STATE in *body*.

    Reviewers default to the whole translation team when nobody has
    reviewed the PR yet; the author is never listed as a reviewer.

    :param pr: pull request providing author, reviews and mergeable state
    :param body: comment template containing the @$ placeholders
    :return: the body with all placeholders replaced
    """
    logger.debug("Replacing variables")
    author = pr.user.login
    reviewers_login = {review.user.login for review in pr.get_reviews()}
    new_body = body.replace("@$AUTHOR", "@" + author)
    if not reviewers_login:
        # No reviews yet: ping the whole translation team instead.
        reviewers_login = get_trad_team_members()
    # Never list the author as their own reviewer.
    reviewers_login.discard(author)
    reviewers = ", @".join(reviewers_login)
    new_body = new_body.replace("@$REVIEWERS", "@" + reviewers)
    new_body = new_body.replace("@$MERGEABLE_STATE", pr.mergeable_state)
    return new_body
@lru_cache()
def get_comment_bodies(state):
    """Return the list of canned comment bodies for *state* (memoized).

    Reads the JSON file at COMMENT_BODIES_FILEPATH once per distinct state;
    returns None when the state has no entry.
    """
    logger.debug(f"Getting comment bodies for {state}")
    with open(COMMENT_BODIES_FILEPATH, "r") as handle:
        all_bodies = json.load(handle)
    return all_bodies.get(state)
def comment_pr(pr: PullRequest, state: str):
    """Post one randomly-chosen canned comment matching *state* on the PR.

    Does nothing (beyond a warning) when no bodies exist for that state.
    """
    bodies = get_comment_bodies(state)
    if not bodies:
        logger.warning(f"PR #{pr.number}: No comment for state {state}")
        return
    body = random.choice(bodies)
    body = replace_body_variables(pr, body)
    logger.info(f"PR #{pr.number}: Commenting.")
    # Every bot comment carries the disclaimer footer with state + version.
    pr.create_issue_comment(body + END_OF_BODY.format(state=state, version=VERSION))
def merge_and_thank_contributors(pr: PullRequest, state: str):
    """Squash-merge an approved, mergeable PR and post thanks.

    Bails out when no repository collaborator has reviewed the PR.  When
    GitHub reports the PR unmergeable, posts a canned "unmergeable" comment
    instead of merging.  Otherwise posts a warning comment, merges with a
    squash commit, and posts a thanks comment.
    """
    gh = Github(GH_TOKEN if GH_TOKEN else None)
    repo = gh.get_repo(REPOSITORY_NAME)
    contributor_usernames = [u.login for u in repo.get_collaborators()]
    reviewer_usernames = [i.user.login for i in pr.get_reviews()]
    # Require at least one review by a repository collaborator.
    if not any(x in reviewer_usernames for x in contributor_usernames):
        logger.info("PR not reviewed by a contributor, not merging.")
        return

    logger.info(f"Testing if PR #{pr.number} can be merged")
    if not pr.mergeable or pr.mergeable_state != "clean":
        logger.warning(f"PR #{pr.number} cannot be merged. mergeable_state={pr.mergeable_state}")
        unmergeable_comments = get_comment_bodies("unmergeable")
        body = random.choice(unmergeable_comments)
        body = replace_body_variables(pr, body)
        pr.create_issue_comment(body + END_OF_BODY.format(state=state, version=VERSION))
        return

    logger.info(f"PR #{pr.number}: About to merge")
    warnings = get_comment_bodies("automerge_approved_testok")
    thanks = get_comment_bodies("automerge_approved_testok-done")

    # Warn on the PR just before merging so watchers see it coming.
    logger.info(f"PR #{pr.number}: Sending warning before merge")
    warning_body = random.choice(warnings)
    warning_body = replace_body_variables(pr, warning_body)
    pr.create_issue_comment(warning_body + END_OF_BODY.format(state=state, version=VERSION))
    logger.debug(f"PR #{pr.number}: Sleeping one second")
    time.sleep(1)

    message = get_commit_message_for_merge(pr)
    pr.merge(merge_method="squash", commit_message=message)
    logger.info(f"PR #{pr.number}: Merged.")

    logger.info(f"PR #{pr.number}: Sending thanks after merge")
    thanks_body = random.choice(thanks)
    thanks_body = replace_body_variables(pr, thanks_body)
    pr.create_issue_comment(thanks_body + END_OF_BODY.format(state=state, version=VERSION))
def maybe_greet_user(pr: PullRequest):
    """Post a one-time welcome comment on a first-time contributor's PR."""
    if is_first_time_contributor(pr) and not is_already_greeted(pr):
        bodies = get_comment_bodies("greetings")
        body = random.choice(bodies)
        body = replace_body_variables(pr, body)
        logger.info(f"PR #{pr.number}: Greeting {pr.user.login}")
        pr.create_issue_comment(body + END_OF_BODY.format(state="greetings", version=VERSION))
# TODO: Check if changing state for incorrect title may not create a bug where PyDocTeur might repeat itself
def comment_about_title(pr: PullRequest):
    """Tell the author that the PR title does not follow the expected format."""
    bodies = get_comment_bodies("incorrect_title")
    body = random.choice(bodies)
    body = replace_body_variables(pr, body)
    logger.info(f"PR #{pr.number}: Sending incorrect title message")
    pr.create_issue_comment(body + END_OF_BODY.format(state="incorrect_title", version=VERSION))


def comment_about_rerun_workflow(pr: PullRequest):
    """Post the canned message explaining how to re-run the CI workflow."""
    bodies = get_comment_bodies("rerun_workflow")
    body = random.choice(bodies)
    body = replace_body_variables(pr, body)
    logger.info(f"PR #{pr.number}: Sending rerun workflow message")
    pr.create_issue_comment(body + END_OF_BODY.format(state="rerun_workflow", version=VERSION))
| StarcoderdataPython |
60268 | # -*- coding: utf-8 -*-
class Pessoa:
    """Model of a person with a name, an age, and optional children."""

    temDeficiencia = False  # class-level default, shared until shadowed

    def __init__(self, *filhos, nome=None, idade=0):
        self.nome = nome
        self.idade = idade
        self.filhos = list(filhos)

    def cumprimentar(self):
        """Return a short greeting introducing this person."""
        return f"Ola, meu nome é {self.nome}"

    def __repr__(self):
        lines = [f"Meu nome é {self.nome} e tenho {self.idade} anos"]
        if self.filhos:
            lines.append(f"Tenho {len(self.filhos)} filhos:")
            lines.extend(f"==> {filho}" for filho in self.filhos)
        return "\n".join(lines)


class Homem(Pessoa):
    """A Pessoa whose greeting is followed by a handshake."""

    def cumprimentar(self):
        base_greeting = super().cumprimentar()
        return f"{base_greeting}. Aperto de mão"
if __name__ == '__main__':
    filhos = [('Arthur', 7), ('Alice', 0)]
    pai = Homem(nome='<NAME>', idade=41)
    mae = Pessoa(nome='<NAME>', idade=39)
    for filho in filhos:
        pai.filhos.append(Pessoa(nome=filho[0], idade=filho[1]))
    print(pai)
    # The __dict__ attribute lists all instance attributes of an object.
    # Printing the object BEFORE creating an instance attribute.
    print(pai.__dict__)
    # Creating a boolean attribute 'casado' on the 'pai' object
    # (this attribute does not exist on other Pessoa instances).
    pai.casado = True
    # Printing the instance attributes again.
    print(pai.__dict__)
    mae.filhos = pai.filhos.copy()
    # Shadows the class attribute with an instance attribute on 'mae' only.
    mae.temDeficiencia = True
    print(mae)
    print(pai.__dict__)
    print(mae.__dict__)
    # test: 'pai' still reads the class attribute, 'mae' reads her own.
    print(pai.temDeficiencia)
    print(mae.temDeficiencia)
    print(pai.cumprimentar())
    print(mae.cumprimentar())
| StarcoderdataPython |
141141 | <reponame>corona10/dc_cnu
class Queue(object):
    """Simple FIFO queue backed by a Python list.

    NOTE: ``get`` uses ``list.pop(0)``, which is O(n); switching the
    backing store to ``collections.deque`` would change ``__str__`` output,
    so the list is kept for compatibility.
    """

    def __init__(self):
        self.q = []

    def size(self):
        """Return the number of queued items."""
        return len(self.q)

    def empty(self):
        """Return True when the queue holds no items.

        Bug fix: the original used ``len(self.q) is 0`` -- identity
        comparison against an int literal is implementation-dependent.
        """
        return len(self.q) == 0

    def put(self, data):
        """Append *data* to the back of the queue."""
        self.q.append(data)

    def get(self):
        """Remove and return the front item (IndexError when empty)."""
        data = self.q.pop(0)
        return data

    def __str__(self):
        return self.q.__str__()
if __name__ == "__main__":
    # NOTE(review): this driver is Python 2 only (raw_input and print
    # statements); the Queue class above runs on both 2 and 3.
    q = Queue()
    # Read items until the user submits an empty line.
    while True:
        str_input = raw_input("Enter put item (finish: no input) : ")
        if len(str_input) < 1:
            print "Finished input item"
            break
        q.put(str_input)
        print str_input, "input"
    print "==============================="
    print "Queue size : ", q.size()
    print "==============================="
    # Drain the queue, reporting each removed item and the remaining size.
    while True:
        if q.empty():
            print "Queue is empty"
            break
        get_item = q.get()
        print get_item + " get, Queue size:", q.size()
| StarcoderdataPython |
3324313 | import requests
import time, csv
hashes = [
'QmSQfLsPDKaFJM3SPKZYU971XSCArXKVCAWcEzQaKgtxQp', # file_hash 1.3KB
'QmUTcaZg3UVqKxM8GvjEVjtNaFDCsG3EAQAU4nzmdi7Vis', # image_hash 3.6MB
'QmbvdTQ5eCS7MqBfqv93Pdhy74p95XHuLh5nzSmaQiu6wk' #video_hash 153.3MB
]
gateway_providers = [
{
'provider': 'Protocol Labs',
'gateway_url': 'https://ipfs.io/ipfs/'
},
{
'provider': 'Cloudflare',
'gateway_url': 'https://cloudflare-ipfs.com/ipfs/'
},
{
'provider': 'Infura',
'gateway_url': 'https://ipfs.infura.io/ipfs/'
},
{
'provider': 'Pinata',
'gateway_url': 'https://gateway.pinata.cloud/ipfs/'
},
{
'provider': 'Eternum',
'gateway_url': 'https://ipfs.eternum.io/ipfs/'
},
{
'provider': 'Siderus',
'gateway_url': 'https://siderus.io/ipfs/'
},
{
'provider': 'Temporal',
'gateway_url': 'https://gateway.temporal.cloud/ipfs/'
}
]
def test_url(url):
    """Fetch *url* once and return the elapsed wall-clock time in milliseconds."""
    started = time.time()
    requests.get(url)
    elapsed_seconds = time.time() - started
    return elapsed_seconds * 1000
# Benchmark loop: 10 timed fetches per (gateway, hash) pair, one CSV row each.
with open('results.csv', 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=',')
    writer.writerow(['provider', 'hash', 'time (ms)'])
    for provider_entry in gateway_providers:
        base_url = provider_entry['gateway_url']
        for hash in hashes:
            call_url = base_url + hash
            for i in range(10):
                result = test_url(call_url)
                writer.writerow([provider_entry['provider'], hash, result])
                # Pause between requests to avoid hammering the gateway.
                time.sleep(1)
                print(f"Finished attempt {i} through the {provider_entry['provider']} IPFS gateway for hash: {hash}")
1659851 | from django.db import models
from django.db.models import F, Q, Sum, Case, When, Value as V
from django.db.models.functions import Coalesce
from django.contrib.auth import get_user_model
# Create your managers here.
class InventoryQuerySet(models.QuerySet):
    """QuerySet helpers filtering inventory rows by movement type."""

    def shipping(self):
        # Outgoing stock movements only.
        return self.filter(type__exact=Inventory.Type.SHIPPING)

    def receiving(self):
        # Incoming stock movements only.
        return self.filter(type__exact=Inventory.Type.RECEIVING)


class InventoryManager(models.Manager):
    """Aggregate reporting queries over inventory rows."""

    def summarize(self):
        """Per-commodity totals with shipping/receiving sub-totals.

        The trailing empty ``.order_by()`` clears the model's default
        ordering so the GROUP BY collapses to the ``values()`` columns only.
        NOTE(review): the shipping total uses Sum(Case(When(...))) while
        receiving uses Sum(filter=...); both are conditional sums --
        presumably kept different deliberately, but worth unifying.
        """
        SHIPPING = Inventory.Type.SHIPPING
        RECEIVING = Inventory.Type.RECEIVING
        return self.values('commodity') \
            .annotate(commodity_name = F('commodity__name')) \
            .annotate(total_quantity = Coalesce(Sum('quantity'), V(0))) \
            .annotate(shipping_quantity = Coalesce(Sum(Case(When(type=SHIPPING, then=F('quantity')), default=0)), V(0))) \
            .annotate(receiving_quantity = Coalesce(Sum('quantity', filter=Q(type=RECEIVING)), V(0))) \
            .order_by()

    def list_glutted_commodities(self, quantity):
        """Commodities whose total stock is at least *quantity*, largest first."""
        return self.values('commodity') \
            .annotate(total_quantity = Coalesce(Sum('quantity'), V(0))) \
            .filter(total_quantity__gte=quantity) \
            .order_by('-total_quantity')
# Create your models here.
class TradePartner(models.Model):
    """A company we ship commodities to or receive them from."""
    name = models.CharField(max_length=100)
    address = models.CharField(max_length=512, default='', blank=True)

    class Meta:
        db_table = 'commodities_trade_partner'
        ordering = ['id']


class Commodity(models.Model):
    """A tradable good, optionally linked to a default trade partner."""
    name = models.CharField(max_length=100)
    description = models.CharField(max_length=512, default='', blank=True)
    # SET_NULL keeps the commodity row when its partner is deleted.
    trade_partner = models.ForeignKey(TradePartner, null=True, on_delete=models.SET_NULL)

    class Meta:
        db_table = 'commodities_commodity'
        ordering = ['id']


class Inventory(models.Model):
    """One stock movement (shipment or receipt) of a commodity."""

    class Type(models.IntegerChoices):
        SHIPPING = 1
        RECEIVING = 2

    type = models.IntegerField(choices=Type.choices)
    quantity = models.PositiveIntegerField()
    commodity = models.ForeignKey(Commodity, on_delete=models.CASCADE)
    trade_partner = models.ForeignKey(TradePartner, null=True, on_delete=models.SET_NULL)

    # Manager combining the reporting helpers with the type-filter queryset.
    objects = InventoryManager.from_queryset(InventoryQuerySet)()

    class Meta:
        db_table = 'commodities_inventory'
        ordering = ['id']


class InventoryHistory(models.Model):
    """Audit log of add/modify/delete actions performed on inventory rows."""

    class Action(models.TextChoices):
        ADD = 'a'
        MODIFY = 'm'
        DELETE = 'd'

    action = models.CharField(choices=Action.choices, max_length=2)
    detail = models.CharField(max_length=512, default='', blank=True)
    type = models.IntegerField(choices=Inventory.Type.choices)
    quantity = models.PositiveIntegerField()
    created_at = models.DateTimeField(auto_now_add=True)
    # db_index=False: the composite (inventory, created_at) index below
    # already covers lookups by inventory.
    inventory = models.ForeignKey(Inventory, db_index=False, null=True, on_delete=models.SET_NULL)
    user = models.ForeignKey(get_user_model(), null=True, on_delete=models.SET_NULL)

    class Meta:
        db_table = 'commodities_inventory_history'
        ordering = ['id']
        indexes = [
            models.Index(fields=['created_at']),
            models.Index(fields=['inventory', 'created_at']),
        ]
| StarcoderdataPython |
170412 | """
Tests for dit.rate_distortion.
"""
| StarcoderdataPython |
40654 | <reponame>dastra/hargreaves-sdk-python
import logging
from requests_tracker.session import WebSessionFactory
from requests_tracker.storage import ICookieStorage
from ..config.models import ApiConfiguration
from ..utils.cookies import HLCookieHelper
from ..session.shared import LoggedInSession
logging.getLogger(__name__).addHandler(logging.NullHandler())
def create_session(
        cookies_storage: ICookieStorage,
        config: ApiConfiguration,
        retry_count: int = 1,
        timeout: float = 15.00):
    """
    Creates a WebSession that will automatically handle login redirects.

    :param cookies_storage: persistent cookie backend for the web session
    :param config: API configuration holding the credentials
    :param retry_count: number of retries for a failed request
    :param timeout: per-request timeout in seconds
    :return: a LoggedInSession wrapping the configured web session
    """
    # Credential values are registered as sensitive so the request tracker
    # can redact them from any recorded traffic.
    web_session = WebSessionFactory.create(
        cookies_storage,
        default_referer='https://online.hl.co.uk/',
        sensitive_values=[config.username, config.password,
                          config.secure_number, config.date_of_birth],
        sensitive_params=['secure-number['],
        retry_count=retry_count,
        timeout=timeout
    )
    # Seed the cookie jar with the cookies the site expects on first contact.
    HLCookieHelper.set_default_cookies(web_session.cookies)
    return LoggedInSession(
        web_session=web_session,
        config=config)
| StarcoderdataPython |
3268334 | <reponame>cosmodesi/desi-dlas
""" Code to build/load/write DESI Training sets"""
'''
1. Load up the Sightlines
2. Split into samples of kernel length
3. Grab DLAs and non-DLA samples
4. Hold in memory or write to disk??
5. Convert to TF Dataset
'''
import itertools
import numpy as np
from desidlas.dla_cnn.spectra_utils import get_lam_data
from desidlas.dla_cnn.defs import REST_RANGE,kernel,best_v
def pad_sightline(sightline, lam, lam_rest, ix_dla_range, kernelrangepx, v=best_v['b']):
    """
    Pad the ends of a sightline's flux so every pixel in the DLA search
    range has a full kernel-half-width of data on both sides.

    Padded wavelengths continue the log-uniform grid implied by pixel
    velocity ``v``; padded flux values are the mean of the nearest 50 real
    pixels.

    Parameters
    ----------
    sightline: dla_cnn.data_model.Sightline
    lam: np.ndarray
        Observed-frame wavelengths.
    lam_rest: np.ndarray
        Rest-frame wavelengths (unused here; kept for interface symmetry).
    ix_dla_range: np.ndarray
        Boolean mask of indices where to search for the DLA.
    kernelrangepx: int
        Half of the kernel width, in pixels.
    v: float
        Pixel velocity spacing; defaults to the best v for the b band.

    Returns
    -------
    flux_padded: np.ndarray
        Flux after padding.
    lam_padded: np.ndarray
        Wavelengths after padding.
    pixel_num_left: int
        Number of pixels prepended on the left (0 when none needed).
    """
    c = 2.9979246e8  # speed of light, m/s
    dlnlambda = np.log(1+v/c)  # constant log-wavelength step per pixel
    # pad left side: needed when the first searched pixel sits closer to the
    # spectrum start than half a kernel.
    if np.nonzero(ix_dla_range)[0][0]<kernelrangepx:
        pixel_num_left=kernelrangepx-np.nonzero(ix_dla_range)[0][0]
        pad_lam_left= lam[0]*np.exp(dlnlambda*np.array(range(-pixel_num_left,0)))
        pad_value_left = np.mean(sightline.flux[0:50])
    else:
        pixel_num_left=0
        pad_lam_left=[]
        pad_value_left=[]
    # pad right side: mirror logic at the red end.
    # NOTE(review): right-side wavelengths extrapolate from lam[0], which
    # assumes the whole grid is exactly log-uniform at step dlnlambda --
    # confirm against how lam is constructed upstream.
    if np.nonzero(ix_dla_range)[0][-1]>len(lam)-kernelrangepx:
        pixel_num_right=kernelrangepx-(len(lam)-np.nonzero(ix_dla_range)[0][-1])
        pad_lam_right= lam[0]*np.exp(dlnlambda*np.array(range(len(lam),len(lam)+pixel_num_right)))
        pad_value_right = np.mean(sightline.flux[-50:])
    else:
        pixel_num_right=0
        pad_lam_right=[]
        pad_value_right=[]
    # Empty Python lists concatenate to nothing when no padding was needed.
    flux_padded = np.hstack((pad_lam_left*0+pad_value_left, sightline.flux,pad_lam_right*0+pad_value_right))
    lam_padded = np.hstack((pad_lam_left,lam,pad_lam_right))
    return flux_padded,lam_padded,pixel_num_left
def split_sightline_into_samples(sightline, REST_RANGE=REST_RANGE, kernel=kernel):
    """
    Split the sightline into snippets: one kernel-wide window per pixel in
    the DLA search range, each centered on its pixel.

    Parameters
    ----------
    sightline: dla_cnn.data_model.Sightline
    REST_RANGE: list
        Rest-frame wavelength range searched for DLAs.
    kernel: int, optional
        Window width in pixels.

    Returns
    -------
    tuple
        (fluxes_matrix, classification, offsets, column_density,
        lam_matrix, input_lam, input_flux) -- the two matrices hold one
        kernel-wide row per searched pixel.
    """
    lam, lam_rest, ix_dla_range = get_lam_data(sightline.loglam, sightline.z_qso, REST_RANGE)
    kernelrangepx = int(kernel/2)  # half window, e.g. 200 for kernel=400
    # padding the sightline so edge pixels still get a full window:
    flux_padded,lam_padded,pixel_num_left=pad_sightline(sightline,lam,lam_rest,ix_dla_range,kernelrangepx,v=best_v['b'])
    # One kernel-wide slice per searched pixel; pixel indices are shifted by
    # the number of pixels prepended on the left.
    fluxes_matrix = np.vstack(map(lambda x:x[0][x[1]-kernelrangepx:x[1]+kernelrangepx],zip(itertools.repeat(flux_padded), np.nonzero(ix_dla_range)[0]+pixel_num_left)))
    lam_matrix = np.vstack(map(lambda x:x[0][x[1]-kernelrangepx:x[1]+kernelrangepx],zip(itertools.repeat(lam_padded), np.nonzero(ix_dla_range)[0]+pixel_num_left)))
    # using cut will lose side information, so we use padding instead of cutting
    # the wavelength and flux array we input:
    input_lam=lam_padded[np.nonzero(ix_dla_range)[0]+pixel_num_left]
    input_flux=flux_padded[np.nonzero(ix_dla_range)[0]+pixel_num_left]
    return fluxes_matrix, sightline.classification, sightline.offsets, sightline.column_density,lam_matrix,input_lam,input_flux
def select_samples_50p_pos_neg(sightline,kernel=kernel):
    """
    For a given sightline, generate sample indices balanced 50/50 between
    pixels with a DLA (classification == 1) and without (== 0).  Both sides
    are truncated to the size of the smaller one and drawn in random order.

    Parameters
    ----------
    sightline: dla_cnn.data_model.Sightline
        Its ``classification`` array uses 1=DLA; 0=Not; -1=not analyzed.
    kernel: int, optional

    Returns
    -------
    idx: np.ndarray
        Positive indices followed by negative indices.
    """
    # NOTE(review): lam/lam_rest/ix_dla_range and kernelrangepx are computed
    # but never used below -- dead code unless get_lam_data is relied on for
    # validation side effects; confirm before removing.
    lam, lam_rest, ix_dla_range = get_lam_data(sightline.loglam, sightline.z_qso)
    kernelrangepx = int(kernel/2)  # take half length of the kernel
    num_pos = np.sum(sightline.classification==1, dtype=np.float64)  # count of positive samples (with DLAs)
    num_neg = np.sum(sightline.classification==0, dtype=np.float64)  # count of negative samples
    n_samples = int(min(num_pos, num_neg))  # balance: keep the smaller count
    r = np.random.permutation(len(sightline.classification))  # random order over all pixels
    pos_ixs = r[sightline.classification[r]==1][0:n_samples]  # indices of positive samples
    neg_ixs = r[sightline.classification[r]==0][0:n_samples]  # indices of negative samples
    return np.hstack((pos_ixs,neg_ixs))
| StarcoderdataPython |
3342198 | import argparse
import numpy
from keras.models import load_model
import utils
from settings import TEST_DIR
def calculateSSE(exp_results, giv_results):
    """Sum of squared errors between paired sequences.

    Returns False (not an exception) when the sequences differ in length,
    matching the original contract.
    """
    if len(exp_results) != len(giv_results):
        return False
    return sum((expected - given) ** 2
               for expected, given in zip(exp_results, giv_results))
def calculateSST(expected_results, average):
    """Total sum of squares of *expected_results* around *average*."""
    return sum((average - value) ** 2 for value in expected_results)
def calculateSSR(giv_results, average):
    """Regression sum of squares of *giv_results* around *average*."""
    return sum((average - value) ** 2 for value in giv_results)
def load_exptexted_results(path, given_results):
    """Load the Keras model at *path* and predict one value per image.

    NOTE(review): the misspelled public name is kept intentionally --
    callers (main below) reference it as-is.

    :param path: filesystem path of the saved Keras model
    :param given_results: iterable of image file paths to predict on
    :return: list of float predictions, one per image
    """
    model = load_model(path)
    return_list = []
    # NOTE: the loop variable shadows the `path` parameter, which is safe
    # only because the model has already been loaded above.
    for path in given_results:
        image = utils.load_image(path)
        image = numpy.array([image])  # add a batch dimension of 1
        result = float(model.predict(image, batch_size=1))
        return_list.append(result)
    return return_list
def load_given_results():
    """Return (image_paths, recorded_values) from the test driving log."""
    return utils.load_data(TEST_DIR, 'driving_log.csv')
def main(path):
    """Evaluate the model at *path* against the recorded test data.

    Prints SSE, SST, SSR and their ratio.  NOTE(review): the variable named
    "coefficient_of_determination_squared" is SSR/SST, i.e. R^2 itself,
    not its square -- the name overstates the exponent.
    """
    X, y = load_given_results()
    expected_results = load_exptexted_results(path, X)
    avg = numpy.mean(y)

    sse = calculateSSE(expected_results, y)
    sst = calculateSST(y, avg)
    ssr = calculateSSR(expected_results, avg)
    # NOTE(review): divides without guarding sst == 0 (constant y).
    coefficient_of_determination_squared = ssr / sst

    print("sse: " + str(sse))
    print("sst: " + str(sst))
    print("ssr: " + str(ssr))
    print('coef:' + str(coefficient_of_determination_squared))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('model')
    args = parser.parse_args()
    main(path=args.model)
| StarcoderdataPython |
1631690 | # -*- coding: utf-8 -*-
from sst_unittest import *
from sst_unittest_support import *
import os
################################################################################
# Code to support a single instance module initialize, must be called setUp method
module_init = 0
module_sema = threading.Semaphore()
def initializeTestModule_SingleInstance(class_inst):
    """Run the module-wide init (building the Juno assembler) exactly once.

    Guarded by the module semaphore so concurrently running test threads
    cannot race the one-time build; ``module_init`` records completion.
    """
    global module_init
    global module_sema

    module_sema.acquire()
    if module_init != 1:
        # Put your single instance Init Code Here
        class_inst._buildJunoAssembler()
        module_init = 1
    module_sema.release()
################################################################################
class testcase_juno(SSTTestCase):
def initializeClass(self, testName):
super(type(self), self).initializeClass(testName)
# Put test based setup code here. it is called before testing starts
# NOTE: This method is called once for every test
def setUp(self):
super(type(self), self).setUp()
initializeTestModule_SingleInstance(self)
# Put test based setup code here. it is called once before every test
def tearDown(self):
# Put test based teardown code here. it is called once after every test
super(type(self), self).tearDown()
#####
def test_juno_sum(self):
self.juno_test_template("sum")
def test_juno_modulo(self):
self.juno_test_template("modulo")
#####
def juno_test_template(self, testcase):
# Get the path to the test files
test_path = self.get_testsuite_dir()
outdir = self.get_test_output_run_dir()
tmpdir = self.get_test_output_tmp_dir()
juno_assembler_dir = "{0}/../asm/".format(test_path)
juno_assembler = "{0}/sst-juno-asm".format(juno_assembler_dir)
juno_test_asm_dir = "{0}/../test/asm/".format(test_path)
# Set the various file paths
testDataFileName="juno_{0}".format(testcase)
sdlfile = "{0}/../test/sst/juno-test.py".format(test_path, testcase)
reffile = "{0}/refFiles/{1}.out".format(test_path, testDataFileName)
outfile = "{0}/{1}.out".format(outdir, testDataFileName)
errfile = "{0}/{1}.err".format(outdir, testDataFileName)
mpioutfiles = "{0}/{1}.testfile".format(outdir, testDataFileName)
# Assemble the juno program
test_asm_in_file = "{0}/{1}.juno".format(juno_test_asm_dir, testcase)
test_bin_out_file = "{0}/juno_{1}.bin".format(tmpdir, testcase)
test_asm_log_file = "{0}/juno_{1}.log".format(tmpdir, testcase)
cmd = "{0} -i {1} -o {2} >> {3}".format(juno_assembler, test_asm_in_file, test_bin_out_file, test_asm_log_file)
cmd_rtn = os.system(cmd)
asm_success = cmd_rtn == 0
self.assertTrue(asm_success, "Juno Assembler failed to assemble file {0} into {1}; Return Code = {2}".format(test_asm_in_file, test_bin_out_file, cmd_rtn))
os.environ["JUNO_EXE"] = test_bin_out_file
log_debug("JUNO_EXE ENV VAR = {0}".format(os.environ["JUNO_EXE"]))
self.run_sst(sdlfile, outfile, errfile, mpi_out_files=mpioutfiles)
# NOTE: THE PASS / FAIL EVALUATIONS ARE PORTED FROM THE SQE BAMBOO
# BASED testSuite_XXX.sh THESE SHOULD BE RE-EVALUATED BY THE
# DEVELOPER AGAINST THE LATEST VERSION OF SST TO SEE IF THE
# TESTS & RESULT FILES ARE STILL VALID
# Perform the tests
self.assertFalse(os_test_file(errfile, "-s"), "juno test {0} has Non-empty Error File {1}".format(testDataFileName, errfile))
# Only check for Simulation is complete in the out and ref file and make sure they match
out_cmd = 'grep "Simulation is complete" {0}'.format(outfile)
ref_cmd = 'grep "Simulation is complete" {0}'.format(reffile)
out_cmd_rtn = os_simple_command(out_cmd)
ref_cmd_rtn = os_simple_command(ref_cmd)
log_debug("out_cmd_rtn = {0}\n".format(out_cmd_rtn))
log_debug("ref_cmd_rtn = {0}\n".format(ref_cmd_rtn))
cmd_result = out_cmd_rtn[1] == ref_cmd_rtn[1]
self.assertTrue(cmd_result, "Juno Test; Output found line {0} does not match Ref found line {1}".format(out_cmd_rtn[1], ref_cmd_rtn[1]))
####
    def _buildJunoAssembler(self):
        """Build the Juno assembler via its Makefile and assert the build succeeded."""
        # Get the path to the test files; the assembler sources live next to them.
        test_path = self.get_testsuite_dir()
        juno_assembler_dir = "{0}/../asm/".format(test_path)
        # Build the juno assembler; stdout/stderr are discarded, only the
        # make exit code is inspected below.
        cmd = "make -C {0} > /dev/null 2>&1".format(juno_assembler_dir)
        cmd_rtn = os.system(cmd)
        build_success = cmd_rtn == 0
        self.assertTrue(build_success, "Juno Assembler failed to build properly; Makefile rtn = {0}".format(cmd_rtn))
| StarcoderdataPython |
23150 | <reponame>Jiaolong/gcn-parking-slot
"""Universal network struture unit definition."""
import torch
import math
from torch import nn
import torchvision
from torch.utils import model_zoo
from torchvision.models.resnet import BasicBlock, model_urls, Bottleneck
def define_squeeze_unit(basic_channel_size):
    """Build a 1x1 "squeeze" stage that halves the channel count.

    Returns a flat list: Conv2d (2*c -> c, 1x1, no bias), BatchNorm2d, LeakyReLU.
    """
    return [
        nn.Conv2d(2 * basic_channel_size, basic_channel_size,
                  kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(basic_channel_size),
        nn.LeakyReLU(0.1),
    ]
def define_expand_unit(basic_channel_size):
    """Build a 3x3 "expand" stage that doubles the channel count.

    Returns a flat list: Conv2d (c -> 2*c, 3x3, pad 1, no bias),
    BatchNorm2d, LeakyReLU.
    """
    return [
        nn.Conv2d(basic_channel_size, 2 * basic_channel_size,
                  kernel_size=3, stride=1, padding=1, bias=False),
        nn.BatchNorm2d(2 * basic_channel_size),
        nn.LeakyReLU(0.1),
    ]
def define_halve_unit(basic_channel_size):
    """Build a downsampling stage: 4x4 stride-2 conv that doubles channels
    while halving spatial resolution, followed by batch norm and LeakyReLU."""
    return [
        nn.Conv2d(basic_channel_size, 2 * basic_channel_size,
                  kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(2 * basic_channel_size),
        nn.LeakyReLU(0.1),
    ]
def define_depthwise_expand_unit(basic_channel_size):
    """Build a depthwise-separable expand stage.

    A 1x1 pointwise conv doubles the channel count, then a 3x3 depthwise
    conv (groups == channels) mixes spatially; each conv is followed by
    batch norm and LeakyReLU. Returned as a flat module list.
    """
    doubled = 2 * basic_channel_size
    pointwise = [
        nn.Conv2d(basic_channel_size, doubled,
                  kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(doubled),
        nn.LeakyReLU(0.1),
    ]
    depthwise = [
        nn.Conv2d(doubled, doubled, kernel_size=3,
                  stride=1, padding=1, bias=False, groups=doubled),
        nn.BatchNorm2d(doubled),
        nn.LeakyReLU(0.1),
    ]
    return pointwise + depthwise
def define_detector_block(basic_channel_size):
    """Compose one detector unit: a squeeze stage followed by an expand stage,
    returned as a single flat module list."""
    return define_squeeze_unit(basic_channel_size) + define_expand_unit(basic_channel_size)
class YetAnotherDarknet(nn.modules.Module):
    """Yet another darknet, imitating darknet-53 with depth of darknet-19.

    Five stride-2 stages: each halve unit doubles the channel width and
    halves the spatial resolution, so the output feature map has
    ``32 * depth_factor`` channels at 1/32 of the input resolution.
    """
    def __init__(self, input_channel_size, depth_factor):
        super(YetAnotherDarknet, self).__init__()
        layers = []
        # 0: stem conv — keeps resolution, maps input channels -> depth_factor.
        layers += [nn.Conv2d(input_channel_size, depth_factor, kernel_size=3,
                             stride=1, padding=1, bias=False)]
        layers += [nn.BatchNorm2d(depth_factor)]
        layers += [nn.LeakyReLU(0.1)]
        # 1: downsample 2x, channels -> 2*depth_factor, plus one detector block.
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        # 2: depth_factor tracks the current channel width from here on.
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        # 3: two detector blocks at this scale.
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        layers += define_detector_block(depth_factor)
        # 4
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        layers += define_detector_block(depth_factor)
        # 5
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        self.model = nn.Sequential(*layers)
    def forward(self, x):
        # Single sequential pass; no skip connections.
        return self.model(x)
# vgg backbone
class VGG(nn.Module):
    """VGG-style feature extractor (convolutional stack only, no classifier).

    NOTE(review): ``num_classes`` is accepted but never used — presumably kept
    for signature compatibility with torchvision's VGG; confirm.
    """
    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        if init_weights:
            self._initialize_weights()
    def forward(self, x):
        # Returns the raw feature map from the convolutional stack.
        x = self.features(x)
        return x
    def _initialize_weights(self):
        """He-style init for convs, unit scale / zero shift for batch norm,
        small normal for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He initialization: std = sqrt(2 / fan_out).
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
# VGG layer configurations keyed by variant letter: integers are conv output
# channel counts, 'M' is a 2x2 max-pool.
# NOTE(review): variant 'D' ends with a 1024-channel conv, deviating from the
# canonical VGG-16 "D" configuration (512) — confirm this is intentional.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 1024, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an ``nn.Sequential`` feature stack.

    Each integer entry becomes a 3x3 conv (optionally followed by batch
    norm) plus an in-place ReLU; each 'M' entry becomes a 2x2 max-pool.
    Input is assumed to have 3 channels.
    """
    modules = []
    channels = 3
    for entry in cfg:
        if entry == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, entry, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(entry))
        modules.append(nn.ReLU(inplace=True))
        channels = entry
    return nn.Sequential(*modules)
def vgg16(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights supersede random init.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['D']), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
class ResNet18(nn.Module):
    """ResNet-18-style backbone returning the final feature map (no pooling/fc head).

    NOTE(review): ``aux_classes``, ``classes`` and ``domains`` are accepted but
    never used — presumably kept for call-site compatibility; confirm.
    """
    def __init__(self, block, layers, aux_classes=1000, classes=100, domains=3):
        self.inplanes = 64
        super(ResNet18, self).__init__()
        # Standard ResNet stem: 7x7 stride-2 conv + BN + ReLU + 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # NOTE(review): 1024 deviates from the canonical ResNet-18 width (512)
        # — confirm this widening is intentional.
        self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)#resnet 18
        # Kaiming init for convs; BN scale 1, shift 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample via a
        1x1 projection when stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def is_patch_based(self):
        # This backbone consumes whole images, not patches.
        return False
    def forward(self, x, **kwargs):
        """Return the layer4 feature map for input batch ``x``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet18(BasicBlock, [2, 2, 2, 2], **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected checkpoint keys.
    model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
    return model
class ResNet50(nn.Module):
    """ResNet-50-style backbone returning the final feature map (no pooling/fc head).

    NOTE(review): ``aux_classes``, ``classes`` and ``domains`` are accepted but
    never used — presumably kept for call-site compatibility; confirm.
    """
    def __init__(self, block, layers, aux_classes=1000, classes=100, domains=3):
        self.inplanes = 64
        super(ResNet50, self).__init__()
        # Standard ResNet stem: 7x7 stride-2 conv + BN + ReLU + 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # NOTE(review): 256 deviates from the canonical ResNet-50 width (512
        # planes, 2048 output channels after Bottleneck expansion) — confirm
        # this narrowing is intentional.
        self.layer4 = self._make_layer(block, 256, layers[3], stride=2) #resnet50
        # Kaiming init for convs; BN scale 1, shift 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample via a
        1x1 projection when stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def is_patch_based(self):
        # This backbone consumes whole images, not patches.
        return False
    def forward(self, x, **kwargs):
        """Return the layer4 feature map for input batch ``x``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet50(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected checkpoint keys.
    model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
    return model
| StarcoderdataPython |
class Solution(object):
    """Order-preserving duplicate removal (LeetCode-style helper)."""

    def removeDuplicates(self, nums):
        """Remove duplicate values from ``nums`` in place, keeping the first
        occurrence of each value in its original order.

        :type nums: List[int]
        :rtype: List[int] -- the same (now deduplicated) list object

        The original implementation removed elements from ``nums`` while
        iterating over it, which skips items and leaves duplicates behind
        (its ``i in nums[index:]`` membership test was also always true).
        Building the result first avoids both problems.
        """
        seen = set()
        deduped = []
        for value in nums:
            if value not in seen:
                seen.add(value)
                deduped.append(value)
        # Slice-assign so callers holding a reference to the list see the change.
        nums[:] = deduped
        return nums
# Quick manual check. The original used the Python 2 print *statement*,
# which is a syntax error under Python 3; print() works on both.
abcd = Solution()
print(abcd.removeDuplicates([1, 1, 2]))
| StarcoderdataPython |
172885 | # Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
from unittest import mock
from requests.exceptions import RequestException
from django.test import override_settings
from promgen import models, rest, tests
from promgen.notification.webhook import NotificationWebhook
class WebhookTest(tests.PromgenTest):
    """Exercises the webhook notification sender: fan-out, filtering, and
    error accounting when the HTTP POST fails."""
    fixtures = ["testcases.yaml"]
    # Signal.send is patched so model creation side effects don't fire.
    @mock.patch("django.dispatch.dispatcher.Signal.send")
    def setUp(self, mock_signal):
        """Attach one webhook sender to a Project and one to a Service."""
        one = models.Project.objects.get(pk=1)
        two = models.Service.objects.get(pk=1)
        self.senderA = NotificationWebhook.create(
            obj=one, value="http://webhook.example.com/project"
        )
        self.senderB = NotificationWebhook.create(
            obj=two, value="http://webhook.example.com/service"
        )
    @override_settings(PROMGEN=tests.SETTINGS)
    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)
    @override_settings(CELERY_TASK_EAGER_PROPAGATES=True)
    @mock.patch("promgen.util.post")
    def test_webhook(self, mock_post):
        """One fired alert should POST to both configured webhook URLs."""
        response = self.fireAlert()
        self.assertRoute(response, rest.AlertReceiver, 202)
        self.assertCount(models.AlertError, 0, "No failed alerts")
        self.assertCount(models.Alert, 1, "Alert should be queued")
        self.assertEqual(mock_post.call_count, 2, "Two alerts should be sent")
        # Our sample is the same as the original, with some annotations added
        _SAMPLE = tests.Data("notification", "webhook.json").json()
        # External URL is depended on test order
        _SAMPLE["externalURL"] = mock.ANY
        mock_post.assert_has_calls(
            [
                mock.call("http://webhook.example.com/project", json=_SAMPLE),
                mock.call("http://webhook.example.com/service", json=_SAMPLE),
            ],
            any_order=True,
        )
    @override_settings(PROMGEN=tests.SETTINGS)
    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)
    @override_settings(CELERY_TASK_EAGER_PROPAGATES=True)
    @mock.patch("promgen.util.post")
    def test_filter(self, mock_post):
        """Severity filters should suppress the sender whose filter doesn't match."""
        # Our first sender will only allow critical messages
        self.senderA.filter_set.create(name="severity", value="critical")
        # Our second sender allows critical and major
        self.senderB.filter_set.create(name="severity", value="critical")
        self.senderB.filter_set.create(name="severity", value="major")
        self.assertCount(models.Filter, 3, "Should be three filters")
        response = self.fireAlert()
        self.assertRoute(response, rest.AlertReceiver, 202)
        self.assertCount(models.AlertError, 0, "No failed alerts")
        self.assertCount(models.Alert, 1, "Alert should be queued")
        self.assertEqual(mock_post.call_count, 1, "One notification should be skipped")
    @override_settings(PROMGEN=tests.SETTINGS)
    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)
    @override_settings(CELERY_TASK_EAGER_PROPAGATES=True)
    @mock.patch("promgen.util.post")
    def test_failure(self, mock_post):
        """A failing POST should log one AlertError per sender and bump error_count."""
        # When our post results in a failure, then our error_count should be
        # properly updated and some errors should be logged to be viewed later
        mock_post.side_effect = RequestException("Boom!")
        response = self.fireAlert()
        self.assertRoute(response, rest.AlertReceiver, 202)
        self.assertCount(models.Alert, 1, "Alert should be queued")
        self.assertCount(models.AlertError, 2, "Two errors should be logged")
        self.assertEqual(mock_post.call_count, 2, "Two posts should be attempted")
        alert = models.Alert.objects.first()
        self.assertEqual(alert.sent_count, 0, "No successful sent")
        self.assertEqual(alert.error_count, 2, "Error incremented")
| StarcoderdataPython |
4806091 | <reponame>allenai/relation_extraction
import json
from sklearn.metrics import precision_recall_curve
from scipy.interpolate import spline
import matplotlib.pyplot as plt
# Load precomputed (recall, precision) point lists per method.
with open('scripts/PR_curves.json') as f:
    x = json.load(f)
# One step curve per method; the call order here must match the legend
# entry order below.
plt.step(x['belagy_et_al_best'][0], x['belagy_et_al_best'][1], where='post')
plt.step(x['belagy_et_al_baseline'][0], x['belagy_et_al_baseline'][1], where='post')
plt.step(x['reside'][0], x['reside'][1], where='post')
plt.step(x['lin_et_al'][0], x['lin_et_al'][1], where='post')
plt.grid( linestyle='dashed', linewidth=0.5)
plt.legend(['This work',
            'Baseline',
            'RESIDE (Vashishth et al., 2018)',
            'PCNN+ATT (Lin et al., 2016)',
            ])
plt.xlabel('recall')
plt.ylabel('precision')
# Zoom to the region of interest.
plt.ylim([0.4, 1])
plt.xlim([0, 0.4])
# NOTE(review): the figure is never shown or saved here — confirm whether a
# plt.show()/savefig() call was intended to follow.
| StarcoderdataPython |
1780476 | <gh_stars>0
from mesh.generic.commandMsg import CommandMsg
from mesh.generic.command import Command
from mesh.generic.cmds import NodeCmds, PixhawkCmds, TDMACmds
from mesh.generic.cmdDict import CmdDict
from struct import calcsize
from mesh.generic.nodeHeader import headers
from unittests.testCmds import testCmds
# Command IDs exercised by the serialization tests below, drawn from the
# NodeCmds, PixhawkCmds, and TDMACmds tables.
cmdsToTest = [NodeCmds['GCSCmd'], NodeCmds['ConfigRequest'], NodeCmds['ParamUpdate'], PixhawkCmds['FormationCmd'],
    PixhawkCmds['NodeStateUpdate'], PixhawkCmds['PosCmd'], PixhawkCmds['StateUpdate'], PixhawkCmds['TargetUpdate'],
    TDMACmds['MeshStatus'], TDMACmds['TimeOffset'], TDMACmds['TimeOffsetSummary']]
class TestCmdDict:
    """Tests all command dictionaries."""
    def setup_method(self, method):
        """Create a reference command id, payload, and header for each test."""
        self.cmdId = NodeCmds['GCSCmd'] # test command
        self.cmdData = {'cmd': CommandMsg(self.cmdId, [5, 3])}
        self.header = [NodeCmds['GCSCmd'], 0, 10] # NodeHeader
        pass  # NOTE(review): trailing pass is redundant
    def test_init(self):
        """Test creation of Command instance."""
        # Test creation without header
        command = Command(self.cmdId, self.cmdData)
        assert(command.cmdId == self.cmdId)
        assert(command.cmdData == self.cmdData)
        # Test creation with header
        command = Command(self.cmdId, self.cmdData, self.header)
        assert(isinstance(command.header, dict) == True) # header created as dict
        assert(len(command.header['header']) == len(self.header)) # header has same number of entries as provided header data
    def test_serialize(self):
        """Test serialization of all NodeCmds."""
        # Serialized output of each command must match the truth data in testCmds.
        for cmdId in cmdsToTest:
            print("Testing serializing:", cmdId)
            serMsg = CmdDict[cmdId].serialize(testCmds[cmdId].cmdData, 0)
            print(testCmds[cmdId].body,serMsg)
            assert(serMsg == testCmds[cmdId].body) # check that output matches truth command
| StarcoderdataPython |
66209 | <filename>authentik/events/migrations/0002_auto_20200918_2116.py<gh_stars>10-100
# Generated by Django 3.1.1 on 2020-09-18 21:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefines the choice list of ``Event.action``.

    NOTE(review): migration contents are historical data and must not be
    edited retroactively; the "<PASSWORD>" token looks like a redaction
    artifact in this snapshot — confirm against the original repository.
    """

    dependencies = [
        ("authentik_events", "0001_initial"),
    ]

    operations = [
        migrations.AlterField(
            model_name="event",
            name="action",
            field=models.TextField(
                choices=[
                    ("LOGIN", "login"),
                    ("LOGIN_FAILED", "login_failed"),
                    ("LOGOUT", "logout"),
                    ("AUTHORIZE_APPLICATION", "authorize_application"),
                    ("SUSPICIOUS_REQUEST", "suspicious_request"),
                    ("SIGN_UP", "sign_up"),
                    ("PASSWORD_RESET", "<PASSWORD>"),
                    ("INVITE_CREATED", "invitation_created"),
                    ("INVITE_USED", "invitation_used"),
                    ("IMPERSONATION_STARTED", "impersonation_started"),
                    ("IMPERSONATION_ENDED", "impersonation_ended"),
                    ("CUSTOM", "custom"),
                ]
            ),
        ),
    ]
| StarcoderdataPython |
157273 | import numpy as np
import numba as nb
from pymcx import MCX
def create_props(spec, wavelen):
    """Build the optical-property table for one wavelength.

    Args:
        spec: simulation spec dict with keys 'layers', 'layer_properties',
            'extinction_coeffs', and optionally 'n_external'.
        wavelen: wavelength, in the same units as the extinction tables.

    Returns:
        media: float32 array of shape (1 + n_layers, 4) holding
            (mua, mus, g, n) per medium; row 0 is the external medium.
        BFi: stacked blood-flow-index values, one entry per layer.
    """
    layers = spec['layers']
    lprops = spec['layer_properties']
    # Interpolate each chromophore's extinction coefficient at this wavelength.
    ext_coeff = {k: np.interp(wavelen, *itr) for k, itr in spec['extinction_coeffs'].items()}
    media = np.empty((1 + len(layers), 4), np.float32)
    media[0] = 0, 0, 1, spec.get('n_external', 1)
    for i, l in enumerate(layers):
        lp = lprops[l]
        g = lp['g']
        # Absorption: chromophore concentrations weighted by extinction coeffs.
        mua = sum(ext_coeff[k] * lp['components'][k] for k in ext_coeff)
        # Scattering: power-law fit of mus', converted to mus via (1 - g).
        mus = lp['Scatter A'] * wavelen ** -lp['Scatter b'] / (1 - g)
        media[1 + i] = mua, mus, g, lp['n']
    # BUG FIX: np.stack requires a sequence of arrays; passing a generator
    # raises TypeError on modern NumPy. Use a list comprehension.
    return media, np.stack([lprops[l]['BFi'] for l in layers])
@nb.jit(nopython=True, nogil=True, parallel=False)
def analysis(detp, prop, tof_domain, tau, wavelength, BFi, freq, ndet, ntof, nmedia, pcounts, paths, phiTD, phiFD, g1_top, phiDist):
    """Accumulate per-detector photon statistics from one MCX run.

    Numba-compiled (nopython); the trailing arrays (pcounts, paths, phiTD,
    phiFD, g1_top, phiDist) are accumulators mutated in place so results
    can be summed across runs.  ``detp`` row layout (assumed from the
    indexing below — confirm against MCX output): row 0 = detector id
    (1-based), rows 2..2+nmedia = per-medium partial path lengths, the next
    nmedia rows = momentum-transfer terms.
    """
    c = 2.998e+11 # speed of light in mm / s
    # Detector ids are 1-based in detp; shift to 0-based bins.
    detBins = detp[0].astype(np.intc) - 1
    # Time-of-flight bin from total optical path (refractive-index weighted).
    tofBins = np.minimum(np.digitize(prop[1:, 3] @ detp[2:(2+nmedia)], c * tof_domain), ntof) - 1
    # Per-medium path-length bins for the distribution histogram.
    distBins = np.minimum(np.digitize(prop[1:, 3] * detp[2:(2+nmedia)].T, c * tof_domain), ntof) - 1
    # Beer-Lambert attenuation exponent from per-medium absorption.
    path = -prop[1:, 0] @ detp[2:(2+nmedia)]
    phis = np.exp(path)
    # Frequency-domain weight: absorption plus modulation phase term.
    fds = np.exp((-prop[1:, 0] + 2j * np.pi * freq * prop[1:, 3] / c).astype(np.complex64) @ detp[2:(2+nmedia)].astype(np.complex64))
    # DCS prefactor combining wavenumber and blood-flow index per medium.
    prep = (-2*(2*np.pi*prop[1:, 3]/(wavelength*1e-6))**2*BFi).astype(np.float32) @ detp[(2+nmedia):(2+2*nmedia)]
    for i in range(len(detBins)):
        pcounts[detBins[i], tofBins[i]] += 1
        paths[detBins[i], tofBins[i]] += detp[2:(2+nmedia), i]
        phiTD[detBins[i], tofBins[i]] += phis[i]
        phiFD[detBins[i]] += fds[i]
        for l in range(nmedia):
            phiDist[detBins[i], distBins[i, l], l] += phis[i]
        for j in range(len(tau)):
            g1_top[detBins[i], j] += np.exp(prep[i] * tau[j] + path[i])
def simulate(spec, wavelength):
    """Run the MCX Monte-Carlo simulation at one wavelength and aggregate results.

    Args:
        spec: spec dict holding an MCX config under 'mcx' plus the optical
            parameters consumed by create_props(). Optional keys:
            'run_count', 'seeds', 'tof_domain', 'tau', 'frequency', 'slice'.
        wavelength: wavelength forwarded to create_props()/analysis().

    Returns:
        dict with per-detector photon counts ('Photons'), mean partial paths
        ('Paths'), time/frequency-domain fluence ('PhiTD'/'PhiFD'), path
        distributions ('PhiDist'), the seeds used, the run-averaged fluence
        slice ('Slice'), and the normalized g1 autocorrelation ('g1').
    """
    cfg = spec['mcx']
    cfg.ismomentum = True
    cfg.prop, BFi = create_props(spec, wavelength)
    run_count = spec.get('run_count', 1)
    # One RNG seed per run; random seeds are drawn when none are supplied.
    seeds = np.asarray(spec.get('seeds', np.random.randint(0xFFFF, size=run_count)))
    tof_domain = spec.get('tof_domain', np.append(np.arange(cfg.tstart, cfg.tend, cfg.tstep), cfg.tend))
    tau = spec.get('tau', np.logspace(-8, -2))
    freq = spec.get('frequency', 110e6)
    ndet, ntof, nmedia = len(cfg.detpos), len(tof_domain) - 1, len(cfg.prop) - 1
    # Accumulators shared across runs; analysis() adds into them in place.
    phiTD = np.zeros((ndet, ntof), np.float64)
    phiFD = np.zeros(ndet, np.complex128)
    paths = np.zeros((ndet, ntof, nmedia), np.float64)
    pcounts = np.zeros((ndet, ntof), np.int64)
    g1_top = np.zeros((ndet, len(tau)), np.float64)
    phiDist = np.zeros((ndet, ntof, nmedia), np.float64)
    fslice = 0
    for seed in seeds:
        cfg.seed = int(seed)
        result = cfg.run(2)
        detp = result["detphoton"]
        # Hitting the detected-photon cap means some photons were dropped.
        if detp.shape[1] >= cfg.maxdetphoton:
            raise Exception("Too many photons detected: {}".format(detp.shape[1]))
        analysis(detp, cfg.prop, tof_domain, tau, wavelength, BFi, freq, ndet, ntof, nmedia, pcounts, paths, phiTD, phiFD, g1_top, phiDist)
        fslice += result["fluence"][spec['slice']]
        # Free the (potentially large) per-run buffers before the next run.
        del detp
        del result
    fslice /= run_count
    # Convert path sums to means and normalize g1/phiDist by total fluence.
    paths /= pcounts[:, :, np.newaxis]
    g1 = g1_top / np.sum(phiTD, axis=1)[:, np.newaxis]
    phiDist /= np.sum(phiTD, axis=1)[:, np.newaxis, np.newaxis]
    return {'Photons': pcounts, 'Paths': paths, 'PhiTD': phiTD, 'PhiFD': phiFD, 'PhiDist': phiDist, 'Seeds': seeds, 'Slice': fslice, 'g1': g1}
| StarcoderdataPython |
6080 | # Copyright 2020 XAMES3. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""
vdoXA is an open-source python package for trimming the videos.
It is built as a subsystem for < XXXXX Not to be named XXXXX > project.
Originally inspired by my colleague's work, I thought of improving the
concept and build a tool to simplify the process. I hope it comes with
strong support for continuous updates, reliable functions and overall
ease of use.
Read complete documentation at: <https://github.com/xames3/vdoxa>.
"""
from setuptools import find_packages, setup
from vdoxa.vars import dev
doclines = __doc__.split('\n')
def use_readme() -> str:
    """Return the contents of ``README.md`` for use as the long description."""
    # Explicit UTF-8 avoids locale-dependent decode errors on some platforms.
    with open('README.md', encoding='utf-8') as file:
        return file.read()
# Read runtime dependencies, one requirement per line.
with open('requirements.txt', 'r') as requirements:
    required_packages = [package.rstrip() for package in requirements]

# Package metadata is sourced from vdoxa.vars.dev; the short description is
# the second line of this module's docstring.
setup(
    name=dev.PROJECT_NAME,
    version=dev.PROJECT_VERSION,
    url=dev.PROJECT_LINK,
    download_url=dev.PROJECT_LINK,
    author=dev.AUTHOR,
    author_email=dev.AUTHOR_EMAIL,
    maintainer=dev.AUTHOR,
    maintainer_email=dev.AUTHOR_EMAIL,
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
    ],
    license=dev.PROJECT_LICENSE,
    description=f'{doclines[1]}',
    long_description=use_readme(),
    long_description_content_type='text/markdown',
    keywords='opencv2 cv2 moviepy',
    zip_safe=False,
    install_requires=required_packages,
    python_requires='~=3.6',
    include_package_data=True,
    packages=find_packages(),
    # Installs the `vdoxa` console command.
    entry_points={
        'console_scripts': [
            'vdoxa = vdoxa.parser:main',
        ],
    }
)
| StarcoderdataPython |
82711 | # @Time : 8/20/21 18:00 PM
# @Author : <NAME>
# @Affiliation : Nanyang Technological University
# @Email : <EMAIL>
# @File : download_and_extract_noise_file.py
"""
dataset:
MUSAN noise subdataset
Usage:
python download_and_extract_noise_file.py \
--data_root <absolute path to where the data should be stored>
"""
import os
import shutil
import argparse
import logging
import re
import tarfile
import urllib.request
def __maybe_download_file(destination: str, source: str):
    """
    Download *source* to *destination* unless the file already exists.

    The payload is first written to ``destination + '.tmp'`` and renamed into
    place afterwards, so an interrupted download never leaves a partial file
    at the final path.

    Args:
        destination: local filepath
        source: url of resource
    Returns:
        the destination path
    """
    if os.path.exists(destination):
        logging.info(f"Destination {destination} exists. Skipping.")
        return destination
    logging.info(f"{destination} does not exist. Downloading ...")
    tmp_path = destination + '.tmp'
    urllib.request.urlretrieve(source, filename=tmp_path)
    os.rename(tmp_path, destination)
    logging.info(f"Downloaded {destination}.")
    return destination
def __extract_all_files(filepath: str, data_root: str, data_dir: str):
    # Thin wrapper around extract_file(); ``data_root`` is accepted for
    # call-site symmetry but unused.
    extract_file(filepath, data_dir)
def extract_file(filepath: str, data_dir: str):
    """Extract the tar archive at *filepath* into *data_dir*.

    Errors are logged and swallowed, matching the original best-effort
    behaviour (the archive may already have been extracted).
    """
    try:
        # Context manager guarantees the archive handle is closed even if
        # extractall() raises (the original leaked the handle on error).
        with tarfile.open(filepath) as tar:
            tar.extractall(data_dir)
    except Exception:
        logging.info('Not extracting. Maybe already there?')
def main():
    """Download the MUSAN corpus and build a flat, numbered noise-wav directory.

    Side effects (all under ``--data_root``):
      * downloads ``MUSAN.tar.gz`` (skipped when present),
      * extracts it to ``<data_root>/musan`` (skipped when present),
      * copies every ``.wav`` from the ``free-sound`` and ``sound-bible``
        noise subsets into ``<data_root>/noise``,
      * renames the copies to sequential names ``1.wav``, ``2.wav``, ...
    """
    parser = argparse.ArgumentParser(description='MUSAN noise download')
    parser.add_argument("--data_root", required=True, default='/notebooks/data_processing', type=str)
    args = parser.parse_args()
    data_root = args.data_root
    data_set = 'MUSAN'
    URL = 'https://www.openslr.org/resources/17/musan.tar.gz'
    # Download the noise dataset archive (no-op if already downloaded).
    file_path = os.path.join(data_root, data_set + ".tar.gz")
    __maybe_download_file(file_path, URL)
    # Extract all the noise files (no-op if the corpus is already extracted).
    if not os.path.exists(data_root + '/musan'):
        __extract_all_files(file_path, data_root, data_root)
    download_musan_noise_dir = data_root + '/musan/noise/'
    noise_dir = data_root + '/noise/'
    if not os.path.exists(noise_dir):
        os.mkdir(noise_dir)
    os.chdir(download_musan_noise_dir)
    # Copy the .wav files from both noise subsets into the flat noise dir.
    # (The original duplicated this loop verbatim once per subset.)
    for subset in ('free-sound', 'sound-bible'):
        subset_path = './' + subset + '/'
        for filename in os.listdir(subset_path):
            if os.path.splitext(filename)[1] == '.wav':
                shutil.copyfile(subset_path + filename, noise_dir + filename)
    # Rename every collected file to a sequential numeric name.
    os.chdir(noise_dir)
    step = 1
    for _path, _dirs, file_list in os.walk(r"./"):
        for file in file_list:
            os.rename('./' + file, './' + str(step) + '.wav')
            step = step + 1
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1711332 | <filename>design/water_channel_mechanics/src/water_channel_mechanics/slider_mount_plate.py
"""
Copyright 2010 IO Rodeo Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import roslib
roslib.load_manifest('water_channel_mechanics')
import rospy
import math
import copy
import cad.csg_objects as csg
import cad.finite_solid_objects as fso
import cad.pattern_objects as po
import cad_library.origin as origin
import cad.export.bom as bom
import airbearing
# Default build parameters for the slider mount plate.
SLIDER_MOUNT_PLATE_PARAMETERS = {
    'bearing_type': 'RAB6',      # air-bearing model passed to airbearing.RAB
    'slide_travel': 4,           # slide travel passed to airbearing.RAB
    'color': [0.7, 0.7, 0.7, 1.0],  # RGBA render color
    'x': 1,                      # plate length
    'z': 0.125,                  # plate thickness
    'hole_diameter': 0.26,       # mounting-hole diameter
    'show_origin': False,        # draw an origin marker when True
}


def get_parameters():
    """Return an independent deep copy of the default plate parameters."""
    return copy.deepcopy(SLIDER_MOUNT_PLATE_PARAMETERS)
class SliderMountPlate(csg.Difference):
    """CSG model of the plate mounting t-slotted beams to the air-bearing slider:
    a box minus two mounting holes matched to the bearing's screw spacing."""
    def __init__(self):
        super(SliderMountPlate, self).__init__()
        # NOTE(review): this aliases the module-level dict (no copy), and
        # __make_slider_mount_plate() writes 'y' into it — so the shared
        # defaults are mutated on first construction; confirm intentional.
        self.parameters = SLIDER_MOUNT_PLATE_PARAMETERS
        # Bearing dimensions drive the plate width and hole spacing.
        ab = airbearing.RAB(bearing_type=self.parameters['bearing_type'],slide_travel=self.parameters['slide_travel'])
        self.ab_parameters = ab.get_parameters()
        self.__make_slider_mount_plate()
        self.__make_holes()
        self.__set_bom()
        self.__make_origin()
        self.set_color(self.parameters['color'],recursive=True)
    def get_parameters(self):
        """Return a deep copy of this instance's parameter dict."""
        return copy.deepcopy(self.parameters)
    def __make_slider_mount_plate(self):
        """Add the base plate box; width comes from the bearing slide width."""
        x = self.parameters['x']
        y = self.ab_parameters['slide_width']
        self.parameters['y'] = y
        z = self.parameters['z']
        smp = fso.Box(x=x,y=y,z=z)
        self.add_obj(smp)
    def __make_holes(self):
        """Add the two through-holes, spaced by the bearing screw pattern."""
        # Hole length is 2x plate thickness so the cut fully clears both faces.
        hole = fso.Cylinder(r=self.parameters['hole_diameter']/2,l=self.parameters['z']*2)
        hole_ty = self.ab_parameters['slide_screw_dW']/2
        holes = po.LinearArray(hole,x=0,y=[-hole_ty,hole_ty],z=0)
        self.add_obj(holes)
    def __set_bom(self):
        """Attach bill-of-materials metadata (scaled physical dimensions)."""
        scale = self.get_scale()
        BOM = bom.BOMObject()
        BOM.set_parameter('name','slider_mount_plate')
        BOM.set_parameter('description','Mounts t_slotted beams to air bearing slider')
        BOM.set_parameter('dimensions','x: {x:0.3f}, y: {y:0.3f}, z: {z:0.3f}'.format(x=self.parameters['x']*scale[0],y=self.parameters['y']*scale[1],z=self.parameters['z']*scale[2]))
        BOM.set_parameter('vendor','?')
        BOM.set_parameter('part number','?')
        self.set_object_parameter('bom',BOM)
    def __make_origin(self):
        """Optionally add an origin marker for visual debugging."""
        o = origin.Origin(mag=10)
        if self.parameters['show_origin']:
            self.add_obj(o)
if __name__ == "__main__":
    # Build the plate and export it (sliced) when run as a script.
    slider_mount_plate = SliderMountPlate()
    slider_mount_plate.set_object_parameter('slice',True)
    slider_mount_plate.export()
| StarcoderdataPython |
3282995 | import numpy as np
import cmath
from random import random
class Polynomial:
    """Polynomial with complex root finding via Laguerre's method.

    Coefficients are stored lowest-order first. ``self.a`` holds a private
    copy of the coefficients; ``self._a`` is the working array, rebound by
    eval()/roots() and progressively deflated while finding roots.
    """
    def __init__(self, a):
        """
        Define the polynomial 'p = a[0] + a[1]*x + a[2]*x^2 + ... + a[n]*x^n'.

        Params:
            - a    array with the coefficients of the polynomial
        """
        # NOTE(review): this reference to the caller's array is immediately
        # shadowed — eval() and roots() rebind _a to the copy below.
        self._a = a
        self.a = np.copy(a)

    def eval(self, x, tol=1.0e-12):
        """
        Evaluate the polynomial 'p = a[0] + a[1]*x + a[2]*x^2 + ... + a[n]*x^n' at 'x'.

        Params:
            - x    point where the polynomial is to be evaluated

        Return values:
            - p    the value of the polynomial at 'x'
            - dp   the value of the first derivative of the polynomial at 'x'
            - ddp  the value of the second derivative of the polynomial at 'x'
        """
        self._a = self.a
        L = list(self._eval(x))
        # Collapse values with a negligible imaginary part to plain reals.
        for i in range(len(L)):
            if abs(L[i].imag) < tol:
                L[i] = L[i].real
        return L

    def _eval(self, x):
        # Horner's scheme evaluating p, p' and p'' in a single pass.
        n = len(self._a) - 1
        p = self._a[n]
        dp = 0.0 + 0.0j
        ddp = 0.0 + 0.0j
        for i in range(1, n + 1):
            ddp = ddp * x + 2.0 * dp
            dp = dp * x + p
            p = p * x + self._a[n - i]
        return p, dp, ddp

    def _laguerre(self, x, tol):
        # One Laguerre root search from starting point x (at most 30 steps).
        n = len(self._a) - 1
        for i in range(30):
            p, dp, ddp = self._eval(x)
            if abs(p) < tol:
                return x
            g = dp / p
            h = g * g - ddp / p
            f = cmath.sqrt((n - 1) * (n * h - g * g))
            # Pick the larger-magnitude denominator for numerical stability.
            if abs(g + f) > abs(g - f):
                dx = n / (g + f)
            else:
                dx = n / (g - f)
            x = x - dx
            if abs(dx) < tol:
                return x
        raise OverflowError('too many iterations')

    def _deflate(self, root):
        # Synthetic division: divide the polynomial by (x - root), returning
        # the coefficients of the degree-(n-1) quotient.
        n = len(self._a) - 1
        b = [(0.0 + 0.0j)] * n
        b[n - 1] = self._a[n]
        for i in range(n - 2, -1, -1):
            b[i] = self._a[i + 1] + root * b[i + 1]
        return b

    def roots(self, tol=1.0e-12, roots_guess=None):
        """
        Uses Laguerre's method to compute all the roots of 'a[0] + a[1]*x + a[2]*x^2 + ... + a[n]*x^n = 0', which
        can be real or complex.

        Params:
            - tol            error tolerance (a value less than 'tol' is considered as zero)
            - roots_guess    initial guess values for the roots; if None (default), random guesses will be used
                             (note: a polynomial of order n, has n roots)

        Return value:
            - array with the roots of the polynomial

        Note: with ``roots_guess=None`` the starting points come from
        ``random()``, so the iteration path (and root ordering) is not
        deterministic between calls.
        """
        self._a = self.a
        n = len(self._a) - 1
        roots = np.zeros(n, dtype=complex)
        for i in range(n):
            if roots_guess is not None:
                x = roots_guess[i]
            else:
                x = random()
            x = self._laguerre(x, tol)
            # Near-real roots are stored with a zeroed imaginary part.
            if abs(x.imag) < tol:
                x = x.real
            roots[i] = x
            # Deflate so the next search targets the remaining roots only.
            self._a = self._deflate(x)
        return roots
| StarcoderdataPython |
105127 | <gh_stars>0
# Emits the same three lines of output as the original, via a single loop.
for line in ("Mon premier", "programme", "est affiche"):
    print(line)
1751858 | #
# File : ihex2hex.py
# Author : <NAME>.
# Data : 2019.03.04
# Language : Python
# Description : This is script for converting ihex format to hex
# Copyright(c) : 2018 - 2019 <NAME>.
#
import sys
# Convert an Intel HEX program into a VHDL byte-array constant.
# Record types handled: 00 = data, 01 = end of file, 04 = extended linear
# address (upper 16 bits of the byte address).
pars_file = open("program_file/program.ihex" , "r")
out_file_f = open("program_file/nf_program.vhd" , "w") # full mem [31:0]
hi_addr = 0
# VHDL package header; argv[1] is the memory depth (words) for mem_t.
out_file_f.write(str("\
library ieee;\n\
use ieee.std_logic_1164.all;\n\
use ieee.numeric_std.all;\n\
library work;\n\
use work.nf_mem_pkg.all;\n\
\n\
package nf_program is\n\
\n\
constant program : mem_t({:s}*4-1 downto 0)(7 downto 0) := \
( ".format(sys.argv[1]) ) )
for lines in pars_file:
    # find checksum
    # NOTE(review): the checksum is parsed but never verified.
    checksum = lines[-3:-1]
    lines = lines[0:-3]
    # break if end of record
    if( lines[7:9] == "01"):
        out_file_f.write("    others  => X\"XX\"")
        break
    # update high address of linear record
    elif( lines[7:9] == "04"):
        hi_addr = int('0x'+lines[9:13],16)
    # record data
    elif( lines[7:9] == "00" ):
        # delete ':'
        lines = lines[1:]
        # find lenght
        lenght = int('0x'+lines[0:2],16)
        lines = lines[2:]
        # find addr
        lo_addr = int('0x'+lines[0:4],16)
        lines = lines[4:]
        # find type
        type_ = lines[0:2]
        lines = lines[2:]
        i = 0
        # write addr
        # Emit four bytes per iteration as "<addr> => X"hh"," entries.
        while(1):
            st_addr = str("    {:d}".format( ( ( hi_addr << 16 ) + lo_addr + i + 0 ) ) )
            out_file_f.write(st_addr + " => ")
            # write data
            out_file_f.write("X\"" + lines[0:2] + "\"" + ",\n")
            st_addr = str("    {:d}".format( ( ( hi_addr << 16 ) + lo_addr + i + 1 ) ) )
            out_file_f.write(st_addr + " => ")
            # write data
            out_file_f.write("X\"" + lines[2:4] + "\"" + ",\n")
            st_addr = str("    {:d}".format( ( ( hi_addr << 16 ) + lo_addr + i + 2 ) ) )
            out_file_f.write(st_addr + " => ")
            # write data
            out_file_f.write("X\"" + lines[4:6] + "\"" + ",\n")
            st_addr = str("    {:d}".format( ( ( hi_addr << 16 ) + lo_addr + i + 3 ) ) )
            out_file_f.write(st_addr + " => ")
            # write data
            out_file_f.write("X\"" + lines[6:8] + "\"" + ",\n")
            lines = lines[8:]
            i += 4
            if( i >= lenght ):
                break
# Close the VHDL aggregate and package.
out_file_f.write('''
    );
end package nf_program;
    ''')
# NOTE(review): "comlete" is a typo in this user-facing message (left as-is).
print("Conversion comlete!")
| StarcoderdataPython |
1720372 | #!/usr/bin/python
r"""
Command-line helpers (exposed via ``fire``) for building Docker images and
running the qiskit_alt test containers.
"""
import subprocess as sb_pr
import fire
def subprocess_execute(command_list):
    """Run *command_list* on the host OS, streaming its output to STDOUT.

    Arguments:
        command_list: List of strings forming the command to execute.

    Raises:
        subprocess.CalledProcessError: If the command exits non-zero
            (because of ``check=True``).
    """
    sb_pr.run(command_list, check=True, text=True)
def action_build(image_name, image_tag, dockerfile_name, docker_path):
    """Assemble the ``docker build`` command for the given image.

    Arguments:
        image_name: Name of the image file.
        image_tag: Tag of the build image file.
        dockerfile_name: The Dockerfile used for building the image.
        docker_path: Working directory of docker.

    Returns:
        List of command tokens suitable for subprocess execution.
    """
    tagged_image = "{}:{}".format(image_name, image_tag)
    return [
        "docker", "build",
        "-t", tagged_image,
        "-f", dockerfile_name,
        docker_path,
    ]
def action_run(image_name, image_tag, user_name, test_file_path, test_file_name):
    """The function action_run runs the container and initiates the tests.

    Arguments:
        image_name: Name of image to be used to build the containers.
        image_tag: The tag of images to be used to build the containers.
        user_name: The user name which logins into the container to run the
            tests (was missing from the original docstring).
        test_file_path: Path of the test file from which tests has to run.
        test_file_name: Name of the file containing the tests to be done.

    Returns:
        List of command tokens suitable for subprocess execution.
    """
    image_name_with_tag = image_name + ":" + image_tag
    run_command_list = [
        "docker",
        "run",
        "-it",
        image_name_with_tag,
        "/usr/bin/su",
        "-l",
        user_name,
        "-c",
        # Log in as *user_name* and launch the test script inside qiskit_alt.
        "cd qiskit_alt; sh " + test_file_path + test_file_name,
    ]
    return run_command_list
def action_get_into_fish(image_name, image_tag, user_name):
    """Build the command that drops into a fish shell inside the container.

    Arguments:
        image_name: Name of the image to be used to build the container.
        image_tag: The tag of the image which is used to build the container.
        user_name: The user name which logins into the container.

    Returns:
        List of command tokens suitable for subprocess execution.
    """
    tagged_image = "{}:{}".format(image_name, image_tag)
    return [
        "docker", "run", "-it",
        tagged_image,
        "/usr/bin/su", "-l", user_name,
        "-s", "/usr/bin/fish",
    ]
def action_get_into_bash(image_name, image_tag, user_name):
    """Build the command that drops into a bash shell inside the container.

    Arguments:
        image_name: Name of the image to be used to build the container.
        image_tag: The tag of the image which is used to build the container.
        user_name: The user name which logins into the container.

    Returns:
        List of command tokens suitable for subprocess execution.
    """
    tagged_image = "{}:{}".format(image_name, image_tag)
    return [
        "docker", "run", "-it",
        tagged_image,
        "/usr/bin/su", "-l", user_name,
        "-s", "/usr/bin/bash",
    ]
def action_get_into_rootfish(image_name, image_tag):
    """The function action_get_into_rootfish takes into the fish shell
    running in the container as root.

    Arguments:
        image_name: Name of the image to be used to build the container.
        image_tag: The tag of the image which is used to build the container.

    Returns:
        List of command tokens suitable for subprocess execution.

    Note:
        The original docstring documented a ``user_name`` argument that this
        function does not take; running without ``su`` keeps the shell as root.
    """
    image_name_with_tag = image_name + ":" + image_tag
    get_rootfish_command_list = [
        "docker",
        "run",
        "-it",
        image_name_with_tag,
        "/usr/bin/fish",
    ]
    return get_rootfish_command_list
def _cli(
    action="",
    image_name="qiskit_alt",
    image_tag="latest",
    dockerfile_name="Dockerfile",
    user_name="quser",
    test_file_path="./",
    test_file_name="run_init_tests.sh",
    docker_path="..",
    dry_run="false",
):
    """All the arguments of this function are supposed to be passed as command line
    arguments while initiating the python script.
    Arguments:
        action: This are the possible actions to be performed.
            Possible actions are:
                build: To build the containers
                    Example: ./run_dockerfile.py --action=build
                run: To run the containers
                "": To build and then to run the containers.
                get_into_fish: To get into the fish shell running in the container.
                get_into_bash: To get into the bash shell running in the container.
                get_into_rootfish: To get into the fish shell running in the container
                    as root.
        image_name: The name of the image to be build.
        image_tag: The tag given to the image to be build.
        dockerfile_name: The name of the Dockerfile to be used for building the image.
        user_name: A username in the container.
        test_file_path: The path to the test file which contains all the tests to run.
        test_file_name: The name of the file containing the tests to be run.
        docker_path: The working directory for docker.
        dry_run: Either true or false. If true, then only print action, but don't execute it.
    """
    # ``dry_run`` arrives as a string from the command line; validate it first
    # so an invalid value aborts before any docker command is assembled.
    if dry_run == "false":
        _dry_run = False
    elif dry_run == "true":
        _dry_run = True
    else:
        print("dry_run must be either true or false. See ./run_dockerfile.py --help")
        return
    command_lists = []
    # Translate the requested action into one or more docker command lists.
    if action == "build":
        command_lists.append(action_build(image_name, image_tag, dockerfile_name, docker_path))
    elif action == "run":
        command_lists.append(action_run(image_name, image_tag, user_name, test_file_path, test_file_name))
    elif action == "":
        # Default action: build the image first, then run the tests.
        command_lists.append(action_build(image_name, image_tag, dockerfile_name, docker_path))
        command_lists.append(action_run(image_name, image_tag, user_name, test_file_path, test_file_name))
    elif action == "get_into_fish":
        command_lists.append(action_get_into_fish(image_name, image_tag, user_name))
    elif action == "get_into_bash":
        command_lists.append(action_get_into_bash(image_name, image_tag, user_name))
    elif action == "get_into_rootfish":
        command_lists.append(action_get_into_rootfish(image_name, image_tag))
    else:
        print("Bad arguments, See ./run_dockerfile.py --help")
    # Echo each command; execute it unless this is a dry run.
    for command_list in command_lists:
        command_string = " ".join(map(str, command_list))
        print(command_string + "\n")
        if not _dry_run:
            subprocess_execute(command_list)
if __name__ == "__main__":
    # python-fire turns every keyword argument of _cli into a CLI flag.
    fire.Fire(_cli)
| StarcoderdataPython |
1781097 | <gh_stars>1-10
from flask import Flask, render_template, request, redirect, url_for
import jinja2
import difflib
import pandas
import cPickle as pickle
import json
from recommendation_functions import *
from amazon_api_image import *
from titlecase import titlecase
import time
class Data:
    """ This loads the book lookups as well as stop words that are used to search through the book dictionary"""
    # Class-level mutable state shared by every instance (and by the Flask
    # request handlers below): the user's current selections.
    book_list = []
    book_dict = {}
    def __init__(self):
        # NOTE(review): these three file handles are opened but never closed.
        self.pickle_book, self.pickle_words, self.json_image = open("Data/book_dict.pickle","rb"),open("Data/stop_words.pickle","rb"),open("Data/image_dict.txt", "rb")
        # books_dict maps a title to its record; example record shape below.
        self.books_dict, self.stop_words = pickle.load(self.pickle_book), pickle.load(self.pickle_words)
        #{'8353': [nan, 'The King', 'Author']}
        self.image_dict = json.load(self.json_image)# this dictionary is based on book number lookup
        # this converts book6738 to 6738
class UserInputValid():
    """ validates search of books to make sure its in our dictionary. Searches using first word and close matches algo"""
    # Consecutive failed lookups; reset on a successful lookup or after the
    # limit is reached.  Relies on the module-level ``data`` (Data instance).
    num_wrongs = 0
    def convert_to_num (self, book_recs):
        """Strip the 'book' prefix from recommendation ids (e.g. 'book6738' -> '6738')."""
        str_book_list = []
        for i in book_recs:
            str_book_list.append (i[0].replace('book', '')) # remove book and leave number
        return str_book_list
    def similar (self, word):
        """Return (titles sharing the first word, difflib close matches) for *word*."""
        word = titlecase(word)
        first_word = str (word.partition(' ')[0])
        # Iterating the dict directly works on both Python 2 and 3; the
        # original used the Python-2-only ``iterkeys()``.
        first_word_matches = [book for book in data.books_dict if str(titlecase(first_word)) == str (book.partition(' ')[0])
                       and first_word.lower() not in data.stop_words]
        return ', '.join (first_word_matches), ','.join(difflib.get_close_matches(word, data.books_dict.keys(), n=6, cutoff=.5)) # gets highest match
    # This validates that book is in dictionary
    def book_return (self, book):
        """Return (title, book_number) when found, otherwise a suggestion/error string."""
        book = titlecase(book)
        if book in data.books_dict:
            # Bug fix: the original did ``data.num_wrongs = 0`` which set an
            # unused attribute on the Data instance instead of resetting the
            # counter that is actually checked below.
            self.num_wrongs = 0 # resets
            return book, data.books_dict[book][0]
        # NOTE(review): similar() returns a 2-tuple, so len(...) > 0 is always
        # true; the effective gate is the num_wrongs limit.
        if len(self.similar(book))>0 and self.num_wrongs <= 3: # allows user to make 3 mistakes
            self.num_wrongs +=1
            return "<span> Copy and paste the exact title without quotes </span> Did you mean {}? ".format(self.similar(book))
        else:
            self.num_wrongs = 0 # resets
            return 'Sorry, we don\'t have this title'
# Initialize Objects
# Module-level singletons shared by all request handlers below.
data = Data()
user_input = UserInputValid()
application =Flask(__name__)
@application.route("/", methods =['GET', 'POST'])
def index():
    # Landing page: on POST via the "Add Book" button, validate the typed
    # title and accumulate it (with its rating) into the shared Data state.
    # NOTE(review): this is Python 2 code (print statements) and book_return()
    # is called up to four times per request with the same argument.
    #if user clicks the add button
    if request.method =='POST' and 'add_button' in request.form:
        book=request.form["book_name"]
        rating=request.form["rating"]
        # book_return() returns a tuple (title, book_number) only on success;
        # any failure returns a message string instead.
        if type (user_input.book_return(book))==tuple: # return book if found in dictionary
            print user_input.book_return(book)[0]
            data.book_list.append(user_input.book_return(book)[0])
            print data.book_list
            data.book_dict[user_input.book_return(book)[1]] = rating # add book_number and rating to dictionary
            print data.book_list
            return render_template("index_mine.html", text = "Current Books: {}".format(','.join(data.book_list))) # show found book to user
        else:
            return render_template("index_mine.html", text = user_input.book_return(book)) #if book not found return message from book_return
    return render_template("index_mine.html")
# if user clicks submit
@application.route("/success", methods=['POST', 'GET'])
def success():
    # On submit: snapshot and clear the shared selections, then generate
    # recommendations and fetch their Amazon cover images.
    #global book_list, book_dict
    if request.method=='POST' and 'submit_button' in request.form:
        temp_list = data.book_list
        temp_dict = data.book_dict
        data.book_list =[] # clear booklist
        data.book_dict = {} # clear book dictionary upon submit
        # if user clicks submit prior to add
        if len(temp_list)== 0:
            return render_template ("index_mine.html", text = "{}".format("Please Click the 'Add Book' Button Prior to Submitting"))
        else:
            # initialize object
            generate_recommendations = GenerateRecommendations(user_books = temp_dict, num_recs = 10)
            # my_recs: list of book-number strings ('book' prefix stripped).
            my_recs = user_input.convert_to_num(generate_recommendations.generate_list(generate_recommendations.handler_template_final()))
            print 'recs are', my_recs
            #initializes the object
            amazon_api = Amazon_Api(list_of_numbers = my_recs, some_dict = data.image_dict, number_of_recs = 6)
            urls = amazon_api.generate_images()
            print len (urls)
            return render_template("success.html", value = ','.join(my_recs), urls = urls)
if __name__ == '__main__':
    #app.debug=True
    # Serve the recommendation app locally on port 5005.
    application.run(port=5005)
| StarcoderdataPython |
172498 | <filename>NewsCrawler/spiders/eastmoney.py
from json import loads
from random import random
from re import match
from time import time
import scrapy
from requests import get
from NewsCrawler.items import NewsItem
from NewsCrawler.utils.call_nav_map import nav_map
from NewsCrawler.utils.validate_published import validate_replace
class EastmoneySpider(scrapy.Spider):
    """Spider for Eastmoney's 7x24 live financial news feed (东方财富网7X24小时快讯)."""
    name = 'eastmoney'
    allowed_domains = ['eastmoney.com']
    # List API endpoint; takes a page number plus cache-busting random and
    # timestamp query parameters.
    base_url = 'https://newsapi.eastmoney.com/kuaixun/v1/getlist_102_ajaxResult_50_%(page)s_.html?r=%(ran_num)s&_=%(time_stamp)s'
    time_stamp = str(time()).replace('.', '')[:-4]
    ran_num = random()
    start_urls = [base_url % {'page': 1, 'ran_num': ran_num, 'time_stamp': time_stamp}]
    def parse(self, response):
        """Parse the list API response, request each detail page, and paginate."""
        ajax_data = response.text.replace('var ajaxResult=', '')
        data_list = loads(ajax_data)['LivesList']
        for data in data_list:
            # Bug fix: a single NewsItem was created once outside the loop and
            # shared by every request, so concurrent detail callbacks
            # overwrote each other's fields.  Create one item per entry.
            item = NewsItem()
            item['news_id'] = data['newsid']
            item['title'] = data['title']
            item['link'] = data['url_unique']
            item['nav_name'] = [nav_map[i] for i in data['column'].split(',') if i in nav_map.keys()]
            item['published'] = validate_replace(data['showtime'])
            yield scrapy.Request(item['link'], callback=self.parse_detail, meta={'item': item})
        for page in range(2, 21):
            # Bug fix: 'page' was hard-coded to 1, so every "next" request
            # re-fetched page 1 instead of pages 2..20.
            next_url = self.base_url % {'page': page, 'ran_num': self.ran_num, 'time_stamp': self.time_stamp}
            yield scrapy.Request(next_url)
    def parse_detail(self, response):
        """Extract source, summary, paragraphs and images from a detail page."""
        item = response.meta['item']
        item['source'] = response.xpath('//div[@class="source data-source"]/@data-source').extract_first()
        item['desc'] = response.xpath('//div[@class="b-review"]/text()').extract_first().strip()
        item['content'] = []
        item['images'] = []
        p_list = response.xpath('//div[@id="ContentBody"]/p[not(@class)] | //div[@id="ContentBody"]/center')
        for p in p_list:
            if p.xpath('.//img'):
                img_link = p.xpath('.//img/@src').extract_first()
                # https://dfscdn.dfcfw.com/download/D25618177642896768707.jpg
                if match(r"https://dfscdn\.dfcfw\.com/download/.*", img_link):
                    item['content'].append(img_link)
                    # NOTE(review): blocking requests.get inside a Scrapy
                    # callback stalls the reactor; an image pipeline would be
                    # the idiomatic fix.
                    img_content = get(img_link).content
                    item['images'].append(img_content)
            else:
                text = ''.join(p.xpath('.//text()').extract()).strip()
                if text:
                    item['content'].append(text)
        yield item
| StarcoderdataPython |
126498 | from django.urls import include, path
from . import views
# Route order matters: the custom confirm-email and social-signup views must
# be registered before the catch-all allauth include so they take precedence.
urlpatterns = [
    path('confirm-email/<str:key>/', views.UserConfirmEmailView.as_view(), name='confirm_email'),
    path('social/signup/', views.SocialUserSignupView.as_view(), name='socialaccount_signup'),
    path('', include('allauth.urls')),
]
| StarcoderdataPython |
3237685 | <reponame>fusion-jena/BiodivOnto
from Tutorial.clustering_manger import ClusteringManager
from os.path import realpath, join
import pandas as pd
from vector_manager import VectorManager
from dfs_clustering import RecursiveClustering
from vis_manager import Visualizer
def init_vectors():
    """Read keywords from data/Keywords.csv and generate/persist their vectors."""
    data_path = join(realpath('.'), 'data', 'Keywords.csv')
    df = pd.read_csv(data_path)
    # Expects a 'Keywords' column in the CSV.
    keywords = list(df['Keywords'])
    vecMang = VectorManager(keywords)
    vecMang.generate_save_vecs()
def cluster():
    """Run recursive (DFS) clustering over the saved keyword vectors."""
    recurs = RecursiveClustering()
    keywords, X = recurs.read_data()
    recurs.init_keywords_path(keywords)
    # Start recursion at depth 0 with root cluster id '0'.
    recurs.run(X, keywords, 0, '0')
    recurs.save_keywords_path()
def visualize():
    """Render the clustering results with the project's Visualizer."""
    Visualizer().run()
if __name__ == '__main__':
    # Full pipeline: vectorize keywords, cluster them, then visualize.
    init_vectors()
    cluster()
    visualize()
1653617 | <filename>lemmatized_text/forms.py
from django import forms
from ckeditor.widgets import CKEditorWidget
class LemmatizedTextEditForm(forms.Form):
    """Form for editing a lemmatized text's title and rich-text body."""
    # Title of the lemmatized text.
    title = forms.CharField()
    # Body edited with the project's "hedera_ckeditor" CKEditor configuration.
    text = forms.CharField(widget=CKEditorWidget(config_name="hedera_ckeditor"))
3357400 | # -*- coding: utf-8 -*-
import os
import warnings
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
from comet.models import download_model
def cal_comet(file_can, file_ref, num, model):
    """Score a candidate file against *num* reference files with COMET.

    Args:
        file_can: Path of the candidate (system output) file.
        file_ref: Path prefix of the reference files; reference *i* is read
            from ``file_ref + str(i)``.
        num (int): The number of references for each candidate.
        model: A loaded COMET model.

    Returns:
        List of segment-level COMET scores: the candidates are scored once
        per reference file and the score lists are concatenated.
    """
    with open(file_can, 'r') as fin:
        cand = [line.strip() for line in fin]
    # Reference-only scoring: source side is left empty for every segment.
    srcs = [''] * len(cand)
    scores = []
    for i in range(int(num)):
        with open(file_ref + str(i), 'r') as fin:
            refs = [line.strip() for line in fin]
        samples = [{"src": s, "mt": m, "ref": r} for s, m, r in zip(srcs, cand, refs)]
        scores.extend(model.predict(samples, cuda=True,
                                    show_progress=False)[-1])
    return scores
# CLI: argv[1]/argv[2] are candidate files, argv[3]/argv[4] the matching
# reference-file prefixes, argv[5] the number of references per candidate.
scores = []
model = download_model("wmt-large-da-estimator-1719")
scores.extend(cal_comet(sys.argv[1], sys.argv[3], sys.argv[5], model))
scores.extend(cal_comet(sys.argv[2], sys.argv[4], sys.argv[5], model))
print('The average comet score is {}'.format(sum(scores)/len(scores)))
| StarcoderdataPython |
187110 | <gh_stars>1-10
from raven.contrib.transports.zeromq.raven_zmq import ZmqPubTransport
| StarcoderdataPython |
3361043 | # Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket as nk
from generate_data import generate
import sys
import numpy as np
# Quantum state reconstruction (QSR) of an N-spin state from synthetic
# measurement data, monitoring energy, fidelity and NLL during training.
mpi_rank = nk.MPI.rank()
# Load the data
N = 10
hi, rotations, training_samples, training_bases, ha, psi = generate(
    N, n_basis=2 * N, n_shots=500
)
# Machine
ma = nk.machine.RbmSpinPhase(hilbert=hi, alpha=1)
ma.init_random_parameters(seed=1234, sigma=0.01)
# Sampler
sa = nk.sampler.MetropolisLocal(machine=ma)
# Optimizer
op = nk.optimizer.AdaDelta()
# Quantum State Reconstruction
qst = nk.unsupervised.Qsr(
    sampler=sa,
    optimizer=op,
    batch_size=1000,
    n_samples=1000,
    rotations=rotations,
    samples=training_samples,
    bases=training_bases,
    method="Sr",
)
qst.add_observable(ha, "Energy")
# 4000 iterations, reporting (and checkpointing) every 100 steps.
for step in qst.iter(4000, 100):
    obs = qst.get_observable_stats()
    # Only MPI rank 0 logs and saves, to avoid duplicated output/files.
    if mpi_rank == 0:
        print("step={}".format(step))
        print("acceptance={}".format(list(sa.acceptance)))
        print("observables={}".format(obs))
        # Compute fidelity with exact state
        psima = ma.to_array()
        fidelity = np.abs(np.vdot(psima, psi))
        print("fidelity={}".format(fidelity))
        # Compute NLL on training data
        nll = qst.nll(
            rotations=rotations,
            samples=training_samples,
            bases=training_bases,
            log_norm=ma.log_norm(),
        )
        print("negative log likelihood={}".format(nll))
        # Print output to the console immediately
        sys.stdout.flush()
        # Save current parameters to file
        ma.save("test.wf")
| StarcoderdataPython |
1799752 | <reponame>19-1-skku-oss/2019-1-OSS-L1
def setup_function(function):
    # pytest hook: runs before every test function in this module.
    print("setting up %s" % function)
def test_func1():
    # Trivially passing test (pytest fixture/hook demo).
    assert True
def test_func2():
    # Intentionally failing test, demonstrating pytest failure reporting.
    assert False
1658647 | <filename>bayespy/inference/vmp/nodes/dirichlet.py
################################################################################
# Copyright (C) 2011-2012,2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Module for the Dirichlet distribution node.
"""
import numpy as np
from scipy import special
from bayespy.utils import random
from bayespy.utils import misc
from bayespy.utils import linalg
from .stochastic import Stochastic
from .expfamily import ExponentialFamily, ExponentialFamilyDistribution
from .constant import Constant
from .node import Node, Moments, ensureparents
class ConcentrationMoments(Moments):
    """
    Moments of Dirichlet conjugate-prior (concentration) variables.
    """
    def __init__(self, categories):
        # u = (alpha vector, log-normalizer z), hence dims ((D,), ()).
        self.categories = categories
        self.dims = ((categories,), ())
        return
    def compute_fixed_moments(self, alpha):
        """
        Compute the moments for a fixed value
        """
        alpha = np.asanyarray(alpha)
        if np.ndim(alpha) < 1:
            raise ValueError("The prior sample sizes must be a vector")
        if np.any(alpha < 0):
            raise ValueError("The prior sample sizes must be non-negative")
        # z = log Gamma(sum_d alpha_d) - sum_d log Gamma(alpha_d)
        z = (special.gammaln(np.sum(alpha, axis=-1))
             - np.sum(special.gammaln(alpha), axis=-1))
        return [alpha, z]
    @classmethod
    def from_values(cls, alpha):
        """
        Return the moments object matching the trailing dimension of *alpha*.
        """
        if np.ndim(alpha) < 1:
            raise ValueError("The array must be at least 1-dimensional array.")
        return cls(np.shape(alpha)[-1])
class DirichletMoments(Moments):
    """
    Moments of Dirichlet variables: u = ( E[log p], ).
    """
    def __init__(self, categories):
        self.categories = categories
        self.dims = ((categories,),)
    def compute_fixed_moments(self, p):
        """
        Compute the moments for a fixed value
        """
        p = np.asanyarray(p)
        if np.ndim(p) < 1:
            raise ValueError("Probabilities must be given as a vector")
        if np.any(p < 0) or np.any(p > 1):
            raise ValueError("Probabilities must be in range [0,1]")
        if not np.allclose(np.sum(p, axis=-1), 1.0):
            raise ValueError("Probabilities must sum to one")
        # Renormalize to remove small numerical error, then take logs:
        # log(p) is the sufficient statistic of the Dirichlet family.
        normalized = p / np.sum(p, axis=-1, keepdims=True)
        return [np.log(normalized)]
    @classmethod
    def from_values(cls, x):
        """
        Return the moments object matching the trailing dimension of *x*.
        """
        if np.ndim(x) < 1:
            raise ValueError("Probabilities must be given as a vector")
        return cls(np.shape(x)[-1])
class DirichletDistribution(ExponentialFamilyDistribution):
    """
    Class for the VMP formulas of Dirichlet variables.
    """
    def compute_message_to_parent(self, parent, index, u_self, u_alpha):
        r"""
        Compute the message to a parent node.
        """
        # Message to the concentration parent: (E[log p], 1); the constant
        # second component adds one observation to the parent's count.
        logp = u_self[0]
        m0 = logp
        m1 = 1
        return [m0, m1]
    def compute_phi_from_parents(self, u_alpha, mask=True):
        r"""
        Compute the natural parameter vector given parent moments.
        """
        # The natural parameter of the Dirichlet is the concentration vector.
        return [u_alpha[0]]
    def compute_moments_and_cgf(self, phi, mask=True):
        r"""
        Compute the moments and :math:`g(\phi)`.
        .. math::
           \overline{\mathbf{u}} (\boldsymbol{\phi})
           &=
           \begin{bmatrix}
             \psi(\phi_1) - \psi(\sum_d \phi_{1,d})
           \end{bmatrix}
           \\
           g_{\boldsymbol{\phi}} (\boldsymbol{\phi})
           &=
           \log \Gamma (\textstyle\sum_d \phi_{1,d})
           - \sum_d \log \Gamma (\phi_{1,d})
        """
        if np.any(np.asanyarray(phi) <= 0):
            raise ValueError("Natural parameters should be positive")
        sum_gammaln = np.sum(special.gammaln(phi[0]), axis=-1)
        gammaln_sum = special.gammaln(np.sum(phi[0], axis=-1))
        psi_sum = special.psi(np.sum(phi[0], axis=-1, keepdims=True))
        # Moments <log x>
        u0 = special.psi(phi[0]) - psi_sum
        u = [u0]
        # G
        g = gammaln_sum - sum_gammaln
        return (u, g)
    def compute_cgf_from_parents(self, u_alpha):
        r"""
        Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
        """
        # The parent's second moment component is exactly E[g].
        return u_alpha[1]
    def compute_fixed_moments_and_f(self, p, mask=True):
        r"""
        Compute the moments and :math:`f(x)` for a fixed value.
        .. math::
           u(p) =
           \begin{bmatrix}
             \log(p_1)
             \\
             \vdots
             \\
             \log(p_D)
           \end{bmatrix}
        .. math::
           f(p) = - \sum_d \log(p_d)
        """
        # Check that probabilities are non-negative
        p = np.asanyarray(p)
        if np.ndim(p) < 1:
            raise ValueError("Probabilities must be given as a vector")
        if np.any(p < 0) or np.any(p > 1):
            raise ValueError("Probabilities must be in range [0,1]")
        if not np.allclose(np.sum(p, axis=-1), 1.0):
            raise ValueError("Probabilities must sum to one")
        # Normalize probabilities
        p = p / np.sum(p, axis=-1, keepdims=True)
        # Message is log-probabilities
        logp = np.log(p)
        u = [logp]
        f = - np.sum(logp, axis=-1)
        return (u, f)
    def random(self, *phi, plates=None):
        r"""
        Draw a random sample from the distribution.
        """
        return random.dirichlet(phi[0], size=plates)
    def compute_gradient(self, g, u, phi):
        r"""
        Compute the moments and :math:`g(\phi)`.
        \psi(\phi_1) - \psi(\sum_d \phi_{1,d})
        Standard gradient given the gradient with respect to the moments, that
        is, given the Riemannian gradient :math:`\tilde{\nabla}`:
        .. math::
           \nabla &=
           \begin{bmatrix}
             (\psi^{(1)}(\phi_1) - \psi^{(1)}(\sum_d \phi_{1,d}) \nabla_1
           \end{bmatrix}
        """
        # polygamma(1, .) is the trigamma function psi^{(1)}.
        sum_phi = np.sum(phi[0], axis=-1, keepdims=True)
        d0 = g[0] * (special.polygamma(1, phi[0]) - special.polygamma(1, sum_phi))
        return [d0]
class Concentration(Stochastic):
    # This node has no stochastic parents.
    _parent_moments = ()
    def __init__(self, D, regularization=True, **kwargs):
        """
        ML estimation node for concentration parameters.
        Parameters
        ----------
        D : int
            Number of categories
        regularization : 2-tuple of arrays (optional)
            "Prior" log-probability and "prior" sample number
        """
        self.D = D
        self.dims = ( (D,), () )
        self._moments = ConcentrationMoments(D)
        super().__init__(dims=self.dims, initialize=False, **kwargs)
        # Initialize the estimate at the uniform concentration (all ones).
        self.u = self._moments.compute_fixed_moments(np.ones(D))
        if regularization is None or regularization is False:
            regularization = [0, 0]
        elif regularization is True:
            # Decent default regularization?
            regularization = [np.log(1/D), 1]
        self.regularization = regularization
        return
    @property
    def regularization(self):
        return self.__regularization
    @regularization.setter
    def regularization(self, regularization):
        # Validate shape compatibility before storing (name-mangled attribute).
        if len(regularization) != 2:
            raise ValueError("Regularization must 2-tuple")
        if not misc.is_shape_subset(np.shape(regularization[0]), self.get_shape(0)):
            raise ValueError("Wrong shape")
        if not misc.is_shape_subset(np.shape(regularization[1]), self.get_shape(1)):
            raise ValueError("Wrong shape")
        self.__regularization = regularization
        return
    def _update_distribution_and_lowerbound(self, m):
        r"""
        Find maximum likelihood estimate for the concentration parameter
        """
        a = np.ones(self.D)
        da = np.inf
        # Incoming message plus the "prior" pseudo-observations.
        logp = m[0] + self.regularization[0]
        N = m[1] + self.regularization[1]
        # Compute sufficient statistic
        mean_logp = logp / N[...,None]
        # It is difficult to estimate values lower than 0.02 because the
        # Dirichlet distributed probability vector starts to give numerically
        # zero random samples for lower values.
        if np.any(np.isinf(mean_logp)):
            raise ValueError(
                "Cannot estimate DirichletConcentration because of infs. This "
                "means that there are numerically zero probabilities in the "
                "child Dirichlet node."
            )
        # Fixed-point iteration
        # (Minka-style update: a <- invpsi(psi(sum a) + <log p>), iterated
        # until the relative change is below 1e-5.)
        while np.any(np.abs(da / a) > 1e-5):
            a_new = misc.invpsi(
                special.psi(np.sum(a, axis=-1, keepdims=True))
                + mean_logp
            )
            da = a_new - a
            a = a_new
        self.u = self._moments.compute_fixed_moments(a)
        return
    def initialize_from_value(self, x):
        self.u = self._moments.compute_fixed_moments(x)
        return
    def lower_bound_contribution(self):
        # Contribution of the regularization terms to the lower bound.
        return (
            linalg.inner(self.u[0], self.regularization[0], ndim=1)
            + self.u[1] * self.regularization[1]
        )
class Dirichlet(ExponentialFamily):
    r"""
    Node for Dirichlet random variables.
    The node models a set of probabilities :math:`\{\pi_0, \ldots, \pi_{K-1}\}`
    which satisfy :math:`\sum_{k=0}^{K-1} \pi_k = 1` and :math:`\pi_k \in [0,1]
    \ \forall k=0,\ldots,K-1`.
    .. math::
        p(\pi_0, \ldots, \pi_{K-1}) = \mathrm{Dirichlet}(\alpha_0, \ldots,
        \alpha_{K-1})
    where :math:`\alpha_k` are concentration parameters.
    The posterior approximation has the same functional form but with different
    concentration parameters.
    Parameters
    ----------
    alpha : (...,K)-shaped array
        Prior counts :math:`\alpha_k`
    See also
    --------
    Beta, Categorical, Multinomial, CategoricalMarkovChain
    """
    _distribution = DirichletDistribution()
    @classmethod
    def _constructor(cls, alpha, **kwargs):
        """
        Constructs distribution and moments objects.
        """
        # Number of categories
        alpha = cls._ensure_moments(alpha, ConcentrationMoments)
        parent_moments = (alpha._moments,)
        parents = [alpha]
        # The trailing dimension of the concentration parent fixes K.
        categories = alpha.dims[0][0]
        moments = DirichletMoments(categories)
        return (
            parents,
            kwargs,
            moments.dims,
            cls._total_plates(kwargs.get('plates'), alpha.plates),
            cls._distribution,
            moments,
            parent_moments
        )
    def __str__(self):
        """
        Show distribution as a string
        """
        # phi[0] holds the posterior concentration parameters.
        alpha = self.phi[0]
        return ("%s ~ Dirichlet(alpha)\n"
                "  alpha =\n"
                "%s" % (self.name, alpha))
| StarcoderdataPython |
3391265 | <filename>sf3_rtsc_combine.py<gh_stars>1-10
#!/usr/bin/env python3
#Imports
import argparse
import sf3libs.sf3io as sfio
#Functions
def merge_rtsc(rtsc_lyst):
    '''Takes a list of <.rtsc> files, returns a dictionary that is sum of the RT stops'''
    combined = {}
    # Files are processed in sorted order; per-position counts are summed
    # for transcripts that appear in more than one file.
    for fyle in sorted(rtsc_lyst):
        for transcript, values in sfio.read_in_rtsc(fyle).items():
            if transcript in combined:
                combined[transcript] = [a + b for a, b in zip(combined[transcript], values)]
            else:
                combined[transcript] = values
    return combined
def main():
    """Parse CLI arguments, merge the given <.rtsc> files and write the result."""
    parser = argparse.ArgumentParser(description='Combines <.rtsc> files, typically replicates of the same sample')
    in_files = parser.add_argument_group('Input')
    in_files.add_argument('rtsc',help='Input <.rtsc> files',nargs='+')
    out_files = parser.add_argument_group('Output')
    out_files.add_argument('-sort',action='store_true',default=False,help='Sort output by transcript name')
    out_files.add_argument('-name',default=None,help='Specify output file name')
    args = parser.parse_args()
    #Generate name or assign the user provided name
    # Default output name is the sorted input basenames joined by '_'.
    default_name = '_'.join(sorted([x.replace('.rtsc','') for x in args.rtsc]))+'.rtsc'
    out_name = default_name if args.name == None else sfio.check_extension(args.name,'.rtsc')
    #Pool all <.rtsc> into a dictionary
    all_stops = merge_rtsc(args.rtsc)
    #Write out the dictionary
    sfio.write_rtsc(all_stops,args.sort,out_name)
if __name__ == '__main__':
    main()  # CLI entry point
| StarcoderdataPython |
3382739 | """
this pattern seems to be the best bet to make 2.7 code forward compatible, the unicode import caused bugs
"""
from __future__ import absolute_import, division, print_function # makes code Python 2 and 3 compatible mostly
| StarcoderdataPython |
3329964 | <reponame>dksifoua/NMT<filename>nmt/train/trainer.py
import os
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchtext.data import Dataset, Field
from torchtext.data.metrics import bleu_score
from torchtext.data.iterator import BucketIterator
from nmt.config.global_config import GlobalConfig
from nmt.config.train_config import TrainConfig
from nmt.train.train_utils import accuracy, adjust_lr, adjust_tf, AverageMeter, clip_gradient, load, save
from nmt.train.optim_utils import LRFinder
from nmt.train.beam_utils import find_best_path, Node
from nmt.utils.logger import Logger
from typing import Optional
class Trainer:
"""
Training routines.
Args:
model: nn.Module
The wrapped model.
optimizer: Optional[optim.Optimizer]
The wrapped optimizer. Can be None for evaluation and inference phases.
criterion: Optional[nn.Module]
The wrapped loss function. Can be None for evaluation and inference phases.
train_data: Dataset
Train dataset.
valid_data: Dataset
Valid dataset.
test_data: Dataset
Test dataset.
"""
    def __init__(self, model: nn.Module, optimizer: Optional[optim.Optimizer], criterion: Optional[nn.Module], src_field: Field,
                 dest_field: Field, train_data: Dataset, valid_data: Dataset, test_data: Dataset, logger: Logger):
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.src_field = src_field
        self.dest_field = dest_field
        self.train_data = train_data
        self.valid_data = valid_data
        self.test_data = test_data
        self.logger = logger
        # Data iterators are created later by build_data_iterator().
        self.train_iterator = None
        self.valid_iterator = None
        self.test_iterator = None
    def build_data_iterator(self, batch_size: int, device: torch.device):
        """
        Build data iterators for the training.
        Args:
            batch_size (int): the batch size.
            device (torch.device): the device on which the training will process.
        """
        # Bucket examples by source length so padding inside a batch is minimal.
        self.train_iterator, self.valid_iterator, self.test_iterator = BucketIterator.splits(
            (self.train_data, self.valid_data, self.test_data),
            batch_size=batch_size,
            sort_key=lambda x: len(x.src),
            sort_within_batch=True,
            device=device
        )
    def lr_finder(self, model_name: str):
        """
        Find the best learning rate for training process.
        Args:
            model_name:
                The class name of the model (used to name the saved plot).
        """
        lr_finder = LRFinder(model=self.model, optimizer=self.optimizer, criterion=self.criterion, logger=self.logger,
                             grad_clip=TrainConfig.GRAD_CLIP)
        lr_finder.range_test(data_loader=self.train_iterator, end_lr=TrainConfig.END_LR, n_iters=TrainConfig.N_ITERS)
        fig = plt.figure(figsize=(15, 5))
        ax = fig.add_subplot(1, 1, 1)
        ax, lr = lr_finder.plot(ax=ax)
        # Save the curve before plt.show(), which clears the current figure.
        plt.savefig(os.path.join(GlobalConfig.IMG_PATH, f'SuggestedLR_{model_name}.png'))
        plt.show()
        if lr is not None:  # Create an optimizer with the suggested LR
            self.optimizer = optim.RMSprop(params=self.model.parameters(), lr=lr)
    def load_model_optimizer_weights(self):
        """
        Load the best checkpoint for the model (and the optimizer, if any).
        Returns:
            last_improvement (int): epochs since the last improvement as
            recorded in the checkpoint; 0 when no checkpoint exists.
        """
        last_improvement = 0
        if f'Best_{self.model.__class__.__name__}.pth' in os.listdir(GlobalConfig.CHECKPOINT_PATH):
            model_state_dict, optim_state_dict, last_improvement = load(self.model.__class__.__name__)
            self.model.load_state_dict(model_state_dict)
            # Optimizer may be None during evaluation/inference phases.
            if self.optimizer is not None:
                self.optimizer.load_state_dict(optim_state_dict)
            self.logger.info('The model is loaded!')
        return last_improvement
    def train_step(self, epoch: int, grad_clip: float, tf_ratio: float):
        """
        Train the model on a batch.
        Args:
            epoch (int): the epoch number.
            grad_clip (float): the value beyond which we clip gradients in order avoid exploding gradients.
            tf_ratio (float): the teacher forcing ratio. Must be in [0, 1.0]
        Returns:
            loss (float): the validation loss.
            acc (float): the validation top-5 accuracy.
        """
        loss_tracker, acc_tracker = AverageMeter(), AverageMeter()
        self.model.train()
        progress_bar = tqdm.tqdm(enumerate(self.train_iterator), total=len(self.train_iterator))
        for i, data in progress_bar:
            # Forward prop.
            logits, sorted_dest_sequences, sorted_decode_lengths, sorted_indices = self.model(*data.src, *data.dest,
                                                                                             tf_ratio=tf_ratio)
            # Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos>
            sorted_dest_sequences = sorted_dest_sequences[1:, :]
            # Remove paddings
            logits = nn.utils.rnn.pack_padded_sequence(logits, sorted_decode_lengths).data
            sorted_dest_sequences = nn.utils.rnn.pack_padded_sequence(sorted_dest_sequences, sorted_decode_lengths).data
            # Calculate loss
            loss = self.criterion(logits, sorted_dest_sequences)
            # Back prop.
            self.optimizer.zero_grad()
            loss.backward()
            # Clip gradients
            if grad_clip is not None:
                clip_gradient(self.optimizer, grad_clip)
            # Update weights
            self.optimizer.step()
            # Track metrics
            # (weighted by the total number of decoded tokens in the batch)
            loss_tracker.update(loss.item(), sum(sorted_decode_lengths))
            acc_tracker.update(accuracy(logits, sorted_dest_sequences, top_k=5), sum(sorted_decode_lengths))
            # Update progressbar description
            progress_bar.set_description(
                f'Epoch: {epoch + 1:03d} - loss: {loss_tracker.average:.3f} - acc: {acc_tracker.average:.3f}%')
        return loss_tracker.average, acc_tracker.average
    def validate(self, epoch: int):
        """
        Validate the model for one epoch (a full pass over `self.valid_iterator`),
        with teacher forcing disabled (tf_ratio=0).
        Args:
            epoch: int
                The epoch number (used only for the progress-bar label).
        Returns:
            loss: float
                The average validation loss.
            acc: float
                The average validation top-5 accuracy.
            bleu-4: float
                The validation BLEU score, computed on greedy (argmax) hypotheses.
        """
        references, hypotheses = [], []
        loss_tracker, acc_tracker = AverageMeter(), AverageMeter()
        self.model.eval()
        with torch.no_grad():
            progress_bar = tqdm.tqdm(enumerate(self.valid_iterator), total=len(self.valid_iterator))
            for i, data in progress_bar:
                # Forward prop.
                logits, sorted_dest_sequences, sorted_decode_lengths, sorted_indices = self.model(*data.src, *data.dest,
                                                                                                 tf_ratio=0.)
                # Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos>
                sorted_dest_sequences = sorted_dest_sequences[1:, :]
                # Remove paddings; keep an unpacked copy of the logits for greedy decoding below
                logits_copy = logits.clone()
                logits = nn.utils.rnn.pack_padded_sequence(logits, sorted_decode_lengths).data
                sorted_dest_sequences = nn.utils.rnn.pack_padded_sequence(sorted_dest_sequences,
                                                                          sorted_decode_lengths).data
                # Calculate loss
                loss = self.criterion(logits, sorted_dest_sequences)
                # Track metrics, weighted by the number of decoded tokens in the batch
                loss_tracker.update(loss.item(), sum(sorted_decode_lengths))
                acc_tracker.update(accuracy(logits, sorted_dest_sequences, top_k=5), sum(sorted_decode_lengths))
                # Update references (ground truth, reordered to match the model's sorting)
                # NOTE(review): only <sos> and <pad> are filtered here; <eos> is kept,
                # unlike evaluate() which strips it too — confirm this is intended for BLEU.
                target_sequences = data.dest[0].t()[sorted_indices]
                for j in range(target_sequences.size(0)):
                    target_sequence = target_sequences[j].tolist()
                    reference = [self.dest_field.vocab.itos[indice] for indice in target_sequence if indice not in (
                        self.dest_field.vocab.stoi[self.dest_field.init_token],
                        self.dest_field.vocab.stoi[self.dest_field.pad_token]
                    )]
                    references.append([reference])
                # Update hypotheses via greedy argmax over the vocabulary dimension
                _, predictions = torch.max(logits_copy, dim=2)
                predictions = predictions.t().tolist()
                for j, p in enumerate(predictions):
                    hypotheses.append([self.dest_field.vocab.itos[indice]
                                       for indice in predictions[j][:sorted_decode_lengths[j]]  # Remove padding
                                       if indice not in (
                                           self.dest_field.vocab.stoi[self.dest_field.init_token],
                                           self.dest_field.vocab.stoi[self.dest_field.pad_token]
                                       )])
                assert len(references) == len(hypotheses)
                # Update progressbar description
                progress_bar.set_description(
                    f'Epoch: {epoch + 1:03d} - val_loss: {loss_tracker.average:.3f}'
                    f' - val_acc: {acc_tracker.average:.3f}%')
            # Calculate BLEU-4 score
            bleu4 = bleu_score(hypotheses, references, max_n=4, weights=[0.25, 0.25, 0.25, 0.25])
            # Display some examples
            # NOTE(review): indices are sampled in [0, num_batches) but references are
            # indexed per sentence, so only early sentences can be shown — confirm intended.
            for i in np.random.choice(len(self.valid_iterator), size=3, replace=False):
                src, dest = ' '.join(references[i][0]), ' '.join(hypotheses[i])
                self.logger.info(f'Ground truth translation: {src}')
                self.logger.info(f'Predicted translation: {dest}')
                self.logger.info('=' * 100)
        return loss_tracker.average, acc_tracker.average, bleu4
def train(self, n_epochs: int, grad_clip: float, tf_ratio: float):
"""
Train the model.
Args:
n_epochs: int
grad_clip: float
tf_ratio: float
Returns:
history: Dict[str, List[float]]
"""
last_improvement = self.load_model_optimizer_weights()
history, best_bleu = {'acc': [], 'loss': [], 'val_acc': [], 'val_loss': [], 'bleu4': []}, 0.
for epoch in range(n_epochs):
if last_improvement == 4: # Stop training if no improvement since last 4 epochs
self.logger.info('Training Finished - The model has stopped improving since last 4 epochs')
break
if last_improvement > 0: # Decay LR if no improvement
adjust_lr(optimizer=self.optimizer, shrink_factor=0.9, verbose=True, logger=self.logger)
loss, acc = self.train_step(epoch=epoch, grad_clip=grad_clip, tf_ratio=tf_ratio) # Train step
val_loss, val_acc, bleu4 = self.validate(epoch=epoch) # Validation step
# Update history dict
history['acc'].append(acc)
history['loss'].append(loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
history['bleu4'].append(bleu4)
# Print BLEU score
text = f'BLEU-4: {bleu4 * 100:.3f}%'
if bleu4 > best_bleu:
best_bleu, last_improvement = bleu4, 0
else:
last_improvement += 1
text += f' - Last improvement since {last_improvement} epoch(s)'
self.logger.info(text)
# Decrease teacher forcing rate
tf_ratio = adjust_tf(tf_ratio=tf_ratio, shrink_factor=0.8, verbose=False)
# Checkpoint
save(model=self.model, optimizer=self.optimizer, last_improvement=last_improvement, bleu4=bleu4,
is_best=bleu4 >= best_bleu)
return history
def evaluate(self, dataset_name: str, beam_size: int, max_len: int, device: torch.device):
"""
Evaluate the model on the test data
Args:
beam_size: int
dataset_name: str
The dataset on which we evaluate the model. Can be valid or test.
max_len: int
device: torch.device
Returns:
hypotheses: List[str]
references: List[str]
sources: List[str]
bleu4: float
pred_logps: List[float]
attention_weights: List[np.array]
"""
if dataset_name not in ['valid', 'test']:
raise ValueError
_ = self.load_model_optimizer_weights()
# TODO
# Use dataset instead of iterator
attention = self.model.__class__.__name__.__contains__('Attention')
references, hypotheses, sources, pred_logps, attention_weights = [], [], [], [], []
self.model.eval()
with torch.no_grad():
iterator = getattr(self, f'{dataset_name}_iterator')
progress_bar = tqdm.tqdm(enumerate(iterator), total=len(iterator))
for i, data in progress_bar:
src_sequences, src_lengths = data.src[0], data.src[1]
dest_sequences, dest_lengths = data.dest[0], data.dest[1]
batch_size = src_sequences.shape[1]
for j in range(batch_size): # We evaluate sentence by sentence
src_sequence = src_sequences[:, j].unsqueeze(1) # [seq_len, 1]
dest_sequence = dest_sequences[:, j].unsqueeze(1) # [seq_len, 1]
src_length, dest_length = src_lengths[j, None], dest_lengths[j, None] # [1,]
# Encoding
enc_outputs, (h_state, c_state) = self.model.encoder(input_sequences=src_sequence,
sequence_lengths=src_length)
# Decoding
if attention:
mask = self.model.create_mask(src_sequence) # [seq_len, 1]
tree = [[Node(
token=torch.LongTensor([self.dest_field.vocab.stoi[self.dest_field.init_token]]).to(device),
states=(h_state, c_state, None)
)]]
for _ in range(max_len):
next_nodes = []
for node in tree[-1]:
if node.eos: # Skip eos token
continue
# Decode
if attention:
logit, (h_state, c_state, attention_weights) = self.model.decoder(
input_word_index=node.token,
h_state=node.states[0].contiguous(),
c_state=node.states[1].contiguous(),
enc_outputs=enc_outputs,
mask=mask
)
else:
logit, (h_state, c_state) = self.model.decoder(input_word_index=node.token,
h_state=node.states[0].contiguous(),
c_state=node.states[1].contiguous())
# logit: [1, vocab_size]
# h_state: [n_layers, 1, hidden_size]
# c_state: [n_layers, 1, hidden_size]
# Get scores
logp = F.log_softmax(logit, dim=1).squeeze(dim=0) # [vocab_size]
# Get top k tokens & logps
topk_logps, topk_tokens = torch.topk(logp, beam_size)
for k in range(beam_size):
next_nodes.append(Node(
token=topk_tokens[k, None], states=(
h_state, c_state, attention_weights if attention else None),
logp=topk_logps[k, None].cpu().item(), parent=node,
eos=topk_tokens[k].cpu().item() == self.dest_field.vocab[self.dest_field.eos_token]
))
if len(next_nodes) == 0:
break
# Sort next_nodes to get the best
next_nodes = sorted(next_nodes, key=lambda _node: _node.logps, reverse=True)
# Update the tree
tree.append(next_nodes[:beam_size])
# Find the best path of the tree
best_path = find_best_path(tree)
# Get the translation
pred_translated = [*map(lambda _node: self.dest_field.vocab.itos[_node.token], best_path)]
pred_translated = [*filter(lambda word: word not in [
self.dest_field.init_token, self.dest_field.eos_token
], pred_translated[::-1])]
# Update hypotheses
hypotheses.append(pred_translated)
# Update pred logps
pred_logps.append(sum([*map(lambda _node: _node.logps, best_path)]))
# Update attention weights
if attention:
attention_weights.append(
torch.cat([*map(lambda _node: _node.states[-1], best_path)], dim=1).cpu().detach().numpy()
)
# Update references
references.append([[
self.dest_field.vocab.itos[indice]
for indice in dest_sequence
if indice not in (
self.dest_field.vocab.stoi[self.dest_field.init_token],
self.dest_field.vocab.stoi[self.dest_field.eos_token],
self.dest_field.vocab.stoi[self.dest_field.pad_token]
)
]])
# Update sources
sources.append([
self.src_field.vocab.itos[indice]
for indice in src_sequence
if indice not in (
self.src_field.vocab.stoi[self.src_field.init_token],
self.src_field.vocab.stoi[self.src_field.eos_token],
self.src_field.vocab.stoi[self.src_field.pad_token]
)
])
# Calculate BLEU-4 score
bleu4 = bleu_score(hypotheses, references, max_n=4, weights=[0.25, 0.25, 0.25, 0.25])
return hypotheses, references, sources, bleu4, pred_logps, attention_weights
def translate(self):
# TODO
raise NotImplementedError
| StarcoderdataPython |
127616 | <filename>css/index.py<gh_stars>0
#declaro una lista
#lista = ["cadena", 1, False, "nombre", 20, [1, 3, 2 , 4]]
#listaMultiple = ["cadena", ["cadena dentro", "de otra cadena"]]
#listaEnteros = [2, 3, 1]
#Insertar en listas
#lista.append(["agregado", "otro mas"])
#print (lista)
# #tuplas
#tupla = (1, "hola", True)
# #diccionarios
#dic = {
# 'Nombre': 'Paco',
# 'Apellido': 'Ocampo'
# }
#Insertar en diccionario
#dic["nuevo"] = value
#mi lista
#lista = ["Nombre", 2, True, "melisa", 19, [0, 2, 4, 6]]
#listaMultiple = ["perros", ["miztin", "saya"]]
#listaEnteros = [1, 3]
#agregar a la lista
>>> miDiccionario =
{
"nombre" : {'primero' : 'sandra', 'segundo' : 'melisa'},
'apellidos' : {'paterno' : 'castillo', 'materno' : 'vargas'},
'edad' : [19]
}
>>> print(miDiccionario
... )
{
'apellidos': {'paterno': 'castillo', 'materno': 'vargas'},
'nombre': {'segundo': 'melisa', 'primero': 'sandra'},
'edad': [19]
}
>>> otroDiccionario = {'hobbies' : ['leer', 'musica', 'perros']}
>>> miDiccionario['hobbies'] = otroDiccionario
>>> print(miDiccionario)
{
'apellidos': {'paterno': 'castillo', 'materno': 'vargas'},
'nombre': {'segundo': 'melisa', 'primero': 'sandra'},
'edad': [19],
'hobbies': {'hobbies': ['leer', 'musica', 'perros']}
}
>>> miDiccionario["direccion"] = {'calle' : 'pedregal', 'numero' : 66}
>>> print(miDiccionario)
{
'apellidos': {'paterno': 'castillo', 'materno': 'vargas'},
'nombre': {'segundo': 'melisa', 'primero': 'sandra'},
'direccion': {'calle': 'pedregal', 'numero': 66},
'edad': [19],
'hobbies': {'hobbies': ['leer', 'musica', 'perros']}
} | StarcoderdataPython |
1631029 | from je_auto_control import size
print(size())
| StarcoderdataPython |
1630075 | <gh_stars>0
import os
import shutil
import pathlib
import warnings
import exdir
from . import exdir_object as exob
from .group import Group
from .. import utils
class File(Group):
    """Exdir file object — the root group of an exdir directory tree."""
    def __init__(self, directory, mode=None, allow_remove=False,
                 name_validation=None, plugins=None, validate_name=None):
        """
        Create or open a directory as an Exdir File.
        Parameters
        ----------
        directory:
            Name of the directory to be opened or created as an Exdir File.
        mode: str, optional
            A file mode string that defines the read/write behavior.
            See open() for information about the different modes.
        allow_remove: bool
            Set to True if you want mode 'w' to remove existing trees if they
            exist. This False by default to avoid removing entire directory
            trees by mistake.
        name_validation: str, function, optional
            Set the validation mode for names.
            Can be a function that takes a name and returns True if the name
            is valid or one of the following built-in validation modes:
            'strict': only allow numbers, lowercase letters, underscore (_)
                and dash (-)
            'simple': allow numbers, lowercase letters, uppercase letters,
                underscore (_) and dash (-), check if any file exists with
                same name in any case.
            'thorough': verify if name is safe on all platforms, check if
                any file exists with same name in any case.
            'none': allows any filename
            The default is 'thorough'.
        plugins: list, optional
            A list of instantiated plugins or modules with a plugins()
            function that returns a list of plugins.
        validate_name: optional, deprecated
            Deprecated alias for ``name_validation``.
        """
        if validate_name is not None:
            warnings.warn("validate_name is deprecated. Use name_validation instead.")
            name_validation = name_validation or validate_name
        directory = pathlib.Path(directory) #.resolve()
        # Ensure the on-disk directory always carries the ".exdir" suffix.
        if directory.suffix != ".exdir":
            directory = directory.with_suffix(directory.suffix + ".exdir")
        mode = mode or 'a'
        # BUG FIX: the original list contained 'a' twice.
        recognized_modes = ['r', 'r+', 'w', 'w-', 'x', 'a']
        if mode not in recognized_modes:
            raise ValueError(
                "IO mode {} not recognized, "
                "mode must be one of {}".format(mode, recognized_modes)
            )
        plugin_manager = exdir.plugin_interface.plugin_interface.Manager(plugins)
        if mode == "r":
            self.io_mode = self.OpenMode.READ_ONLY
        else:
            self.io_mode = self.OpenMode.READ_WRITE
        super(File, self).__init__(
            root_directory=directory,
            parent_path=pathlib.PurePosixPath(""),
            object_name="",
            io_mode=self.io_mode,
            name_validation=name_validation,
            plugin_manager=plugin_manager
        )
        already_exists = directory.exists()
        if already_exists:
            # An existing path must be a valid exdir File directory.
            if not exob.is_nonraw_object_directory(directory):
                raise FileExistsError(
                    "Path '{}' already exists, but is not a valid exdir file.".format(directory)
                )
            # TODO consider extracting this function to avoid cyclic imports
            if self.meta[exob.EXDIR_METANAME][exob.TYPE_METANAME] != exob.FILE_TYPENAME:
                raise FileExistsError(
                    "Path '{}' already exists, but is not a valid exdir file.".format(directory)
                )
        should_create_directory = False
        if mode == "r":
            if not already_exists:
                raise IOError("File " + str(directory) + " does not exist.")
        elif mode == "r+":
            if not already_exists:
                raise IOError("File " + str(directory) + " does not exist.")
        elif mode == "w":
            if already_exists:
                if allow_remove:
                    shutil.rmtree(str(directory))  # NOTE str needed for Python 3.5
                else:
                    raise FileExistsError(
                        "File {} already exists. We won't delete the entire tree "
                        "by default. Add allow_remove=True to override.".format(directory)
                    )
            should_create_directory = True
        elif mode == "w-" or mode == "x":
            if already_exists:
                raise IOError("File " + str(directory) + " already exists.")
            should_create_directory = True
        elif mode == "a":
            if not already_exists:
                should_create_directory = True
        if should_create_directory:
            self.name_validation(directory.parent, directory.name)
            exob._create_object_directory(directory, exob.FILE_TYPENAME)
    def close(self):
        """No-op: exdir data lives directly on the filesystem, so there is
        nothing to flush or release. Provided for h5py API compatibility."""
        pass
    def create_group(self, name):
        """Create a group; a leading '/' in `name` is stripped (root-relative)."""
        path = utils.path.remove_root(name)
        return super().create_group(path)
    def require_group(self, name):
        """Open-or-create a group; a leading '/' in `name` is stripped."""
        path = utils.path.remove_root(name)
        return super().require_group(path)
    def __getitem__(self, name):
        path = utils.path.remove_root(name)
        # The root path refers to the File object itself.
        if len(path.parts) < 1:
            return self
        return super().__getitem__(path)
    def __contains__(self, name):
        path = utils.path.remove_root(name)
        return super().__contains__(path)
| StarcoderdataPython |
3342654 | from flopz.util.integer_representation import representable, build_immediates
def test_build_immediates():
    # Scattering 0xF1C through the two bit-pattern templates should yield the
    # immediate field values 1996 and 3.
    assert(build_immediates(0xF1C, ["[11|4|9:8|10|6|7|3:1|5]", "[5:3]"]) == [1996, 3])
def test_representable():
    # Unsigned 5-bit field with a shift of 2 accepts 12 (12 >> 2 fits).
    assert(representable(12, 5, signed=False, shift=2))
    # Signed 3-bit field accepts -2.
    assert(representable(-2, 3))
    # A 1-bit signed field cannot hold 2.
    assert(representable(2, 1) is False)
    # 13 is rejected with shift=2 — presumably because it is not a multiple of 4.
    assert(representable(13, 5, signed=False, shift=2) is False)
| StarcoderdataPython |
3363018 | from setuptools import __version__, setup
# Guard: `use_scm_version` below requires setuptools >= 41.
if int(__version__.split(".")[0]) < 41:
    raise RuntimeError("setuptools >= 41 required to build")
# The package version is derived from git by setuptools-scm and written into
# src/virtualenv/version.py at build time.
setup(
    use_scm_version={"write_to": "src/virtualenv/version.py", "write_to_template": '__version__ = "{version}"'},
    setup_requires=[
        # this cannot be enabled until https://github.com/pypa/pip/issues/7778 is addressed
        # "setuptools_scm >= 2"
    ],
)
| StarcoderdataPython |
3204665 | <gh_stars>10-100
# https://leetcode.com/problems/exam-room/
#
# algorithms
# Medium (36.69%)
# Total Accepted: 11,241
# Total Submissions: 30,636
# beats 67.77% of python submissions
class ExamRoom(object):
    """Seat students to maximize distance to the closest person (LeetCode 855).

    `self.L` keeps the occupied seat indices sorted ascending, so each `seat`
    call scans adjacent pairs for the widest gap in O(n).
    """
    def __init__(self, N):
        # N seats in a row, numbered 0..N-1; none occupied initially.
        self.N, self.L = N, []
    def seat(self):
        """Seat a student at the position maximizing the distance to the
        closest occupied seat (lowest index wins ties) and return it."""
        import bisect  # local import: the original file never imported bisect
        N, L = self.N, self.L
        if not L:
            res = 0
        else:
            # Best candidate so far: seat 0, at distance L[0] from the first student.
            d, res = L[0], 0
            # Consider the middle of every gap between two occupied seats.
            # BUG FIX: use integer division — the original py2-style `/` yields
            # float distances and float seat indices under Python 3.
            for a, b in zip(L, L[1:]):
                if (b - a) // 2 > d:
                    d, res = (b - a) // 2, (b + a) // 2
            # The rightmost seat is a candidate too.
            if N - 1 - L[-1] > d:
                res = N - 1
        bisect.insort(L, res)
        return res
    def leave(self, p):
        """Free seat p (p is guaranteed to be occupied)."""
        self.L.remove(p)
# Your ExamRoom object will be instantiated and called as such:
# obj = ExamRoom(N)
# param_1 = obj.seat()
# obj.leave(p)
| StarcoderdataPython |
48178 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import dirname, join
from setuptools import setup
import doctest
def test_suite():
    """Collect undervolt's doctests so the setuptools `test_suite` hook can run them."""
    return doctest.DocTestSuite('undervolt')
setup(
name='undervolt',
version='0.2.9',
description='Undervolt Intel CPUs under Linux',
long_description=open(
join(dirname(__file__), 'README.rst')).read(),
url='http://github.com/georgewhewell/undervolt',
author='<NAME>',
author_email='<EMAIL>',
license='GPL',
py_modules=['undervolt'],
test_suite='setup.test_suite',
entry_points={
'console_scripts': [
'undervolt=undervolt:main',
],
},
keywords=['undervolt', 'intel', 'linux'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
| StarcoderdataPython |
1616378 | <reponame>code-intenssive/library-management-system
import os
import sys
from tkinter import messagebox as _msgbox
from tkinter import filedialog
import tkinter as tk
from utils import center_window, get_current_date, show
from tkinter import ttk
from backends import BaseManager
from constants import *
from datetime import datetime
from db import PostgresConnect
def database_setup():
index_view_window = tk.Tk()
index_view_window.withdraw()
create_user_window = tk.Toplevel(master=index_view_window)
create_user_window.wm_withdraw()
create_user_window.title(DATABASE_SETUP_WINDOW_TITLE)
if WIN32:
create_user_window.iconbitmap(WINDOW_ICON_BITMAP_PATH)
container = tk.Canvas(master=create_user_window, bg=DEFAULT_BACKGROUND_COLOR)
database_name = tk.StringVar()
user_full_name = tk.StringVar()
user_name = tk.StringVar()
user_password = <PASSWORD>()
security_question = tk.StringVar()
security_answer = tk.StringVar()
admin_status = tk.BooleanVar()
__show = tk.BooleanVar()
# # label and input box
main_heading_label = tk.Label(container, text='SET UP DATABASE', bg=DEFAULT_BACKGROUND_COLOR, font=HEADER_FONT_2)
main_heading_label.place(relx=.2525, relwidth=.55, rely=.03, relheight=.085)
database_name_label = tk.Label(
container, anchor="e", text='Database name', font=('Comic Scan Ms', 10, 'bold'),
bg=DEFAULT_BACKGROUND_COLOR
)
database_name_label.place(relx=.1, relwidth=.22, rely=.2, relheight=.05)
database_name_entry = tk.Entry(container, textvariable=database_name, font=DEFAULT_FONT)
database_name_entry.place(relx=.35, relwidth=.45, rely=.2, relheight=.05)
database_name_entry.focus_set()
user_full_name_label = tk.Label(
container, anchor="e", text='Full Name', font=('Comic Scan Ms', 10, 'bold'),
bg=DEFAULT_BACKGROUND_COLOR
)
user_full_name_label.place(relx=.1, relwidth=.22, rely=.3, relheight=.05)
user_full_name_entry = tk.Entry(container, textvariable=user_full_name, font=DEFAULT_FONT)
user_full_name_entry.place(relx=.35, relwidth=.45, rely=.3, relheight=.05)
user_name_label = tk.Label(
container, anchor="e", text='Username', font=('Comic Scan Ms', 10, 'bold'),
bg=DEFAULT_BACKGROUND_COLOR
)
user_name_label.place(relx=.1, relwidth=.22, rely=.4, relheight=.05)
user_name_entry = tk.Entry(container, textvariable=user_name, font=DEFAULT_FONT)
user_name_entry.place(relx=.35, relwidth=.45, rely=.4, relheight=.05)
user_password_label = tk.Label(
container, anchor="e", text='Password', font=('Comic Scan Ms', 10, 'bold'),
bg=DEFAULT_BACKGROUND_COLOR
)
user_password_label.place(relx=.1, relwidth=.22, rely=.5, relheight=.05)
user_password_entry = tk.Entry(container, show="*", textvariable=user_password, font=DEFAULT_FONT)
user_password_entry.place(relx=.35, relwidth=.45, rely=.5, relheight=.05)
# show_status_checkbox = tk.Checkbutton(
# container, variable=__show, onvalue=True, offvalue=False,
# fg=DEFAULT_FOREGROUND_COLOR, bg=DEFAULT_BACKGROUND_COLOR,
# command=lambda: show(user_password_entry, __show),
# activebackground=DEFAULT_BACKGROUND_COLOR, activeforeground=DEFAULT_FOREGROUND_COLOR)
# show_status_checkbox.place(relx=.77, rely=.5, relwidth=.05, relheight=.05)
security_question_label = tk.Label(container, anchor="e", text='Question', font=('Comic Scan Ms', 10, 'bold'),
bg=DEFAULT_BACKGROUND_COLOR
)
security_question_label.place(relx=.1, relwidth=.22, rely=.6, relheight=.05)
security_question.set(SECURITY_QUESTIONS[0])
security_question = ttk.Combobox(container, textvariable=security_question, values=SECURITY_QUESTIONS, state="readonly")
security_question.place(relx=.35, relwidth=.45, rely=.6, relheight=.05)
security_answer_label = tk.Label(
container, anchor="e", text='Answer', font=('Comic Scan Ms', 10, 'bold'),
bg=DEFAULT_BACKGROUND_COLOR
)
security_answer_label.place(relx=.1, relwidth=.22, rely=.7, relheight=.05)
security_answer_entry = tk.Entry(container, textvariable=security_answer, font=DEFAULT_FONT)
security_answer_entry.place(relx=.35, relwidth=.45, rely=.7, relheight=.05)
admin_status_text = tk.Label(container, text="Grant admin rights?", bg=DEFAULT_BACKGROUND_COLOR, fg=DEFAULT_FOREGROUND_COLOR)
admin_status_text.place(relx=.54, rely=.8, relwidth=.23)
admin_status_checkbox = tk.Checkbutton(
container, variable=admin_status, onvalue=True, offvalue=False,
fg=DEFAULT_FOREGROUND_COLOR, bg=DEFAULT_BACKGROUND_COLOR,
activebackground=DEFAULT_BACKGROUND_COLOR, activeforeground=DEFAULT_FOREGROUND_COLOR)
admin_status_checkbox.place(relx=.77, rely=.8, relwidth=.05, relheight=.05)
create_user_button = tk.Button(
container, text="Register", font=DEFAULT_FONT, bg=DEFAULT_BACKGROUND_COLOR,
command=lambda: create_user(create_user_window, database_name=database_name.get(), user_name=user_name.get(),
user_full_name=user_full_name.get(), user_password=<PASSWORD>(),admin_status=admin_status.get(),
security_question=security_question.get(), security_answer=security_answer.get()
)
)
create_user_button.place(relx=.35, relwidth=.15, rely=.8, relheight=.07)
container.place(relheight=1, relwidth=1)
center_window(create_user_window, height=400, width=480)
create_user_window.wm_deiconify()
index_view_window.mainloop()
| StarcoderdataPython |
139977 | #!/bin/env python2.7
# Version string reported for the tools below.
VERSION = '2.1.2'
# Entry-point scripts distributed with this package.
PROGRAMS = 'readCounts.py LoH.py RNA2DNAlign.py exonicFilter.py snv_computation.py'
# Package directories bundled alongside the scripts.
INCLUDES = 'common ReadCounts'
if __name__ == '__main__':
    import sys
    # SECURITY: eval() on a command-line argument executes arbitrary code.
    # Intended use is `python <this file> VERSION` to print one of the
    # constants above, but ANY expression passed as argv[1] will be evaluated.
    print(eval(sys.argv[1]))
| StarcoderdataPython |
161447 | #
# Copyright 2019 FMR LLC <<EMAIL>>
#
# SPDX-License-Identifier: MIT
#
"""CLI and library to concurrently execute user-defined commands across AWS accounts.
## Overview
`awsrun` is both a CLI and library to execute commands over one or more AWS
accounts concurrently. Commands are user-defined Python modules that implement a
simple interface to abstract away the complications of obtaining credentials for
Boto3 sessions - especially when using SAML authentication and/or cross-account
access.
### CLI Usage
The awsrun CLI command is documented extensively on the `awsrun.cli` page. It
includes both a user guide as well as a reference guide on the use of the CLI
command, its command line options, use of the account loader and credential
plug-ins, as well as the syntax of the configuration file.
### Library Usage
Not only is awsrun a CLI, but it is, first and foremost, a Python package that
can be used in other Python libraries and scripts. The package contains
extensive documentation on its use. Each submodule contains an overview of the
module and how to use it, which is then followed by standard module docs for
classes and methods. The available [submodules](#header-submodules) can be found
at the bottom of this page. Of particular interest to library users will be the
following submodules:
`awsrun.runner`
: The core module to execute a command across one or more accounts. You will
find the `awsrun.runner.AccountRunner` and `awsrun.runner.Command` classes
defined in this module. Build your own commands by subclassing the base class.
See the [User-Defined Commmands](#user-defined-commands) next for more
information.
`awsrun.session`
: Contains the definition of the `awsrun.session.SessionProvider`, which is used
to provide Boto3 sessions pre-loaded with credentials. Included are several
built-in implementations such as `awsrun.session.aws.CredsViaProfile`,
`awsrun.session.aws.CredsViaSAML`, and `awsrun.session.aws.CredsViaCrossAccount`.
This module can be used outside of awsurn in other scripts. The module
documentation includes a user guide on how to do so.
### User-Defined Commands
To get the most benefit from awsrun, one typically writes their own used-defined
commands. Please refer to the `awsrun.commands` page for an extensive user guide
on building commands. In summary, a command is nothing more than a single Python
file that contains a subclass of `awsrun.runner.Command`. After the command has
been written, it must be added to the awsrun command path using the `--cmd-path`
[CLI flag](cli.html#options) or `cmd-path` option in the awsrun [configuration
file](cli.html#configuration_1).
### User-Defined Plug-ins
In addition to writing your own user-defined commands, you can write your own
account loader plug-ins as well as credential loader plug-ins. The following are
the high-level steps involved in writing your own plug-ins:
1. Subclass `awsrun.plugmgr.Plugin`. Be sure to read the class and module
documentation for details on how the CLI loads your plug-in.
2. Add an `__init__` method to register command line flags and configuration
options you want to make available to the end user. Be sure to call the
superclass's `__init__` method as well.
3. Provide an implementation for `awsrun.plugmgr.Plugin.instantiate`, which must
return an instance of either `awsrun.acctload.AccountLoader` or
`awsrun.session.SessionProvider` depending on whether you are writing an
account loader or a credential loader.
4. Provide an implementation for your account loader or credential loader
returned in step 3. Refer to the `awsrun.acctload.AccountLoader` and
`awsrun.session.SessionProvider` for the methods that must be implemented.
It is recommended that you review the existing plug-ins included in awsrun for
additional guidance on how to build your own.
## Future Plans
Prior to open-sourcing awsrun, the codebase was refactored to support the use of
other cloud service providers. This section includes the implementation details
as well as a high-level roadmap of future enhancements.
### Other CSPs
Other Cloud Service Providers (CSPs) aside from AWS and Azure can be supported.
The name of the installed CLI script is used to determine which CSP is being
used. For example, if the CLI has been installed as `awsrun`, the CSP is `aws`.
If the CLI has been installed as `azurerun`, the CSP is `azure`. The name of the
CSP dictates the following:
- The user configuration file is loaded from `$HOME/.csprun.yaml`, where `csp`
is the name of the CSP. This allows users to have CSP-specific configuration
files.
- The environment variable used to select an alternate path for the
configuration file is `CSPRUN_CONFIG`, where `CSP` is the name of the CSP.
This allows users to have multiple environment variables set for different
CSPs.
- The default command path is set to `awsrun.commands.csp`, where `csp` is the
name of the CSP. All of the included CSP commands are isolated in modules
dedicated to the CSP. This prevents commands for a different CSP from being
displayed on the command line when a user lists the available commands.
- The default credential loader plug-in is `awsrun.plugins.creds.csp.Default`,
where `csp` is the name of the CSP. Providing credentials to commands is done
via a credential loader. When none has been specified in the configuration
file, awsrun must default to a sane choice for a CSP.
### Roadmap
- Add tests for each module (only a handful have been done so far). PyTest is
the framework used in awsrun. See the tests/ directory which contains the
directories for unit and integration tests.
"""
name = "awsrun"
__version__ = "2.3.1"
| StarcoderdataPython |
3370032 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import kl_divergence, Normal
use_cuda=True
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
# Reconstruction loss: binary cross-entropy between reconstruction and target,
# plus a beta-weighted KL divergence between the approximate posterior
# `distribution` and a standard-normal prior (beta-VAE style).
# Returns (total_loss, CE, KLD).
def recon_loss_function(recon, target, distribution, step, beta=1):
    # NOTE(review): `step` is accepted but never used — presumably intended for
    # KL annealing; confirm against callers before removing.
    CE = F.binary_cross_entropy(
        recon.view(-1, recon.size(-1)),
        target.view(-1, recon.size(-1)),
        reduction='mean')
    # Standard-normal prior with the same shape as the posterior; relies on the
    # module-level `device` global.
    normal = Normal(
        torch.zeros(distribution.mean.size()).to(device),
        torch.ones(distribution.stddev.size()).to(device))
    KLD = kl_divergence(distribution, normal).mean()
    return CE + beta * KLD, CE, KLD
def classification_loss(recon, label, classifier):
    """Cross-entropy classification loss of ``classifier`` applied to ``recon``.

    Args:
        recon: batch fed to the classifier (e.g. a reconstruction).
        label: ground-truth class indices for the batch.
        classifier: callable mapping ``recon`` to class logits.

    Returns:
        Scalar cross-entropy loss tensor.
    """
    logits = classifier(recon)
    return nn.CrossEntropyLoss()(logits, label)
'''
Reference to the paper Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks at https://github.com/aamir-mustafa/pcl-adversarial-defense
'''
# proximity loss for the z
class Proximity(nn.Module):
    """Proximity (center) loss on latent vectors z.

    Maintains one learnable center per class and penalizes the Euclidean
    distance between each sample and the center of its own class
    (Mustafa et al., "Adversarial Defense by Restricting the Hidden Space
    of Deep Neural Networks").
    """
    def __init__(self, args):
        super(Proximity, self).__init__()
        self.num_classes = args.num_classes
        self.z_dim = args.z_dim
        self.use_gpu = args.use_gpu
        self.centers = torch.randn(self.num_classes, self.z_dim)
        self.classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            self.centers = self.centers.to(device)
            self.classes = self.classes.to(device)
        # Centers are trainable parameters.
        self.centers = nn.Parameter(self.centers)
    def forward(self, x, labels):
        """Mean Euclidean distance from each sample in x to its class center.

        Args:
            x: [batch_size, z_dim] latent vectors.
            labels: [batch_size] integer class labels.
        Returns:
            Scalar loss tensor.
        """
        # Squared pairwise distances ||x_i - c_j||^2 via the expansion
        # ||x||^2 + ||c||^2 - 2 x.c  ->  distmat: [batch_size, num_classes]
        batch_size = x.size(0)
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).T
        distmat.addmm_(x, self.centers.T, beta=1, alpha=-2)
        # Boolean mask selecting each sample's own class column.
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(self.classes.expand(batch_size, self.num_classes))
        # Collect the distance to the correct-class center for each sample.
        dist = list()
        for i in range(batch_size):
            value = distmat[i][mask[i]]
            # BUG FIX: clamp BEFORE sqrt. The squared distances can come out
            # slightly negative due to floating-point cancellation, and
            # sqrt(negative) produces NaN which the later clamp cannot repair.
            value = torch.sqrt(value.clamp(min=0))
            value = value.clamp(min=1e-12, max=1e+12)
            dist.append(value)
        losses = torch.cat(dist)
        loss = losses.mean()
        return loss
# distance loss for the z
class Distance(nn.Module):
    """Distance (separation) loss on latent vectors z.

    Counterpart of Proximity: measures the mean Euclidean distance from each
    sample to the centers of all *other* classes (typically maximized, or
    subtracted from the objective, to push classes apart).
    """
    def __init__(self, args):
        super(Distance, self).__init__()
        self.num_classes = args.num_classes
        self.z_dim = args.z_dim
        self.use_gpu = args.use_gpu
        self.centers = torch.randn(self.num_classes, self.z_dim)
        self.classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            self.centers = self.centers.to(device)
            self.classes = self.classes.to(device)
        # Centers are trainable parameters.
        self.centers = nn.Parameter(self.centers)
    def forward(self, x, labels):
        """Mean Euclidean distance from each sample in x to every wrong-class center.

        Args:
            x: [batch_size, z_dim] latent vectors.
            labels: [batch_size] integer class labels.
        Returns:
            Scalar loss tensor.
        """
        # Squared pairwise distances ||x_i - c_j||^2 via the expansion
        # ||x||^2 + ||c||^2 - 2 x.c  ->  distmat: [batch_size, num_classes]
        batch_size = x.size(0)
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).T
        distmat.addmm_(x, self.centers.T, beta=1, alpha=-2)
        # Boolean mask selecting each sample's own class column.
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(self.classes.expand(batch_size, self.num_classes))
        # Collect distances to all *other* class centers for each sample.
        dist = list()
        for i in range(batch_size):
            # `~mask[i]` selects the wrong-class columns (the original built the
            # same inverted mask through an int8 round-trip).
            value = distmat[i][~mask[i]]
            # BUG FIX: clamp BEFORE sqrt. Squared distances can come out slightly
            # negative due to floating-point cancellation, and sqrt(negative)
            # produces NaN which the later clamp cannot repair.
            value = torch.sqrt(value.clamp(min=0))
            value = value.clamp(min=1e-8, max=1e+8)  # for numerical stability
            dist.append(value)
        losses = torch.cat(dist)
        loss = losses.mean()
        return loss
1739617 | <gh_stars>10-100
import itertools
import numpy as np
from numpy.linalg import inv
from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
assert_array_equal, assert_equal)
from scipy.spatial.transform import Rotation
from tadataka.camera import CameraParameters
from tadataka.matrix import (
solve_linear, motion_matrix, inv_motion_matrix, get_rotation_translation,
decompose_essential, estimate_fundamental, fundamental_to_essential,
to_homogeneous, from_homogeneous, calc_relative_transform)
from tadataka.projection import PerspectiveProjection
from tadataka.rigid_transform import transform
from tadataka.so3 import tangent_so3
from tests.utils import random_rotation_matrix
def test_solve_linear():
    """solve_linear should return an 8-vector lying in the null space of A."""
    # arbitrary 3x8 matrix with a nontrivial null space
    coeffs = np.array(
        [[7, 3, 6, 7, 4, 3, 7, 2],
         [0, 1, 5, 2, 9, 5, 9, 7],
         [7, 5, 2, 3, 4, 1, 4, 3]]
    )
    solution = solve_linear(coeffs)
    assert_equal(solution.shape, (8,))
    assert_array_almost_equal(coeffs @ solution, np.zeros(3))
def test_to_homogeneous():
    """A homogeneous coordinate of 1 is appended for 2D and 1D inputs."""
    batch = np.array([[2, 3], [4, 5]], dtype=np.float64)
    assert_array_equal(to_homogeneous(batch), [[2, 3, 1], [4, 5, 1]])

    single = np.array([2, 3], dtype=np.float64)
    assert_array_equal(to_homogeneous(single), [2, 3, 1])
def test_from_homogeneous():
    """The homogeneous coordinate is dropped for 1D and 2D inputs."""
    single = np.array([2, 3, 1])
    assert_array_equal(from_homogeneous(single), [2, 3])

    batch = np.array([[2, 3, 1], [3, 4, 1]])
    assert_array_equal(from_homogeneous(batch), [[2, 3], [3, 4]])
def test_motion_matrix():
    """motion_matrix stacks R and t into a 4x4 homogeneous transform."""
    rotation = np.arange(9).reshape(3, 3)
    translation = np.array([9, 10, 11])

    expected = np.array([
        [0, 1, 2, 9],
        [3, 4, 5, 10],
        [6, 7, 8, 11],
        [0, 0, 0, 1],
    ])
    assert_array_equal(motion_matrix(rotation, translation), expected)
def test_inv_motion_matirx():  # (sic) name kept so the test stays discoverable
    """inv_motion_matrix agrees with a generic 4x4 matrix inverse."""
    pose = motion_matrix(random_rotation_matrix(3),
                         np.random.uniform(-1, 1, 3))
    assert_array_almost_equal(inv_motion_matrix(pose), inv(pose))
def test_calc_relative_transform():
    """The relative transform satisfies T_wa @ T_ab == T_wb."""
    pose_wa = motion_matrix(random_rotation_matrix(3),
                            np.random.uniform(-1, 1, 3))
    pose_wb = motion_matrix(random_rotation_matrix(3),
                            np.random.uniform(-1, 1, 3))

    pose_ab = calc_relative_transform(pose_wa, pose_wb)
    assert_array_almost_equal(pose_wa @ pose_ab, pose_wb)
def test_get_rotation_translation():
    """get_rotation_translation recovers the exact R and t used to build T."""
    rotation = random_rotation_matrix(3)
    translation = np.random.uniform(-1, 1, 3)

    recovered_r, recovered_t = get_rotation_translation(
        motion_matrix(rotation, translation))
    assert_array_equal(recovered_r, rotation)
    assert_array_equal(recovered_t, translation)
def test_estimate_fundamental():
    """The estimated F satisfies the epipolar constraint for every
    correspondence, and the derived E has essential-matrix properties."""
    cam = CameraParameters(
        focal_length=[0.8, 1.2],
        offset=[0.8, 0.2]
    )
    proj = PerspectiveProjection(cam)

    # random ground-truth motion and 3D point cloud
    rot = random_rotation_matrix(3)
    trans = np.random.uniform(-10, 10, 3)
    world_points = np.random.uniform(-10, 10, (10, 3))

    # observations of the points in both views
    obs0 = proj.compute(world_points)
    obs1 = proj.compute(transform(rot, trans, world_points))

    K = cam.matrix
    K_inv = inv(K)

    F = estimate_fundamental(obs0, obs1)
    E = fundamental_to_essential(F, K)

    for p0, p1 in zip(obs0, obs1):
        x0 = np.append(p0, 1)
        x1 = np.append(p1, 1)
        # epipolar constraint in pixel coordinates: x1^T F x0 == 0
        assert_almost_equal(x1 @ F @ x0, 0)

        # same constraint in normalized camera coordinates: y1^T E y0 == 0
        y0 = K_inv @ x0
        y1 = K_inv @ x1
        assert_almost_equal(y1 @ E @ y0, 0)

        # properties of the essential matrix
        assert_almost_equal(np.linalg.det(E), 0)
        assert_array_almost_equal(
            2 * (E @ E.T @ E) - np.trace(E @ E.T) * E,
            np.zeros((3, 3))
        )
def to_essential(R, t):
    """Build the essential matrix [t]x @ R from a rotation and translation."""
    return tangent_so3(t) @ R
def test_fundamental_to_essential():
    """Converting F back with both camera matrices recovers the true E."""
    rotation = random_rotation_matrix(3)
    translation = np.random.uniform(-10, 10, 3)
    K0 = CameraParameters(focal_length=[0.8, 1.2], offset=[0.8, -0.2]).matrix
    K1 = CameraParameters(focal_length=[0.7, 0.9], offset=[-1.0, 0.1]).matrix

    E_true = to_essential(rotation, translation)
    F = inv(K1).T @ E_true @ inv(K0)

    assert_array_almost_equal(E_true, fundamental_to_essential(F, K0, K1))
def test_decompose_essential():
    """For many random motions, decompose_essential yields two proper
    rotations and a translation pair consistent with the true motion."""
    def test(R_true, t_true):
        # skew matrix corresponding to t
        S_true = tangent_so3(t_true)
        E_true = np.dot(R_true, S_true)
        R1, R2, t1, t2 = decompose_essential(E_true)
        # t1 = -t2, R.T * t1 is parallel to t_true
        assert_array_almost_equal(t1, -t2)
        assert_array_almost_equal(np.cross(np.dot(R1.T, t1), t_true),
                                  np.zeros(3))
        assert_array_almost_equal(np.cross(np.dot(R2.T, t1), t_true),
                                  np.zeros(3))
        # make sure that both of R1 and R2 are rotation matrices
        assert_array_almost_equal(np.dot(R1.T, R1), np.identity(3))
        assert_array_almost_equal(np.dot(R2.T, R2), np.identity(3))
        assert_almost_equal(np.linalg.det(R1), 1.)
        assert_almost_equal(np.linalg.det(R2), 1.)
    # exercise the decomposition over N random rotations x N translations
    N = 10
    angles = np.random.uniform(-np.pi, np.pi, (N, 3))
    rotations = Rotation.from_euler('xyz', angles).as_matrix()
    translations = np.random.uniform(-10, 10, (N, 3))
    for R, t in itertools.product(rotations, translations):
        test(R, t)
| StarcoderdataPython |
#def divisor is from:
#https://www.w3resource.com/python-exercises/basic/python-basic-1-exercise-24.php
def divisor(n):
    """Return the number of positive divisors of n.

    The original version recomputed the same count n times inside a loop
    (accidental O(n^2)) and raised NameError for n == 0; this computes it
    once and returns 0 for n == 0.
    """
    return len([d for d in range(1, n + 1) if not n % d])
# Read integers from stdin until 20 positive values have been collected,
# then report the value with the most divisors as "value divisor_count".
entries = []
collected = 0
while collected < 20:
    candidate = int(input())
    if candidate > 0:
        entries.append([divisor(candidate), candidate])
        collected += 1
entries.sort()
best = entries[-1]
reversed_pair = list(reversed(best))
print(*reversed_pair, sep=" ")
| StarcoderdataPython |
3215931 | <filename>opensearch_stac_adapter/models/search.py
from stac_pydantic.api import Search
from typing import Optional
from stac_pydantic.api.extensions.fields import FieldsExtension
class AdaptedSearch(Search):
    """Search request model extended with adapter-specific optional fields.

    Builds on stac-pydantic's ``Search`` model (per the opensearch-stac-adapter
    module this file belongs to).
    """
    # Opaque pagination token returned by a previous search response.
    token: Optional[str] = None
    # Fields-extension payload selecting which item fields to include/exclude.
    # NOTE(review): the STAC API fields extension conventionally names this
    # attribute "fields" — confirm "field" (singular) is intentional.
    field: Optional[FieldsExtension] = None
1726400 | <reponame>perphyyoung/python-charts
import matplotlib.pyplot as plt
import numpy as np
# Sample x from 0.0 (inclusive) to 5.0 (exclusive) in steps of 0.2.
x = np.arange(0.0, 5.0, 0.2)
# red dashes (y = x) and black solids (y = x^1.5)
plt.plot(x, x, 'r--', x, x**1.5, 'k-')
# blue squares (y = x^2) and green triangles (y = x^3)
plt.plot(x, x**2, 'bs', x, x**3, 'g^')
plt.show()
| StarcoderdataPython |
173722 | import jwt
import datetime
import os
import requests
from flask import jsonify, request, make_response
from flask_restful import Resource
from sqlalchemy.orm.exc import NoResultFound
from ...api.controllers import format_response
from ..models import OAuthClient, OAuthToken
from ...api.models import User
from ... import db
class Authenticate(Resource):
    """Validates the user through one of the OAuth providers, registers them,
    and generates an API access token."""

    def post(self, handle):
        """Exchange a GitHub OAuth authorization code for an API JWT.

        Args:
            handle: identifier of the OAuth provider client stored in the DB.

        Returns:
            JSON response with a ``token`` field ("Bearer <jwt>") on success,
            or an error response with the appropriate status code.
        """
        # Get provider authorization code
        auth_code = request.form.get("auth_code")
        if not auth_code:
            return format_response("missing authorization code", 400)

        # Get request state (round-tripped through the OAuth flow)
        auth_state = request.form.get("auth_state")
        if not auth_state:
            return format_response("missing request state", 400)

        # Retrieve provider client data from db
        oauth_client = OAuthClient.query.filter(OAuthClient.handle == handle).first()
        if not oauth_client:
            return format_response("client not found", 404)

        # Exchange the authorization code for a provider access token
        token_response = requests.post(
            "https://github.com/login/oauth/access_token",
            data={
                "client_id": oauth_client.id,
                "client_secret": oauth_client.secret,
                "code": auth_code,
                "state": auth_state
            },
            headers={
                "Accept": "application/json"
            }
        ).json()

        if not token_response:
            return format_response("platform connection error", 500)

        error = token_response.get("error_description")
        if error:
            return format_response(error, 400)

        token = token_response.get("access_token")
        schema = token_response.get("token_type")

        # Fetch the user's email addresses from GitHub with the new token
        email_response = requests.get(
            "https://api.github.com/user/emails",
            headers={"Authorization": '{schema} {token}'.format(schema=schema, token=token)},
        ).json()

        # Try to retrieve primary email from github
        try:
            email = [entry.get("email") for entry in email_response if entry.get("primary") == True][0]
        except IndexError:
            return format_response("not email set with oauth provider", 500)
        except Exception:
            return format_response("unknow error", 500)

        # Try to recover user using the email; register a new one otherwise
        try:
            user = User.query.filter(User.email == email).one()
        except NoResultFound:
            user = User(email)
            db.session.add(user)

        # If token found, update information, create a new one otherwise
        try:
            oauth_token = OAuthToken.query.filter(OAuthToken.user_id == user.id).one()
            oauth_token.provider = handle
            oauth_token.token = token
        except NoResultFound:
            oauth_token = OAuthToken(handle, token, user.id)
            db.session.add(oauth_token)

        db.session.commit()

        # API access Token payload
        jwt_payload = {
            "uid": str(user.id),
            "eml": user.email,
            "adm": user.admin,
            "uct": str(user.created),
            "iat": datetime.datetime.utcnow(),
            "exp": datetime.datetime.utcnow() + datetime.timedelta(hours=24),
        }

        # Generate token.
        # NOTE(review): .decode() assumes PyJWT < 2.0, where encode() returns
        # bytes; PyJWT >= 2.0 returns str — confirm the pinned version.
        jwt_token = jwt.encode(jwt_payload, os.environ["SECRET_KEY"], algorithm="HS256").decode("utf-8")
        # BUGFIX: previously this formatted the provider OAuth token into the
        # Bearer string ("Bearer {token}"), discarding the freshly signed JWT
        # computed on the line above.
        jwt_token = "Bearer {}".format(jwt_token)

        return make_response(
            jsonify({
                "token": jwt_token
            }),
            200
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.