hexsha
stringlengths
40
40
size
int64
5
2.06M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
248
max_stars_repo_name
stringlengths
5
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
248
max_issues_repo_name
stringlengths
5
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringdate
2015-01-01 00:00:47
2022-03-31 23:42:18
max_issues_repo_issues_event_max_datetime
stringdate
2015-01-01 17:43:30
2022-03-31 23:59:58
max_forks_repo_path
stringlengths
3
248
max_forks_repo_name
stringlengths
5
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
5
2.06M
avg_line_length
float64
1
1.02M
max_line_length
int64
3
1.03M
alphanum_fraction
float64
0
1
036543a1fbcdcc35bf430e0b5d4150196450f6d6
4,910
py
Python
mkpy3/mkpy3_finder_chart_survey_fits_image_get_v1.py
KenMighell/mkpy3
598126136b43fa93bc4aded5db65a1251d60a9ba
[ "MIT" ]
null
null
null
mkpy3/mkpy3_finder_chart_survey_fits_image_get_v1.py
KenMighell/mkpy3
598126136b43fa93bc4aded5db65a1251d60a9ba
[ "MIT" ]
null
null
null
mkpy3/mkpy3_finder_chart_survey_fits_image_get_v1.py
KenMighell/mkpy3
598126136b43fa93bc4aded5db65a1251d60a9ba
[ "MIT" ]
1
2020-11-01T18:37:53.000Z
2020-11-01T18:37:53.000Z
#!/usr/bin/env python3
# file://mkpy3_finder_chart_survey_fits_image_get_v1.py
# Kenneth Mighell
# SETI Institute
# =============================================================================


def mkpy3_finder_chart_survey_fits_image_get_v1(
    ra_deg=None,
    dec_deg=None,
    radius_arcmin=None,
    survey=None,
    cframe=None,
    verbose=None,
):
    """
    Function: mkpy3_finder_chart_survey_fits_image_get_v1()

    Purpose: Gets sky survey image data around a position on the sky.

    Parameters
    ----------
    ra_deg : float (optional)
        right ascencsion [deg]
    dec_deg : float (optional)
        declination [deg]
    radius_arcmin : float (optional)
        radius (halfwidth and halfheight of image) [arcmin]
    survey : string (optional) [e.g., '2MASS-J', 'DSS2 Red', etc.]
        survey string name
    cframe : str (optional)
        coordinate frame name [e.g., 'fk5', 'icrs', etc.]
    verbose : bool (optional)
        if True, print extra information

    Returns
    -------
    hdu : Header/Data Unit (HDU) of the survey FITS file
    hdr : header associated with hdu
    data : data associated with hdu
    wcs : World Coordinate System from hdu
    cframe : coordinate frame of the survey data

    Kenneth Mighell
    SETI Institute
    """
    # NOTE(review): this function performs a network query against the
    # SkyView service via astroquery — it needs internet access to succeed.
    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from astroquery.skyview import SkyView
    from astropy.wcs import WCS

    #
    # Defaults point at Kepler-93b so the function is runnable stand-alone.
    if ra_deg is None:
        ra_deg = 291.41829  # Kepler-93b
    if dec_deg is None:
        dec_deg = 38.67236  # Kepler-93b
    if radius_arcmin is None:
        radius_arcmin = 1.99
    if survey is None:
        survey = "2MASS-J"  # alternate: 'DSS2 Red'
    # ^--- to see all surveys: astroquery.skyview.SkyView.list_surveys()
    if cframe is None:
        cframe = "fk5"  # N.B.: '2MASS-J' uses 'fk5'
    if verbose is None:
        verbose = False
    if verbose:
        print(ra_deg, "=ra_deg")
        print(dec_deg, "=dec_deg")
        print(radius_arcmin, "=radius_arcmin")
        print("'%s' =survey" % (survey))
        print("'%s' =cframe" % (cframe))
        print(verbose, "=verbose")
        print()
    #
    # sc <--- astropy sky coordinates
    sc = SkyCoord(ra=ra_deg * u.degree, dec=dec_deg * u.degree, frame=cframe)
    # image list
    # assume that the list contains a single image
    imgl = SkyView.get_images(
        position=sc, survey=survey, radius=radius_arcmin * u.arcmin
    )
    #
    # outputs:
    hdu = imgl[0]  # Header/Data Unit of the FITS image
    hdr = hdu[0].header  # header associated with the HDU
    data = hdu[0].data  # data associated with the HDU
    wcs = WCS(hdr)
    # World Coordinate System from the FITS header of the survey image
    #
    return hdu, hdr, data, wcs, cframe


# fed


def xmkpy3_finder_chart_survey_fits_image_get_v1():
    # Self-test / demo driver: downloads a Kepler target pixel file, fetches
    # the matching 2MASS-J survey image, and writes a finder-chart PNG.
    # NOTE(review): requires network access (lightkurve download + SkyView).
    import lightkurve as lk

    lk.log.setLevel("INFO")
    import matplotlib.pyplot as plt
    import astropy.units as u
    from astropy.visualization import ImageNormalize, PercentileInterval, SqrtStretch
    import os
    import ntpath

    # Exoplanet Kelper-138b is "KIC 7603200":
    tpf = lk.search_targetpixelfile(
        target="kepler-138b", mission="kepler", cadence="long", quarter=10
    ).download(quality_bitmask=0)
    print("TPF filename:", ntpath.basename(tpf.path))
    print("TPF dirname: ", os.path.dirname(tpf.path))
    target = "Kepler-138b"
    ra_deg = tpf.ra
    dec_deg = tpf.dec
    # get survey image data
    width_height_arcmin = 3.00
    survey = "2MASS-J"
    (
        survey_hdu,
        survey_hdr,
        survey_data,
        survey_wcs,
        survey_cframe,
    ) = mkpy3_finder_chart_survey_fits_image_get_v1(
        ra_deg, dec_deg, radius_arcmin=width_height_arcmin, survey=survey, verbose=True
    )
    # create a matplotlib figure object
    fig = plt.figure(figsize=(12, 12))
    # create a matplotlib axis object with right ascension and declination axes
    ax = plt.subplot(projection=survey_wcs)
    # stretch the display range so faint sources are visible
    norm = ImageNormalize(
        survey_data, interval=PercentileInterval(99.0), stretch=SqrtStretch()
    )
    ax.imshow(survey_data, origin="lower", norm=norm, cmap="gray_r")
    ax.set_xlabel("Right Ascension (J2000)")
    ax.set_ylabel("Declination (J2000)")
    ax.set_title("")
    plt.suptitle(target)
    # put a yellow circle at the target position
    ax.scatter(
        ra_deg * u.deg,
        dec_deg * u.deg,
        transform=ax.get_transform(survey_cframe),
        s=600,
        edgecolor="yellow",
        facecolor="None",
        lw=3,
        zorder=100,
    )
    pname = "mkpy3_plot.png"
    if pname != "":
        plt.savefig(pname, bbox_inches="tight")
        print(pname, " <--- plot filename has been written! :-)\n")
    # fi
    return None


# fed


# =============================================================================


if __name__ == "__main__":
    xmkpy3_finder_chart_survey_fits_image_get_v1()
# fi
# EOF
26.684783
87
0.627495
0366c6b949b300f8072c9d5d7dfdc2a101c2a39c
1,737
py
Python
marathontcp.py
StevenPG/JMXMarathonDataAggregator
a976edc2ea27255dca36f584923e3a06dbdec8c6
[ "MIT" ]
null
null
null
marathontcp.py
StevenPG/JMXMarathonDataAggregator
a976edc2ea27255dca36f584923e3a06dbdec8c6
[ "MIT" ]
null
null
null
marathontcp.py
StevenPG/JMXMarathonDataAggregator
a976edc2ea27255dca36f584923e3a06dbdec8c6
[ "MIT" ]
null
null
null
""" marathontcp.py Author: Steven Gantz Date: 11/22/2016 These two classes are used as custom TCP Servers and its accompanying handler that defines each request. These class are what forward the data from the preset /metrics endpoints in the scaled marathon instances directly to the TCP servers running from this application. """ # Official Imports import socketserver import urllib.request class MarathonRedirectTCPServer(socketserver.TCPServer): """ TCP Server that takes special extra arguments if needed """ def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True, api_url="Empty Request"): # As per http://stackoverflow.com/questions/15889241/send-a-variable-to-a-tcphandler-in-python self.api_url = api_url socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate=True) class MarathonRedirectTCPHandler(socketserver.BaseRequestHandler): """ Makes a metrics request and forwards to preset ports through the application""" def handle(self): print("Retrieving metrics from http://" + self.server.api_url + "/metrics") # Make a request to the api_url metrics and fwd to page encoded_response = urllib.request.urlopen("http://" + self.server.api_url + "/metrics") # Change encoded response in to simple string header = "HTTP/1.0 200 OK \r\n" content_type = "Content-Type: text/plain\r\n\r\n" text_response = header + content_type + encoded_response.read().decode() # self.request is the TCP socket connected to the client self.request.sendall(text_response.encode()) # Read Response to close request res = self.request.recv(1024)
40.395349
110
0.727691
03672787b107ccc21fb75165c7801c0b958f1461
4,600
py
Python
tests/test_io.py
Laharah/horcrux
68f7c6aad0678b39bae888f0dfeb9d1926501a53
[ "MIT" ]
null
null
null
tests/test_io.py
Laharah/horcrux
68f7c6aad0678b39bae888f0dfeb9d1926501a53
[ "MIT" ]
null
null
null
tests/test_io.py
Laharah/horcrux
68f7c6aad0678b39bae888f0dfeb9d1926501a53
[ "MIT" ]
null
null
null
"""Unit tests for horcrux stream I/O (length-prefixed message framing,
share/stream headers, and data-block read/skip round-trips)."""
import pytest
import io
import random
from copy import deepcopy

from horcrux import io as hio
from horcrux.hrcx_pb2 import StreamBlock
from horcrux.sss import Share, Point


@pytest.fixture()
def hx():
    # Fresh Horcrux backed by an in-memory stream.
    return hio.Horcrux(io.BytesIO())


@pytest.fixture()
def share():
    return Share(b'0123456789abcdef', 2, Point(0, b'123'))


@pytest.fixture()
def two_block_hrcx():
    # Pre-serialized horcrux containing a header and two data blocks.
    return io.BytesIO(b'\x1b\n\x100123456789ABCDEF\x10\x04\x1a\x05\x12\x03123\x08\n\x06'
                      b'566784\x00\x08\x12\x06abcdef\x02\x08\x01\n\x12\x08ghijklmn')


def test_init_horcrux():
    h = hio.Horcrux(io.BytesIO())


def test_horcrux__write_bytes(hx):
    # Messages are written length-prefixed (varint length, then payload).
    hx._write_bytes(b'123')
    assert hx.stream.getvalue() == b'\x03123'


def test_horcurx__read_message_bytes_small(hx):
    hx._write_bytes(b'123')
    hx._write_bytes(b'4567890')
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    m1 = hx._read_message_bytes()
    assert m1 == b'123'
    m2 = hx._read_message_bytes()
    assert m2 == b'4567890'


def test_horcrux__read_message_bytes_large(hx):
    # Exercise multi-byte varint lengths (500 and 4096-byte payloads).
    m1 = bytes(255 for _ in range(500))
    m2 = bytes(random.getrandbits(8) for _ in range(4))
    m3 = bytes(random.getrandbits(8) for _ in range(4096))
    for m in (m1, m2, m3):
        hx._write_bytes(m)
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    assert hx._read_message_bytes() == m1
    assert hx._read_message_bytes() == m2
    assert hx._read_message_bytes() == m3


def test_horcrux_write_data_block(hx):
    _id = 1
    data = b'my data'
    hx.write_data_block(_id, data)
    out = hx.stream.getvalue()
    print(out)
    assert out == b'\x02\x08\x01\t\x12\x07my data'


def test_horcrux_write_share_header(hx, share):
    hx._write_share_header(share)
    stream = hx.stream
    del hx
    stream.seek(0)
    print(stream.getvalue())
    assert stream.getvalue() == b'\x1b\n\x100123456789abcdef\x10\x02\x1a\x05\x12\x03123'


def test_horcrux_write_stream_header(hx):
    header = b'u\x14Op\xa3\x13\x01Jt\xa8'
    # Write once without and once with an encrypted filename.
    hx._write_stream_header(header)
    hx._write_stream_header(header, encrypted_filename=b'testname')
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    h1 = hx._read_message_bytes()
    assert h1 == b'\n\nu\x14Op\xa3\x13\x01Jt\xa8'
    h2 = hx._read_message_bytes()
    assert h2 == b'\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\x08testname'


def test_horcrux_init_write(hx, share):
    cryptoheader = b'u\x14Op\xa3\x13\x01Jt\xa8'
    hx.init_write(share, cryptoheader, encrypted_filename=b'slkfjwnfa;')
    assert hx.hrcx_id == 0
    stream = hx.stream
    del hx
    stream.seek(0)
    headers = stream.getvalue()
    print(headers)
    assert headers == (
        b'\x1b\n\x100123456789abcdef\x10\x02\x1a'
        b'\x05\x12\x03123\x18\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\nslkfjwnfa;')


def test_horcrux_init_read(share):
    # init_read must recover the share/header written by init_write above.
    stream = io.BytesIO(
        b'\x1b\n\x100123456789abcdef\x10\x02\x1a'
        b'\x05\x12\x03123\x18\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\nslkfjwnfa;')
    stream.seek(0)
    hx = hio.Horcrux(stream)
    hx.init_read()
    assert hx.share == share
    assert hx.hrcx_id == 0
    assert hx.encrypted_filename == b'slkfjwnfa;'
    assert hx.next_block_id == None


def test_horcrux_read_block(hx):
    data1 = bytes(random.getrandbits(8) for _ in range(30))
    data2 = bytes(random.getrandbits(8) for _ in range(30))
    hx.write_data_block(33, data1)
    hx.write_data_block(45, data2)
    stream = hx.stream
    stream.seek(0)
    del hx
    hx = hio.Horcrux(stream)
    hx._read_next_block_id()
    _id, d = hx.read_block()
    assert d == data1
    assert _id == 33
    _id, d = hx.read_block()
    assert d == data2
    assert _id == 45


def test_horcrux_skip_block(hx):
    # Skipping the first block should land the reader on the second.
    data1 = bytes(255 for _ in range(30))
    data2 = bytes(255 for _ in range(30))
    hx.write_data_block(33, data1)
    hx.write_data_block(45, data2)
    stream = hx.stream
    stream.seek(0)
    del hx
    hx = hio.Horcrux(stream)
    hx._read_next_block_id()
    hx.skip_block()
    _id, d = hx.read_block()
    assert d == data2
    assert _id == 45


def test_get_horcrux_files(tmpdir, share):
    fn = 'test_horcrux'
    shares = [deepcopy(share) for _ in range(4)]
    crypto_header = b'1234567'
    expected = b'\x1b\n\x100123456789abcdef\x10\x02\x1a\x05\x12\x03123\t\n\x071234567'
    hxs = hio.get_horcrux_files(fn, shares, crypto_header, outdir=tmpdir)
    assert len(hxs) == 4
    for h in hxs:
        h.stream.close()
        with open(h.stream.name, 'rb') as fin:
            assert fin.read() == expected
27.218935
88
0.668696
0369ea60607087cd24210a21d5453a467593c1f0
1,817
py
Python
template/diff.py
Nauja/Entropy
e418a7db68a55f17fb3e6c0c3b5018aed7002d4d
[ "MIT" ]
null
null
null
template/diff.py
Nauja/Entropy
e418a7db68a55f17fb3e6c0c3b5018aed7002d4d
[ "MIT" ]
null
null
null
template/diff.py
Nauja/Entropy
e418a7db68a55f17fb3e6c0c3b5018aed7002d4d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """ A Pandoc filter to create non-code diffs. `add` and `rm` are the classes that can be added to a `Div` or a `Span`. `add` colors the text green, and `rm` colors the text red. For HTML, `add` also underlines the text, and `rm` also strikes out the text. # Example ## `Div` Unchanged portion ::: add New paragraph > Quotes More new paragraphs ::: ## `Span` > The return type is `decltype(`_e_(`m`)`)` [for the first form]{.add}. """ import panflute as pf def action(elem, doc): if not isinstance(elem, pf.Div) and not isinstance(elem, pf.Span): return None color_name = None tag_name = None for cls in elem.classes: color_name = cls + 'color' if cls == 'add': tag_name = 'ins' elif cls == 'rm': tag_name = 'del' if tag_name is None: return None open_tag = pf.RawInline('<{}>'.format(tag_name), 'html') open_color = pf.RawInline('{{\\color{{{}}}'.format(color_name), 'tex') close_color = pf.RawInline('}', 'tex') close_tag = pf.RawInline('</{}>'.format(tag_name), 'html') color = doc.get_metadata(color_name) attributes = {} if color is None else {'style': 'color: #{}'.format(color)} if isinstance(elem, pf.Div): return pf.Div(pf.Plain(open_tag), pf.Plain(open_color), elem, pf.Plain(close_color), pf.Plain(close_tag), attributes=attributes) elif isinstance(elem, pf.Span): return pf.Span(open_tag, open_color, elem, close_color, close_tag, attributes=attributes) if __name__ == '__main__': pf.run_filter(action)
25.236111
79
0.555861
036a6e6b57ea2d5221b7c56f2e175c6cb9c0ca3b
1,304
py
Python
paxful/exceptions.py
tholness/Paxful-API-Wrapper
c66620aa2ef40b97f2794998c63a6bd7504cea3c
[ "MIT" ]
null
null
null
paxful/exceptions.py
tholness/Paxful-API-Wrapper
c66620aa2ef40b97f2794998c63a6bd7504cea3c
[ "MIT" ]
null
null
null
paxful/exceptions.py
tholness/Paxful-API-Wrapper
c66620aa2ef40b97f2794998c63a6bd7504cea3c
[ "MIT" ]
3
2020-08-09T17:02:06.000Z
2021-04-13T17:45:39.000Z
from __future__ import absolute_import, unicode_literals


class PaxfulError(Exception):
    """Base (catch-all) client exception."""


class RequestError(PaxfulError):
    """Raised when an API request to fails.

    :ivar message: Error message.
    :vartype message: str | unicode
    :ivar url: API endpoint.
    :vartype url: str | unicode
    :ivar body: Raw response body from Pax.
    :vartype body: str | unicode
    :ivar headers: Response headers.
    :vartype headers: requests.structures.CaseInsensitiveDict
    :ivar http_code: HTTP status code.
    :vartype http_code: int
    :ivar error_code: Error code from Pax.
    :vartype error_code: int
    :ivar response: Response object.
    :vartype response: requests.Response
    """

    def __init__(self, response, message, error_code=None):
        # Keep the full response around, and mirror its interesting
        # attributes onto the exception for convenient access by callers.
        super(RequestError, self).__init__(message)
        self.response = response
        self.message = message
        self.error_code = error_code
        self.url = response.url
        self.body = response.text
        self.headers = response.headers
        self.http_code = response.status_code


class InvalidCurrencyError(PaxfulError):
    """Raised when an invalid major currency is given."""


class InvalidOrderBookError(PaxfulError):
    """Raised when an invalid order book is given."""
28.977778
61
0.692485
036aded2a5c768db98446d290a146373009c9d4d
382
py
Python
examples/mesoscope_anatomical_high_mag.py
tlambert03/image-demos
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
[ "BSD-3-Clause" ]
null
null
null
examples/mesoscope_anatomical_high_mag.py
tlambert03/image-demos
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
[ "BSD-3-Clause" ]
null
null
null
examples/mesoscope_anatomical_high_mag.py
tlambert03/image-demos
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
[ "BSD-3-Clause" ]
null
null
null
""" Displays anatomical data from the mesoscope """ from skimage.io import imread from napari import Viewer, gui_qt stack = imread('data/mesoscope/anatomical/volume_zoomed.tif') with gui_qt(): # create an empty viewer viewer = Viewer() # add the image layer = viewer.add_image(stack, name='stack', clim=(0.0, 3000.0), clim_range=(0.0, 6000.0), colormap='gray')
22.470588
112
0.698953
036b561ed932ab60b4b97fb19bd96ddc0a940784
246
py
Python
lab_restful/app/models/Paper.py
afish1001/lab_server
c6a2b09834d73078ab52e2965849cd41ba795b4b
[ "MIT" ]
null
null
null
lab_restful/app/models/Paper.py
afish1001/lab_server
c6a2b09834d73078ab52e2965849cd41ba795b4b
[ "MIT" ]
null
null
null
lab_restful/app/models/Paper.py
afish1001/lab_server
c6a2b09834d73078ab52e2965849cd41ba795b4b
[ "MIT" ]
null
null
null
from .. import utils
from ..config import table


class Paper:
    """Model wrapper around the ``paper`` Mongo collection."""

    def __init__(self):
        # Resolve the collection name from the table config, then open a
        # handle to it through the shared mongo connection.
        self.collection = table.paper
        self.conn = utils.mongo.db.get_collection(self.collection)

    def list(self):
        """Not implemented yet."""
        pass


# Module-level singleton shared by the callers of this model.
paper = Paper()
16.4
66
0.642276
036bed92a5a2372689c9a48c62d3c2e337ec2c9b
1,459
py
Python
cogs/logs.py
CoffeeOrg/Coffee
73bf194c3811bb9cf776a0a4db4c6234e471d5ce
[ "MIT" ]
6
2021-02-06T05:43:40.000Z
2021-08-01T22:55:33.000Z
cogs/logs.py
elfw/Coffee
e83868a323084b96b0df3f916090dd17ce34de93
[ "MIT" ]
2
2021-02-06T07:18:10.000Z
2021-02-06T18:42:07.000Z
cogs/logs.py
elfw/Coffee
e83868a323084b96b0df3f916090dd17ce34de93
[ "MIT" ]
10
2021-02-06T03:31:26.000Z
2021-09-22T04:00:23.000Z
import discord
from discord.ext import commands

from utils.database import sqlite, create_tables


class Events(commands.Cog):
    """Relays message delete/edit events to each guild's configured log channel."""

    def __init__(self, bot):
        self.bot = bot
        self.db = sqlite.Database()

    def logs(self, guild_id):
        """Return the log-channel id configured for *guild_id*, or None."""
        data = self.db.fetchrow("SELECT * FROM Logging WHERE guild_id=?", (guild_id,))
        if data:
            return data["logs_id"]
        else:
            return None

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        # BUG FIX: messages deleted in DMs have no guild, so message.guild.id
        # raised AttributeError. Also skip bot authors for consistency with
        # on_message_edit.
        if message.guild is None or message.author.bot:
            return None
        log_channel = self.bot.get_channel(self.logs(message.guild.id))
        if log_channel:
            embed = discord.Embed(
                title="Message Deleted 📝",
                description=f"**Deleted in:** `#{message.channel}`\n**Author:** `{message.author}`\n**Message:** ```{message.content}```",
                color=0x2F3136
            )
            embed.timestamp = message.created_at
            await log_channel.send(embed=embed)

    @commands.Cog.listener()
    async def on_message_edit(self, before, after):
        # Guard DMs (no guild) and bot authors before doing any lookups.
        if before.guild is None or before.author.bot is True:
            return None
        log_channel = self.bot.get_channel(self.logs(before.guild.id))
        if log_channel:
            embed = discord.Embed(
                title="Message Edited 📝",
                description=f"**Edited in:** `#{before.channel}`\n**Author:** `{before.author}`\n**Before:** ```{before.content}```\n**Now:** ```{after.content}```",
                color=0x2F3136
            )
            embed.timestamp = before.created_at
            await log_channel.send(embed=embed)


def setup(bot):
    bot.add_cog(Events(bot))
29.77551
154
0.651131
036c2e095d7edeb0d2e2e4cd895d6ea7fe8b4740
1,679
py
Python
DataProcessor.py
harrys17451/CryptocurrencyPrediction
4833130c59618f0b1d060596fd39a290e9a6ed97
[ "MIT" ]
669
2017-12-31T01:50:30.000Z
2022-03-30T00:01:57.000Z
DataProcessor.py
mildronize/CryptocurrencyPrediction
7ec542bcd6bf960b115638484737f097120badcd
[ "MIT" ]
11
2018-01-08T15:40:55.000Z
2021-11-30T08:00:49.000Z
DataProcessor.py
mildronize/CryptocurrencyPrediction
7ec542bcd6bf960b115638484737f097120badcd
[ "MIT" ]
262
2018-01-01T14:51:13.000Z
2022-03-17T16:59:55.000Z
# coding: utf-8

# Notebook-export script: slices a Bitcoin minute-price series into
# fixed-size input/output windows and stores them in an HDF5 file.

# In[2]:

import pandas as pd
import numpy as np
import h5py

# In[24]:

input_step_size = 50   # length of each model input window (minutes)
output_size = 30       # length of each prediction target window (minutes)
sliding_window = False
file_name = 'bitcoin2012_2017_50_30_prediction.h5'

# In[19]:

df = pd.read_csv('data/bitstampUSD_1-min_data_2012-01-01_to_2017-05-31.csv').dropna().tail(1000000)
df['Datetime'] = pd.to_datetime(df['Timestamp'], unit='s')
df.head()

# In[30]:

prices = df.loc[:, 'Close'].values
# BUG FIX: the time arrays were previously copied from the 'Close' column,
# so input_times/output_times contained prices rather than timestamps.
# Use the raw unix-second 'Timestamp' column (numeric, so h5py can store it).
times = df.loc[:, 'Timestamp'].values
prices.shape

# In[31]:

outputs = []
inputs = []
output_times = []
input_times = []

# Overlapping windows (stride 1) when sliding_window is set, otherwise
# back-to-back windows (stride == input_step_size). The two original loop
# bodies were identical, so they are merged with a variable stride.
stride = 1 if sliding_window else input_step_size
for i in range(0, len(prices) - input_step_size - output_size, stride):
    inputs.append(prices[i:i + input_step_size])
    input_times.append(times[i:i + input_step_size])
    outputs.append(prices[i + input_step_size: i + input_step_size + output_size])
    output_times.append(times[i + input_step_size: i + input_step_size + output_size])

inputs = np.array(inputs)
outputs = np.array(outputs)
output_times = np.array(output_times)
input_times = np.array(input_times)

# In[34]:

with h5py.File(file_name, 'w') as f:
    f.create_dataset("inputs", data=inputs)
    f.create_dataset('outputs', data=outputs)
    f.create_dataset("input_times", data=input_times)
    f.create_dataset('output_times', data=output_times)
23.647887
99
0.705777
036ce936f710a0a2ff80b56468ee08342b668842
776
py
Python
github api example.py
LincT/PythonExamples
a0b61e8c60cd0754f2406b6b72fcd562667c9bb0
[ "MIT" ]
null
null
null
github api example.py
LincT/PythonExamples
a0b61e8c60cd0754f2406b6b72fcd562667c9bb0
[ "MIT" ]
null
null
null
github api example.py
LincT/PythonExamples
a0b61e8c60cd0754f2406b6b72fcd562667c9bb0
[ "MIT" ]
null
null
null
"""Small example of fetching JSON from a web API with requests."""
import requests


def wip():
    """Placeholder — work in progress."""
    print()


def apiToDictionary(url, *args):
    """GET *url* and return the decoded JSON payload as a dict."""
    response = requests.get(url)
    payload = response.json()
    response.close()
    return dict(payload)


def main():
    # Demo calls kept for reference (disabled):
    # docDict = {"text":"592da8d73b39d3e1f54304fedf7456b1", "markdown":"6a4cccf1c66c780e72264a9fbcb9d5fe"}
    # resultDict = apiToDictionary("https://api.github.com/gists/" + docDict.get("markdown"))
    # print(dict(dict(resultDict.get('files')).get('MineCTC: Rules.md')).get('content'))
    # resultDict = apiToDictionary("https://en.wikipedia.org/w/api.php?action=query&titles=Hebrew_alphabet&prop=revisions&rvprop=content&format=json&formatversion=2")
    # print(dict(resultDict["query"]))
    wip()


if __name__ == '__main__':
    main()
29.846154
166
0.693299
036da6e027a7036bf7b62f5adb2656c36fb9601d
325
py
Python
elibrary/migrations/0011_remove_book_tag.py
soma115/wikikracja
7715ca1daa4ca09888e1c7389ed5f8a2df29898b
[ "MIT" ]
7
2016-02-21T17:25:54.000Z
2021-10-09T19:36:10.000Z
elibrary/migrations/0011_remove_book_tag.py
soma115/wikikracja
7715ca1daa4ca09888e1c7389ed5f8a2df29898b
[ "MIT" ]
19
2020-02-11T23:55:01.000Z
2022-03-31T18:11:56.000Z
elibrary/migrations/0011_remove_book_tag.py
soma115/wikikracja
7715ca1daa4ca09888e1c7389ed5f8a2df29898b
[ "MIT" ]
3
2016-01-20T22:34:58.000Z
2020-09-16T07:45:42.000Z
# Generated by Django 3.1.12 on 2021-09-18 12:55

from django.db import migrations


class Migration(migrations.Migration):
    """Drop the obsolete ``tag`` field from the ``Book`` model."""

    dependencies = [('elibrary', '0010_auto_20210918_1434')]

    operations = [migrations.RemoveField(model_name='book', name='tag')]
18.055556
48
0.587692
036e27bede50c08e0bbb36a280baf4f71cc05bf8
15,933
py
Python
tools/test/scenarios/bin/tapiHelper.py
cornell-netlab/onos
19c32ac12d1ac3b58403cc8b763c6a0666702cb6
[ "Apache-2.0" ]
2
2019-04-02T03:48:49.000Z
2021-02-03T20:01:27.000Z
tools/test/scenarios/bin/tapiHelper.py
cornell-netlab/onos
19c32ac12d1ac3b58403cc8b763c6a0666702cb6
[ "Apache-2.0" ]
13
2020-03-04T22:56:25.000Z
2022-03-02T04:24:17.000Z
tools/test/scenarios/bin/tapiHelper.py
cornell-netlab/onos
19c32ac12d1ac3b58403cc8b763c6a0666702cb6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python import requests import json import random from sets import Set # # Creates client-side connectivity json # def tapi_client_input(sip_uuids): create_input = { "tapi-connectivity:input": { "end-point" : [ { "local-id": sip_uuids[0], "service-interface-point": { "service-interface-point-uuid" : sip_uuids[0] } }, { "local-id": sip_uuids[1], "service-interface-point": { "service-interface-point-uuid" : sip_uuids[1] } } ] } } return create_input # # Creates line-side connectivity json # def tapi_line_input(sip_uuids): create_input = { "tapi-connectivity:input" : { "end-point": [ { "layer-protocol-qualifier": "tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC", "role": "UNKNOWN", "local-id": "Src_end_point", "direction": "BIDIRECTIONAL", "service-interface-point": { "service-interface-point-uuid" : sip_uuids[0] }, "protection-role": "WORK", "layer-protocol-name": "PHOTONIC_MEDIA" }, { "direction": "BIDIRECTIONAL", "service-interface-point": { "service-interface-point-uuid": sip_uuids[1] }, "protection-role": "WORK", "layer-protocol-name": "PHOTONIC_MEDIA", "layer-protocol-qualifier": "tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC", "role": "UNKNOWN", "local-id": "Dst_end_point" } ] } } return create_input # # Obtains TAPI context through restconf # def get_context(url_context): resp = requests.get(url_context, auth=('onos', 'rocks')) if resp.status_code != 200: raise Exception('GET {}'.format(resp.status_code)) return resp.json() # # Check if the node is transponder. # True - transponder # False - OLS # def is_transponder_node(node): if len(node["owned-node-edge-point"]) > 0 and "mapped-service-interface-point" in node["owned-node-edge-point"][0]: return True else: return False # # Parse src and dst sip-uuids of specific link from topo. 
# def parse_src_dst(topo, link_index=-1): if link_index == -1: # select a link randomly from all links of topo link_index = random.randint(0, len(topo["link"]) - 1) nep_pair = topo["link"][link_index]["node-edge-point"] assert topo["uuid"] == nep_pair[0]["topology-uuid"] assert topo["uuid"] == nep_pair[1]["topology-uuid"] src_onep, dst_onep = (find_line_onep(nep_pair[0], topo["node"]), find_line_onep(nep_pair[1], topo["node"])) if src_onep is not None and dst_onep is not None: # If the link is between two transponders directly pass elif src_onep is None and dst_onep is None: raise AssertionError("Impossible for that both two ports are OLS port") else: # If one of src_onep and dst_onep is None, then make src_onep not None, # and find a new dst_onep with same connection id. if src_onep is None: src_onep = dst_onep dst_onep = None conn_id = parse_value(src_onep["name"])["odtn-connection-id"] for node in topo["node"]: cep = src_onep["tapi-connectivity:cep-list"]["connection-end-point"] assert len(cep) == 1 if cep[0]["parent-node-edge-point"]["node-uuid"] != node["uuid"] and is_transponder_node(node): # If this node is not the node that includes src_onep, and not a OLS node for onep in node["owned-node-edge-point"]: if parse_value(onep["name"])["odtn-connection-id"] == conn_id: dst_onep = onep break if dst_onep is not None: break src_sip_uuid, dst_sip_uuid = \ (src_onep["mapped-service-interface-point"][0]["service-interface-point-uuid"], dst_onep["mapped-service-interface-point"][0]["service-interface-point-uuid"]) return src_onep, dst_onep, src_sip_uuid, dst_sip_uuid # # Check whether the sip uuid is used in other existed services. # def is_port_used(sip_uuid, conn_context): try: for service in conn_context["connectivity-service"]: for id in [0, 1]: if service["end-point"][id]["service-interface-point"]["service-interface-point-uuid"] == sip_uuid: return True except KeyError: print "There is no line-side service in ONOS now." 
return False # # Requests a connectivity service # def request_connection(url_connectivity, context): # All Context SIPs sips = context["tapi-common:context"]["service-interface-point"] # Sorted Photonic Media SIPs. filter is an iterable esips = list(filter(is_dsr_media, sorted(sips, key=lambda sip: sip["name"][0]["value"]))) endpoints = [esips[0], esips[-1]] sip_uuids = [] for sip in endpoints: sip_uuids.append(sip["uuid"]) for uuid in sip_uuids: print(uuid) create_input_json = json.dumps(tapi_client_input(sip_uuids)) print (create_input_json) headers = {'Content-type': 'application/json'} resp = requests.post(url_connectivity, data=create_input_json, headers=headers, auth=('onos', 'rocks')) if resp.status_code != 200: raise Exception('POST {}'.format(resp.status_code)) return resp # # Filter method used to keep only SIPs that are photonic_media # def is_photonic_media(sip): return sip["layer-protocol-name"] == "PHOTONIC_MEDIA" # # Filter method used to keep only SIPs that are DSR # def is_dsr_media(sip): return sip["layer-protocol-name"] == "DSR" # # Processes the topology to verify the correctness # def process_topology(): # TODO use method to parse topology # Getting the Topology # topology = context["tapi-common:context"]["tapi-topology:topology-context"]["topology"][0] # nodes = topology["node"]; # links = topology["link"]; noop # # Find mapped client-side sip_uuid according to a line-side sip_uuid. # connection-ids of these two owned-node-edge-point should be the same. 
# def find_mapped_client_sip_uuid(line_sip_uuid, nodes): line_node = None line_onep = None for node in nodes: if is_transponder_node(node): for onep in node["owned-node-edge-point"]: if onep["mapped-service-interface-point"][0]["service-interface-point-uuid"] == line_sip_uuid: line_node = node line_onep = onep break if line_node is None: raise AssertionError("Cannot match line-side sip uuid in topology.") conn_id = parse_value(line_onep["name"])["odtn-connection-id"] for onep in line_node["owned-node-edge-point"]: vals = parse_value(onep["name"]) if vals["odtn-connection-id"] == conn_id and vals["odtn-port-type"] == "client": return onep["mapped-service-interface-point"][0]["service-interface-point-uuid"], vals return None # # Create a client-side connection. Firstly, get the context, parsing for SIPs that connect # with each other in line-side; Secondly, issue the request # def create_client_connection(url_context, url_connectivity): headers = {'Content-type': 'application/json'} context = get_context(url_context) # select the first topo from all topologies topo = context["tapi-common:context"]["tapi-topology:topology-context"]["topology"][0] # Gather all current used sip_uuids used_sip_uuids = Set() try: services = context["tapi-common:context"]["tapi-connectivity:connectivity-context"]["connectivity-service"] for service in services: used_sip_uuids.add(service["end-point"][0]["service-interface-point"]["service-interface-point-uuid"]) used_sip_uuids.add(service["end-point"][1]["service-interface-point"]["service-interface-point-uuid"]) except KeyError: print "There is no existed connectivity service inside ONOS." # select the first available line-side service as bridge. If there is no available line-side service, # then only create a client-to-client service for src and dst node. 
empty_client_src_sip_uuid, empty_client_dst_sip_uuid = None, None empty_src_name, empty_dst_name, empty_client_src_name, empty_client_dst_name = None, None, None, None for link_index in range(0, len(topo["link"])): src_onep, dst_onep, src_sip_uuid, dst_sip_uuid = parse_src_dst(topo, link_index) client_src_sip_uuid, client_src_name = find_mapped_client_sip_uuid(src_sip_uuid, topo["node"]) client_dst_sip_uuid, client_dst_name = find_mapped_client_sip_uuid(dst_sip_uuid, topo["node"]) # firstly, check if line-side service exists # If line-side service exists if src_sip_uuid in used_sip_uuids and dst_sip_uuid in used_sip_uuids: # secondly, check if mapped client-side service exists if (client_src_sip_uuid not in used_sip_uuids) and (client_dst_sip_uuid not in used_sip_uuids): # If there is no such client-side connection exists # Create new client-side connection directly print "Create client-side connection between %s and %s." % \ (client_src_name["onos-cp"], client_dst_name["onos-cp"]) create_input_json = json.dumps(tapi_client_input((client_src_sip_uuid, client_dst_sip_uuid))) resp = requests.post(url_connectivity, data=create_input_json, headers=headers, auth=('onos', 'rocks')) if resp.status_code != 200: raise Exception('POST {}'.format(resp.status_code)) return resp else: # If there exists such client-side connection # Do nothing, just continue pass else: # If line-side service doesn't exist # save 4 sip uuids, and continue empty_client_src_sip_uuid = client_src_sip_uuid empty_client_dst_sip_uuid = client_dst_sip_uuid empty_client_src_name = client_src_name empty_client_dst_name = client_dst_name empty_src_name = parse_value(src_onep["name"]) empty_dst_name = parse_value(dst_onep["name"]) pass # After FOR loop, if this method doesn't return, there is no available line-side # service for mapped client-side service creation. # So, we need to create two client-side services. if empty_client_src_sip_uuid is None: # None case means all client-side services exist. 
raise AssertionError("There is no available client-side service could be created.") else: print "Create client-side services:" print "\t- from %s to %s." % (empty_client_src_name["onos-cp"], empty_client_dst_name["onos-cp"]) print "This service should go through:" print "\t- %s and %s." % (empty_src_name["onos-cp"], empty_dst_name["onos-cp"]) create_input_json = json.dumps(tapi_client_input((empty_client_src_sip_uuid, empty_client_dst_sip_uuid))) resp = requests.post(url_connectivity, data=create_input_json, headers=headers, auth=('onos', 'rocks')) if resp.status_code != 200: raise Exception('POST {}'.format(resp.status_code)) return resp # # Parse array structure "name" under structure "owned node edge point" # def parse_value(arr): rtn = {} for item in arr: rtn[item["value-name"]] = item["value"] return rtn # # Find node edge point of node structure in topology with client-side port, by using nep with line-side port. # The odtn-connection-id should be the same in both line-side nep and client-side nep # def find_client_onep(line_nep_in_link, nodes): for node in nodes: if node["uuid"] == line_nep_in_link["node-uuid"]: conn_id = None for onep in node["owned-node-edge-point"]: if onep["uuid"] == line_nep_in_link["node-edge-point-uuid"]: name = parse_value(onep["name"]) if name["odtn-port-type"] == "line": conn_id = name["odtn-connection-id"] break if conn_id is None: raise AssertionError("Cannot find owned node edge point with node id %s and nep id %s." % (line_nep_in_link["node-uuid"], line_nep_in_link["node-edge-point-uuid"], )) for onep in node["owned-node-edge-point"]: name = parse_value(onep["name"]) if name["odtn-port-type"] == "client" and name["odtn-connection-id"] == conn_id: return onep return None # # Create a line-side connection. 
Firstly, get the context, parsing for SIPs with photonic_media type, # and select one pair of them; Secondly, issue the request # def create_line_connection(url_context, url_connectivity): context = get_context(url_context) # select the first topo from all topologies topo = context["tapi-common:context"]["tapi-topology:topology-context"]["topology"][0] # select randomly the src_sip_uuid and dst_sip_uuid with same connection id. src_onep, dst_onep, src_sip_uuid, dst_sip_uuid = parse_src_dst(topo) while is_port_used(src_sip_uuid, context["tapi-common:context"]["tapi-connectivity:connectivity-context"]): print "Conflict occurs between randomly selected line-side link and existed ones." src_onep, dst_onep, src_sip_uuid, dst_sip_uuid = parse_src_dst(topo) print "\nBuild line-side connectivity:\n|Item|SRC|DST|\n|:--|:--|:--|\n|onos-cp|%s|%s|\n|connection id|%s|%s|\n|sip uuid|%s|%s|" % \ (src_onep["name"][2]["value"], dst_onep["name"][2]["value"], src_onep["name"][1]["value"], dst_onep["name"][1]["value"], src_sip_uuid, dst_sip_uuid) create_input_json = json.dumps(tapi_line_input((src_sip_uuid, dst_sip_uuid))) print "\nThe json content of creation operation for line-side connectivity service is \n\t\t%s." 
% \ create_input_json headers = {'Content-type': 'application/json'} resp = requests.post(url_connectivity, data=create_input_json, headers=headers, auth=('onos', 'rocks')) if resp.status_code != 200: raise Exception('POST {}'.format(resp.status_code)) return resp # # find owned-node-edge-point from all nodes according to line_nep_in_links # def find_line_onep(line_nep_in_link, nodes): for node in nodes: if node["uuid"] == line_nep_in_link["node-uuid"]: if not is_transponder_node(node): break for onep in node["owned-node-edge-point"]: if onep["uuid"] == line_nep_in_link["node-edge-point-uuid"]: # check the length equals 1 to verify the 1-to-1 mapping relationship assert len(onep["mapped-service-interface-point"]) == 1 return onep # When node is OLS, this method will return None return None # # Obtains existing connectivity services # def get_connection(url_connectivity, uuid): # uuid is useless for this method json = '{}' headers = {'Content-type': 'application/json'} resp = requests.post(url_connectivity, data=json, headers=headers, auth=('onos', 'rocks')) if resp.status_code != 200: raise Exception('POST {}'.format(resp.status_code)) return resp
40.853846
136
0.62813
036e3669e539c1ee359752125217465762d9b017
4,151
py
Python
scripts/text/text_particles.py
mou3adb/spread_the_particle
6cc666fded62f07380ed1e3ed52969c436295906
[ "MIT" ]
4
2020-08-18T18:33:05.000Z
2021-05-18T23:55:56.000Z
scripts/text/text_particles.py
mou3adb/spread_the_particle
6cc666fded62f07380ed1e3ed52969c436295906
[ "MIT" ]
null
null
null
scripts/text/text_particles.py
mou3adb/spread_the_particle
6cc666fded62f07380ed1e3ed52969c436295906
[ "MIT" ]
2
2021-03-03T18:57:06.000Z
2021-05-18T20:43:44.000Z
""" The outfile structure is the following: diameter density birth lifetime is_captured stuck_to_geometry theta (blank line) Re Ur (blank line) n_trajectory x1 y1 up1 vp1 Uf1 Vf1 gradpx1 gradpy1 ap_x1 ap_y1 af_x1 af_y1 x2 y2 up2 vp2 Uf2 Vf2 gradpx2 gradpy2 ap_x2 ap_y2 af_x2 af_y2 ... xNt yNt upNt vpNt UfNt VfNt gradpxNt gradpyNt ap_xN ap_yN af_xN af_yN """ import sys sys.path.append('..') import numpy as np from particle import Particle #============================================================================== def floatIt(l): return np.array([float(e) for e in l]) def intIt(l): return np.array([int(e) for e in l]) def write_particle(p, f): f.write('%2.3f %1.3f\n' % (p.diameter, p.density)) f.write('%d %d\n' % (p.birth, p.lifetime)) f.write('%s %s %s\n' % (p.captured, p.stuck_to_geometry, p.theta)) f.write('\n') # blank line f.write('%d %.1f\n' % (p.Re, p.Ur)) f.write('\n') Nt = len(p.trajectory) f.write('%d\n' % Nt) for n in range(Nt): f.write('%e '*12 % \ (p.trajectory[n,0], p.trajectory[n,1], p.velocities[n,0], p.velocities[n,1], p.fluid_velocities[n,0], p.fluid_velocities[n,1], p.pressure_gradients[n,0], p.pressure_gradients[n,1], p.accelerations[n,0], p.accelerations[n,1], p.fluid_accelerations[n,0], p.fluid_accelerations[n,1])) f.write('\n') def write_particles(particles, outfile): f = open(outfile, 'w') Np = len(particles) f.write('%d\n' % Np) f.write('\n') # blank line for p in particles: write_particle(p, f) f.write('\n') f.close() def read_particle(f, old_version=False): # I kept old_version because I had many particles saved before the final # update of this function. 
diameter, density = floatIt(f.readline().strip().split()) birth, lifetime = intIt(f.readline().strip().split()) if not(old_version): str_captured, str_stuck, str_theta = f.readline().strip().split() theta = float(str_theta) else: str_captured, str_stuck = f.readline().strip().split() captured = False if str_captured == 'False' else True stuck = None if str_stuck == 'None' else int(str_stuck) f.readline() # read the blank line Re, Ur = floatIt(f.readline().strip().split()) f.readline() Nt = int(f.readline().strip()) trajectory = [] velocities = [] fluid_velocities = [] pressure_gradients = [] accelerations = [] fluid_accelerations = [] for n in range(Nt): if old_version: x, y, u, v, U, V, gradpx, gradpy \ = floatIt(f.readline().strip().split()) else: x, y, u, v, U, V, gradpx, gradpy, ap_x, ap_y, af_x, af_y \ = floatIt(f.readline().strip().split()) trajectory.append([x, y]) velocities.append([u, v]) fluid_velocities.append([U, V]) pressure_gradients.append([gradpx, gradpy]) if not(old_version): accelerations.append([ap_x, ap_y]) fluid_accelerations.append([af_x, af_y]) pos0 = trajectory[0] u0 = velocities[0] p = Particle(diameter, density, birth, lifetime, pos0, u0) p.captured, p.stuck_to_geometry = captured, stuck p.Re, p.Ur = Re, Ur p.trajectory = np.array(trajectory) p.velocities = np.array(velocities) p.fluid_velocities = np.array(fluid_velocities) p.pressure_gradients = np.array(pressure_gradients) if not(old_version): p.accelerations = np.array(accelerations) p.fluid_accelerations = np.array(fluid_accelerations) p.theta = theta return p def read_particles(infile, old_version=False): f = open(infile, 'r') Np = int(f.readline()) f.readline() # read a blank line particles = [] for i in range(Np): particles.append(read_particle(f, old_version)) f.readline() f.close() return np.array(particles)
25.466258
79
0.578897
0371b2e5e808a3335687f6bb1d019e4b4c60e31e
1,610
py
Python
01_Modelos_Supervisionados/1.2_Analise_Discriminante_Linear_e_Quadratica/1.2.3_Formulacao_Matematica_de_Reducao_de_Dimensionalidade_LDA.py
BrunoBertti/Scikit_Learning
4b9e10ff7909f3728ac1e8bba19f5fd779340bc4
[ "MIT" ]
null
null
null
01_Modelos_Supervisionados/1.2_Analise_Discriminante_Linear_e_Quadratica/1.2.3_Formulacao_Matematica_de_Reducao_de_Dimensionalidade_LDA.py
BrunoBertti/Scikit_Learning
4b9e10ff7909f3728ac1e8bba19f5fd779340bc4
[ "MIT" ]
null
null
null
01_Modelos_Supervisionados/1.2_Analise_Discriminante_Linear_e_Quadratica/1.2.3_Formulacao_Matematica_de_Reducao_de_Dimensionalidade_LDA.py
BrunoBertti/Scikit_Learning
4b9e10ff7909f3728ac1e8bba19f5fd779340bc4
[ "MIT" ]
null
null
null
########## 1.2.3. Formulação matemática de redução de dimensionalidade LDA ########## # Primeiro note que K significa que \mu_k são vetores em \mathcal{R}^d, e eles estão em um subespaço afim H de dimensão no máximo K - 1 (2 pontos estão em uma linha, 3 pontos estão em um plano, etc.) ). # Como mencionado acima, podemos interpretar LDA como a atribuição de x à classe cuja média \mu_k é a mais próxima em termos de distância de Mahalanobis, enquanto também leva em conta as probabilidades anteriores da classe. Alternativamente, LDA é equivalente a primeiro esferificar os dados de modo que a matriz de covariância seja a identidade e, em seguida, atribuir x à média mais próxima em termos de distância euclidiana (ainda contabilizando as classes prioritárias). # Calcular distâncias euclidianas neste espaço d-dimensional é equivalente a primeiro projetar os pontos de dados em H e calcular as distâncias lá (já que as outras dimensões contribuirão igualmente para cada classe em termos de distância). Em outras palavras, se x estiver mais próximo de \mu_k no espaço original, também será o caso de H. Isso mostra que, implícito no classificador LDA, há uma redução de dimensionalidade por projeção linear em um espaço dimensional K-1 . # Podemos reduzir ainda mais a dimensão, para um L escolhido, projetando no subespaço linear H_L que maximiza a variância do \mu^*_k após a projeção (na verdade, estamos fazendo uma forma de PCA para a classe transformada significa \ mu^*_k). Este L corresponde ao parâmetro n_components usado no método de transformação. Veja 1 para mais detalhes.
161
479
0.778261
03740eb7b2e0fb107f339fc022308a8b7f886123
3,195
py
Python
csrv/model/deck.py
mrroach/CentralServer
e377c65d8f3adf5a2d3273acd4f459be697aea56
[ "Apache-2.0" ]
null
null
null
csrv/model/deck.py
mrroach/CentralServer
e377c65d8f3adf5a2d3273acd4f459be697aea56
[ "Apache-2.0" ]
null
null
null
csrv/model/deck.py
mrroach/CentralServer
e377c65d8f3adf5a2d3273acd4f459be697aea56
[ "Apache-2.0" ]
1
2020-09-20T11:26:20.000Z
2020-09-20T11:26:20.000Z
"""A collection of cards.""" import random from csrv.model import cards from csrv.model.cards import card_info # This import is just to pull in all the card definitions import csrv.model.cards.corp import csrv.model.cards.runner class Deck(object): def __init__(self, identity_name, card_names): self.identity = cards.Registry.get(identity_name) self.cards = [] self.is_valid = True for name in card_names: c = cards.Registry.get(name) if c: self.cards.append(c) def _verify_less_than_three_copies(self): """Make sure we have no more than 3 copies of a single cards""" card_list = {} for c in self.cards: card_list[c.NAME] = card_list.setdefault(c.NAME, 0) + 1 invalid_cards = filter(lambda x: card_list[x] > 3, card_list) if len(invalid_cards): return "Deck contains more than 3 copies of the following cards: {}".format(', '.join(invalid_cards)) def _verify_min_deck_size(self): """Make sure deck meets minimum deck size limit""" if len(self.cards) < self.identity.MIN_DECK_SIZE: self.is_valid = False return "Deck does not meet minimum deck size requirement" def _verify_influence_points(self): """Make sure deck doesnt exceed maximum influence points""" influence_spent = reduce(lambda x,y: x+y.influence_cost(self.identity.FACTION), self.cards, 0) if influence_spent > self.identity.MAX_INFLUENCE: return "Deck contains {} influence but only {} allowed".format(influence_spent, self.identity.MAX_INFLUENCE) def _verify_side_only(self, side): """Make sure we only have cards belonging to the correct side""" if len(filter(lambda c: c.SIDE != side, self.cards)): return "Deck contains cards from the other side (corp/runner)" class CorpDeck(Deck): """A deck for a corp.""" def validate(self): """Return a list of errors with the deck.""" return filter(None, [ self._verify_min_deck_size(), self._verify_influence_points(), self._verify_less_than_three_copies(), self._verify_in_faction_agendas(), self._verify_agenda_points(), self._verify_side_only(card_info.CORP) ]) def 
_verify_agenda_points(self): """Make sure deck has required agenda points based on deck size""" agenda_points = reduce(lambda x,y: x+y.AGENDA_POINTS, self.cards, 0) deck_size = len(self.cards) if agenda_points/float(deck_size) < 2.0/5.0: self.is_valid = False return "Only {} Agenda Points in deck of {} cards".format(agenda_points, deck_size) def _verify_in_faction_agendas(self): """Make sure deck only contains in faction agendas""" agendas = filter(lambda c: c.TYPE == card_info.AGENDA, self.cards) if len(filter(lambda a: not a.FACTION in [card_info.NEUTRAL, self.identity.FACTION], agendas)): return "Deck contains out-of-faction Agendas" class RunnerDeck(Deck): """A deck for a runner.""" def validate(self): """Return a list of errors with the deck.""" return filter(None, [ self._verify_min_deck_size(), self._verify_influence_points(), self._verify_less_than_three_copies(), self._verify_side_only(card_info.RUNNER) ])
35.10989
114
0.699531
03766bbc82ca9d0b806101dbc0e7af7f9c47c209
476
py
Python
data_structures_and_algorithms/04_menu.py
dileepabandara/return-python
fc269d577eade231bc9e3813654ce9c5848837ca
[ "MIT" ]
1
2022-01-12T17:44:51.000Z
2022-01-12T17:44:51.000Z
data_structures_and_algorithms/04_menu.py
dileepabandara/return-python
fc269d577eade231bc9e3813654ce9c5848837ca
[ "MIT" ]
null
null
null
data_structures_and_algorithms/04_menu.py
dileepabandara/return-python
fc269d577eade231bc9e3813654ce9c5848837ca
[ "MIT" ]
null
null
null
ans = True while ans: print(""" 1.Add a Student 2.Delete a Student 3.Look Up Student Record 4.Exit/Quit """) ans = input("What would you like to do? ") if ans == "1": print("\nStudent Added") elif ans == "2": print("\n Student Deleted") elif ans == "3": print("\n Student Record Found") elif ans == "4": print("\n Goodbye") ans = None else: print("\n Not Valid Choice Try again")
22.666667
46
0.521008
0376ebb01bd1aa62d9b4075181468b5d09068e7f
525
py
Python
libok.py
txt/se4dm
c38c742039eaa7a15730eb655c4eed067c8a5409
[ "Unlicense" ]
null
null
null
libok.py
txt/se4dm
c38c742039eaa7a15730eb655c4eed067c8a5409
[ "Unlicense" ]
9
2015-10-30T12:46:53.000Z
2015-11-25T03:27:49.000Z
libok.py
txt/se4dm
c38c742039eaa7a15730eb655c4eed067c8a5409
[ "Unlicense" ]
2
2018-06-22T15:23:44.000Z
2020-11-05T01:47:54.000Z
from __future__ import print_function, division import sys sys.dont_write_bytecode = True from lib import * @ok def _rseed(): rseed(1) one = list('abcdefghijklm') assert shuffle(one) == ['m', 'h', 'j', 'f', 'a', 'g', 'l', 'd', 'e', 'c', 'i', 'k', 'b'] @ok def _defDict(): d = DefaultDict(lambda: []) for n,c in enumerate(list('tobeornottobe')): d[c].append(n) assert d == {'b': [2, 11], 'e': [3, 12], 'o': [1, 4, 7, 10], 'n': [6], 'r': [5], 't': [0, 8, 9]}
22.826087
50
0.491429
037803daf8f26a1fd6b807cb352e059357d3aa0d
734
py
Python
setup.py
ignalex/HAP-python
855577cfcde1bf2f8562caf9fbefda3e4fa8b497
[ "Apache-2.0" ]
1
2018-09-23T20:44:46.000Z
2018-09-23T20:44:46.000Z
setup.py
ignalex/HAP-python
855577cfcde1bf2f8562caf9fbefda3e4fa8b497
[ "Apache-2.0" ]
1
2019-10-02T11:12:13.000Z
2019-10-02T11:12:13.000Z
setup.py
ilyamordasov/HAP-python
698eb612c35b5672c4aab9d7896093924cbd358c
[ "Apache-2.0" ]
null
null
null
from setuptools import setup import pyhap.const as pyhap_const PROJECT_NAME = 'HAP-python' URL = 'https://github.com/ikalchev/{}'.format(PROJECT_NAME) PROJECT_URLS = { 'Bug Reports': '{}/issues'.format(URL), 'Documentation': 'http://hap-python.readthedocs.io/en/latest/', 'Source': '{}/tree/master'.format(URL), } PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_NAME) DOWNLOAD_URL = '{}/archive/{}.zip'.format(URL, pyhap_const.__version__) MIN_PY_VERSION = '.'.join(map(str, pyhap_const.REQUIRED_PYTHON_VER)) setup( name=PROJECT_NAME, version=pyhap_const.__version__, url=URL, project_urls=PROJECT_URLS, download_url=DOWNLOAD_URL, python_requires='>={}'.format(MIN_PY_VERSION), )
27.185185
71
0.709809
037a4b8c8dc9b844a65be270c4263033b7498224
1,291
py
Python
experiments/2014_PLOS-Comp-Bio_Wikidemics-feasibility/scrape_mmwr.py
casmlab/quac
f7b037b15f5ff0db1b9669159f645040abce1766
[ "ECL-2.0", "Apache-2.0" ]
34
2015-01-10T05:44:02.000Z
2021-05-18T02:57:19.000Z
experiments/2014_PLOS-Comp-Bio_Wikidemics-feasibility/scrape_mmwr.py
casmlab/quac
f7b037b15f5ff0db1b9669159f645040abce1766
[ "ECL-2.0", "Apache-2.0" ]
14
2015-02-15T21:58:09.000Z
2020-06-05T18:31:47.000Z
experiments/2014_PLOS-Comp-Bio_Wikidemics-feasibility/scrape_mmwr.py
casmlab/quac
f7b037b15f5ff0db1b9669159f645040abce1766
[ "ECL-2.0", "Apache-2.0" ]
19
2015-02-08T02:24:15.000Z
2020-11-07T13:39:55.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Scrape the MMWR morbidity tables at http://wonder.cdc.gov/mmwr/mmwrmorb.asp. No processing is done; we simply save the files for potential offline processing. """ # Copyright (c) Los Alamos National Security, LLC and others. from __future__ import print_function, division import requests import codecs import os mmwr_table_url = 'http://wonder.cdc.gov/mmwr/mmwr_reps.asp?mmwr_year=%d&mmwr_week=%02d&mmwr_table=%s&request=Submit' mmwr_file = '../data/mmwr/%d-%02d-%s.html' tables = {'1', '2A', '2B', '2C', '2D', '2E', '2F', '2G', '2H', '2I', '2J', '2K', '3A', '3B', '4'} error_messages = {'Data are not available for the week requested.', 'No records found.', 'does not exist before the week ending'} for year in range(1996, 2015): for week in range(1, 54): for table in tables: if not os.path.exists(mmwr_file % (year, week, table)): response = requests.get(mmwr_table_url % (year, week, table)) error = False for error_message in error_messages: if error_message in response.text: error = True break if not error: with codecs.open(mmwr_file % (year, week, table), 'w', 'utf-8') as output: output.write(response.text) print('saved %s' % (mmwr_file % (year, week, table)))
32.275
130
0.670023
037b522742c7eb6098ac17119575246e7a1d22e3
6,970
py
Python
cluster_status.py
jtimberlake/hyper-kube-config
d624f81e04d1560b584bb7b748451dd5181e15bf
[ "MIT" ]
29
2018-10-11T17:34:33.000Z
2019-10-09T04:24:22.000Z
cluster_status.py
silvermullet/kube-auth-store
a9c6966fe7b29e0bf80f9e40027310fd4a07dbc3
[ "MIT" ]
14
2018-12-18T18:14:19.000Z
2019-10-19T18:38:12.000Z
cluster_status.py
silvermullet/kube-auth-store
a9c6966fe7b29e0bf80f9e40027310fd4a07dbc3
[ "MIT" ]
6
2018-11-06T09:32:40.000Z
2019-10-17T18:18:08.000Z
import json import logging import os import traceback from boto3.dynamodb.conditions import Attr, Key import storage from util import lambda_result logger = logging.getLogger('cluster_status') if os.environ.get('DEBUG'): logger.setLevel(logging.DEBUG) def set_cluster_status(event, context): """Set the status of a cluster, ie active, inactive, maintainance_mode, etc""" CLUSTER_TABLE = storage.get_cluster_table() query_string_params = event.get('queryStringParameters', {}) cluster_status = query_string_params.get('cluster_status') if cluster_status is None: return lambda_result( {"message": f'Must provide a status variable in uri query string'}, status_code=500) cluster_name = query_string_params.get('cluster_name') if cluster_name is None: return lambda_result( {"message": (f'Must provide a cluster_name ' f'variable in uri query string')}, status_code=500) try: CLUSTER_TABLE.update_item( Key={ 'id': cluster_name, }, UpdateExpression="SET cluster_status = :r", ExpressionAttributeValues={ ':r': cluster_status }, ReturnValues="UPDATED_NEW" ) return lambda_result( {"message": (f'Updated cluster status for {cluster_name} ' f'to {cluster_status}')}) except Exception: failed_txt = f'Failed to update cluster status for {cluster_name}' logger.exception(failed_txt) return lambda_result({"message": failed_txt}, status_code=500) def set_cluster_environment(event, context): """Set the environment of a cluster, ie dev, stage, prod""" CLUSTER_TABLE = storage.get_cluster_table() query_string_params = event.get('queryStringParameters', {}) environment = query_string_params.get('environment') if environment is None: return lambda_result( {"message": f'Must provide an environment param in uri query string'}, status_code=500) cluster_name = query_string_params.get('cluster_name') if cluster_name is None: return lambda_result( {"message": (f'Must provide a cluster_name ' f'variable in uri query string')}, status_code=500) try: CLUSTER_TABLE.update_item( Key={ 'id': cluster_name, }, 
UpdateExpression="ADD environment :e", ExpressionAttributeValues={ ':e': set([environment]) }, ReturnValues="UPDATED_NEW" ) msg = (f'Updated cluster environment for {cluster_name} ' f'to {environment}') return lambda_result(msg) except Exception as e: failed_txt = f'Failed to update cluster environment for {cluster_name}' failed_txt += "\n{} \n{}".format( str(e), repr(traceback.format_stack())) print(failed_txt) return lambda_result({"message": failed_txt}, status_code=500) def clusters_per_environment(event, context): """Query cluster status attribute for given environment, requires 'environment' query param, or defaults to all clusters""" clusters = [] environment = event.get('queryStringParameters', {}).get('environment') items = _query_dynamodb(environment) for cluster in items: clusters.append(cluster['id']) return lambda_result(clusters) def cluster_status(event, context): """Query cluster status attribute for given environment, requires 'environment' query param, or defaults to all clusters""" clusters = [] query_string_params = event.get('queryStringParameters', {}) environment = query_string_params.get('environment') cluster_status = query_string_params.get('cluster_status') items = _query_dynamodb(environment, cluster_status) for cluster in items: clusters.append(cluster['id']) return lambda_result(clusters) def set_cluster_metadata(event, context): """Set the metadata of a cluster. metadata is a json blob use for describing extra details about a cluster. 
""" CLUSTER_TABLE = storage.get_cluster_table() query_string_params = event.get('queryStringParameters', {}) metadata = event.get('body', {}) cluster_name = query_string_params.get('cluster_name') if cluster_name is None: return lambda_result( {"message": (f'Must provide a cluster_name ' f'variable in uri query string')}, status_code=500) try: if isinstance(metadata, str): metadata = json.loads(metadata) CLUSTER_TABLE.update_item( Key={ 'id': cluster_name, }, UpdateExpression="set metadata = :md", ExpressionAttributeValues={ ':md': metadata }, ReturnValues="UPDATED_NEW" ) return lambda_result( {"message": f'Updated cluster metadata for {cluster_name}'} ) except Exception: failed_txt = f'Failed to update cluster metadata for {cluster_name}' logger.exception(failed_txt) logger.error(json.dumps(event)) return lambda_result({"message": failed_txt}, status_code=500) def get_cluster_metadata(event, context): """Get the metadata of a cluster. metadata is a json blob use for describing extra details about a cluster. 
""" CLUSTER_TABLE = storage.get_cluster_table() query_string_params = event.get('queryStringParameters', {}) cluster_name = query_string_params.get('cluster_name') if cluster_name is None: return { "statusCode": 500, "body": json.dumps( {"message": (f'Must provide a cluster_name ' f'variable in uri query string')}) } status_code = 404 db_response = CLUSTER_TABLE.get_item( Key={ 'id': cluster_name, } ) metadata = {} if 'Item' in db_response: status_code = 200 metadata = db_response['Item'].get('metadata', {}) if isinstance(metadata, str): metadata = json.loads(metadata) metadata['environment'] = db_response['Item'].get('environment') metadata['status'] = db_response['Item'].get('status') metadata['id'] = cluster_name return lambda_result(metadata, status_code=status_code) def _query_dynamodb(environment, status=None, metadata=False): CLUSTER_TABLE = storage.get_cluster_table() fkey = Attr('environment').contains(environment) if status is not None: fkey = fkey & Key('cluster_status').eq(status) response = CLUSTER_TABLE.scan( ProjectionExpression="id", FilterExpression=fkey ) return response.get('Items', [])
32.877358
79
0.628264
037cb54aac999a27c21c13f841feb80028eba68f
1,366
py
Python
ote_sdk/ote_sdk/utils/labels_utils.py
ntyukaev/training_extensions
c897d42e50828fea853ceda0795e1f0e7d6e9909
[ "Apache-2.0" ]
null
null
null
ote_sdk/ote_sdk/utils/labels_utils.py
ntyukaev/training_extensions
c897d42e50828fea853ceda0795e1f0e7d6e9909
[ "Apache-2.0" ]
null
null
null
ote_sdk/ote_sdk/utils/labels_utils.py
ntyukaev/training_extensions
c897d42e50828fea853ceda0795e1f0e7d6e9909
[ "Apache-2.0" ]
1
2020-12-13T22:13:51.000Z
2020-12-13T22:13:51.000Z
""" This module implements utilities for labels """ # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from typing import List, Optional from ote_sdk.entities.label import LabelEntity from ote_sdk.entities.label_schema import LabelSchemaEntity from ote_sdk.entities.scored_label import ScoredLabel def get_empty_label(label_schema: LabelSchemaEntity) -> Optional[LabelEntity]: """ Get first empty label from label_schema """ empty_candidates = list( set(label_schema.get_labels(include_empty=True)) - set(label_schema.get_labels(include_empty=False)) ) if empty_candidates: return empty_candidates[0] return None def get_leaf_labels(label_schema: LabelSchemaEntity) -> List[LabelEntity]: """ Get leafs from label tree """ leaf_labels = [] all_labels = label_schema.get_labels(False) for lbl in all_labels: if not label_schema.get_children(lbl): leaf_labels.append(lbl) return leaf_labels def get_ancestors_by_prediction( label_schema: LabelSchemaEntity, prediction: ScoredLabel ) -> List[ScoredLabel]: """ Get all the ancestors for a given label node """ ancestor_labels = label_schema.get_ancestors(prediction.get_label()) return [ScoredLabel(al, prediction.probability) for al in ancestor_labels]
27.32
78
0.7306
cee01dc4ca892acb2cf457702c9a788e7276faf5
614
py
Python
DSA/Python/src/dsa/lib/math/nums/tests/test_three_sum.py
JackieMa000/problems
c521558830a0bbf67f94109af92d7be4397d0a43
[ "BSD-3-Clause" ]
null
null
null
DSA/Python/src/dsa/lib/math/nums/tests/test_three_sum.py
JackieMa000/problems
c521558830a0bbf67f94109af92d7be4397d0a43
[ "BSD-3-Clause" ]
1
2020-10-23T04:06:56.000Z
2020-10-23T04:06:56.000Z
DSA/Python/src/dsa/lib/math/nums/tests/test_three_sum.py
JackieMa000/problems
c521558830a0bbf67f94109af92d7be4397d0a43
[ "BSD-3-Clause" ]
null
null
null
import unittest from typing import List from dsa.lib.math.nums.three_sum import ThreeSum class ThreeSumTest(unittest.TestCase): def test_case1(self): self.assertEqual([[0, 0, 0]], self.three_sum([0, 0, 0, 0])) self.assertEqual([[-2, 0, 2]], self.three_sum([-2, 0, 0, 2, 2])) self.assertEqual([[-1, 0, 1]], self.three_sum([-1, 0, 1, 0])) def test_case2(self): self.assertIn([-1, -1, 2], [[-1, -1, 2], [-1, 0, 1]]) self.assertIn([-1, 0, 1], [[-1, -1, 2], [-1, 0, 1]]) @staticmethod def three_sum(nums: List[int]): return ThreeSum().threeSum(nums)
30.7
72
0.571661
cee3692c9f60cfa65662bb7421bd3d405f7b7920
4,329
py
Python
meggie/actions/spectrum_plot/controller/spectrum.py
Teekuningas/meggie
0790559febb990a5487d4f0c92987066632e1d99
[ "BSD-2-Clause-FreeBSD" ]
4
2020-04-29T08:57:11.000Z
2021-01-15T21:21:51.000Z
meggie/actions/spectrum_plot/controller/spectrum.py
Teekuningas/meggie
0790559febb990a5487d4f0c92987066632e1d99
[ "BSD-2-Clause-FreeBSD" ]
16
2019-05-03T10:31:16.000Z
2021-05-06T14:59:55.000Z
meggie/actions/spectrum_plot/controller/spectrum.py
cibr-jyu/meggie
0790559febb990a5487d4f0c92987066632e1d99
[ "BSD-2-Clause-FreeBSD" ]
3
2020-12-12T09:57:00.000Z
2020-12-20T17:12:05.000Z
""" Contains functions for plot spectrum action """ import mne import numpy as np import matplotlib.pyplot as plt from meggie.utilities.plotting import color_cycle from meggie.utilities.plotting import create_channel_average_plot from meggie.utilities.channels import average_to_channel_groups from meggie.utilities.channels import iterate_topography from meggie.utilities.units import get_power_unit def plot_spectrum_averages(subject, channel_groups, name, log_transformed=True): """ Plots spectrum averages. """ subject_name = subject.name spectrum = subject.spectrum.get(name) data = spectrum.content freqs = spectrum.freqs ch_names = spectrum.ch_names info = spectrum.info colors = color_cycle(len(data)) conditions = spectrum.content.keys() averages = {} for key, psd in sorted(data.items()): data_labels, averaged_data = average_to_channel_groups( psd, info, ch_names, channel_groups) for label_idx, label in enumerate(data_labels): if not label in averages: averages[label] = [] averages[label].append((key, averaged_data[label_idx])) ch_types = sorted(set([label[0] for label in averages.keys()])) for ch_type in ch_types: ch_groups = sorted([label[1] for label in averages.keys() if label[0] == ch_type]) def plot_fun(ax_idx, ax): ch_group = ch_groups[ax_idx] ax.set_title(ch_group) ax.set_xlabel('Frequency (Hz)') ax.set_ylabel('Power ({})'.format( get_power_unit(ch_type, log_transformed))) for color_idx, (key, curve) in enumerate(averages[(ch_type, ch_group)]): if log_transformed: curve = 10 * np.log10(curve) ax.plot(freqs, curve, color=colors[color_idx]) title = ' '.join([name, ch_type]) legend = list(zip(conditions, colors)) create_channel_average_plot(len(ch_groups), plot_fun, title, legend) plt.show() def plot_spectrum_topo(subject, name, log_transformed=True, ch_type='meg'): """ Plots spectrum topography. 
""" subject_name = subject.name spectrum = subject.spectrum.get(name) data = spectrum.content freqs = spectrum.freqs ch_names = spectrum.ch_names info = spectrum.info if ch_type == 'meg': picked_channels = [ch_name for ch_idx, ch_name in enumerate(info['ch_names']) if ch_idx in mne.pick_types(info, meg=True, eeg=False)] else: picked_channels = [ch_name for ch_idx, ch_name in enumerate(info['ch_names']) if ch_idx in mne.pick_types(info, eeg=True, meg=False)] info = info.copy().pick_channels(picked_channels) colors = color_cycle(len(data)) def individual_plot(ax, info_idx, names_idx): """ """ ch_name = ch_names[names_idx] for color_idx, (key, psd) in enumerate(sorted(data.items())): if log_transformed: curve = 10 * np.log10(psd[names_idx]) else: curve = psd[names_idx] ax.plot(freqs, curve, color=colors[color_idx], label=key) title = ' '.join([name, ch_name]) ax.figure.canvas.set_window_title(title.replace(' ', '_')) ax.figure.suptitle(title) ax.set_title('') ax.legend() ax.set_xlabel('Frequency (Hz)') ax.set_ylabel('Power ({})'.format(get_power_unit( mne.io.pick.channel_type(info, info_idx), log_transformed ))) plt.show() fig = plt.figure() for ax, info_idx, names_idx in iterate_topography( fig, info, ch_names, individual_plot): handles = [] for color_idx, (key, psd) in enumerate(sorted(data.items())): if log_transformed: curve = 10 * np.log10(psd[names_idx]) else: curve = psd[names_idx] handles.append(ax.plot(curve, color=colors[color_idx], linewidth=0.5, label=key)[0]) if not handles: return fig.legend(handles=handles) title = '{0}_{1}'.format(name, ch_type) fig.canvas.set_window_title(title) plt.show()
30.921429
85
0.612151
cee4ceb0af01d56f99e0400de26b6a95df4511da
70
py
Python
torchrl/runners/__init__.py
srikarym/torchrl
fee98e78ac1657a2c9a4063dd8d63ba207a121e2
[ "Apache-2.0" ]
3
2019-02-27T19:00:32.000Z
2020-07-19T03:18:28.000Z
torchrl/runners/__init__.py
srikarym/torchrl
fee98e78ac1657a2c9a4063dd8d63ba207a121e2
[ "Apache-2.0" ]
null
null
null
torchrl/runners/__init__.py
srikarym/torchrl
fee98e78ac1657a2c9a4063dd8d63ba207a121e2
[ "Apache-2.0" ]
null
null
null
from .base_runner import BaseRunner from .gym_runner import GymRunner
23.333333
35
0.857143
cee59d1b21ebd4c01ff98f1b398ff22e296663a6
7,733
py
Python
data/cyclesps.py
DawyD/UNet-PS-4D
bdd31308854dbd5f309aec9bcc1f7a35f267481d
[ "MIT" ]
1
2021-12-06T17:20:36.000Z
2021-12-06T17:20:36.000Z
data/cyclesps.py
DawyD/UNet-PS-4D
bdd31308854dbd5f309aec9bcc1f7a35f267481d
[ "MIT" ]
null
null
null
data/cyclesps.py
DawyD/UNet-PS-4D
bdd31308854dbd5f309aec9bcc1f7a35f267481d
[ "MIT" ]
3
2021-12-06T08:09:42.000Z
2022-03-12T08:09:34.000Z
""" DataGenerator for CyclesPS Dataset This file use substantial portion of code from the original CNN-PS repository https://github.com/satoshi-ikehata/CNN-PS/ """ import numpy as np import cv2 import os import gc from data.datagenerator import DataGenerator from data.utils import rotate_images from misc.projections import standard_proj from tensorflow.keras.models import Model class CyclesDataGenerator(DataGenerator): def __init__(self, datapath, objlist=None, batch_size=256, spatial_patch_size=5, obs_map_size=32, shuffle=False, random_illums=False, keep_axis=True, validation_split=None, nr_rotations=1, rotation_start=0, rotation_end=2 * np.pi, projection=standard_proj, add_raw=False, images=None, normals=None, masks=None, illum_dirs=None, order=2, divide_maps=False, round_nearest=True, rot_2D=False, verbose=False): self.datapath = datapath self.objlist = objlist if objlist is not None else sorted(os.listdir(datapath + '/PRPS')) self.verbose = verbose super(CyclesDataGenerator, self).__init__( batch_size=batch_size, spatial_patch_size=spatial_patch_size, obs_map_size=obs_map_size, shuffle=shuffle, random_illums=random_illums, keep_axis=keep_axis, validation_split=validation_split, nr_rotations=nr_rotations, rotation_start=rotation_start, rotation_end=rotation_end, projection=projection, add_raw=add_raw, images=images, normals=normals, masks=masks, illum_dirs=illum_dirs, order=order, divide_maps=divide_maps, round_nearest=round_nearest, rot_2D=rot_2D) def load_data(self): objid = 0 for obj in self.objlist: for dirb, dirn, scale in zip(['PRPS_Diffuse/' + '%s' % obj, 'PRPS/' + '%s' % obj, 'PRPS/' + '%s' % obj], ['images_diffuse', 'images_specular', 'images_metallic'], [1, 0.5, 0.5]): if self.verbose: print("\rPre-loading image ({:}/{:}) {:} ".format(objid + 1, self.nr_objects, dirb), end="") nr_ch = 3 if self.add_raw else 1 sample_path = os.path.join(self.datapath, dirb, dirn) imgs, nmls, msks, light_dirs = self.load_sample(sample_path, scale, -1, nr_ch) 
self.fill_data(imgs, nmls, msks, light_dirs, objid) if self.verbose: print("", end="\x1b[1K\r") objid += 1 if self.verbose: print() def get_max_shape(self, rotations=None): """ Returns a shape of an array (height, width, channels) which all images of various sizes under all rotations fit :param rotations: List of rotation angles (in radians) :return: max_shape [nr_objects, height, width, channels] """ max_shape = [0, 0, 0, 0] for obj in self.objlist: for p, scale in zip(['PRPS_Diffuse/' + '%s' % obj, 'PRPS/' + '%s' % obj, 'PRPS/' + '%s' % obj], [1, 0.5, 0.5]): max_shape[0] += 1 normal_path = os.path.join(self.datapath, p, 'gt_normal.tif') if not os.path.exists(normal_path): raise ValueError("Path\"{:}\"does not exists.".format(normal_path)) normals = cv2.imread(normal_path, -1) normals = cv2.resize(normals, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST) f = open(os.path.join(self.datapath, p, 'light.txt')) data = f.read() f.close() lines = data.split('\n') nr_illums = len(lines) - 1 # the last line is empty (how to fix it?) 
if nr_illums > max_shape[3]: max_shape[3] = nr_illums if rotations is not None: # In case of rotations, the width and height might be larger for angle in rotations: img_shape = rotate_images(2 * np.pi - angle, normals[..., 0], axes=(0, 1), order=0).shape for k in range(2): if img_shape[k] > max_shape[k+1]: max_shape[k+1] = img_shape[k] else: for k in range(2): if normals.shape[k] > max_shape[k+1]: max_shape[k+1] = normals.shape[k] gc.collect() return max_shape @staticmethod def load_sample(dirpath, scale, illum_ids=-1, nr_channels=1): assert illum_ids == -1 normal_path = os.path.join(dirpath, '../gt_normal.tif') inboundary_path = os.path.join(dirpath, '../inboundary.png') onboundary_path = os.path.join(dirpath, '../onboundary.png') if not os.path.exists(normal_path): raise ValueError("Path\"{:}\"does not exists.".format(normal_path)) # read ground truth surface normal normals = np.float32(cv2.imread(normal_path, -1)) / 65535.0 # [-1,1] normals = normals[:, :, ::-1] normals = 2 * normals - 1 normals = cv2.resize(normals, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST) normals = normals / np.sqrt(np.sum(normals**2, axis=-1, keepdims=True)) height, width = np.shape(normals)[:2] # read mask images_metallic if os.path.exists(inboundary_path) and os.path.exists(onboundary_path): inboundary = cv2.imread(inboundary_path, -1) inboundary = cv2.resize(inboundary, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST) inboundary = inboundary > 0 onboundary = cv2.imread(onboundary_path, -1) onboundary = cv2.resize(onboundary, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST) onboundary = onboundary > 0 masks = inboundary | onboundary else: masks = normals[..., 2] > 0 masks = masks[..., None] # read light filenames f = open(os.path.join(dirpath, '../light.txt')) data = f.read() f.close() lines = data.split('\n') nr_illums = len(lines) - 1 # the last line is empty (how to fix it?) 
light_directions = np.zeros((nr_illums, 3), np.float32) for i, l in enumerate(lines): s = l.split(' ') if len(s) == 3: light_directions[i, 0] = float(s[0]) light_directions[i, 1] = float(s[1]) light_directions[i, 2] = float(s[2]) # read images images = np.zeros((height, width, nr_illums, nr_channels), np.float32) for i in range(nr_illums): if i % np.floor(nr_illums / 10) == 0: print('.', end='') image_path = os.path.join(dirpath, '%05d.tif' % i) cv2_im = cv2.imread(image_path, -1) / 65535.0 cv2_im = cv2.resize(cv2_im, (height, width), interpolation=cv2.INTER_NEAREST) if nr_channels == 1: cv2_im = (cv2_im[:, :, 0:1] + cv2_im[:, :, 1:2] + cv2_im[:, :, 2:3]) / 3 images[:, :, i] = cv2_im return images, normals, masks, light_directions @staticmethod def load_sample_test(dir_path, obj_path, scale, index=-1): assert index == -1 obj, dirn = obj_path.split("/") return CyclesDataGenerator.load_sample(dir_path + obj, dirn, scale)
40.915344
120
0.557869
cee7987780f9d6caf6c5e3e7b2bc8abfa821e3a8
2,105
py
Python
p1_basic/day22_26oop/day26/04_异常处理.py
dong-pro/fullStackPython
5ad8662f7b57f14c8529e7eaf64290eeda773557
[ "Apache-2.0" ]
1
2020-04-03T01:32:05.000Z
2020-04-03T01:32:05.000Z
p1_basic/day22_26oop/day26/04_异常处理.py
dong-pro/fullStackPython
5ad8662f7b57f14c8529e7eaf64290eeda773557
[ "Apache-2.0" ]
null
null
null
p1_basic/day22_26oop/day26/04_异常处理.py
dong-pro/fullStackPython
5ad8662f7b57f14c8529e7eaf64290eeda773557
[ "Apache-2.0" ]
null
null
null
import os class ExistsError(Exception): pass class KeyInvalidError(Exception): pass def new_func(path, prev): """ 去path路径的文件中,找到前缀为prev的一行数据,获取数据并返回给调用者。 1000,成功 1001,文件不存在 1002,关键字为空 1003,未知错误 ... :return: """ response = {'code': 1000, 'data': None} try: if not os.path.exists(path): raise ExistsError() if not prev: raise KeyInvalidError() pass except ExistsError as e: response['code'] = 1001 response['data'] = '文件不存在' except KeyInvalidError as e: response['code'] = 1002 response['data'] = '关键字为空' except Exception as e: response['code'] = 1003 response['data'] = '未知错误' return response # def func(path, prev): # """ # 去path路径的文件中,找到前缀为prev的一行数据,获取数据并返回给调用者。 # 1000,成功 # 1001,文件不存在 # 1002,关键字为空 # 1003,未知错误 # ... # :return: # """ # response = {'code': 1000, 'data': None} # try: # if not os.path.exists(path): # response['code'] = 1001 # response['data'] = '文件不存在' # return response # if not prev: # response['code'] = 1002 # response['data'] = '关键字为空' # return response # pass # except Exception as e: # response['code'] = 1003 # response['data'] = '未知错误' # return response def show(): return 8 def run(): pass # #############自定义异常############ # class MyException(Exception): # def __init__(self, code, msg): # self.code = code # self.msg = msg # try: # raise MyException(1000, '操作异常') # # except MyException as obj: # print(obj.code, obj.msg) # 知识点:如何自定义异常类? class MyException(Exception): def __init__(self, code, msg): self.code = code self.msg = msg try: # 知识点:主动抛出异常 raise MyException(1000, '操作异常') except KeyError as obj: print(obj, 1111) except MyException as obj: # 知识点:捕获异常 print(obj, 2222) except Exception as obj: print(obj, 3333)
21.262626
45
0.535392
cee901804439796dcdaf795233ffadc0af103dcb
183
py
Python
hackerrank/Python/Hex Color Code/solution.py
ATrain951/01.python-com_Qproject
c164dd093954d006538020bdf2e59e716b24d67c
[ "MIT" ]
4
2020-07-24T01:59:50.000Z
2021-07-24T15:14:08.000Z
hackerrank/Python/Hex Color Code/solution.py
ATrain951/01.python-com_Qproject
c164dd093954d006538020bdf2e59e716b24d67c
[ "MIT" ]
null
null
null
hackerrank/Python/Hex Color Code/solution.py
ATrain951/01.python-com_Qproject
c164dd093954d006538020bdf2e59e716b24d67c
[ "MIT" ]
null
null
null
import re for _ in range(0, int(input())): matches = re.findall(r'(#(?:[\da-f]{3}){1,2})(?!\w)(?=.*;)', input(), re.IGNORECASE) if matches: print(*matches, sep='\n')
26.142857
88
0.513661
ceea1cba85bb3d624953e8ecf28fb6d54fd02614
4,429
py
Python
code/vocabulary.py
TimothyBenger/knausj_talon
10c2440fb3646abda1adc84ca9fd230f752eb353
[ "MIT" ]
null
null
null
code/vocabulary.py
TimothyBenger/knausj_talon
10c2440fb3646abda1adc84ca9fd230f752eb353
[ "MIT" ]
null
null
null
code/vocabulary.py
TimothyBenger/knausj_talon
10c2440fb3646abda1adc84ca9fd230f752eb353
[ "MIT" ]
null
null
null
from talon import Context, Module from .user_settings import get_list_from_csv mod = Module() ctx = Context() mod.list("vocabulary", desc="additional vocabulary words") # Default words that will need to be capitalized (particularly under w2l). # NB. These defaults and those later in this file are ONLY used when # auto-creating the corresponding settings/*.csv files. Those csv files # determine the contents of user.vocabulary and dictate.word_map. Once they # exist, the contents of the lists/dictionaries below are irrelevant. _capitalize_defaults = [ "I", "I'm", "I've", "I'll", "I'd", "Monday", "Mondays", "Tuesday", "Tuesdays", "Wednesday", "Wednesdays", "Thursday", "Thursdays", "Friday", "Fridays", "Saturday", "Saturdays", "Sunday", "Sundays", "January", "February", # March omitted because it's a regular word too "April", # May omitted because it's a regular word too "June", "July", "August", "September", "October", "November", "December", ] # Default words that need to be remapped. _word_map_defaults = { # E.g: # "cash": "cache", # This is the opposite ordering to words_to_replace.csv (the latter has the target word first) } _word_map_defaults.update({word.lower(): word for word in _capitalize_defaults}) # "dictate.word_map" is used by `actions.dictate.replace_words` to rewrite words # Talon recognized. Entries in word_map don't change the priority with which # Talon recognizes some words over others. ctx.settings["dictate.word_map"] = get_list_from_csv( "words_to_replace.csv", headers=("Replacement", "Original"), default=_word_map_defaults, ) # Default words that should be added to Talon's vocabulary. _simple_vocab_default = ["nmap", "admin", "Cisco", "Citrix", "VPN", "DNS", "Minecraft", "Ferran", "Angelos", "storageos"] # Defaults for different pronounciations of words that need to be added to # Talon's vocabulary. 
_default_vocabulary = { "N map": "nmap", "under documented": "under-documented", "koob control": "kubectl", "cube control": "kubectl", "keep control": "kubectl", "chang pod": "pod", "chang pods": "pods", "chang node": "node", "chang nodes": "nodes", "chang kubernetes": "kubernetes", "chang git": "git", "chang pull": "pull", "chang com": "com", "chang delete": "delete", "trying to lead": "delete", "replica set": "replicaset", "change delete": "delete", "name space": "namespace", "at it": "edit", "chang sudo": "sudo", "diagnostic yew till": "diagnosticutil", "stateful set": "statefulset", "in flux": "influx", "you control": "kubectl", "check out": "checkout", "make directory": "mkdir", "demon set": "daemonset", "demon sets": "daemonsets", "chang log": "log", "chang logs": "log", "koob control create from file": "kubectl create -f", "cube control create from file": "kubectl create -f", "keep control create from file": "kubectl create -f", "chang seff": "ceph", "ray doss": "RADOS", "raydos": "RADOS", "open sauce": "open-source", "all namespaces": "--all-namespaces", "output wide": "-o wide", "etsy dee": "etcd", "at city": "etcd", "at cd": "etcd", "cube system": "kube-system", "from file": " - f ", "with namespace": " - n ", "chang log": "log", "chang logs": "logs", "change directory": "cd", "storage class": "storageclass", "my sequel": "mysql", "dee bench": "dbench", "chang hay": "hey", "elastic search": "elasticsearch", "elastic such": "elasticsearch", "storage oh ess": "storageos", "store to us": "storageos", "store ous": "storageos", "store joes": "store joes" } _default_vocabulary.update({word: word for word in _simple_vocab_default}) # "user.vocabulary" is used to explicitly add words/phrases that Talon doesn't # recognize. Words in user.vocabulary (or other lists and captures) are # "command-like" and their recognition is prioritized over ordinary words. 
ctx.lists["user.vocabulary"] = get_list_from_csv( "additional_words.csv", headers=("Word(s)", "Spoken Form (If Different)"), default=_default_vocabulary, ) # for quick verification of the reload # print(str(ctx.settings["dictate.word_map"])) # print(str(ctx.lists["user.vocabulary"]))
29.925676
121
0.64326
ceeab48dfbbd65bcb363f2bc14497327bbed9e95
451
py
Python
api/src/controllers/utils.py
debbie-chan/SPM
f84e62779347579287aee8a2e832f72dcc53b8dd
[ "MIT" ]
null
null
null
api/src/controllers/utils.py
debbie-chan/SPM
f84e62779347579287aee8a2e832f72dcc53b8dd
[ "MIT" ]
null
null
null
api/src/controllers/utils.py
debbie-chan/SPM
f84e62779347579287aee8a2e832f72dcc53b8dd
[ "MIT" ]
null
null
null
import json from bson import ObjectId from datetime import datetime, date class JSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, ObjectId): return str(o) if isinstance(o, (datetime, date)): return o.isoformat() return json.JSONEncoder.default(self, o) class DatetimeConverter: def strToDatetime(o): return datetime.strptime(o["$date"], "%Y-%m-%dT%H:%M:%S.000Z")
25.055556
70
0.643016
ceed5b9c4d3963ebe8a8bb9c365ef1238097db1b
390
py
Python
tests/basics/try_reraise.py
geowor01/micropython
7fb13eeef4a85f21cae36f1d502bcc53880e1815
[ "MIT" ]
7
2019-10-18T13:41:39.000Z
2022-03-15T17:27:57.000Z
tests/basics/try_reraise.py
geowor01/micropython
7fb13eeef4a85f21cae36f1d502bcc53880e1815
[ "MIT" ]
null
null
null
tests/basics/try_reraise.py
geowor01/micropython
7fb13eeef4a85f21cae36f1d502bcc53880e1815
[ "MIT" ]
2
2020-06-23T09:10:15.000Z
2020-12-22T06:42:14.000Z
# Reraising last exception with raise w/o args def f(): try: raise ValueError("val", 3) print("FAIL") raise SystemExit except: raise try: f() print("FAIL") raise SystemExit except ValueError as e: pass # Can reraise only in except block try: raise print("FAIL") raise SystemExit except RuntimeError: print("PASS")
15
46
0.602564
ceee787626ea63b9ebed145fa361e93cb187c722
494
py
Python
molecule/default/tests/test_role.py
gantsign/ansible-role-git-credential-manager
63c9d20389447ea9a38b56a6198a385fdebe1718
[ "MIT" ]
3
2018-09-04T15:53:22.000Z
2020-06-13T17:22:47.000Z
molecule/default/tests/test_role.py
gantsign/ansible_role_git_credential_manager
b718d4133a272f766719acca5afa8e1999a4588d
[ "MIT" ]
1
2018-09-04T16:02:20.000Z
2018-09-05T14:45:34.000Z
molecule/default/tests/test_role.py
gantsign/ansible_role_git_credential_manager
b718d4133a272f766719acca5afa8e1999a4588d
[ "MIT" ]
null
null
null
import re def test_version(host): version = host.check_output('git-credential-manager-core --version') pattern = r'[0-9\.]+(\.[0-9\.]+){2}' assert re.search(pattern, version) def test_git_config(host): config = host.check_output('git config --system credential.helper') assert config == '/usr/local/share/gcm-core/git-credential-manager-core' config = host.check_output( 'git config --system credential.credentialStore') assert config == 'secretservice'
30.875
76
0.688259
ceef88b42a5304577b2b39be8918b4680ae52465
9,605
py
Python
upetem_service.py
myroslav/robot_tests.broker.upetem
323314259faa60618113fbc37b5e1f1d79c2192b
[ "Apache-2.0" ]
null
null
null
upetem_service.py
myroslav/robot_tests.broker.upetem
323314259faa60618113fbc37b5e1f1d79c2192b
[ "Apache-2.0" ]
null
null
null
upetem_service.py
myroslav/robot_tests.broker.upetem
323314259faa60618113fbc37b5e1f1d79c2192b
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 from datetime import datetime, timedelta import dateutil.parser import pytz import urllib TZ = pytz.timezone('Europe/Kiev') def adapt_data(data): data['data']['procuringEntity']['name'] = 'testuser_tender_owner' for x in data['data']['items']: x['unit']['name'] = get_unit_name(x['unit']['name']) x['deliveryAddress']['region'] = get_delivery_region(x['deliveryAddress']['region']) x['deliveryAddress']['locality'] = convert_locality(x['deliveryAddress']['locality']) x['deliveryDate']['startDate'] = adapt_delivery_date(x['deliveryDate']['startDate']) x['deliveryDate']['endDate'] = adapt_delivery_date(x['deliveryDate']['endDate']) data['data']['procuringEntity']['address']['region'] = get_delivery_region(data['data']['procuringEntity']['address']['region']) data['data']['procuringEntity']['address']['locality'] = convert_locality(data['data']['procuringEntity']['address']['locality']) data['data']['procuringEntity']['contactPoint']['telephone'] = data['data']['procuringEntity']['contactPoint']['telephone'][:13] return data def adapt_step(data, new_step): data['data']['minimalStep']['amount'] = round(new_step, 2) data['data']['lots'][0]['minimalStep']['amount'] = round(new_step, 2) def adapt_unit_name(data): return { u"наб.": u"набір", u"шт.": u"штуки", u"упак.": u"упаковка" }.get(data, data) def adapt_data_view(data): for x in data['data']['items']: x['deliveryDate']['startDate'] = adapt_delivery_date(x['deliveryDate']['startDate']) x['deliveryDate']['endDate'] = adapt_delivery_date(x['deliveryDate']['endDate']) return data def download_file(url, file_name, output_dir): urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name))) def get_type_field(field): value = ['deliveryDate.startDate', 'deliveryDate.endDate', 'deliveryAddress.postalCode', 'deliveryAddress.region', 'deliveryAddress.streetAddress', 'additionalClassifications.id', 'classification.id', 'unit.name', 'unit.code', 'deliveryLocation.latitude', 'deliveryLocation.longitude', 'quantity', 
'deliveryAddress.locality', 'title', 'value.amount', 'value.valueAddedTaxIncluded', 'minimalStep.amount', 'minimalStep.valueAddedTaxIncluded'] text = ['description', 'deliveryAddress.countryName', 'classification.scheme', 'classification.description', 'additionalClassifications.scheme', 'additionalClassifications.description', 'value.currency', 'minimalStep.currency', 'featureOf', 'status', 'resolutionType', 'resolution', 'satisfied', 'complaintID', 'cancellationReason'] if field in value: type_fields = 'value' elif field in text: type_fields = 'text' return type_fields def get_delivery_region(region): if region == u"місто Київ": delivery_region = u"м.Київ" elif region == u"Дніпропетровська область": delivery_region = u"Днiпропетровська область" elif region == u"Рівненська область": delivery_region = u"Рiвненська область" elif region == u"Чернігівська область": delivery_region = u"Чернiгiвська область" else: delivery_region = region return delivery_region def convert_float_to_string(number): return format(number, '.2f') def convert_coordinates_to_string(number): return format(number) def adapt_delivery_date(date): adapt_date = ''.join([date[:date.index('T') + 1], '00:00:00', date[date.index('+'):]]) return adapt_date def parse_date(date_str): date_str = datetime.strptime(date_str, "%d.%m.%Y %H:%M") date = datetime(date_str.year, date_str.month, date_str.day, date_str.hour, date_str.minute, date_str.second, date_str.microsecond) date = TZ.localize(date).isoformat() return date def parse_item_date(date_str): date_str = datetime.strptime(date_str, "%d.%m.%Y") date = datetime(date_str.year, date_str.month, date_str.day) date = TZ.localize(date).isoformat() return date def convert_date_to_string(date): date = dateutil.parser.parse(date) date = date.strftime("%d.%m.%Y %H:%M") return date def convert_item_date_to_string(date): date = dateutil.parser.parse(date) date = date.strftime("%d.%m.%Y") return date def parse_complaintPeriod_date(date_string): date_str = 
datetime.strptime(date_string, "%d.%m.%Y %H:%M") date_str -= timedelta(minutes=5) date = datetime(date_str.year, date_str.month, date_str.day, date_str.hour, date_str.minute, date_str.second, date_str.microsecond) date = TZ.localize(date).isoformat() return date def parse_complaintPeriod_endDate(date_str): if '-' in date_str: date_str = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S") else: date_str = datetime.strptime(date_str, "%d.%m.%Y %H:%M") date = datetime(date_str.year, date_str.month, date_str.day, date_str.hour, date_str.minute, date_str.second, date_str.microsecond) date = TZ.localize(date).isoformat() return date def capitalize_first_letter(string): string = string.capitalize() return string def get_unit_name(name): return { u'штуки': u'шт.', u'упаковка': u'упак.', u'набір': u'наб.', u'кілограми': u'кг.', u'лот': u'лот', u'флакон': u'флак.', u'Флакон': u'флак.' }.get(name, name) def convert_locality(name): if name == u"Київ": adapted_name = u"М.КИЇВ" elif name == u"Дніпропетровськ": adapted_name = u"ДНІПРОПЕТРОВСЬКА ОБЛАСТЬ/М.ДНІПРО" else: adapted_name = name return adapted_name.upper() def convert_status(tender_status): status = { u'Очікування пропозицій': u'active.tendering', u'Період аукціону': u'active.auction', u'Період уточнень': u'active.enquiries', u'Перед-кваліфікаційний період': u'active.pre-qualification', u'Період оскарження': u'active.pre-qualification.stand-still' } return status[tender_status] def get_claim_status(claim_status, test_name): status = { u'Вимога': 'claim', u'Розглянуто': 'answered', u'Вирішена': 'resolved', u'Відхилено': 'cancelled', u'Відхилена': 'declined', u'Обробляється': 'pending', u'Недійсна': 'invalid', u'Проігнорована': 'ignored' } return status[claim_status] def get_resolution_type(resolution): types = { u'Вирішено': 'resolved', u'Задоволено': 'resolved', u'Відхилено': 'declined', u'Недійсно': 'invalid' } return types[resolution] def convert_satisfied(value): if value == u'Так': satisfied = True else: satisfied = 
False return satisfied def get_unit(field,unit_data): unit = unit_data.split() unit[1] = adapt_unit_name(unit[1]) unit_value = { 'unit.code': unit[0], 'unit.name': unit[1] } return unit_value[field] def convert_type_tender(key): type_tender = { u'Відкриті торги': 'aboveThresholdUA', u'Відкриті торги з публікацією англ.мовою': 'aboveThresholdEU', u'Переговорна процедура': 'reporting' } return type_tender[key] def convert_data_lot(key): data_lot = { u'грн.': 'UAH' } return data_lot[key] def convert_data_feature(key): data_feature = { u'Закупівлі': 'tenderer', u'Лоту': 'lot', u'Предмету лоту': 'item' } return data_feature[key] def convert_complaintID(tender_uaid, type_complaint): if 'complaint_number' not in globals(): complaint_number = 1 value = '%s.a%s' % (tender_uaid, complaint_number) global complaint_number complaint_number += 1 return value def get_pos(featureOf): if featureOf == u'Закупівлі': position = 1 elif featureOf == u'Лоту': position = 2 elif featureOf == u'Предмету лоту': position = 1 return position def get_value_feature(value): value = value * 100 value = str(int(value)) + '%' return value def get_feature_xpath(field_name, feature_id): xpath = { 'title': "//*[contains(@value, '" +feature_id+ "')]", 'description': "//*[contains(@value, '" +feature_id+ "')]/ancestor::tbody/tr[2]/td[2]/textarea", 'featureOf': "//*[contains(@value, '" +feature_id+ "')]/ancestor::tbody/tr[3]/td[2]//td[2]/div[1]/label" } return xpath[field_name] def convert_bid_status(value): status = { u'Недійсна пропозиція': 'invalid' } return status[value] def get_all_dates(initial_tender_data, key): tender_period = initial_tender_data.data.tenderPeriod start_dt = dateutil.parser.parse(tender_period['startDate']) end_dt = dateutil.parser.parse(tender_period['endDate']) data = { 'EndPeriod': start_dt.strftime("%d.%m.%Y %H:%M"), 'StartDate': start_dt.strftime("%d.%m.%Y %H:%M"), 'EndDate': end_dt.strftime("%d.%m.%Y %H:%M"), } return data.get(key, '') def increment_identifier(data): 
data['data']['procuringEntity']['identifier']['id'] = str(int(data['data']['procuringEntity']['identifier']['id']) + 1) def convert_cause_type(key): cause_type = { '1': 'artContestIP', '2': 'noCompetition', '4': 'twiceUnsuccessful', '5': 'additionalPurchase', '6': 'additionalConstruction', '7': 'stateLegalServices', } return cause_type[key]
30.785256
158
0.639771
ceefdd4f273021acf57a211bf9db5e4727c86333
1,638
py
Python
sway/tiling-indicator.py
iziGor/scripts
0076711ab6c423d97c2dad72119fbd57e27fb250
[ "BSD-2-Clause" ]
null
null
null
sway/tiling-indicator.py
iziGor/scripts
0076711ab6c423d97c2dad72119fbd57e27fb250
[ "BSD-2-Clause" ]
null
null
null
sway/tiling-indicator.py
iziGor/scripts
0076711ab6c423d97c2dad72119fbd57e27fb250
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3 """ Show split layout indicator Usage: ./tiling-indicator.py Suppoused to be used inside waybar or polybar. Config example: Waybar: "custom/ws": { "exec": "python -u $HOME/.config/sway/scripts/tiling-indicator-2.py 2> /dev/null } Polybar: [module/layout] type = custom/script exec = PYTHONPATH=${XDG_CONFIG_HOME}/i3 python -u -m scripts.tiling-indicator.py 2> /dev/null interval = 0 format = "<label>" tail = true label-font = 6 github :: https://github.com/iziGor year :: 2021 """ import i3ipc i3 = i3ipc.Connection() last = '' # Font Awesome 5 Free:style=Solid # layouts = { "tabbed": ("61bbf6", "\uf24d") # , "stacked": ("00AA00", "\uf5fd") # , "splitv": ("82B8DF", "\uf103") # , "splith": ("CF4F88", "\uf101") # } layouts = { "tabbed": ("61bbf6", "\uf24d") , "stacked": ("00AA00", "\uf5fd") , "splitv": ("82B8DF", "\u2b9f") , "splith": ("CF4F88", "\u2b9e") } # Material Icons # layouts = {"tabbed":"\ue8d8", "stacked":"\ue3c7", "splitv":"\ue947", "splith":"\ue949"} def on_event(sway, _): global last layout = sway.get_tree().find_focused().parent.layout if not layout == last: ## polybar format output # print("%{{F#{}}}{}%{{F-}}".format(*layouts.get(layout, ("888800", "?")))) ## waybar format output print("<span color='#{}'>{}</span>".format(*layouts.get(layout, ("888800", "?")))) last = layout # Subscribe to events i3.on("window::focus", on_event) i3.on("binding", on_event) # Start the main loop and wait for events to come in. i3.main()
22.135135
93
0.582418
cef07786e7e7ac506670e6c1114e7cf83e1eb3a0
5,822
py
Python
cogs/tags.py
Chrovo/Productivity
4bdb7eecfb8ae16b013ce58a1b0421f8f791499e
[ "MIT" ]
null
null
null
cogs/tags.py
Chrovo/Productivity
4bdb7eecfb8ae16b013ce58a1b0421f8f791499e
[ "MIT" ]
null
null
null
cogs/tags.py
Chrovo/Productivity
4bdb7eecfb8ae16b013ce58a1b0421f8f791499e
[ "MIT" ]
null
null
null
from typing import Optional

import discord
import asyncpg
from discord.ext import commands

from .utils.pagination import create_paginated_embed


class Tags(commands.Cog):
    """Productivity's tag system: per-guild named text snippets."""

    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
        self.emoji = "🏷️ "  # cog emoji shown in help listings

    async def delete_check(self, ctx: commands.Context, tag_name) -> bool:
        """Return True if the invoker may delete *tag_name* in this guild.

        Permission is granted to the tag owner or to members with the
        Manage Messages permission.  Returns False when the tag does not
        exist.

        Bugfix: previously the stored integer ``user_id`` was compared to
        the ``Member`` object itself (always False, so only moderators
        could ever delete), and a missing tag raised TypeError when the
        None row was subscripted.
        """
        query = """
        SELECT * FROM tags
        WHERE tag_name = $1 AND guild_id = $2;
        """
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                fetched = await connection.fetchrow(query, tag_name, ctx.guild.id)
        if fetched is None:
            return False
        return fetched['user_id'] == ctx.author.id or ctx.author.guild_permissions.manage_messages

    @commands.group(invoke_without_command=True)
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def tag(self, ctx, *, tag: str):
        """A tag system!"""
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                try:
                    query = """
                    SELECT * FROM tags
                    WHERE tag_name = $1 AND guild_id = $2;
                    """
                    tag = await connection.fetchrow(query, tag, ctx.guild.id)
                    # fetchrow returns None for a missing tag; subscripting
                    # None raises TypeError, handled below.
                    return await ctx.send(tag['tag_content'])
                except TypeError:
                    return await ctx.send("Tag not found.")

    @tag.command(description="Create a tag!", aliases=['add'])
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def create(self, ctx, name, *, content):
        """Create a new tag owned by the invoker."""
        try:
            query = """
            INSERT INTO tags (user_id, guild_id, tag_name, tag_content)
            VALUES ($1, $2, $3, $4);
            """
            await self.bot.db.execute(query, ctx.author.id, ctx.guild.id, name, content)
            # (typo fixed: "Succesfully" -> "Successfully")
            await ctx.send("Successfully created the tag!")
        except Exception as e:
            await ctx.send(e)
            await ctx.send("An error has occurred whilst creating the tag")

    @tag.command(description="Start your use of creating tags")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def start(self, ctx):
        """Register the invoker in the tag_users table."""
        try:
            query = """
            INSERT INTO tag_users (user_id, username)
            VALUES ($1, $2);
            """
            await self.bot.db.execute(query, ctx.author.id, ctx.author.name)
            await ctx.send("Successfully started your use of our tag system!")
        except Exception:
            # Unique-constraint violation: the user is already registered.
            await ctx.send("You are already in our database!")

    @tag.command(description="Delete a tag!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def delete(self, ctx, *, tag: str):
        """Delete a tag if the invoker owns it or can manage messages."""
        check = await self.delete_check(ctx, tag)
        if check:
            try:
                query = """
                DELETE FROM tags
                WHERE tag_name = $1 AND guild_id = $2;
                """
                await self.bot.db.execute(query, tag, ctx.guild.id)
                await ctx.send("Successfully deleted tag!")
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit /
                # KeyboardInterrupt are not swallowed.
                await ctx.send("An error has occurred while attempting to delete the tag.")
        else:
            await ctx.send("You do not have permission to delete this tag!")

    @commands.command(description="Look at all of the tags a member has!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def tags(self, ctx, member: Optional[discord.Member] = None):
        """List every tag owned by *member* (defaults to the invoker)."""
        member = member or ctx.author
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                query = """
                SELECT * FROM tags
                WHERE user_id = $1 AND guild_id = $2;
                """
                tags = await connection.fetch(query, member.id, ctx.guild.id)
        paginate = create_paginated_embed(ctx, tags, 'tag_name', f"{member}'s tags", member.avatar_url, member.name)
        await paginate.start(ctx)

    @tag.command(description="Edit a tag!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def edit(self, ctx, old_tag, new_name, *, new_content):
        """Rename and rewrite a tag the invoker owns."""
        query = """
        UPDATE tags SET tag_name = $1, tag_content = $2
        WHERE user_id = $3 AND tag_name = $4 AND guild_id = $5;
        """
        try:
            await self.bot.db.execute(query, new_name, new_content, ctx.author.id, old_tag, ctx.guild.id)
            return await ctx.send("Successfully edited tag!")
        except Exception:
            return await ctx.send(
                """
                An error occurred while editing the tag, this is likely because u dont own this tag or it doesnt exist.
                """
            )

    @tag.command(description="View information about a tag!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def info(self, ctx, *, tag: str):
        """Show a tag's name and owner in an embed."""
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                query = """
                SELECT * FROM tags
                WHERE guild_id = $1 AND tag_name = $2;
                """
                try:
                    tag_info = await connection.fetchrow(query, ctx.guild.id, tag)
                    # A missing tag makes tag_info None -> TypeError below.
                    owner = ctx.guild.get_member(tag_info['user_id'])
                    embed = discord.Embed(title=tag_info['tag_name'])
                    embed.add_field(name="Owner", value=owner.mention)
                    embed.set_author(name=owner, icon_url=owner.avatar_url)
                    return await ctx.send(embed=embed)
                except TypeError:
                    return await ctx.send("Tag not found.")


def setup(bot: commands.Bot):
    """discord.py extension entry point."""
    bot.add_cog(Tags(bot))
39.605442
124
0.5663
cef1f91b5f6b4d828f87bc4bbe7d153aead9af59
1,078
py
Python
app.py
lblod/contact-hub-harvester
75895a1920b3bae10f61e4f284499e2b23927609
[ "MIT" ]
1
2021-06-08T07:58:24.000Z
2021-06-08T07:58:24.000Z
app.py
lblod/contact-hub-harvester
75895a1920b3bae10f61e4f284499e2b23927609
[ "MIT" ]
null
null
null
app.py
lblod/contact-hub-harvester
75895a1920b3bae10f61e4f284499e2b23927609
[ "MIT" ]
null
null
null
import sys

import mapping.organization as org
import mapping.contact as contact
import mapping.worship as worship
import mapping.central as central
import mapping.national as national
import mapping.codelist as codelist
import mapping.vocabulary as vocab
import mapping.location as location
import mapping.local_admin_unit as local_admin_unit
import mapping.nationality as nationality

# Command name -> (entry point, number of CLI arguments it consumes).
_DISPATCH = {
    'org': (org.main, 2),
    'contact': (contact.main, 2),
    'worship': (worship.main, 2),
    'central': (central.main, 2),
    'national': (national.main, 2),
    'codelist': (codelist.main, 0),
    'vocab': (vocab.main, 1),
    'location': (location.main, 0),
    'local_admin_unit': (local_admin_unit.main, 0),
    'nationality': (nationality.main, 0),
}


def main(*args):
    """Run the mapping job named by args[0], forwarding its arguments.

    Unknown command names are silently ignored, matching the behaviour of
    the previous if/elif chain.
    """
    entry = _DISPATCH.get(args[0])
    if entry is None:
        return
    func, arg_count = entry
    # Index explicitly (not a slice) so a missing argument raises
    # IndexError, exactly as the original chain did.
    func(*(args[i] for i in range(1, 1 + arg_count)))


if __name__ == '__main__':
    args = sys.argv[1:]
    if len(args) > 0:
        main(*args)
26.292683
51
0.681818
cef4a8a3cb987de3e3d820dc98bcefb1e7cbe272
2,815
py
Python
ML_CW2/assgn_2/task_1.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
ML_CW2/assgn_2/task_1.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
ML_CW2/assgn_2/task_1.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
import numpy as np
import os
import matplotlib.pyplot as plt
from print_values import *
from plot_data_all_phonemes import *
from plot_data import *

# File that contains the data
data_npy_file = 'data/PB_data.npy'

# Loading data from .npy file (stored as a pickled dict).
data = np.load(data_npy_file, allow_pickle=True)
data = np.ndarray.tolist(data)

# Make a folder to save the figures
figures_folder = os.path.join(os.getcwd(), 'figures')
if not os.path.exists(figures_folder):
    os.makedirs(figures_folder, exist_ok=True)

# Array that contains the phoneme ID (1-10) of each sample
phoneme_id = data['phoneme_id']
print(phoneme_id)

# frequencies f1 and f2
f1 = data['f1']
f2 = data['f2']

print('f1 statistics:')
print_values(f1)
print('f2 statistics:')
print_values(f2)

# X_full: one row per sample; column 0 is f1, column 1 is f2.
X_full = np.zeros((len(f1), 2))
X_full[:, 0] = f1
X_full[:, 1] = f2
X_full = X_full.astype(np.float32)

# ID of the phoneme whose samples will be isolated and plotted.
p_id = 1

# Bugfix: X_phoneme_1 was allocated as zeros but never filled, so the
# "phoneme 1" figure plotted a single point at the origin.  Select the rows
# of X_full whose phoneme ID matches p_id via boolean-mask indexing; using
# p_id (instead of the previous hard-coded ``phoneme_id == 1``) also makes
# the chosen phoneme a single-point-of-change.
X_phoneme_1 = X_full[np.asarray(phoneme_id) == p_id, :]

########################################
# Plot array containing all phonemes

# Create a figure and a subplot
fig, ax1 = plt.subplots()

# plot the full dataset (f1 & f2, all phonemes)
plot_data_all_phonemes(X=X_full, phoneme_id=phoneme_id, ax=ax1)

# save the plotted dataset as a figure
plot_filename = os.path.join(os.getcwd(), 'figures', 'dataset_full.png')
plt.savefig(plot_filename)

################################################
# Plot array containing phoneme 1

# Create a figure and a subplot
fig, ax2 = plt.subplots()
title_string = 'Phoneme 1'

# plot the samples of the dataset, belonging to phoneme 1 (f1 & f2, phoneme 1)
plot_data(X=X_phoneme_1, title_string=title_string, ax=ax2)

# save the plotted points of phoneme 1 as a figure
plot_filename = os.path.join(os.getcwd(), 'figures', 'dataset_phoneme_1.png')
plt.savefig(plot_filename)

# enter non-interactive mode of matplotlib, to keep figures open
plt.ioff()
plt.show()
34.329268
157
0.694494
cef51d1bebab54bc43146e3a95ff04dc8d153fb9
459
py
Python
students/migrations/0006_auto_20200403_2355.py
Davy-71993/MySchool
fa02c8ec19d71873fc0d714cf652d8ad05f2f0e7
[ "MIT" ]
null
null
null
students/migrations/0006_auto_20200403_2355.py
Davy-71993/MySchool
fa02c8ec19d71873fc0d714cf652d8ad05f2f0e7
[ "MIT" ]
null
null
null
students/migrations/0006_auto_20200403_2355.py
Davy-71993/MySchool
fa02c8ec19d71873fc0d714cf652d8ad05f2f0e7
[ "MIT" ]
null
null
null
# Generated by Django 2.2.6 on 2020-04-03 20:55 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('students', '0005_subject'), ] operations = [ migrations.RemoveField( model_name='subject', name='papers', ), migrations.DeleteModel( name='Paper', ), migrations.DeleteModel( name='Subject', ), ]
19.125
47
0.538126
cef53f21d6ccfd1533b28e30e6717c9396761c37
4,027
py
Python
terminalgame/world.py
naslundx/terminalgame
d855ec33ff8a057b1308ad30c54f138343baf56f
[ "MIT" ]
null
null
null
terminalgame/world.py
naslundx/terminalgame
d855ec33ff8a057b1308ad30c54f138343baf56f
[ "MIT" ]
null
null
null
terminalgame/world.py
naslundx/terminalgame
d855ec33ff8a057b1308ad30c54f138343baf56f
[ "MIT" ]
null
null
null
import curses
from contextlib import contextmanager
from time import sleep
from typing import List, Tuple, TYPE_CHECKING

from .actions import Action
from .properties import Property

if TYPE_CHECKING:
    from .object import Object


class World:
    """Singleton facade over the game world (objects, input, rendering)."""

    class __World:
        """The actual world state; one instance is created by World()."""

        def __init__(self, fps: int, render: bool = True):
            self.fps = fps
            self._objects: List["Object"] = []
            # Pending (x, y, char) cells to (re)draw on the next draw() call.
            self._draw_queue: List[Tuple[int, int, str]] = []
            self.running = True
            self._window = None
            # Fixed playfield size; the getmaxyx() call is disabled.
            self._height, self._width = 50, 80  # s.getmaxyx()
            self._key = None
            if render:
                _ = curses.initscr()
                curses.curs_set(0)
                self._window = curses.newwin(self._height, self._width, 0, 0)
                self._window.keypad(True)
                # Make getch() non-blocking past one frame interval.
                self._window.timeout(1000 // self.fps)

        def register(self, obj: "Object"):
            """Add *obj* to the world and queue its initial draw."""
            assert obj not in self._objects
            self._objects.append(obj)
            self._draw_queue.append((obj.x, obj.y, obj.sign))

        def get_properties(self, x: int, y: int) -> List[Property]:
            """Properties of the first live object at (x, y); [] if empty."""
            for o in self._objects:
                if o.xy == (x, y) and not o.is_destroyed:
                    return o.properties[:]
            return []

        def draw(self):
            """Flush the draw queue to the curses window (or stdout)."""
            while self._draw_queue:
                x, y, s = self._draw_queue.pop()
                if self._window:
                    # Silently drop cells outside the playfield.
                    if x in range(0, self.width) and y in range(0, self.height):
                        self._window.addch(y, x, s)
                else:
                    print(x, y, s)

        def tick(self):
            """Advance the world one frame; returns self.running."""
            # Handle keypress mapping: move any object whose key map
            # contains the pressed key, unless a SOLID object blocks it.
            key = self.keypress
            if key:
                for obj in (o for o in self._objects if o.mapping):
                    if key in obj.mapping:
                        new_x, new_y = obj.x, obj.y
                        action = obj.mapping[key]
                        if action == Action.MOVE_UP:
                            new_y -= 1
                        if action == Action.MOVE_DOWN:
                            new_y += 1
                        if action == Action.MOVE_LEFT:
                            new_x -= 1
                        if action == Action.MOVE_RIGHT:
                            new_x += 1
                        if Property.SOLID not in self.get_properties(new_x, new_y):
                            obj.x, obj.y = new_x, new_y
            # Update draw queue: erase destroyed objects, erase+redraw
            # moved ones, then let every object run its per-frame logic.
            for obj in self._objects:
                if obj.is_destroyed:
                    self._draw_queue.append((obj._oldx, obj._oldy, " "))
                elif obj.has_moved:
                    self._draw_queue.append((obj._oldx, obj._oldy, " "))
                    self._draw_queue.append((obj.x, obj.y, obj.sign))
                obj.tick()
            # Render
            self.draw()
            # Remove destroyed objects
            self._objects = [o for o in self._objects if not o.is_destroyed]
            # Get keypress; getch() also paces the loop via the window
            # timeout, otherwise sleep() keeps the frame rate.
            if self._window:
                self._key = self._window.getch()
            else:
                sleep(1.0 / self.fps)
                self._key = None
            return self.running

        def quit(self):
            """Stop the loop and restore the terminal."""
            self.running = False
            if self._window:
                curses.endwin()
                self._window = False

        @property
        def width(self):
            return self._width

        @property
        def height(self):
            return self._height

        @property
        def keypress(self):
            # curses returns -1 when no key is pending; normalise to None.
            return self._key if self._key != -1 else None

    # The single shared __World instance (set by the first World() call).
    instance = None

    def __init__(self, *args, **kwargs):
        # Only one world may ever be constructed.
        assert not World.instance
        World.instance = World.__World(*args, **kwargs)

    def __getattr__(self, name):
        # Delegate all attribute access to the singleton instance.
        return getattr(self.instance, name)

    @contextmanager
    def renderer(self):
        """Context manager that guarantees quit() (terminal restore)."""
        try:
            yield self
        finally:
            self.quit()
31.217054
83
0.48746
cef7afd517df5ca0ce5466fa5f955c031bbbb177
223
py
Python
src/projects/tests/factories/package.py
unikubehq/projects
0df69eafa2a0d2664a22c7a5866d4512ac4d57fe
[ "Apache-2.0" ]
1
2021-10-05T13:17:03.000Z
2021-10-05T13:17:03.000Z
src/projects/tests/factories/package.py
unikubehq/projects
0df69eafa2a0d2664a22c7a5866d4512ac4d57fe
[ "Apache-2.0" ]
48
2021-07-06T07:24:36.000Z
2022-03-24T08:27:30.000Z
src/projects/tests/factories/package.py
unikubehq/projects
0df69eafa2a0d2664a22c7a5866d4512ac4d57fe
[ "Apache-2.0" ]
null
null
null
import factory

from projects.tests.factories.project import ProjectFactory


class DeckFactory(factory.DjangoModelFactory):
    """Test factory producing ``projects.Deck`` model instances."""

    class Meta:
        model = "projects.Deck"

    # Each Deck gets its own Project unless one is passed in explicitly.
    project = factory.SubFactory(ProjectFactory)
20.272727
59
0.766816
cef7c80ce2c92bb1a2333400473042e3222f6983
1,012
py
Python
data/data-pipeline/data_pipeline/etl/constants.py
vim-usds/justice40-tool
6691df3e318b531b0e05454a79b8560b7d307b36
[ "CC0-1.0" ]
null
null
null
data/data-pipeline/data_pipeline/etl/constants.py
vim-usds/justice40-tool
6691df3e318b531b0e05454a79b8560b7d307b36
[ "CC0-1.0" ]
null
null
null
data/data-pipeline/data_pipeline/etl/constants.py
vim-usds/justice40-tool
6691df3e318b531b0e05454a79b8560b7d307b36
[ "CC0-1.0" ]
null
null
null
# Every ETL dataset uses a module directory named after the dataset, so the
# registry is generated from (name, ETL class) pairs instead of repeating
# the name in each literal.
_ETL_SPECS = [
    ("tree_equity_score", "TreeEquityScoreETL"),
    ("census_acs", "CensusACSETL"),
    ("ejscreen", "EJScreenETL"),
    ("housing_and_transportation", "HousingTransportationETL"),
    ("hud_housing", "HudHousingETL"),
    ("calenviroscreen", "CalEnviroScreenETL"),
    ("hud_recap", "HudRecapETL"),
]

# Registry consumed by the ETL runner: one descriptor dict per dataset.
DATASET_LIST = [
    {"name": name, "module_dir": name, "class_name": class_name}
    for name, class_name in _ETL_SPECS
]

# The census dataset is handled separately from DATASET_LIST.
CENSUS_INFO = {
    "name": "census",
    "module_dir": "census",
    "class_name": "CensusETL",
}
23.534884
51
0.528656
cef85293a8bebf5de04a9c7c28c2ecb0f768bbf3
129
py
Python
alcor/models/__init__.py
wolvespack/alcor
dcc6b029ab7e6eb96b65b7b996cf335c3b030649
[ "MIT" ]
2
2017-05-02T11:36:46.000Z
2017-05-02T14:01:16.000Z
alcor/models/__init__.py
wolvespack/alcor
dcc6b029ab7e6eb96b65b7b996cf335c3b030649
[ "MIT" ]
14
2017-10-15T20:13:23.000Z
2017-12-03T17:00:30.000Z
alcor/models/__init__.py
wolvespack/alcor
dcc6b029ab7e6eb96b65b7b996cf335c3b030649
[ "MIT" ]
null
null
null
from .group import Group from .star import (STAR_PARAMETERS_NAMES, GalacticDiskType, Star)
25.8
41
0.581395
cef8e48081ec1240c8d15a802e15c821eaaffb84
1,064
py
Python
cfc_app/migrations/0005_auto_20201024_0139.py
ephyle/Legit-Info
7f3845563a64299aa64e4fdba75949276ed9a711
[ "BSD-2-Clause", "CC-BY-4.0", "Apache-2.0" ]
44
2020-10-19T13:06:10.000Z
2022-01-23T10:56:31.000Z
cfc_app/migrations/0005_auto_20201024_0139.py
ephyle/Legit-Info
7f3845563a64299aa64e4fdba75949276ed9a711
[ "BSD-2-Clause", "CC-BY-4.0", "Apache-2.0" ]
111
2020-10-20T22:12:58.000Z
2022-03-28T00:25:13.000Z
cfc_app/migrations/0005_auto_20201024_0139.py
ephyle/Legit-Info
7f3845563a64299aa64e4fdba75949276ed9a711
[ "BSD-2-Clause", "CC-BY-4.0", "Apache-2.0" ]
31
2021-02-08T22:32:37.000Z
2022-03-11T10:57:29.000Z
# Generated by Django 3.0.8 on 2020-10-24 01:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cfc_app', '0004_auto_20201024_0133'), ] operations = [ migrations.AlterField( model_name='hash', name='fob_method', field=models.CharField(editable=False, max_length=6), ), migrations.AlterField( model_name='hash', name='generated_date', field=models.DateField(editable=False), ), migrations.AlterField( model_name='hash', name='hashcode', field=models.CharField(editable=False, max_length=32), ), migrations.AlterField( model_name='hash', name='item_name', field=models.CharField(editable=False, max_length=255), ), migrations.AlterField( model_name='hash', name='size', field=models.PositiveIntegerField(editable=False), ), ]
27.282051
67
0.56391
cefa905aac2153e51d910363e7adb666340184b1
1,955
py
Python
e2e/test_get.py
sturzl/guet
b8c453f07968b689b303e20e7a31b405c02c54ef
[ "Apache-2.0" ]
null
null
null
e2e/test_get.py
sturzl/guet
b8c453f07968b689b303e20e7a31b405c02c54ef
[ "Apache-2.0" ]
null
null
null
e2e/test_get.py
sturzl/guet
b8c453f07968b689b303e20e7a31b405c02c54ef
[ "Apache-2.0" ]
null
null
null
from e2e import DockerTest


class TestGet(DockerTest):
    """End-to-end tests for the ``guet get`` command, run inside Docker.

    Each test queues guet/git commands, runs them with execute(), then
    asserts on specific log lines by index (the indices depend on how many
    lines the queued commands emit before the assertion target).
    """

    def test_get_current_prints_currently_set_committers(self):
        self.guet_init()
        self.git_init()
        self.guet_add('initials1', 'name1', 'email1')
        self.guet_add('initials2', 'name2', 'email2')
        self.guet_start()
        self.guet_set(['initials1', 'initials2'])
        self.guet_get_current()
        self.save_file_content('.guet/errors')
        self.execute()
        # Lines 0-4 are output of the setup commands above.
        self.assert_text_in_logs(5, 'Currently set committers')
        self.assert_text_in_logs(6, 'initials1 - name1 <email1>')
        self.assert_text_in_logs(7, 'initials2 - name2 <email2>')

    def test_get_committers_prints_all_committers_on_the_system(self):
        self.guet_init()
        self.guet_add('initials1', 'name1', 'email1')
        self.guet_add('initials2', 'name2', 'email2')
        self.guet_get_committers()
        self.save_file_content('.guet/errors')
        self.execute()
        self.assert_text_in_logs(0, 'All committers')
        self.assert_text_in_logs(1, 'initials1 - name1 <email1>')
        self.assert_text_in_logs(2, 'initials2 - name2 <email2>')

    def test_get_prints_error_message_if_trying_to_run_before_guet_init(self):
        # No guet_init() on purpose: the command must refuse to run.
        self.guet_get_committers()
        self.execute()
        self.assert_text_in_logs(0, ('guet has not been initialized yet! ' +
                                     'Please do so by running the command "guet init".'))

    def test_prints_help_message(self):
        self.guet_init()
        self.guet_get_committers(help=True)
        self.execute()
        self.assert_text_in_logs(0, 'usage: guet get <identifier> [-flag, ...]')
        self.assert_text_in_logs(2, 'Get currently set information.')
        self.assert_text_in_logs(4, 'Valid Identifier')
        self.assert_text_in_logs(6, '\tcurrent - lists currently set committers')
        self.assert_text_in_logs(7, '\tcommitters - lists all committers')
38.333333
89
0.658824
cefb3f843b82b1bef9a3afd4b8e0d08adb19182d
175
py
Python
code/eda.py
rodriggs/twosigmafinancial
a8ad216a71e4bb3fbfbd606281b101b845eae961
[ "MIT" ]
null
null
null
code/eda.py
rodriggs/twosigmafinancial
a8ad216a71e4bb3fbfbd606281b101b845eae961
[ "MIT" ]
null
null
null
code/eda.py
rodriggs/twosigmafinancial
a8ad216a71e4bb3fbfbd606281b101b845eae961
[ "MIT" ]
null
null
null
import numpy as np
import pandas as pd
import h5py
# NOTE(review): numpy and h5py are imported but unused in this snippet;
# presumably kept for interactive exploration.

# To reproduce the Kaggle competition environment:
#   docker run -it kagglegym
#   python
#   >>> import kagglegym
#   >>> kagglegym.test()

# Load the Two Sigma training data from the competition's HDF5 dump.
train = pd.read_hdf("../data/train.h5")
15.909091
39
0.691429
cefe60a9049748b68fc54c632b52a7d08d5a4d7b
442
py
Python
w3r-basic-1-50.py
WLi-syd/python
e8bd743535a1eca3493ea7f74a7475d1fb49de98
[ "CNRI-Python" ]
null
null
null
w3r-basic-1-50.py
WLi-syd/python
e8bd743535a1eca3493ea7f74a7475d1fb49de98
[ "CNRI-Python" ]
null
null
null
w3r-basic-1-50.py
WLi-syd/python
e8bd743535a1eca3493ea7f74a7475d1fb49de98
[ "CNRI-Python" ]
null
null
null
# 50. Write a Python program to print without newline or space.
# Solution: https://www.w3resource.com/python-exercises/python-basic-exercise-50.php
#
# Approach: instead of looping print('*', end="") ten times and then
# emitting print("\n"), build the row once with string repetition and
# append the extra newline explicitly.  Output is byte-identical:
# "**********" followed by a blank line.
print("*" * 10 + "\n")
15.241379
114
0.624434
ceffd051d030d11340ed2510ffc0183d9860e640
1,559
py
Python
chris_backend/servicefiles/models.py
rudolphpienaar/ChRIS_ultron_backEnd
5de4e255fb151ac7a6f900327704831da11dcd1f
[ "MIT" ]
null
null
null
chris_backend/servicefiles/models.py
rudolphpienaar/ChRIS_ultron_backEnd
5de4e255fb151ac7a6f900327704831da11dcd1f
[ "MIT" ]
null
null
null
chris_backend/servicefiles/models.py
rudolphpienaar/ChRIS_ultron_backEnd
5de4e255fb151ac7a6f900327704831da11dcd1f
[ "MIT" ]
null
null
null
from django.db import models

import django_filters
from django_filters.rest_framework import FilterSet

# Service identifiers the application recognises.
REGISTERED_SERVICES = ['PACS']


class Service(models.Model):
    """A registered external service (e.g. PACS)."""

    identifier = models.CharField(max_length=20, unique=True)

    def __str__(self):
        return self.identifier


class ServiceFile(models.Model):
    """A file delivered by a Service; deleted with its service (CASCADE)."""

    creation_date = models.DateTimeField(auto_now_add=True)
    fname = models.FileField(max_length=512, unique=True)
    service = models.ForeignKey(Service, db_index=True, on_delete=models.CASCADE)

    class Meta:
        # Newest-path-first default ordering.
        ordering = ('-fname',)

    def __str__(self):
        return self.fname.name


class ServiceFileFilter(FilterSet):
    """Query-parameter filters for ServiceFile list endpoints."""

    min_creation_date = django_filters.DateFilter(field_name='creation_date', lookup_expr='gte')
    max_creation_date = django_filters.DateFilter(field_name='creation_date', lookup_expr='lte')
    # ``fname`` is a prefix match; ``fname_exact`` an exact match.
    fname = django_filters.CharFilter(field_name='fname', lookup_expr='startswith')
    fname_exact = django_filters.CharFilter(field_name='fname', lookup_expr='exact')
    service_identifier = django_filters.CharFilter(field_name='service__identifier', lookup_expr='exact')
    service_id = django_filters.CharFilter(field_name='service_id', lookup_expr='exact')

    class Meta:
        model = ServiceFile
        fields = ['id', 'min_creation_date', 'max_creation_date', 'fname', 'fname_exact', 'service_identifier', 'service_id']
35.431818
89
0.667736
cefff0cd7583924688b44d3f4da180ddf1bf3140
1,813
py
Python
lesion_tool/waimea.py
alaurent4/nighres
ffb4a478a224190ffe0112f7e4d214ad6825716e
[ "Apache-2.0" ]
null
null
null
lesion_tool/waimea.py
alaurent4/nighres
ffb4a478a224190ffe0112f7e4d214ad6825716e
[ "Apache-2.0" ]
null
null
null
lesion_tool/waimea.py
alaurent4/nighres
ffb4a478a224190ffe0112f7e4d214ad6825716e
[ "Apache-2.0" ]
1
2019-01-21T10:53:38.000Z
2019-01-21T10:53:38.000Z
#!/usr/bin/env python """ """ from xml.etree.ElementTree import Element import xml.etree.ElementTree as etree import xml.dom.minidom import re import sys import getopt import os from time import gmtime, strftime from nipype import config, logging from nighres.lesion_tool.lesion_pipeline import Lesion_extractor def main(): try: o, a = getopt.getopt(sys.argv[1:], "n:d:s:f:a:l:") except getopt.GetoptError as err: print(err) print('waimea.py -n <directory> -d <base_directory> -s <subject> -f <freesurfer dir> -a <atlas> -l <labels>') sys.exit(2) if len(o) < 4: print('waimea.py -n <directory> -d <base_directory> -s <subject> -f <freesurfer dir> -a <atlas> -l <labels>') sys.exit(2) for opt, arg in o: if opt == '-n': wf_name = arg elif opt == '-d': base_dir = arg elif opt == '-s': sub = arg elif opt == '-f': fsdir = arg elif opt == '-a': atlas = arg elif opt == '-l': labels = arg wf = Lesion_extractor(wf_name=wf_name, base_dir=base_dir, subjects=[sub], #main=main, #acc=acc, atlas=atlas, fs_subjects_dir=fsdir, labels=labels) config.update_config({'logging': {'log_directory': wf.base_dir,'log_to_file': True}}) logging.update_logging(config) config.set('execution','job_finished_timeout','20.0') wf.config['execution'] = {'job_finished_timeout': '10.0'} try: wf.run() except: print('Error! Pipeline exited ') raise if __name__ == "__main__": main()
29.241935
117
0.529509
3002f09ad0b6466dc363ed3ed13747fd93bb53e6
2,208
py
Python
system/__init__.py
JHUAPL/meta-system
d3e80e50d64e1a9e83d81efbcb8de1ec9cc34e03
[ "Apache-2.0" ]
5
2021-07-30T00:59:59.000Z
2022-03-23T16:52:46.000Z
system/__init__.py
JHUAPL/meta-system
d3e80e50d64e1a9e83d81efbcb8de1ec9cc34e03
[ "Apache-2.0" ]
null
null
null
system/__init__.py
JHUAPL/meta-system
d3e80e50d64e1a9e83d81efbcb8de1ec9cc34e03
[ "Apache-2.0" ]
null
null
null
# ********************************************************************** # Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory # # All Rights Reserved. # For any other permission, please contact the Legal Office at JHU/APL. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ********************************************************************** from flask import Flask from shared.config import config from shared.log import logger from system.extensions import FlaskExtensions, JobManagerClient, DockerClient from system.job_queue_manager import job_queue_watchdog cors = FlaskExtensions.cors mail = FlaskExtensions.mail mongodb = FlaskExtensions.mongodb jwt = FlaskExtensions.jwt bcrypt = FlaskExtensions.bcrypt class FlaskApp(object): def __init__(self): self.app = Flask(__name__, static_folder=config.STATIC_DIR, static_url_path="") self.app.config.update(config.dict()) bcrypt.init_app(self.app) jwt.init_app(self.app) mongodb.init_app(self.app) mail.init_app(self.app) cors.init_app(self.app) DockerClient() JobManagerClient() job_queue_watchdog() self.register_routes() def register_routes(self): from system.api.web import web_bp self.app.register_blueprint(web_bp) from system.api.info import info_bp self.app.register_blueprint(info_bp) from system.api.database import database_bp self.app.register_blueprint(database_bp) from system.api.jobs import jobs_bp self.app.register_blueprint(jobs_bp) from system.api.results import results_bp self.app.register_blueprint(results_bp)
32.955224
87
0.682065
30030def9ba42ea5610ba31e8842674e5faf76f9
80
py
Python
demo/py/constants.py
arjunsatyapal/lantern
073e1f86c6ef9234ab654af035e3870cb22f0dc4
[ "Apache-2.0" ]
1
2019-02-03T08:41:52.000Z
2019-02-03T08:41:52.000Z
demo/py/constants.py
arjunsatyapal/lantern
073e1f86c6ef9234ab654af035e3870cb22f0dc4
[ "Apache-2.0" ]
null
null
null
demo/py/constants.py
arjunsatyapal/lantern
073e1f86c6ef9234ab654af035e3870cb22f0dc4
[ "Apache-2.0" ]
null
null
null
# Constants DEFAULT_TITLE = 'Khan Academy' HOME_DOMAIN = 'www.khanacademy.org'
16
35
0.7625
3003b7db9f50c73f072be5e7001460eee57f2c46
1,950
py
Python
indexes/utils/dataset.py
war-and-peace/dss
8980477fc118b4f6fc2724427ba101c80eeb69d3
[ "MIT" ]
null
null
null
indexes/utils/dataset.py
war-and-peace/dss
8980477fc118b4f6fc2724427ba101c80eeb69d3
[ "MIT" ]
6
2021-02-24T21:40:27.000Z
2021-03-11T21:35:28.000Z
indexes/utils/dataset.py
war-and-peace/dss
8980477fc118b4f6fc2724427ba101c80eeb69d3
[ "MIT" ]
null
null
null
# ------------------------------------------------------------------------------ # Copyright (c) Abdurasul Rakhimov 24.2.2021. # ------------------------------------------------------------------------------ import numpy as np from overrides import overrides class Dataset: def __init__(self, name, dataset_path): self.name = name self.dataset_path = dataset_path self.dimensions = 0 self.size = 0 self.data = None self.loaded = False def get_data(self): return self.data def get_size(self): return self.size def get_dimensions(self): return self.dimensions def set_size(self, size): self.size = size def set_dimensions(self, dimensions): self.dimensions = dimensions def load_dataset(self, amount=-1): if self.loaded: return self.data data = np.array(np.load(self.dataset_path), dtype=np.float32) if amount == -1: amount = data.shape[0] self.data = data[:max(0, min(data.shape[0], amount))] self.dimensions = self.data.shape[1] self.size = self.data.shape[0] self.loaded = True return self.data def load_dataset_from_numpy(self, numpy_array): self.data = numpy_array self.dimensions = self.data.shape[1] self.size = self.data.shape[0] self.loaded = True return self.data def unload_dataset(self): self.data = None self.loaded = False def get_exact_query_results(self, queries, k, distance_function): results = [[(distance_function(self.data[idx], queries[q_id]), idx) for idx in range(self.data.shape[0])] for q_id in range(len(queries))] return [[elem for elem in list(sorted(res))[:k]] for res in results] class BasicDataset(Dataset): def __init__(self, name, dataset_path): super().__init__(name, dataset_path)
29.545455
117
0.567179
3004b0eda50f3f009063212317a8fd0157f3b144
369
py
Python
tools/configen/example/gen/configen/samples/user/conf/User.py
wpc/hydra
43b8a0f06eaf80f75810cc249e1eaa8a211e39c4
[ "MIT" ]
1
2021-02-23T00:00:20.000Z
2021-02-23T00:00:20.000Z
tools/configen/example/gen/configen/samples/user/conf/User.py
wpc/hydra
43b8a0f06eaf80f75810cc249e1eaa8a211e39c4
[ "MIT" ]
null
null
null
tools/configen/example/gen/configen/samples/user/conf/User.py
wpc/hydra
43b8a0f06eaf80f75810cc249e1eaa8a211e39c4
[ "MIT" ]
null
null
null
# Generated by configen, do not edit. # See https://github.com/facebookresearch/hydra/tree/master/tools/configen # fmt: off # isort:skip_file # flake8: noqa from dataclasses import dataclass from typing import * from omegaconf import MISSING @dataclass class UserConf: _target_: str = "configen.samples.user.User" age: int = MISSING name: str = MISSING
20.5
74
0.742547
3004c3f59cd9236cf1638000226994613db59de2
733
py
Python
src/kinect/Kinect.py
florianletsch/kinect-juggling
f320cc0b55adf65d338d25986a03106a7e3f46ef
[ "Unlicense", "MIT" ]
7
2015-11-27T09:53:32.000Z
2021-01-13T17:35:54.000Z
src/kinect/Kinect.py
florianletsch/kinect-juggling
f320cc0b55adf65d338d25986a03106a7e3f46ef
[ "Unlicense", "MIT" ]
null
null
null
src/kinect/Kinect.py
florianletsch/kinect-juggling
f320cc0b55adf65d338d25986a03106a7e3f46ef
[ "Unlicense", "MIT" ]
null
null
null
import time import numpy as np from freenect import sync_get_depth as get_depth, sync_get_video as get_video class Kinect(object): """Offers access to rgb and depth from the real Kinect""" def __init__(self): pass def get_frame(self, record=False): # Get a fresh frame (depth,_) = get_depth(format=4) (rgb,_) = get_video() if record: self.snapshot(rgb, depth) return (rgb, depth) def snapshot(self, rgb, depth): filename = "frames/frame-%d" % int(time.time()*1000) filename_rgb = filename + "-rgb" filename_depth = filename + "-depth" np.save(filename_rgb, rgb) np.save(filename_depth, depth)
31.869565
78
0.604366
30050dc99f412b59d4c595ff2aeb87a711b709c5
1,262
py
Python
main.py
hakierspejs/corononews
f1103da57d5c39694649bf7d7ba7748541dcfbe0
[ "WTFPL" ]
null
null
null
main.py
hakierspejs/corononews
f1103da57d5c39694649bf7d7ba7748541dcfbe0
[ "WTFPL" ]
null
null
null
main.py
hakierspejs/corononews
f1103da57d5c39694649bf7d7ba7748541dcfbe0
[ "WTFPL" ]
null
null
null
#!/usr/bin/env python import flask import requests import lxml.html import logging app = flask.Flask(__name__) LOGGER = logging.getLogger(__name__) HN_BASE_URL = 'https://news.ycombinator.com/' def has_virus(url): if not url.startswith('http://') and not url.startswith('https://'): return True s = requests.get(url).text.lower() for w in ['covid', 'virus']: if w in s: return True return False @app.route('/') def main(): h = lxml.html.fromstring(requests.get(HN_BASE_URL).text) ret = '<ol>' for n, row in enumerate(h.xpath('//tr [@id]')[1:]): story = row.xpath('.//a [@class="storylink"]').pop() LOGGER.info('%d: %s', n, story.get('href')) c_row = row.getnext() comments = c_row.xpath('.//a [contains(@href, "item?id=")]')[-1] comments_url = HN_BASE_URL + comments.get('href') if has_virus(story.get('href')) or has_virus(comments_url): continue ret += f''' <li> <a href="{story.get("href")}">{story.text}</a> (<a href="{comments_url}">{comments.text}</a>) </li>''' return ret if __name__ == '__main__': logging.basicConfig(level='INFO') app.run(host='0.0.0.0')
28.681818
72
0.568938
3007056835a3f4cdbc36d9cf5d7aec07fbd6a6ae
7,134
py
Python
code/func/func.py
lindenmp/neurodev_long
d6efc6b2e212bc6fc0669c80efcfa0b67d1e4b06
[ "MIT" ]
null
null
null
code/func/func.py
lindenmp/neurodev_long
d6efc6b2e212bc6fc0669c80efcfa0b67d1e4b06
[ "MIT" ]
5
2020-03-24T17:56:29.000Z
2021-12-13T20:35:48.000Z
code/func/func.py
lindenmp/neurodev_long
d6efc6b2e212bc6fc0669c80efcfa0b67d1e4b06
[ "MIT" ]
null
null
null
# Functions for project: NormativeNeuroDev_Longitudinal # Linden Parkes, 2019 # lindenmp@seas.upenn.edu from IPython.display import clear_output import numpy as np import scipy as sp from scipy import stats import pandas as pd from statsmodels.stats import multitest def get_cmap(which_type = 'qual1', num_classes = 8): # Returns a nice set of colors to make a nice colormap using the color schemes # from http://colorbrewer2.org/ # # The online tool, colorbrewer2, is copyright Cynthia Brewer, Mark Harrower and # The Pennsylvania State University. if which_type == 'linden': cmap_base = np.array([[255,105,97],[97,168,255],[178,223,138],[117,112,179],[255,179,71]]) elif which_type == 'pair': cmap_base = np.array([[124,230,199],[255,169,132]]) elif which_type == 'qual1': cmap_base = np.array([[166,206,227],[31,120,180],[178,223,138],[51,160,44],[251,154,153],[227,26,28], [253,191,111],[255,127,0],[202,178,214],[106,61,154],[255,255,153],[177,89,40]]) elif which_type == 'qual2': cmap_base = np.array([[141,211,199],[255,255,179],[190,186,218],[251,128,114],[128,177,211],[253,180,98], [179,222,105],[252,205,229],[217,217,217],[188,128,189],[204,235,197],[255,237,111]]) elif which_type == 'seq_red': cmap_base = np.array([[255,245,240],[254,224,210],[252,187,161],[252,146,114],[251,106,74], [239,59,44],[203,24,29],[165,15,21],[103,0,13]]) elif which_type == 'seq_blu': cmap_base = np.array([[247,251,255],[222,235,247],[198,219,239],[158,202,225],[107,174,214], [66,146,198],[33,113,181],[8,81,156],[8,48,107]]) elif which_type == 'redblu_pair': cmap_base = np.array([[222,45,38],[49,130,189]]) elif which_type == 'yeo17': cmap_base = np.array([[97,38,107], # VisCent [194,33,39], # VisPeri [79,130,165], # SomMotA [44,181,140], # SomMotB [75,148,72], # DorsAttnA [23,116,62], # DorsAttnB [149,77,158], # SalVentAttnA [222,130,177], # SalVentAttnB [75,87,61], # LimbicA [149,166,110], # LimbicB [210,135,47], # ContA [132,48,73], # ContB [92,107,131], # ContC [218,221,50], # DefaultA 
[175,49,69], # DefaultB [41,38,99], # DefaultC [53,75,158] # TempPar ]) elif which_type == 'yeo17_downsampled': cmap_base = np.array([[97,38,107], # VisCent [79,130,165], # SomMotA [75,148,72], # DorsAttnA [149,77,158], # SalVentAttnA [75,87,61], # LimbicA [210,135,47], # ContA [218,221,50], # DefaultA [53,75,158] # TempPar ]) if cmap_base.shape[0] > num_classes: cmap = cmap_base[0:num_classes] else: cmap = cmap_base cmap = cmap / 255 return cmap def update_progress(progress, my_str = ''): bar_length = 20 if isinstance(progress, int): progress = float(progress) if not isinstance(progress, float): progress = 0 if progress < 0: progress = 0 if progress >= 1: progress = 1 block = int(round(bar_length * progress)) clear_output(wait = True) text = my_str + " Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100) print(text) def get_synth_cov(df, cov = 'scanageYears', stp = 1): # Synthetic cov data X_range = [np.min(df[cov]), np.max(df[cov])] X = np.arange(X_range[0],X_range[1],stp) X = X.reshape(-1,1) return X def run_corr(df_X, df_y, typ = 'spearmanr'): df_corr = pd.DataFrame(index = df_y.columns, columns = ['coef', 'p']) for i, row in df_corr.iterrows(): if typ == 'spearmanr': df_corr.loc[i] = sp.stats.spearmanr(df_X, df_y[i]) elif typ == 'pearsonr': df_corr.loc[i] = sp.stats.pearsonr(df_X, df_y[i]) return df_corr def get_fdr_p(p_vals): out = multitest.multipletests(p_vals, alpha = 0.05, method = 'fdr_bh') p_fdr = out[1] return p_fdr def get_fdr_p_df(p_vals): p_fdr = pd.DataFrame(index = p_vals.index, columns = p_vals.columns, data = np.reshape(get_fdr_p(p_vals.values.flatten()), p_vals.shape)) return p_fdr def mark_outliers(x, thresh = 3, c = 1.4826): my_med = np.median(x) mad = np.median(abs(x - my_med))/c cut_off = mad * thresh upper = my_med + cut_off lower = my_med - cut_off outliers = np.logical_or(x > upper, x < lower) return outliers def perc_dev(Z, thr = 2.6, sign = 'abs'): if sign == 'abs': bol = np.abs(Z) > thr; elif 
sign == 'pos': bol = Z > thr; elif sign == 'neg': bol = Z < -thr; # count the number that have supra-threshold z-stats and store as percentage Z_perc = np.sum(bol, axis = 1) / Z.shape[1] * 100 return Z_perc def evd(Z, thr = 0.01, sign = 'abs'): m = Z.shape l = np.int(m[1] * thr) # assumes features are on dim 1, subjs on dim 0 if sign == 'abs': T = np.sort(np.abs(Z), axis = 1)[:,m[1] - l:m[1]] elif sign == 'pos': T = np.sort(Z, axis = 1)[:,m[1] - l:m[1]] elif sign == 'neg': T = np.sort(Z, axis = 1)[:,:l] E = sp.stats.trim_mean(T, 0.1, axis = 1) return E def summarise_network(df, roi_loc, network_idx, metrics = ('ct',), method = 'mean'): df_out = pd.DataFrame() for metric in metrics: if metric == 'ct': if method == 'median': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).median() if method == 'mean': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).mean() if method == 'max': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).max() my_list = [metric + '_' + str(i) for i in np.unique(network_idx[roi_loc == 1]).astype(int)] df_tmp.columns = my_list else: if method == 'median': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).median() if method == 'mean': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).mean() if method == 'max': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).max() my_list = [metric + '_' + str(i) for i in np.unique(network_idx).astype(int)] df_tmp.columns = my_list df_out = pd.concat((df_out, df_tmp), axis = 1) return df_out
36.963731
123
0.528595
3007a5e506648223a9acc7a03be0c3a03d473f6f
15,808
py
Python
sympy/simplify/sqrtdenest.py
goodok/sympy
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
[ "BSD-3-Clause" ]
2
2015-05-11T12:26:38.000Z
2016-08-19T00:11:03.000Z
sympy/simplify/sqrtdenest.py
goodok/sympy
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
[ "BSD-3-Clause" ]
null
null
null
sympy/simplify/sqrtdenest.py
goodok/sympy
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
[ "BSD-3-Clause" ]
null
null
null
from sympy.functions import sqrt, sign, root from sympy.core import S, Wild, sympify, Mul, Add, Expr from sympy.core.function import expand_multinomial, expand_mul from sympy.core.symbol import Dummy from sympy.polys import Poly, PolynomialError from sympy.core.function import count_ops def _mexpand(expr): return expand_mul(expand_multinomial(expr)) def is_sqrt(expr): """Return True if expr is a sqrt, otherwise False.""" return expr.is_Pow and expr.exp.is_Rational and abs(expr.exp) is S.Half def sqrt_depth(p): """Return the maximum depth of any square root argument of p. >>> from sympy.functions.elementary.miscellaneous import sqrt >>> from sympy.simplify.sqrtdenest import sqrt_depth Neither of these square roots contains any other square roots so the depth is 1: >>> sqrt_depth(1 + sqrt(2)*(1 + sqrt(3))) 1 The sqrt(3) is contained within a square root so the depth is 2: >>> sqrt_depth(1 + sqrt(2)*sqrt(1 + sqrt(3))) 2 """ if p.is_Atom: return 0 elif p.is_Add or p.is_Mul: return max([sqrt_depth(x) for x in p.args]) elif is_sqrt(p): return sqrt_depth(p.base) + 1 else: return 0 def is_algebraic(p): """Return True if p is comprised of only Rationals or square roots of Rationals and algebraic operations. Examples ======== >>> from sympy.functions.elementary.miscellaneous import sqrt >>> from sympy.simplify.sqrtdenest import is_algebraic >>> from sympy import cos >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*sqrt(2)))) True >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*cos(2)))) False """ if p.is_Rational: return True elif p.is_Atom: return False elif is_sqrt(p) or p.is_Pow and p.exp.is_Integer: return is_algebraic(p.base) elif p.is_Add or p.is_Mul: return all(is_algebraic(x) for x in p.args) else: return False def subsets(n): """ Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set, listed in reversed lexicographical order according to binary representation, so that the case of the fourth root is treated last. 
Examples ======== >>> from sympy.simplify.sqrtdenest import subsets >>> subsets(2) [[1, 0], [0, 1], [1, 1]] """ if n == 1: a = [[1]] elif n == 2: a = [[1, 0], [0, 1], [1, 1]] elif n == 3: a = [[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]] else: b = subsets(n-1) a0 = [x+[0] for x in b] a1 = [x+[1] for x in b] a = a0 + [[0]*(n-1) + [1]] + a1 return a def sqrtdenest(expr, max_iter=3): """Denests sqrts in an expression that contain other square roots if possible, otherwise returns the expr unchanged. This is based on the algorithms of [1]. Examples ======== >>> from sympy.simplify.sqrtdenest import sqrtdenest >>> from sympy import sqrt >>> sqrtdenest(sqrt(5 + 2 * sqrt(6))) sqrt(2) + sqrt(3) See Also ======== sympy.solvers.solvers.unrad References ========== [1] http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf [2] D. J. Jeffrey and A. D. Rich, 'Symplifying Square Roots of Square Roots by Denesting' (available at http://www.cybertester.com/data/denest.pdf) """ expr = expand_mul(sympify(expr)) for i in range(max_iter): z = _sqrtdenest0(expr) if expr == z: return expr expr = z return expr def _sqrt_match(p): """Return [a, b, r] for p.match(a + b*sqrt(r)) where, in addition to matching, sqrt(r) also has then maximal sqrt_depth among addends of p. 
Examples ======== >>> from sympy.functions.elementary.miscellaneous import sqrt >>> from sympy.simplify.sqrtdenest import _sqrt_match >>> _sqrt_match(1 + sqrt(2) + sqrt(2)*sqrt(3) + 2*sqrt(1+sqrt(5))) [1 + sqrt(2) + sqrt(6), 2, 1 + sqrt(5)] """ p = _mexpand(p) if p.is_Number: res = (p, S.Zero, S.Zero) elif p.is_Add: pargs = list(p.args) # to make the process canonical, the argument is included in the tuple # so when the max is selected, it will be the largest arg having a # given depth v = [(sqrt_depth(x), x, i) for i, x in enumerate(pargs)] nmax = max(v) if nmax[0] == 0: res = [] else: depth, _, i = nmax r = pargs.pop(i) a = Add._from_args(pargs) b = S.One if r.is_Mul: bv = [] rv = [] for x in r.args: if sqrt_depth(x) < depth: bv.append(x) else: rv.append(x) b = Mul._from_args(bv) r = Mul._from_args(rv) res = (a, b, r**2) else: b, r = p.as_coeff_Mul() if is_sqrt(r): res = (S.Zero, b, r**2) else: res = [] return list(res) class SqrtdenestStopIteration(StopIteration): pass def _sqrtdenest0(expr): """Returns expr after denesting its arguments.""" if is_sqrt(expr): n, d = expr.as_numer_denom() if d is S.One: # n is a square root if n.base.is_Add: args = n.base.args if len(args) > 2 and all((x**2).is_Integer for x in args): try: return _sqrtdenest_rec(n) except SqrtdenestStopIteration: pass expr = sqrt(_mexpand(Add(*[_sqrtdenest0(x) for x in args]))) return _sqrtdenest1(expr) else: n, d = [_sqrtdenest0(i) for i in (n, d)] return n/d if isinstance(expr, Expr): args = expr.args if args: return expr.func(*[_sqrtdenest0(a) for a in args]) return expr def _sqrtdenest_rec(expr): """Helper that denests the square root of three or more surds. 
It returns the denested expression; if it cannot be denested it throws SqrtdenestStopIteration Algorithm: expr.base is in the extension Q_m = Q(sqrt(r_1),..,sqrt(r_k)); split expr.base = a + b*sqrt(r_k), where `a` and `b` are on Q_(m-1) = Q(sqrt(r_1),..,sqrt(r_(k-1))); then a**2 - b**2*r_k is on Q_(m-1); denest sqrt(a**2 - b**2*r_k) and so on. See [1], section 6. Examples ======== >>> from sympy import sqrt >>> from sympy.simplify.sqrtdenest import _sqrtdenest_rec >>> _sqrtdenest_rec(sqrt(-72*sqrt(2) + 158*sqrt(5) + 498)) -sqrt(10) + sqrt(2) + 9 + 9*sqrt(5) >>> w=-6*sqrt(55)-6*sqrt(35)-2*sqrt(22)-2*sqrt(14)+2*sqrt(77)+6*sqrt(10)+65 >>> _sqrtdenest_rec(sqrt(w)) -sqrt(11) - sqrt(7) + sqrt(2) + 3*sqrt(5) """ from sympy.simplify.simplify import radsimp, split_surds, rad_rationalize if expr.base < 0: return sqrt(-1)*_sqrtdenest_rec(sqrt(-expr.base)) a, b = split_surds(expr.base) if a < b: a, b = b, a c2 = _mexpand(a**2 - b**2) if len(c2.args) > 2: a1, b1 = split_surds(c2) if a1 < b1: a1, b1 = b1, a1 c2_1 = _mexpand(a1**2 - b1**2) c_1 = _sqrtdenest_rec(sqrt(c2_1)) d_1 = _sqrtdenest_rec(sqrt(a1 + c_1)) num, den = rad_rationalize(b1, d_1) c = _mexpand(d_1/sqrt(2) + num/(den*sqrt(2))) else: c = _sqrtdenest1(sqrt(c2)) if sqrt_depth(c) > 1: raise SqrtdenestStopIteration ac = a + c if len(ac.args) >= len(expr.args): if count_ops(ac) >= count_ops(expr.base): raise SqrtdenestStopIteration d = sqrtdenest(sqrt(ac)) if sqrt_depth(d) > 1: raise SqrtdenestStopIteration num, den = rad_rationalize(b, d) r = d/sqrt(2) + num/(den*sqrt(2)) r = radsimp(r) return _mexpand(r) def _sqrtdenest1(expr): """Return denested expr after denesting with simpler methods or, that failing, using the denester.""" from sympy.simplify.simplify import radsimp if not is_sqrt(expr): return expr a = expr.base if a.is_Atom: return expr val = _sqrt_match(a) if not val: return expr a, b, r = val # try a quick numeric denesting d2 = _mexpand(a**2 - b**2*r) if d2.is_Rational: if d2.is_positive: z = 
_sqrt_numeric_denest(a, b, r, d2) if z is not None: return z else: # fourth root case # sqrtdenest(sqrt(3 + 2*sqrt(3))) = # sqrt(2)*3**(1/4)/2 + sqrt(2)*3**(3/4)/2 dr2 = _mexpand(-d2*r) dr = sqrt(dr2) if dr.is_Rational: z = _sqrt_numeric_denest(_mexpand(b*r), a, r, dr2) if z is not None: return z/root(r, 4) else: z = _sqrt_symbolic_denest(a, b, r) if z is not None: return z if not is_algebraic(expr): return expr # now call to the denester av0 = [a, b, r, d2] z = _denester([radsimp(expr**2)], av0, 0, sqrt_depth(expr) - 1)[0] if av0[1] is None: return expr if z is not None: return z return expr def _sqrt_symbolic_denest(a, b, r): """Given an expression, sqrt(a + b*sqrt(b)), return the denested expression or None. Algorithm: If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with (y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and (cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2). Examples ======== >>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest >>> from sympy import sqrt, Symbol, Poly >>> from sympy.abc import x >>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55 >>> _sqrt_symbolic_denest(a, b, r) sqrt(-2*sqrt(29) + 11) + sqrt(5) If the expression is numeric, it will be simplified: >>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2) >>> sqrtdenest(sqrt((w**2).expand())) 1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3))) Otherwise, it will only be simplified if assumptions allow: >>> w = w.subs(sqrt(3), sqrt(x + 3)) >>> sqrtdenest(sqrt((w**2).expand())) sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2) Notice that the argument of the sqrt is a square. 
If x is made positive then the sqrt of the square is resolved: >>> _.subs(x, Symbol('x', positive=True)) sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2) """ a, b, r = sympify([a, b, r]) rval = _sqrt_match(r) if not rval: return None ra, rb, rr = rval if rb: y = Dummy('y', positive=True) try: newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y) except PolynomialError: return None if newa.degree() == 2: ca, cb, cc = newa.all_coeffs() cb += b if _mexpand(cb**2 - 4*ca*cc).equals(0): z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2) if z.is_number: z = _mexpand(Mul._from_args(z.as_content_primitive())) return z def _sqrt_numeric_denest(a, b, r, d2): """Helper that denest expr = a + b*sqrt(r), with d2 = a**2 - b**2*r > 0 or returns None if not denested. """ from sympy.simplify.simplify import radsimp depthr = sqrt_depth(r) d = sqrt(d2) vad = a + d # sqrt_depth(res) <= sqrt_depth(vad) + 1 # sqrt_depth(expr) = depthr + 2 # there is denesting if sqrt_depth(vad)+1 < depthr + 2 # if vad**2 is Number there is a fourth root if sqrt_depth(vad) < depthr + 1 or (vad**2).is_Rational: vad1 = radsimp(1/vad) return (sqrt(vad/2) + sign(b)*sqrt((b**2*r*vad1/2).expand())).expand() def _denester(nested, av0, h, max_depth_level): """Denests a list of expressions that contain nested square roots. Algorithm based on <http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf>. It is assumed that all of the elements of 'nested' share the same bottom-level radicand. (This is stated in the paper, on page 177, in the paragraph immediately preceding the algorithm.) When evaluating all of the arguments in parallel, the bottom-level radicand only needs to be denested once. This means that calling _denester with x arguments results in a recursive invocation with x+1 arguments; hence _denester has polynomial complexity. However, if the arguments were evaluated separately, each call would result in two recursive invocations, and the algorithm would have exponential complexity. 
This is discussed in the paper in the middle paragraph of page 179. """ from sympy.simplify.simplify import radsimp if h > max_depth_level: return None, None if av0[1] is None: return None, None if (av0[0] is None and all(n.is_Number for n in nested)): # no arguments are nested for f in subsets(len(nested)): # test subset 'f' of nested p = _mexpand(Mul(*[nested[i] for i in range(len(f)) if f[i]])) if f.count(1) > 1 and f[-1]: p = -p sqp = sqrt(p) if sqp.is_Rational: return sqp, f # got a perfect square so return its square root. # Otherwise, return the radicand from the previous invocation. return sqrt(nested[-1]), [0]*len(nested) else: R = None if av0[0] is not None: values = [av0[:2]] R = av0[2] nested2 = [av0[3], R] av0[0] = None else: values = filter(None, [_sqrt_match(expr) for expr in nested]) for v in values: if v[2]: #Since if b=0, r is not defined if R is not None: if R != v[2]: av0[1] = None return None, None else: R = v[2] if R is None: # return the radicand from the previous invocation return sqrt(nested[-1]), [0]*len(nested) nested2 = [_mexpand(v[0]**2) - _mexpand(R*v[1]**2) for v in values] + [R] d, f = _denester(nested2, av0, h + 1, max_depth_level) if not f: return None, None if not any(f[i] for i in range(len(nested))): v = values[-1] return sqrt(v[0] + v[1]*d), f else: p = Mul(*[nested[i] for i in range(len(nested)) if f[i]]) v = _sqrt_match(p) if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested) - 1]: v[0] = -v[0] v[1] = -v[1] if not f[len(nested)]: #Solution denests with square roots vad = _mexpand(v[0] + d) if vad <= 0: # return the radicand from the previous invocation. 
return sqrt(nested[-1]), [0]*len(nested) if not(sqrt_depth(vad) < sqrt_depth(R) + 1 or (vad**2).is_Number): av0[1] = None return None, None vad1 = radsimp(1/vad) return _mexpand(sqrt(vad/2) + sign(v[1])*sqrt(_mexpand(v[1]**2*R*vad1/2))), f else: #Solution requires a fourth root s2 = _mexpand(v[1]*R) + d if s2 <= 0: return sqrt(nested[-1]), [0]*len(nested) FR, s = root(_mexpand(R), 4), sqrt(s2) return _mexpand(s/(sqrt(2)*FR) + v[0]*FR/(sqrt(2)*s)), f
32.459959
79
0.546748
3007da11fc7ae07380226f4b542b097442751381
17,074
py
Python
backend/modules/doc/views/doc.py
YouFacai/iWiki
7a2cbb514f25b72932b0212f6165cdb426243243
[ "MIT" ]
null
null
null
backend/modules/doc/views/doc.py
YouFacai/iWiki
7a2cbb514f25b72932b0212f6165cdb426243243
[ "MIT" ]
null
null
null
backend/modules/doc/views/doc.py
YouFacai/iWiki
7a2cbb514f25b72932b0212f6165cdb426243243
[ "MIT" ]
null
null
null
import datetime import os import shutil from django.conf import settings from django.contrib.auth import get_user_model from django.core.cache import cache from django.db import transaction, IntegrityError from django.db.models import Q, F from django.http import FileResponse from django.utils.encoding import escape_uri_path from django.utils.translation import gettext as _ from rest_framework.decorators import action from rest_framework.response import Response from rest_framework.viewsets import ModelViewSet, GenericViewSet from constents import DocAvailableChoices, RepoTypeChoices, UserTypeChoices from modules.account.serializers import UserInfoSerializer from modules.doc.models import Doc, DocVersion, DocCollaborator, Comment from modules.doc.permissions import DocManagePermission, DocCommonPermission from modules.doc.serializers import ( DocCommonSerializer, DocListSerializer, DocUpdateSerializer, DocVersionSerializer, DocPublishChartSerializer, ) from modules.repo.models import Repo, RepoUser from modules.repo.serializers import RepoSerializer from utils.authenticators import SessionAuthenticate from utils.exceptions import Error404, ParamsNotFound, UserNotExist, OperationError from utils.paginations import NumPagination from utils.throttlers import DocSearchThrottle from utils.viewsets import ThrottleAPIView USER_MODEL = get_user_model() class DocManageView(ModelViewSet): """文章管理入口""" queryset = Doc.objects.filter(is_deleted=False) serializer_class = DocCommonSerializer permission_classes = [ DocManagePermission, ] def perform_create(self, serializer): return serializer.save() def list(self, request, *args, **kwargs): """个人文章""" self.serializer_class = DocListSerializer # 获取个人的所有文章 sql = ( "SELECT d.*, r.name 'repo_name' FROM `doc_doc` d " "JOIN `repo_repo` r ON d.repo_id=r.id " "JOIN `auth_user` au ON au.uid=d.creator " "WHERE d.creator=%s AND NOT d.is_deleted " "{} " "ORDER BY d.id DESC;" ) # 标题关键字搜索 search_key = request.GET.get("searchKey", "") if 
search_key: sql = sql.format("AND d.title like %s") search_key = f"%%{search_key}%%" self.queryset = self.queryset.raw(sql, [request.user.uid, search_key]) else: sql = sql.format("") self.queryset = self.queryset.raw(sql, [request.user.uid]) return super().list(request, *args, **kwargs) def create(self, request, *args, **kwargs): """新建文章""" request.data["creator"] = request.user.uid serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) with transaction.atomic(): instance = self.perform_create(serializer) DocVersion.objects.create(**DocVersionSerializer(instance).data) return Response({"id": instance.id}) def update(self, request, *args, **kwargs): """更新文章""" partial = kwargs.pop("partial", False) instance = self.get_object() serializer = DocUpdateSerializer(instance, data=request.data, partial=partial) serializer.is_valid(raise_exception=True) with transaction.atomic(): serializer.save(update_by=request.user.uid) DocVersion.objects.create(**DocVersionSerializer(instance).data) return Response({"id": instance.id}) def destroy(self, request, *args, **kwargs): instance = self.get_object() self.perform_destroy(instance) return Response() @action(detail=True, methods=["GET"]) def list_collaborator(self, request, *args, **kwargs): """获取协作者""" instance = self.get_object() sql = ( "SELECT au.* " "FROM `doc_collaborator` dc " "JOIN `doc_doc` dd ON dd.id = dc.doc_id AND dd.id = %s " "JOIN `auth_user` au on dc.uid = au.uid;" ) collaborators = USER_MODEL.objects.raw(sql, [instance.id]) serializer = UserInfoSerializer(collaborators, many=True) return Response(serializer.data) @action(detail=True, methods=["POST"]) def add_collaborator(self, request, *args, **kwargs): """增加协作者""" instance = self.get_object() uid = request.data.get("uid") if not uid or uid == request.user.uid: raise OperationError() try: DocCollaborator.objects.create(doc_id=instance.id, uid=uid) except IntegrityError: raise OperationError(_("已添加该用户为协作者,请勿重复添加")) return 
Response() @action(detail=True, methods=["POST"]) def remove_collaborator(self, request, *args, **kwargs): """删除协作者""" instance = self.get_object() uid = request.data.get("uid") if not uid or uid == request.user.uid: raise OperationError() DocCollaborator.objects.filter(doc_id=instance.id, uid=uid).delete() return Response() @action(detail=True, methods=["GET"]) def edit_status(self, request, *args, **kwargs): """为文章添加编辑中状态""" instance = self.get_object() cache_key = f"{self.__class__.__name__}:{self.action}:{instance.id}" uid = cache.get(cache_key) if uid is None or uid == request.user.uid: cache.set(cache_key, request.user.uid, 60) return Response(True) else: return Response(False) @action(detail=True, methods=["GET"]) def export(self, request, *args, **kwargs): """导出文章""" instance = self.get_object() sql = ( "SELECT dc.*, au.username FROM `doc_comment` dc " "JOIN `auth_user` au ON au.uid=dc.creator " "WHERE dc.doc_id=%s AND NOT dc.is_deleted " "ORDER BY dc.id DESC;" ) comments = Comment.objects.raw(sql, [instance.id]) file_dir = os.path.join( settings.BASE_DIR, "tmp", "doc", request.user.uid, str(instance.id) ) if os.path.exists(file_dir): shutil.rmtree(file_dir) os.makedirs(file_dir) filename = "{}.md".format(instance.title.replace(" ", "").replace("/", "")) file_path = os.path.join(file_dir, filename) with open(file_path, "w", encoding="utf-8") as file: file.write(instance.content) for comment in comments: file.write("\n\n---\n\n") file.write(comment.content) file = open(file_path, "rb") response = FileResponse(file) response["Content-Type"] = "application/octet-stream" response[ "Content-Disposition" ] = f"attachment; filename={escape_uri_path(filename)}" return response class DocCommonView(GenericViewSet): """文章常规入口""" queryset = Doc.objects.filter(is_deleted=False, is_publish=True) serializer_class = DocListSerializer permission_classes = [DocCommonPermission] authentication_classes = [SessionAuthenticate] def list(self, request, *args, **kwargs): 
"""获取仓库文章""" repo_id = request.GET.get("repo_id", None) # 没有传参直接返回 if repo_id is None: raise Error404() # 传入参数获取对应仓库的文章 try: Repo.objects.get(id=repo_id, is_deleted=False) except Repo.DoesNotExist: raise Error404() # 获取 仓库 的 公开或自己的 文章 sql = ( "SELECT d.*, au.username creator_name, r.name repo_name " "FROM `doc_doc` d " "JOIN `repo_repo` r ON r.id=d.repo_id " "LEFT JOIN `doc_pin` dp ON dp.doc_id=d.id AND dp.in_use " "LEFT JOIN `auth_user` au ON au.uid=d.creator " "WHERE NOT d.`is_deleted` AND d.`is_publish` " "AND dp.in_use IS NULL " "AND d.repo_id = %s " "AND (d.available = %s OR d.creator = %s) " "AND d.title like %s " "ORDER BY d.id DESC" ) search_key = request.GET.get("searchKey") search_key = f"%%{search_key}%%" if search_key else "%%" queryset = self.queryset.raw( sql, [repo_id, DocAvailableChoices.PUBLIC, request.user.uid, search_key] ) page = self.paginate_queryset(queryset) serializer = self.get_serializer(page, many=True) return self.get_paginated_response(serializer.data) def retrieve(self, request, *args, **kwargs): """获取文章详情""" instance = self.get_object() Doc.objects.filter(id=instance.id).update(pv=F("pv") + 1) instance.pv += 1 serializer = DocCommonSerializer(instance) return Response(serializer.data) @action(detail=True, methods=["GET"]) def is_collaborator(self, request, *args, **kwargs): """判断是否是协作者""" instance = self.get_object() try: DocCollaborator.objects.get(doc_id=instance.id, uid=request.user.uid) return Response() except DocCollaborator.DoesNotExist: return Response({"result": False}) @action(detail=False, methods=["GET"]) def load_pin_doc(self, request, *args, **kwargs): """获取置顶文章""" repo_id = request.GET.get("repo_id", None) # 没有传参直接返回 if repo_id is None: raise Error404() # 传入参数获取对应仓库的文章 try: Repo.objects.get(id=repo_id, is_deleted=False) except Repo.DoesNotExist: raise Error404() sql = ( "SELECT distinct dd.*, au.username creator_name, rr.name repo_name " "FROM `doc_doc` dd " "JOIN `auth_user` au ON dd.creator=au.uid " "JOIN `repo_repo` 
rr ON rr.id=dd.repo_id " "JOIN `doc_pin` dp ON dp.doc_id=dd.id AND dp.in_use " "WHERE rr.id=%s AND dd.available=%s " "AND dd.is_publish AND NOT dd.is_deleted; " ) queryset = Doc.objects.raw(sql, [repo_id, DocAvailableChoices.PUBLIC]) serializer = self.get_serializer(queryset, many=True) return Response(serializer.data) class DocPublicView(GenericViewSet): """公共入口""" queryset = Doc.objects.filter( is_deleted=False, is_publish=True, available=DocAvailableChoices.PUBLIC ) authentication_classes = [SessionAuthenticate] def list(self, request, *args, **kwargs): # 获取 公开或成员仓库 的 公开或自己的 文章 sql = ( "SELECT d.*, au.username creator_name, r.name repo_name " "FROM `repo_repo` r " "JOIN `repo_user` ru ON r.id=ru.repo_id AND ru.u_type!=%s " "JOIN `doc_doc` d ON r.id=d.repo_id " "JOIN `auth_user` au ON au.uid=d.creator " "WHERE NOT r.is_deleted AND (ru.uid=%s OR r.r_type=%s) " "AND (d.available = %s OR d.creator = %s) AND NOT d.`is_deleted` AND d.`is_publish` " "GROUP BY d.id " "ORDER BY d.id DESC;" ) docs = Doc.objects.raw( sql, [ UserTypeChoices.VISITOR, request.user.uid, RepoTypeChoices.PUBLIC, DocAvailableChoices.PUBLIC, request.user.uid, ], ) page = NumPagination() queryset = page.paginate_queryset(docs, request, self) serializer = DocListSerializer(queryset, many=True) return page.get_paginated_response(serializer.data) @action(detail=False, methods=["GET"]) def recent(self, request, *args, **kwargs): """热门文章""" cache_key = f"{self.__class__.__name__}:{self.action}" cache_data = cache.get(cache_key) if cache_data is not None: return Response(cache_data) # 公开库的近期文章 public_repo_ids = Repo.objects.filter( r_type=RepoTypeChoices.PUBLIC, is_deleted=False ).values("id") queryset = self.queryset.filter(repo_id__in=public_repo_ids, pv__gt=0).order_by( "-pv" )[:10] serializer = DocListSerializer(queryset, many=True) cache.set(cache_key, serializer.data, 1800) return Response(serializer.data) @action(detail=False, methods=["GET"]) def hot_repo(self, request, *args, **kwargs): 
"""热门库""" cache_key = f"{self.__class__.__name__}:{self.action}" cache_data = cache.get(cache_key) if cache_data is not None: return Response(cache_data) sql = ( "SELECT rr.*, dd.repo_id, COUNT(1) 'count' " "FROM `doc_doc` dd " "JOIN (SELECT MIN(dd2.id) 'min_id' from `doc_doc` dd2 ORDER BY dd2.id DESC LIMIT 100) dd3 " "JOIN `repo_repo` rr ON rr.id=dd.repo_id " "WHERE dd.id>=dd3.min_id " "GROUP BY dd.repo_id " "ORDER BY count DESC " "LIMIT 10" ) repos = Repo.objects.raw(sql) serializer = RepoSerializer(repos, many=True) cache.set(cache_key, serializer.data, 1800) return Response(serializer.data) @action(detail=False, methods=["GET"]) def user_doc(self, request, *args, **kwargs): """用户发布文章""" username = request.GET.get("username") if not username: raise ParamsNotFound(_("用户名不能为空")) try: user = USER_MODEL.objects.get(username=username) except USER_MODEL.DoesNotExist: raise UserNotExist() # 共同或公开仓库 的 公开文章 union_repo_ids = RepoUser.objects.filter( Q(uid=request.user.uid) & ~Q(u_type=UserTypeChoices.VISITOR) ).values("repo_id") allowed_repo_ids = Repo.objects.filter( Q(r_type=RepoTypeChoices.PUBLIC) | Q(id__in=union_repo_ids) ).values("id") docs = self.queryset.filter( creator=user.uid, repo_id__in=allowed_repo_ids ).order_by("-id") page = NumPagination() queryset = page.paginate_queryset(docs, request, self) serializer = DocListSerializer(queryset, many=True) return page.get_paginated_response(serializer.data) @action(detail=False, methods=["GET"]) def recent_chart(self, request, *args, **kwargs): """文章发布图表数据""" cache_key = f"{self.__class__.__name__}:{self.action}" cache_data = cache.get(cache_key) if cache_data is not None: return Response(cache_data) today = datetime.datetime.today() last_day = today - datetime.timedelta(days=30) sql = ( "SELECT dd.id, DATE_FORMAT(dd.update_at, \"%%m-%%d\") 'date', COUNT(1) 'count' " "FROM `doc_doc` dd " "WHERE dd.update_at>='{}' AND NOT dd.is_deleted AND dd.available = '{}' " "GROUP BY DATE(dd.update_at); " ).format(last_day, 
DocAvailableChoices.PUBLIC) docs_count = Doc.objects.raw(sql) serializer = DocPublishChartSerializer(docs_count, many=True) data = {item["date"]: item["count"] for item in serializer.data} cache.set(cache_key, data, 1800) return Response(data) class SearchDocView(ThrottleAPIView): """搜索入口""" throttle_classes = [ DocSearchThrottle, ] def post(self, request, *args, **kwargs): search_key = request.data.get("searchKey") if not search_key: raise ParamsNotFound(_("搜索关键字不能为空")) # 公开或成员仓库 的 公开或个人文章 sql = ( "SELECT dd.*, au.username creator_name, rr.name repo_name " "FROM `repo_repo` rr " "JOIN `repo_user` ru ON ru.repo_id=rr.id AND ru.u_type!=%s " "JOIN `doc_doc` dd ON rr.id = dd.repo_id " "JOIN `auth_user` au ON au.uid = dd.creator " "WHERE NOT rr.is_deleted AND (ru.uid = %s OR rr.r_type = %s) " "AND NOT dd.is_deleted AND dd.is_publish AND (dd.available = %s OR dd.creator = %s) " "AND (({}) OR ({})) " "GROUP BY dd.id " "ORDER BY dd.id DESC;" ) # 处理 key extend_title_sqls = [] extend_content_sqls = [] params_keys = [] for key in search_key: if key: extend_title_sqls.append(" dd.title like %s ") extend_content_sqls.append(" dd.content like %s ") params_keys.append(f"%{key}%") extend_title_sql = "AND".join(extend_title_sqls) extend_content_sql = "AND".join(extend_content_sqls) sql = sql.format(extend_title_sql, extend_content_sql) docs = Doc.objects.raw( sql, [ UserTypeChoices.VISITOR, request.user.uid, RepoTypeChoices.PUBLIC, DocAvailableChoices.PUBLIC, request.user.uid, *params_keys, *params_keys, ], ) page = NumPagination() queryset = page.paginate_queryset(docs, request, self) serializer = DocListSerializer(queryset, many=True) return page.get_paginated_response(serializer.data)
38.541761
103
0.602554
3007e107fbb9661b8e7f7e4c1d4a01f5c735b272
21,175
py
Python
python/cli/report_study.py
mediumroast/mr_sdk
55c7a13c5cef73e677297026b41b7ec23855391f
[ "Apache-2.0" ]
1
2021-10-06T02:46:48.000Z
2021-10-06T02:46:48.000Z
python/cli/report_study.py
mediumroast/mr_sdk
55c7a13c5cef73e677297026b41b7ec23855391f
[ "Apache-2.0" ]
3
2021-10-16T03:34:07.000Z
2022-02-23T05:10:12.000Z
python/cli/report_study.py
mediumroast/mr_sdk
55c7a13c5cef73e677297026b41b7ec23855391f
[ "Apache-2.0" ]
null
null
null
#!/bin/env python3 import sys import argparse import configparser import docx from docx import Document from docx.shared import Pt, Inches from docx.enum.dml import MSO_THEME_COLOR_INDEX from docx.enum.section import WD_ORIENT, WD_SECTION from datetime import datetime from mediumroast.api.high_level import Auth as authenticate from mediumroast.api.high_level import Studies as study from mediumroast.api.high_level import Interactions as interaction ### General utilities def parse_cli_args(program_name='report_study', desc='A mediumroast.io utility that generates a Microsoft Word formatted report for a study.'): parser = argparse.ArgumentParser(prog=program_name, description=desc) parser.add_argument('--exclude_substudies', help="The names for the substudies to exclude in a comma separated list", type=str, dest='exclude_substudies', default=None) parser.add_argument('--rest_url', help="The URL of the target REST server", type=str, dest='rest_url', default='http://mr-01:3000') parser.add_argument('--guid', help="The GUID for the study to be reported on.", type=str, dest='guid', required=True) parser.add_argument('--org', help="The organization name for the report.", type=str, dest='org', required=True) parser.add_argument('--user', help="User name", type=str, dest='user', default='foo') parser.add_argument('--secret', help="Secret or password", type=str, dest='secret', default='bar') parser.add_argument('--config_file', help="The location to the configuration files", type=str, dest='config_file', default='./reports.ini') cli_args = parser.parse_args() return cli_args def read_config(conf_file='./reports.ini'): c = configparser.ConfigParser() c.read(conf_file) return c def get_interaction_name(guid): """Get the interaction name by the GUID """ interaction_ctl = interaction(credential) return interaction_ctl.get_name_by_guid(guid)[1]['interactionName'] def _create_header(doc_obj, conf, font_size=7): date_string = f'{datetime.now():%Y-%m-%d %H:%M}' s = 
doc_obj.sections[0] header = s.header header_p = header.paragraphs[0] header_p.text = conf['org'] + "\t | \t Created on: " + date_string style = doc_obj.styles['Header'] font = style.font font.name = conf['font'] font.size = Pt(font_size) header_p.style = doc_obj.styles['Header'] def _create_footer(doc_obj, conf, font_size=7): date_string = f'{datetime.now():%Y-%m-%d %H:%M}' s = doc_obj.sections[0] footer = s.footer footer_p = footer.paragraphs[0] footer_p.text = conf['confidentiality'] + "\t | \t" + conf['copyright'] style = doc_obj.styles['Footer'] font = style.font font.name = conf['font'] font.size = Pt(font_size) footer_p.style = doc_obj.styles['Footer'] def _create_cover_page(doc_obj, study, conf, logo_size=60, font_size=30): # Generics title_font_size = Pt(font_size) # Title Font Size logo_size = Pt(font_size*2.5) # Organization name and logo logo = conf['logo'] logo_title = doc_obj.add_paragraph().add_run() logo_title.add_picture(logo, height=logo_size) # Define the Cover Title Style org = conf['org'] # Organization title = "\n\nTitle: " + study['studyName'] cover_title = doc_obj.add_paragraph(title) style = doc_obj.styles['Title'] font = style.font font.name = conf['font'] font.size = title_font_size cover_title.style = doc_obj.styles['Title'] # Define the Subtitle content subtitle = "A " + org + " study report enabling attributable market insights." 
cover_subtitle = doc_obj.add_paragraph("") s = cover_subtitle.add_run(subtitle) subtitle_font = s.font subtitle_font.bold = True # Define the Author content author = "Mediumroast Barrista Robot" cover_author = doc_obj.add_paragraph("\nAuthor: ") a = cover_author.add_run(author) author_font = a.font author_font.bold = True # Define the Creation date content creation_date = f'{datetime.now():%Y-%m-%d %H:%M}' cover_date = doc_obj.add_paragraph("Creation Date: ") d = cover_date.add_run(creation_date) date_font = d.font date_font.bold = True # Add a page break doc_obj.add_page_break() def _create_summary(doc_obj, study_doc, conf): # Create the Introduction section section_title = doc_obj.add_paragraph( 'Findings') # Create the Findings section section_title.style = doc_obj.styles['Title'] doc_obj.add_heading('Introduction') clean_intro = " ".join(study_doc['Introduction'].split("\n")) doc_obj.add_paragraph(clean_intro) # Create the Opportunity section doc_obj.add_heading('Opportunity') clean_opportunity = " ".join(study_doc['Opportunity']['text'].split("\n")) doc_obj.add_paragraph(clean_opportunity) # Remove the text section before we process the numbered bullets del(study_doc['Opportunity']['text']) for opp in study_doc['Opportunity']: clean_opp = " ".join(study_doc['Opportunity'][opp].split("\n")) doc_obj.add_paragraph(clean_opp, style='List Bullet') # Create the Action section doc_obj.add_heading('Actions') clean_action = " ".join(study_doc['Action']['text'].split("\n")) doc_obj.add_paragraph(clean_action) # Remove the text section before we process the numbered bullets del(study_doc['Action']['text']) for action in study_doc['Action']: clean_act = " ".join(study_doc['Action'][action].split("\n")) doc_obj.add_paragraph(clean_act, style='List Number') # Add a page break doc_obj.add_page_break() def _add_hyperlink(paragraph, text, url): """Taken from https://stackoverflow.com/questions/47666642/adding-an-hyperlink-in-msword-by-using-python-docx """ # This gets access 
to the document.xml.rels file and gets a new relation id value part = paragraph.part r_id = part.relate_to( url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True) # Create the w:hyperlink tag and add needed values hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink') hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, ) # Create a w:r element and a new w:rPr element new_run = docx.oxml.shared.OxmlElement('w:r') rPr = docx.oxml.shared.OxmlElement('w:rPr') # Join all the xml elements together add add the required text to the w:r element new_run.append(rPr) new_run.text = text hyperlink.append(new_run) # Create a new Run object and add the hyperlink into it r = paragraph.add_run() r._r.append(hyperlink) # A workaround for the lack of a hyperlink style (doesn't go purple after using the link) # Delete this if using a template that has the hyperlink style in it r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK r.font.underline = True return hyperlink def _create_reference(interaction_guid, substudy, doc_obj, conf, char_limit=500): interaction_ctl = interaction(credential) success, interaction_data = interaction_ctl.get_by_guid(interaction_guid) if success: doc_obj.add_heading(interaction_data['interactionName'], 2) my_time = str(interaction_data['time'][0:2]) + \ ':' + str(interaction_data['time'][2:4]) my_date = str(interaction_data['date'][0:4]) + '-' + str(interaction_data['date'][4:6]) + '-' \ + str(interaction_data['date'][6:8]) interaction_meta = "\t\t|\t".join(['Date: ' + my_date + "\t" + my_time, 'Sub-Study Identifier: ' + substudy]) doc_obj.add_paragraph(interaction_meta) doc_obj.add_paragraph( interaction_data['abstract'][0:char_limit] + '...') resource = doc_obj.add_paragraph('Interaction Resource: ') _add_hyperlink( resource, interaction_data['interactionName'], interaction_data['url'].replace('s3', 'http')) else: print( 'Something went wrong obtaining the interaction data for [' + interaction_guid + ']') def 
_create_references(doc_obj, substudy_list, conf): section_title = doc_obj.add_paragraph( 'References') # Create the References section section_title.style = doc_obj.styles['Title'] for substudy in substudy_list: for interaction in substudy_list[substudy]['interactions']: interaction_guid = substudy_list[substudy]['interactions'][interaction]['GUID'] _create_reference(interaction_guid, substudy, doc_obj, conf) def _create_quote(doc_obj, quote, indent, font_size): my_quote = quote my_para = doc_obj.add_paragraph(style='List Bullet') my_para.paragraph_format.left_indent = Pt(1.5 * indent) my_bullet = my_para.add_run(my_quote) my_bullet.font.size = Pt(font_size) my_para.paragraph_format.space_after = Pt(3) def _create_quotes(doc_obj, quotes, indent, font_size, location='quotes'): for quote in quotes: my_quote = quotes[quote][location] my_para = doc_obj.add_paragraph(style='List Bullet') my_para.paragraph_format.left_indent = Pt(1.5 * indent) my_bullet = my_para.add_run(my_quote) my_bullet.font.size = Pt(font_size) my_para.paragraph_format.space_after = Pt(3) def _create_subsection(doc_obj, start_text, body_text, indent, font_size, to_bold=False, to_italics=False): para = doc_obj.add_paragraph() para.paragraph_format.left_indent = Pt(indent) start_run = para.add_run(start_text) start_run.font.bold = to_bold start_run.font.size = Pt(font_size) body_run=para.add_run(body_text) body_run.font.size = Pt(font_size) if to_italics: body_run.font.italic = to_italics def _create_intro(doc_obj, intro_name, intro_body, heading_level=2): doc_obj.add_heading(intro_name, level=heading_level) doc_obj.add_paragraph(intro_body) def _create_key_theme(doc_obj, themes, quotes, conf, include_fortune=True): ### Define the summary theme _create_intro(doc_obj, 'Summary Theme', conf['themes']['summary_intro'].replace("\n", " ")) ## Create the definition theme = 'summary_theme' _create_subsection(doc_obj, 'Definition: ', themes[theme]['description'], int(conf['themes']['indent']), font_size = 
int(conf['themes']['font_size']), to_bold = True) ## Determine if we should include the theme fortune or not if include_fortune: _create_subsection(doc_obj, 'Fortune: ', themes[theme]['fortune'][0].upper() + themes[theme]['fortune'][1:] + ' [system generated]', int(conf['themes']['indent']), font_size = int(conf['themes']['font_size']), to_bold = True) ## Create the tags _create_subsection(doc_obj, 'Tags: ', " | ".join(themes[theme]['tags'].keys()), int(conf['themes']['indent']), font_size = int(conf['themes']['font_size']), to_bold = True, to_italics = True) ## Create the quotes subsection_name = 'Theme Quotes' doc_obj.add_heading(subsection_name, level=3) _create_quotes(doc_obj, quotes['summary'], int(conf['themes']['indent']), font_size = int(conf['themes']['font_size'])) ### Add the discrete/detailed themes theme_loc = 'discrete_themes' quotes_loc = 'discrete' ## Create the starting paragraph _create_intro(doc_obj, 'Detailed Themes', conf['themes']['discrete_intro'].replace("\n", " ")) ## Add in the individual themes and their quotes my_themes = themes[theme_loc] for my_theme in my_themes: # Put in the theme identifier _create_intro(doc_obj, 'Detailed Theme Identifier: ' + my_theme, conf['themes']['discrete_theme_intro'].replace("\n", " "), heading_level=3) # Add the description _create_subsection(doc_obj, 'Definition: ', my_themes[my_theme]['description'], int(conf['themes']['indent']), font_size = int(conf['themes']['font_size']), to_bold = True) # Include the fortune if the setting is true if include_fortune: _create_subsection(doc_obj, 'Fortune: ', my_themes[my_theme]['fortune'][0].upper() + my_themes[my_theme]['fortune'][1:] + ' [system generated]', int(conf['themes']['indent']), font_size = int(conf['themes']['font_size']), to_bold = True) # Add the tags _create_subsection(doc_obj, 'Tags: ', " | ".join(my_themes[my_theme]['tags'].keys()), int(conf['themes']['indent']), font_size = int(conf['themes']['font_size']), to_bold = True, to_italics = True) # Pull 
in the quotes subsection_name = 'Theme Quotes by Interaction' doc_obj.add_heading(subsection_name, level=4) if my_theme in quotes[quotes_loc]: for interaction in quotes[quotes_loc][my_theme]: doc_obj.add_heading(get_interaction_name(interaction), level=5) the_quotes = quotes[quotes_loc][my_theme][interaction]['quotes'] # Explain that the system was not able to find a relevant quote if not the_quotes: the_quotes=[['mediumroast.io was unable to find a relevant quote or text snippet for this theme.']] for my_quote in the_quotes: _create_quote(doc_obj, my_quote[0], int(conf['themes']['indent']), font_size = int(conf['themes']['font_size'])) _create_subsection(doc_obj, 'Frequency: ', str(quotes[quotes_loc][my_theme][interaction]['frequency']), int(conf['themes']['indent']), font_size = int(conf['themes']['font_size']), to_bold = True, to_italics = True) doc_obj.add_page_break() def _create_key_themes(doc_obj, substudies, conf, substudy_excludes=list()): section_title = doc_obj.add_paragraph( 'Key Themes by Sub-Study') # Create the Themes section section_title.style = doc_obj.styles['Title'] doc_obj.add_paragraph(conf['themes']['intro'].replace("\n", " ")) for substudy in substudies: if substudy in substudy_excludes: continue doc_obj.add_heading('Sub-Study Identifier: ' + substudy + ' — ' + substudies[substudy]['description'], 1) _create_key_theme( doc_obj, substudies[substudy]['keyThemes'], substudies[substudy]['keyThemeQuotes'], conf) def change_orientation(doc_obj): current_section = doc_obj.sections[-1] new_width, new_height = current_section.page_height, current_section.page_width new_section = doc_obj.add_section(WD_SECTION.NEW_PAGE) new_section.orientation = WD_ORIENT.LANDSCAPE new_section.page_width = new_width new_section.page_height = new_height return new_section def _create_row(the_row, id, type,freq, src, snip): ID = 0 TYPE = 1 FREQ = 2 SNIP = 4 SRC = 3 the_row[ID].text = str(id) the_row[TYPE].text = str(type) the_row[FREQ].text = str(freq) 
the_row[SNIP].text = str(snip) the_row[SRC].text = str(src) def _create_rows(): """ For summary create single row For discrete foreach theme create single row """ pass def _create_summary_theme_tables(doc_obj, substudies, conf, substudy_excludes=list()): change_orientation(doc_obj) # Flip to landscape mode my_widths = [Inches(1.5), Inches(0.75), Inches(0.75), Inches(1.5), Inches(3.5)] section_title = doc_obj.add_paragraph( 'Key Theme Summary Tables') # Create the References section section_title.style = doc_obj.styles['Title'] for substudy in substudies: if substudy in substudy_excludes: continue doc_obj.add_heading('Sub-Study Identifier: ' + substudy + ' — ' + substudies[substudy]['description'], 1) my_table = doc_obj.add_table(rows=1, cols=5) my_table.style = 'Colorful Grid' header_row = my_table.rows[0].cells header_row[0].text = 'Identifier' header_row[1].text = 'Type' header_row[2].text = 'Frequency' header_row[3].text = 'Source' header_row[4].text = 'Snippet' my_row = my_table.add_row().cells ## Process the summary theme my_theme = 'Summary Theme' my_type = 'Summary' my_frequency = 'N/A' my_interaction = list(substudies[substudy]['keyThemeQuotes']['summary'].keys())[0] my_snippet = substudies[substudy]['keyThemeQuotes']['summary'][my_interaction]['quotes'][0] my_source = get_interaction_name(my_interaction) _create_row(my_row, my_theme, my_type, my_frequency, my_source, my_snippet) ## Process the discrete themes theme_loc = 'discrete_themes' quotes_loc = 'discrete' ## Add in the individual themes and their quotes my_themes = substudies[substudy]['keyThemes'][theme_loc] my_quotes = substudies[substudy]['keyThemeQuotes'][quotes_loc] my_type = 'Detailed' for my_theme in my_themes: if my_theme in my_quotes: my_row = my_table.add_row().cells my_interaction = list(my_quotes[my_theme].keys())[0] my_source = get_interaction_name(my_interaction) the_quotes = my_quotes[my_theme][my_interaction]['quotes'] # Explain that the system was not able to find a relevant quote 
if not the_quotes: the_quotes=[['mediumroast.io was unable to find a relevant quote or text snippet for this theme.']] my_snippet = the_quotes[0][0] my_frequency = my_themes[my_theme]['frequency'] _create_row(my_row, my_theme, my_type, my_frequency, my_source, my_snippet) doc_obj.add_page_break() change_orientation(doc_obj) # Flip to portrait mode def report(study, conf, substudy_excludes): # Document generics d = Document() # Create doc object style = d.styles['Normal'] font = style.font font.name = conf['font'] font.size = Pt(int(conf['font_size'])) _create_cover_page(d, study, conf) # Create the cover page _create_header(d, conf) # Create the doc header _create_footer(d, conf) # Create the doc footer ### Intro, opportunity and actions sections _create_summary(d, study['document'], conf) ### Key Themes ## Key Themes Summary Table _create_summary_theme_tables(d, study['substudies'], conf, substudy_excludes) ## Detailed Key Themes _create_key_themes(d, study['substudies'], conf, substudy_excludes) ### References _create_references(d, study['substudies'], conf) return d if __name__ == "__main__": my_args = parse_cli_args() configurator = read_config(conf_file=my_args.config_file) my_org = my_args.org.upper() # Set default items from the configuration file for the report report_conf = { 'org': configurator[my_org]['organization_name'], 'logo': configurator[my_org]['logo_image'], 'font': configurator[my_org]['font_type'], 'font_size': configurator[my_org]['font_size'], 'font_measure': configurator[my_org]['font_measure'], 'copyright': configurator[my_org]['copyright_notice'], 'confidentiality': configurator[my_org]['confidential_notice'], 'themes': { 'font_size': configurator['THEME_FORMAT']['font_size'], 'intro': configurator['THEME_FORMAT']['key_theme_intro'], 'summary_intro': configurator['THEME_FORMAT']['summary_theme_intro'], 'discrete_intro': configurator['THEME_FORMAT']['discrete_themes_intro'], 'discrete_theme_intro': 
configurator['THEME_FORMAT']['discrete_theme_intro'], 'indent': configurator['THEME_FORMAT']['indent'], } } auth_ctl = authenticate( user_name=my_args.user, secret=my_args.secret, rest_server_url=my_args.rest_url) credential = auth_ctl.login() substudy_excludes = my_args.exclude_substudies.split(',') if my_args.exclude_substudies else list() study_ctl = study(credential) success, study_obj = study_ctl.get_by_guid(my_args.guid) if success: doc_name = study_obj['studyName'].replace( ' ', '_') + "_study_report.docx" document = report(study_obj, report_conf, substudy_excludes) document.save(doc_name) else: print('CLI ERROR: This is a generic error message, as something went wrong.') sys.exit(-1)
39.653558
143
0.648501
300d26a47173ec68c9cbc913d71ba1fd8d873df4
17,388
py
Python
hubcheck/browser/browser.py
codedsk/hubcheck
2ff506eb56ba00f035300862f8848e4168452a17
[ "MIT" ]
1
2016-02-13T13:42:23.000Z
2016-02-13T13:42:23.000Z
hubcheck/browser/browser.py
codedsk/hubcheck
2ff506eb56ba00f035300862f8848e4168452a17
[ "MIT" ]
null
null
null
hubcheck/browser/browser.py
codedsk/hubcheck
2ff506eb56ba00f035300862f8848e4168452a17
[ "MIT" ]
null
null
null
import pprint import logging import datetime from selenium import webdriver import hubcheck.conf # block websites that make linkcheck slow # these are usually blocked by the workspace firewall # mozillalabs comes from using a nightly version of firefox browser # many of the others are from login authentication sites PROXY_BLACKLIST = [ "http(s)?://.*mozillalabs\\.com/?.*", # testpilot.mozillalabs.com "http(s)?://.*google-analytics\\.com/.*", # ssl.google-analytics.com 'http(s)?://.*facebook\\.com/?.*', # www.facebook.com/login.php 'http(s)?://.*fbcdn\\.com/?.*', # www.facebook.com/login.php 'http(s)?://.*accounts\\.google\\.com/?.*', # accounts.google.com 'http(s)?://.*linkedin\\.com/?.*', # linkedin.com 'http(s)?://.*twitter\\.com/?.*', # api.twitter.com # 'http(s)?://.*purdue\\.edu/apps/account/cas/?.*', # purdue cas ] MIMETYPES = [ "appl/text", # .doc \ "application/acad", # .dwg \ "application/acrobat", # .pdf \ "application/autocad_dwg", # .dwg \ "application/doc", # .doc, .rtf \ "application/dwg", # .dwg \ "application/eps", # .eps \ "application/futuresplash", # .swf \ "application/gzip", # .gz \ "application/gzipped", # .gz \ "application/gzip-compressed", # .gz \ "application/jpg", # .jpg \ "application/ms-powerpoint", # .ppt \ "application/msexcel", # .xls \ "application/mspowerpnt", # .ppt \ "application/mspowerpoint", # .ppt \ "application/msword", # .doc, .rtf \ "application/octet-stream", # .gz, .zip \ "application/pdf", # .pdf \ "application/photoshop", # .psd \ "application/postscript", # .ps, .avi, .eps \ "application/powerpoint", # .ppt \ "application/psd", # .psd \ "application/rss+xml", # .rss \ "application/rtf", # .rtf \ "application/tar", # .tar \ "application/vnd.ms-excel", # .xls, .xlt, .xla \ "application/vnd.ms-excel.addin.macroEnabled.12", # .xlam \ "application/vnd.ms-excel.sheet.binary.macroEnabled.12", # .xlsb \ "application/vnd.ms-excel.sheet.macroEnabled.12", # .xlsm \ "application/vnd.ms-excel.template.macroEnabled.12", # .xltm \ 
"application/vnd.ms-powerpoint", # .pps, .ppt, .pot, .ppa \ "application/vnd.ms-powerpoint.addin.macroEnabled.12", # .ppam \ "application/vnd.ms-powerpoint.presentation.macroEnabled.12", # .pptm \ "application/vnd.ms-powerpoint.slideshow.macroEnabled.12", # .ppsm \ "application/vnd.ms-powerpoint.template.macroEnabled.12", # .potm \ "application/vnd.ms-word", # .doc \ "application/vnd.ms-word.document.macroEnabled.12", # .docm \ "application/vnd.ms-word.template.macroEnabled.12", # .dotm \ "application/vnd.msexcel", # .xls \ "application/vnd.mspowerpoint", # .ppt \ "application/vnd.msword", # .doc \ "application/vnd.openxmlformats-officedocument.presentationml.presentation", # .pptx \ "application/vnd.openxmlformats-officedocument.presentationml.template", # .potx \ "application/vnd.openxmlformats-officedocument.presentationml.slideshow", # .ppsx \ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", # .xlsx \ "application/vnd.openxmlformats-officedocument.spreadsheetml.template", # .xltx \ "application/vnd.openxmlformats-officedocument.wordprocessingml.document", # .docx \ "application/vnd.openxmlformats-officedocument.wordprocessingml.template", # .dotx \ "application/vnd.pdf", # .pdf \ "application/vnd-mspowerpoint", # .ppt \ "application/winword", # .doc \ "application/word", # .doc \ "application/x-acad", # .dwg \ "application/x-apple-diskimage", # .dmg \ "application/x-autocad", # .dwg \ "application/x-bibtex", # .bib \ "application/x-compress", # .gz, .tar, .zip \ "application/x-compressed", # .gz, .tar, .zip \ "application/x-dos_ms_excel", # .xls \ "application/x-dwg", # .dwg \ "application/x-endnote-refer", # .enw \ "application/x-eps", # .eps \ "application/x-excel", # .xls \ "application/x-gtar", # .tar \ "application/x-gunzip", # .gz \ "application/x-gzip", # .gz \ "application/x-jpg", # .jpg \ "application/x-m", # .ppt \ "application/x-ms-excel", # .xls \ "application/x-msexcel", # .xls \ "application/x-mspublisher", # .pub \ 
"application/x-msw6", # .doc \ "application/x-msword", # .doc \ "application/x-ole-storage", # .msi \ "application/x-pdf", # .pdf \ "application/x-powerpoint", # .ppt \ "application/x-rtf", # .rtf \ "application/x-shockwave-flash", # .swf \ "application/x-shockwave-flash2-preview", # .swf \ "application/x-tar", # .tar \ "application/x-troff-msvideo", # .avi \ "application/x-soffice", # .rtf \ "application/x-xml", # .xml, .pub \ "application/x-zip", # .zip \ "application/x-zip-compressed", # .zip \ "application/xls", # .xls \ "application/xml", # .xml, .pub \ "application/zip", # .zip \ "audio/aiff", # .avi, .mov \ "audio/avi", # .avi \ "audio/mp3", # .mp3 \ "audio/mp4", # .mp4 \ "audio/mpg", # .mp3 \ "audio/mpeg", # .mp3 \ "audio/mpeg3", # .mp3 \ "audio/x-midi", # .mov \ "audio/x-mp3", # .mp3 \ "audio/x-mpg", # .mp3 \ "audio/x-mpeg", # .mp3 \ "audio/x-mpeg3", # .mp3 \ "audio/x-mpegaudio", # .mp3 \ "audio/x-wav", # .mov \ "drawing/dwg", # .dwg \ "gzip/document", # .gz \ "image/avi", # .avi \ "image/eps", # .eps \ "image/gi_", # .gif \ "image/gif", # .eps, .gif \ "image/jpeg", # .jpg, .jpeg \ "image/jpg", # .jpg \ "image/jp_", # .jpg \ "image/mpeg", # .mpeg \ "image/mov", # .mov \ "image/photoshop", # .psd \ "image/pipeg", # .jpg \ "image/pjpeg", # .jpg \ "image/png", # .png \ "image/psd", # .psd \ "image/vnd.dwg", # .dwg \ "image/vnd.rn-realflash", # .swf \ "image/vnd.swiftview-jpeg", # .jpg \ "image/x-eps", # .eps \ "image/x-dwg", # .dwg \ "image/x-photoshop", # .psd \ "image/x-xbitmap", # .gif, .jpg \ "multipart/x-tar", # .tar \ "multipart/x-zip", # .zip \ "octet-stream", # possibly some .ppt files \ "text/csv", # .csv \ "text/mspg-legacyinfo", # .msi \ "text/pdf", # .pdf \ "text/richtext", # .rtf \ "text/rtf", # .rtf \ "text/x-pdf", # .pdf \ "text/xml", # .xml, .rss \ "video/avi", # .avi, .mov \ "video/mp4v-es", # .mp4 \ "video/msvideo", # .avi \ "video/quicktime", # .mov \ "video/x-flv", # .flv \ "video/x-m4v", # .m4v \ "video/x-msvideo", # .avi \ 
"video/x-quicktime", # .mov \ "video/xmpg2", # .avi \ "zz-application/zz-winassoc-psd", # .psd \ ] class Browser(object): """hubcheck webdriver interface""" def __init__(self, mimetypes=[], downloaddir='/tmp'): self.logger = logging.getLogger(__name__) self.logger.info("setting up a web browser") self._browser = None self.wait_time = 2 self.marker = 0 self.proxy_client = None self.proxy_blacklist = PROXY_BLACKLIST self.profile = None self.downloaddir = downloaddir self.mimetypes = mimetypes def __del__(self): self.close() def setup_browser_preferences(self): """browser preferences should be setup by subclasses """ pass def start_proxy_client(self): # setup proxy if needed if hubcheck.conf.settings.proxy is None: self.logger.info("proxy not started, not starting client") return # start the client self.proxy_client = hubcheck.conf.settings.proxy.create_client() # setup the proxy website blacklist if self.proxy_client is not None: self.logger.info("setting up proxy blacklist") for url_re in self.proxy_blacklist: self.logger.debug("blacklisting %s" % url_re) self.proxy_client.blacklist(url_re,200) def stop_proxy_client(self): if self.proxy_client is not None: self.logger.info("stopping proxy client") self.proxy_client.close() self.proxy_client = None def setup_browser_size_and_position(self): # set the amount of time to wait for an element to appear on the page self._browser.implicitly_wait(self.wait_time) # place the browser window in the upper left corner of the screen self._browser.set_window_position(0, 0) # resize the window to just shy of our 1024x768 screen self._browser.set_window_size(1070,700) def launch(self): """subclass should add code required to launch the browser """ pass def get(self,url): if self._browser is None: self.launch() self.logger.debug("retrieving url: %s" % (url)) self._browser.get(url) def close(self): if self._browser is None: return self.logger.info("closing browser") self._browser.quit() self._browser = None self.profile 
self.stop_proxy_client() def error_loading_page(self,har_entry): """ check if there was an error loading the web page returns True or False """ harurl = har_entry['request']['url'] harstatus = har_entry['response']['status'] self.logger.debug("%s returned status %s" % (harurl,harstatus)) result = None if (harstatus >= 100) and (harstatus <= 199): # information codes result = False elif (harstatus >= 200) and (harstatus <= 299): # success codes result = False elif (harstatus >= 300) and (harstatus <= 399): # redirect codes result = False elif (harstatus >= 400) and (harstatus <= 499): # client error codes # client made an invalid request (bad links) # page does not exist result = True elif (harstatus >= 500) and (harstatus <= 599): # server error codes # client made a valid request, # but server failed while responsing. result = True else: result = True return result def page_load_details(self,url=None,follow_redirects=True): """ return the har entry for the last page loaded follow redirects to make sure you get the har entry for the page that was eventually loaded. A return value of None means no page was ever loaded. 
""" if not self.proxy_client: return None if url is None: url = self._browser.current_url self.logger.debug("processing har for %s" % (url)) har = self.proxy_client.har self.logger.debug("har entry = %s" % (pprint.pformat(har))) return_entry = None for entry in har['log']['entries']: harurl = entry['request']['url'] harstatus = entry['response']['status'] if url == None: # we are following a redirect from below return_entry = entry elif url == harurl: # the original url matches the url for this har entry exactly return_entry = entry elif (not url.endswith('/')) and (url+'/' == harurl): # the original url almost matches the url for this har entry return_entry = entry if return_entry is not None: if follow_redirects and (harstatus >= 300) and (harstatus <= 399): # follow the redirect (should be the next har entry) url = None continue else: # found our match break self.logger.debug("har for url = %s" % (pprint.pformat(return_entry))) return return_entry def take_screenshot(self,filename=None): """ Take a screen shot of the browser, store it in filename. """ if self._browser is None: return if filename is None: dts = datetime.datetime.today().strftime("%Y%m%d%H%M%S") filename = 'hcss_%s.png' % dts self.logger.debug("screenshot filename: %s" % (filename)) self._browser.save_screenshot(filename) def next_marker(self): self.marker += 1 return self.marker
45.046632
93
0.41868
300da26c3c232a2570a4579306f4ee78357682ca
4,278
py
Python
python/caty/jsontools/selector/parser.py
hidaruma/caty
f71d2ab0a001ea4f7a96a6e02211187ebbf54773
[ "MIT" ]
null
null
null
python/caty/jsontools/selector/parser.py
hidaruma/caty
f71d2ab0a001ea4f7a96a6e02211187ebbf54773
[ "MIT" ]
null
null
null
python/caty/jsontools/selector/parser.py
hidaruma/caty
f71d2ab0a001ea4f7a96a6e02211187ebbf54773
[ "MIT" ]
null
null
null
from topdown import * from caty.jsontools import stdjson from caty.jsontools import xjson from caty.jsontools.selector import stm as default_factory from caty.core.spectypes import UNDEFINED from caty.core.language import name_token class JSONPathSelectorParser(Parser): def __init__(self, empty_when_error=False, ignore_rest=False, factory=None): Parser.__init__(self) self.empty_when_error = empty_when_error self.ignore_rest = ignore_rest self.factory = factory if factory else default_factory def __call__(self, seq): o = chainl([self.all, self.tag, self.exp_tag, self.untagged, self.length, self.it, self.name, self.index, self.namewildcard, self.itemwildcard, try_(self.oldtag), ], self.dot)(seq) o = self.factory.SelectorWrapper(o) optional = option(u'?')(seq) if optional and option('=')(seq): d = xjson.parse(seq) else: d = UNDEFINED if not seq.eof and not self.ignore_rest: raise ParseFailed(seq, self) o.set_optional(optional) o.set_default(d) return o def apply_option(self, stm): stm.empty_when_error = self.empty_when_error return stm def dot(self, seq): seq.parse('.') def _(a, b): #self.apply_option(a) #self.apply_option(b) return a.chain(b) return _ def all(self, seq): seq.parse('$') return self.factory.AllSelector() def name(self, seq): key = seq.parse([self.namestr, lambda s:self.quoted(s, '"'), lambda s: self.quoted(s, "'")]) optional = False #optional = option(u'?')(seq) return self.factory.PropertySelector(key, optional) def namestr(self, seq): return seq.parse(name_token) def quoted(self, seq, qc): def series_of_escape(s): import itertools return len(list(itertools.takewhile(lambda c: c=='\\', reversed(s)))) try: seq.ignore_hook = True st = [seq.parse(qc)] s = seq.parse(until(qc)) while True: if series_of_escape(s) % 2 == 0: st.append(s) break else: st.append(s) s = seq.parse(Regex(r'%s[^%s]*' % (qc, qc))) st.append(seq.parse(qc)) return stdjson.loads('"%s"'%''.join(st[1:-1])) except EndOfBuffer, e: raise ParseFailed(seq, string) finally: seq.ignore_hook = 
False def index(self, seq): idx = int(seq.parse(Regex(r'([0-9]+)'))) optional = False #optional = option(u'?')(seq) return self.factory.ItemSelector(idx, optional) def namewildcard(self, seq): seq.parse('*') return self.factory.NameWildcardSelector() def itemwildcard(self, seq): seq.parse('#') return self.factory.ItemWildcardSelector() def oldtag(self, seq): seq.parse('^') name = seq.parse(option([self.namestr, lambda s:self.quoted(s, '"'), lambda s: self.quoted(s, "'")], None)) if name is not None: return TagSelector(name, False) t = seq.parse(['*', '^']) if t == '*': e = seq.parse(option('!', None)) return self.factory.TagSelector(None, bool(e)) else: e = seq.parse(option('!', None)) return self.factory.TagReplacer(None, bool(e)) def tag(self, seq): seq.parse('tag()') return self.factory.TagNameSelector(False) def exp_tag(self, seq): seq.parse('exp-tag()') return self.factory.TagNameSelector(True) def untagged(self, seq): v = seq.parse(choice('untagged()', 'content()')) return self.factory.TagContentSelector(v) def length(self, seq): seq.parse('length()') return self.factory.LengthSelector() def it(self, seq): seq.parse('it()') return self.factory.ItSelector()
31.688889
115
0.542076
300f199f66802964a7132356800aed7b1a0be7f9
7,057
py
Python
appengine/gae_defer_manager/deferred_manager/tests.py
meedan/montage
4da0116931edc9af91f226876330645837dc9bcc
[ "Apache-2.0" ]
6
2018-07-31T16:48:07.000Z
2020-02-01T03:17:51.000Z
appengine/gae_defer_manager/deferred_manager/tests.py
meedan/montage
4da0116931edc9af91f226876330645837dc9bcc
[ "Apache-2.0" ]
41
2018-08-07T16:43:07.000Z
2020-06-05T18:54:50.000Z
appengine/gae_defer_manager/deferred_manager/tests.py
meedan/montage
4da0116931edc9af91f226876330645837dc9bcc
[ "Apache-2.0" ]
1
2018-08-07T16:40:18.000Z
2018-08-07T16:40:18.000Z
# -*- coding: utf8 -*- import datetime import mock import os import unittest import webapp2 from google.appengine.ext import testbed, deferred from google.appengine.api import queueinfo from . import models from .handler import application from .wrapper import defer TESTCONFIG_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "testconfig") def noop(*args, **kwargs): pass def noop_fail(*args, **kwargs): raise Exception def noop_permanent_fail(*args, **kwargs): raise deferred.PermanentTaskFailure class Foo(object): def bar(self): pass def __call__(self): pass class BaseTest(unittest.TestCase): def setUp(self): self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_datastore_v3_stub() self.testbed.init_taskqueue_stub(root_path=TESTCONFIG_DIR) self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME) super(BaseTest, self).setUp() def reload(self, obj): return obj.get(obj.key()) class DeferTaskTests(BaseTest): def test_creates_state(self): task_state = defer(noop) queue_state = models.QueueState.get_by_key_name("default") self.assertTrue(queue_state) self.assertEqual(task_state.parent().key(), queue_state.key()) def test_unique_task_ref(self): unique_until = datetime.datetime.utcnow() + datetime.timedelta(days=1) self.assertRaises(AssertionError, defer, noop, unique_until=unique_until) self.assertTrue(defer(noop, task_reference="project1", unique_until=unique_until)) self.assertFalse(defer(noop, task_reference="project1", unique_until=unique_until)) def test_args_repr(self): task_state = defer(noop, 2, u"bår") self.assertEqual(task_state.deferred_args, u"(2, u'b\\xe5r')") def test_kwargs_repr(self): task_state = defer(noop, foo="bår", _bar="foo") self.assertEqual(task_state.deferred_kwargs, u"{'foo': 'b\\xc3\\xa5r'}") def test_class_method_repr(self): task_state = defer(Foo().bar) self.assertEqual(task_state.deferred_function, u"<class 'deferred_manager.tests.Foo'>.bar") def test_module_func_repr(self): task_state = 
defer(noop) self.assertEqual(task_state.deferred_function, u"deferred_manager.tests.noop") def test_builtin_func_repr(self): task_state = defer(map) self.assertEqual(task_state.deferred_function, u"map") def test_callable_obj_func_repr(self): task_state = defer(Foo) self.assertEqual(task_state.deferred_function, u"deferred_manager.tests.Foo") def test_builtin_method_repr(self): task_state = defer(datetime.datetime.utcnow) self.assertEqual(task_state.deferred_function, u"<type 'datetime.datetime'>.utcnow") class ModelTaskTests(unittest.TestCase): def test_queue_state(self): queue_state = models.QueueState(name="default") self.assertEqual(queue_state.retry_limit, 7) self.assertEqual(queue_state.age_limit, 2*24*3600) # 2 days class HandlerTests(BaseTest): def make_request(self, path, task_name, queue_name, headers=None, environ=None, **kwargs): request_headers = { "X-AppEngine-TaskName": task_name, "X-AppEngine-QueueName": queue_name, 'X-AppEngine-TaskExecutionCount': kwargs.pop('retries', 0) } if headers: request_headers.update(headers) request_environ = { "SERVER_SOFTWARE": "Development" } if environ: request_environ.update(environ) return webapp2.Request.blank('/', environ=request_environ, headers=request_headers, **kwargs) def test_success(self): task_state = defer(noop) noop_pickle = deferred.serialize(noop) request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle) response = request.get_response(application) self.assertEqual(response.status_int, 200) task_state = self.reload(task_state) self.assertTrue(task_state.task_name) self.assertTrue(task_state.is_complete) self.assertFalse(task_state.is_running) self.assertFalse(task_state.is_permanently_failed) def test_failure(self): task_state = defer(noop_fail) noop_pickle = deferred.serialize(noop_fail) request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle) response = request.get_response(application) self.assertEqual(response.status_int, 500) task_state = 
self.reload(task_state) self.assertFalse(task_state.is_complete) self.assertFalse(task_state.is_running) self.assertFalse(task_state.is_permanently_failed) def test_retry_success(self): task_state = defer(noop) noop_pickle = deferred.serialize(noop) request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=2) response = request.get_response(application) self.assertEqual(response.status_int, 200) task_state = self.reload(task_state) self.assertEqual(task_state.retry_count, 2) self.assertTrue(task_state.is_complete) self.assertFalse(task_state.is_running) self.assertFalse(task_state.is_permanently_failed) def test_retry_max_retries(self): task_state = defer(noop_fail) # give the task an old age. tasks must fail both the retry and age conditions (if specified) task_state.first_run = datetime.datetime.utcnow() - datetime.timedelta(days=2) task_state.put() noop_pickle = deferred.serialize(noop_fail) request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=8) response = request.get_response(application) self.assertEqual(response.status_int, 500) task_state = self.reload(task_state) self.assertEqual(task_state.retry_count, 8) self.assertTrue(task_state.is_complete) self.assertFalse(task_state.is_running) self.assertTrue(task_state.is_permanently_failed) def test_permanent_failure(self): task_state = defer(noop_permanent_fail) noop_pickle = deferred.serialize(noop_permanent_fail) request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle) response = request.get_response(application) self.assertEqual(response.status_int, 200) task_state = self.reload(task_state) self.assertEqual(task_state.retry_count, 0) self.assertTrue(task_state.is_complete) self.assertFalse(task_state.is_running) self.assertTrue(task_state.is_permanently_failed) def test_no_task_state(self): noop_pickle = deferred.serialize(noop) request = self.make_request("/", 'task1', 'default', POST=noop_pickle) response = 
request.get_response(application) self.assertEqual(response.status_int, 200)
34.257282
102
0.69803
3010ae6e12181dbe483e73b05d5e55639ba72b1f
240
py
Python
backend/categories/models.py
cristianemoyano/django-react-webapp
c91d263f58b0d66a8c260e095d0ec6cee66f8afd
[ "MIT" ]
null
null
null
backend/categories/models.py
cristianemoyano/django-react-webapp
c91d263f58b0d66a8c260e095d0ec6cee66f8afd
[ "MIT" ]
null
null
null
backend/categories/models.py
cristianemoyano/django-react-webapp
c91d263f58b0d66a8c260e095d0ec6cee66f8afd
[ "MIT" ]
null
null
null
from django.db import models


class Category(models.Model):
    """A named category; names are unique and listings sort alphabetically."""

    # Unique human-readable category name (also used as the display string).
    name = models.CharField(max_length=100, unique=True)

    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ('name',)

    def __str__(self):
        # Rendered in the Django admin and anywhere the object is stringified.
        return self.name
17.142857
56
0.633333
30113171dc48ed74cadebf84f1f1fd11cb8f6566
4,023
py
Python
BdBG.py
rongjiewang/BdBG
b4a8fab0fa083aecab10f15431e37b0445722007
[ "MIT" ]
2
2018-11-21T06:39:34.000Z
2018-11-21T06:43:53.000Z
BdBG.py
rongjiewang/BdBG
b4a8fab0fa083aecab10f15431e37b0445722007
[ "MIT" ]
null
null
null
BdBG.py
rongjiewang/BdBG
b4a8fab0fa083aecab10f15431e37b0445722007
[ "MIT" ]
null
null
null
"""MIT License Copyright (c) 2018 rongjiewang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" import sys import argparse from bucket import encodeBucketClass, decodeBucketClass from deBruijnGraph import encodeGraphClass, decodeGraphClass def args_check(args): if not args.encode and not args.decode: sys.exit("you must give a -e or -d for encode/decode") if not args.input and not args.paired: sys.exit("you must give a file input with -i input for single end data or -p -1 input1 -2 input2 for paired-end data") if not args.output: sys.exit("you must give a file output with -o output") return def main(args): args_check(args) #encode if args.encode: en_bucket = encodeBucketClass(args.input, args.output, args.paired, \ args.input1, args.input2, args.kmer, args.lossless, args.verbose) en_bucket.encode() en_graph = encodeGraphClass(args.output, args.paired, args.kmer, \ args.verbose, en_bucket.sequenceTableSave) del en_bucket en_graph.encode() del en_graph sys.exit() #decode else: de_bucket = 
decodeBucketClass(args.input, args.output, args.verbose) de_bucket.decode() de_graph = decodeGraphClass(args.input, args.output, de_bucket.paired, de_bucket.readNum,\ de_bucket.bucketIndexLen, de_bucket.lossless, de_bucket.verbose) de_graph.loadBucktData(de_bucket.bucketIndex, de_bucket.bucketCov, de_bucket.readIndexPos,\ de_bucket.readrc, de_bucket.readN, de_bucket.readLen, de_bucket.readOrder) del de_bucket de_graph.decode() del de_graph sys.exit() if __name__ == '__main__': parser = argparse.ArgumentParser(description = 'BdBG') parser.add_argument("-e", "--encode", help="encoding",action="store_true") parser.add_argument("-d", "--decode", help="decoding",action="store_true") parser.add_argument("-i", "--input",type=str, help="inputFile") parser.add_argument("-o", "--output", help="outputFile") parser.add_argument("-p", "--paired", help="paired-end flag",action="store_true") parser.add_argument("-1", "--input1", help="paired-end file1") parser.add_argument("-2", "--input2", help="paired-end file2") parser.add_argument("-l", "--lossless", help="keep the reads orders, default:false, \ if encode paired-end files, default:ture ",action="store_true") parser.add_argument("-k", "--kmer",type=int, default=15, help="kmer size for bucket and de Bruijn graph, default=15") parser.add_argument("-v","--verbose", action="store_true", help="verbose information") args = parser.parse_args() main(args)
36.243243
126
0.653492
30119e15a78b5e7aea8cf1c27d45b2140994ce7e
10,820
py
Python
web/lib/console.py
jonathanverner/brython-jinja2
cec6e16de1750203a858d0acf590f230fc3bf848
[ "BSD-3-Clause" ]
2
2020-09-13T17:51:55.000Z
2020-11-25T18:47:12.000Z
web/lib/console.py
jonathanverner/brython-jinja2
cec6e16de1750203a858d0acf590f230fc3bf848
[ "BSD-3-Clause" ]
2
2020-11-25T19:18:15.000Z
2021-06-01T21:48:12.000Z
web/lib/console.py
jonathanverner/brython-jinja2
cec6e16de1750203a858d0acf590f230fc3bf848
[ "BSD-3-Clause" ]
null
null
null
""" This module provides the interactive Python console. """ import sys import traceback from browser import window class Console: """ A class providing a console widget. The constructor accepts a domnode which should be a textarea and it takes it over and turns it into a python interactive console. """ _credits = """ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands for supporting Python development. See www.python.org for more information. """ _copyright = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com All Rights Reserved. Copyright (c) 2001-2013 Python Software Foundation. All Rights Reserved. Copyright (c) 2000 BeOpen.com. All Rights Reserved. Copyright (c) 1995-2001 Corporation for National Research Initiatives. All Rights Reserved. Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All Rights Reserved. """ _license = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ def __init__(self, elem): self._elem = elem self.credits.__repr__ = lambda: Console._credits self.copyright.__repr__ = lambda: Console._copyright self.license.__repr__ = lambda: Console._license self._redirected = False self._oldstdout = None self._oldstderr = None self.history = [] self.current = 0 self._status = "main" # or "block" if typing inside a block self.current_line = "" # execution namespace self.editor_ns = { 'credits': self.credits, 'copyright': self.copyright, 'license': self.license, '__name__': '__console__', } self._elem.bind('keypress', self.my_key_press) self._elem.bind('keydown', self.my_key_down) self._elem.bind('click', self.cursor_to_end) version = sys.implementation.version self._elem.value = "Brython %s.%s.%s on %s %s\n%s\n>>> " % (version[0], version[1], version[2], window.navigator.appName, window.navigator.appVersion, 'Type "copyright()", "credits()" or "license()" for more information.') self._elem.focus() self.cursor_to_end() def add_to_ns(self, key, value): """ Adds key to the console's local scope. 
Think: ``` key=value ``` """ self.editor_ns[key] = value def _redirect_out(self): if self._redirected: sys.__console__ = False sys.stdout = self._oldstdout sys.stderr = self._oldstderr self._redirected = False else: sys.__console__ = True self._oldstdout = sys.stdout self._oldstderr = sys.stderr sys.stdout = self sys.stderr = self self._redirected = True def credits(self): self.write(self._credits) def copyright(self): self.write(self._copyright) def license(self): self.write(self._license) def write(self, data): self._elem.value += str(data) def cursor_to_end(self, *_args): pos = len(self._elem.value) self._elem.setSelectionRange(pos, pos) self._elem.scrollTop = self._elem.scrollHeight def get_col(self, _area): """ returns the column position of the cursor """ sel = self._elem.selectionStart lines = self._elem.value.split('\n') for line in lines[:-1]: sel -= len(line) + 1 return sel def my_key_press(self, event): if event.keyCode == 9: # tab key event.preventDefault() self._elem.value += " " elif event.keyCode == 13: # return src = self._elem.value if self._status == "main": self.current_line = src[src.rfind('>>>') + 4:] elif self._status == "3string": self.current_line = src[src.rfind('>>>') + 4:] self.current_line = self.current_line.replace('\n... ', '\n') else: self.current_line = src[src.rfind('...') + 4:] if self._status == 'main' and not self.current_line.strip(): self._elem.value += '\n>>> ' event.preventDefault() return self._elem.value += '\n' self.history.append(self.current_line) self.current = len(self.history) if self._status == "main" or self._status == "3string": try: self._redirect_out() _ = self.editor_ns['_'] = eval(self.current_line, self.editor_ns) if _ is not None: self.write(repr(_) + '\n') self._elem.value += '>>> ' self._status = "main" except IndentationError: self._elem.value += '... 
' self._status = "block" except SyntaxError as msg: if str(msg) == 'invalid syntax : triple string end not found' or \ str(msg).startswith('Unbalanced bracket'): self._elem.value += '... ' self._status = "3string" elif str(msg) == 'eval() argument must be an expression': try: self._redirect_out() exec(self.current_line, self.editor_ns) except: # pylint: disable=bare-except; any exception can happen here traceback.print_exc(self) finally: self._redirect_out() self._elem.value += '>>> ' self._status = "main" elif str(msg) == 'decorator expects function': self._elem.value += '... ' self._status = "block" else: traceback.print_exc(self) self._elem.value += '>>> ' self._status = "main" # pylint: disable=bare-except; any exception can happen here except: traceback.print_exc(self) self._elem.value += '>>> ' self._status = "main" finally: self._redirect_out() elif self.current_line == "": # end of block block = src[src.rfind('>>>') + 4:].splitlines() block = [block[0]] + [b[4:] for b in block[1:]] block_src = '\n'.join(block) # status must be set before executing code in globals() self._status = "main" try: self._redirect_out() _ = exec(block_src, self.editor_ns) if _ is not None: print(repr(_)) # pylint: disable=bare-except; any exception can happen here except: traceback.print_exc(self) finally: self._redirect_out() self._elem.value += '>>> ' else: self._elem.value += '... 
' self.cursor_to_end() event.preventDefault() def my_key_down(self, event): if event.keyCode == 37: # left arrow sel = self.get_col(self._elem) if sel < 5: event.preventDefault() event.stopPropagation() elif event.keyCode == 36: # line start pos = self._elem.selectionStart col = self.get_col(self._elem) self._elem.setSelectionRange(pos - col + 4, pos - col + 4) event.preventDefault() elif event.keyCode == 38: # up if self.current > 0: pos = self._elem.selectionStart col = self.get_col(self._elem) # remove self.current line self._elem.value = self._elem.value[:pos - col + 4] self.current -= 1 self._elem.value += self.history[self.current] event.preventDefault() elif event.keyCode == 40: # down if self.current < len(self.history) - 1: pos = self._elem.selectionStart col = self.get_col(self._elem) # remove self.current line self._elem.value = self._elem.value[:pos - col + 4] self.current += 1 self._elem.value += self.history[self.current] event.preventDefault() elif event.keyCode == 8: # backspace src = self._elem.value lstart = src.rfind('\n') if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6): event.preventDefault() event.stopPropagation()
39.926199
139
0.539464
30124f8335d73ee7802841f7737a00cbfad26c9f
1,333
py
Python
lldb/test/API/functionalities/breakpoint/breakpoint_on_overload/TestBreakOnOverload.py
LaudateCorpus1/llvm-project
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
[ "Apache-2.0" ]
null
null
null
lldb/test/API/functionalities/breakpoint/breakpoint_on_overload/TestBreakOnOverload.py
LaudateCorpus1/llvm-project
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
[ "Apache-2.0" ]
null
null
null
lldb/test/API/functionalities/breakpoint/breakpoint_on_overload/TestBreakOnOverload.py
LaudateCorpus1/llvm-project
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
[ "Apache-2.0" ]
null
null
null
""" Test setting a breakpoint on an overloaded function by name. """ import re import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class TestBreakpointOnOverload(TestBase): mydir = TestBase.compute_mydir(__file__) def check_breakpoint(self, name): bkpt = self.target.BreakpointCreateByName(name) self.assertEqual(bkpt.num_locations, 1, "Got one location") addr = bkpt.locations[0].GetAddress() self.assertTrue(addr.function.IsValid(), "Got a real function") # On Window, the name of the function includes the return value. # We still succeed in setting the breakpoint, but the resultant # name is not the same. # So just look for the name we used for the breakpoint in the # function name, rather than doing an equality check. self.assertIn(name, addr.function.name, "Got the right name") def test_break_on_overload(self): self.build() self.target = lldbutil.run_to_breakpoint_make_target(self) self.check_breakpoint("a_function(int)") self.check_breakpoint("a_function(double)") self.check_breakpoint("a_function(int, double)") self.check_breakpoint("a_function(double, int)")
35.078947
72
0.685671
30142c5188e376313f7d79178393a7007c7faa25
2,611
py
Python
Intermedio/28 Pomodoro/main.py
YosafatM/100-days-of-Python
e81ab663b7aacb7a904f27a4e6774837cf3594a1
[ "MIT" ]
null
null
null
Intermedio/28 Pomodoro/main.py
YosafatM/100-days-of-Python
e81ab663b7aacb7a904f27a4e6774837cf3594a1
[ "MIT" ]
null
null
null
Intermedio/28 Pomodoro/main.py
YosafatM/100-days-of-Python
e81ab663b7aacb7a904f27a4e6774837cf3594a1
[ "MIT" ]
null
null
null
from tkinter import * # ---------------------------- CONSTANTS ------------------------------- # PINK = "#e2979c" RED = "#e7305b" GREEN = "#9bdeac" YELLOW = "#f7f5dd" FONT_NAME = "Courier" WORK_MIN = 25 SHORT_BREAK_MIN = 5 LONG_BREAK_MIN = 20 is_counting = False reps = 0 timer = None # ---------------------------- TIMER RESET ------------------------------- # def reset_timer(): global timer, reps, is_counting if timer is not None: window.after_cancel(timer) lb_title.config(text="Timer", fg=GREEN) canvas.itemconfig(count_text, text="00:00") lb_checks["text"] = "" timer = None reps = 0 is_counting = False # ---------------------------- TIMER MECHANISM ------------------------------- # def start_timer(): global is_counting, reps if is_counting: pass reps += 1 if reps % 8 == 0: lb_title.config(text="Break", fg=RED) minutes = LONG_BREAK_MIN elif reps % 2 == 0: lb_title.config(text="Break", fg=PINK) minutes = SHORT_BREAK_MIN else: lb_title.config(text="Work", fg=GREEN) minutes = WORK_MIN is_counting = True count_down(minutes * 60) # ---------------------------- COUNTDOWN MECHANISM ------------------------------- # def count_down(count): global reps minutes = count // 60 seconds = count % 60 seconds = f"0{seconds}" if seconds < 10 else seconds canvas.itemconfig(count_text, text=f"{minutes}:{seconds}") if count > 0: global timer timer = window.after(1000, count_down, count - 1) elif reps % 2 == 1: global is_counting is_counting = False lb_checks["text"] += "✅" start_timer() # Break # ---------------------------- UI SETUP ------------------------------- # window = Tk() window.title("Pomodoro") window.config(padx=100, pady=50, bg=YELLOW) canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0) image = PhotoImage(file="tomato.png") canvas.create_image(100, 112, image=image) count_text = canvas.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold")) bt_start = Button(text="Start", highlightthickness=0, command=start_timer) bt_reset = 
Button(text="Reset", highlightthickness=0, command=reset_timer) lb_checks = Label(text="", fg=GREEN, bg=YELLOW) lb_title = Label(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 30, "bold")) lb_title.grid(column=1, row=0) canvas.grid(column=1, row=1) bt_start.grid(column=0, row=2) bt_reset.grid(column=2, row=2) lb_checks.grid(column=1, row=3) window.mainloop()
27.484211
99
0.573727
301761f399153aa6d426b47322db15a73620a802
1,983
py
Python
ImageAnalyses/tests_maps.py
nicholst/narps
0a9d71a5ab435b40dcd50fea82d2961a43f0a3fc
[ "MIT" ]
30
2019-07-17T22:27:02.000Z
2021-12-21T11:38:12.000Z
ImageAnalyses/tests_maps.py
nicholst/narps
0a9d71a5ab435b40dcd50fea82d2961a43f0a3fc
[ "MIT" ]
21
2019-07-18T14:26:14.000Z
2020-01-20T17:59:19.000Z
ImageAnalyses/tests_maps.py
nicholst/narps
0a9d71a5ab435b40dcd50fea82d2961a43f0a3fc
[ "MIT" ]
12
2019-07-20T06:14:21.000Z
2021-09-25T00:35:29.000Z
# tests for narps code # - currently these are all just smoke tests import pytest import os import pandas from narps import Narps from AnalyzeMaps import mk_overlap_maps,\ mk_range_maps, mk_std_maps,\ mk_correlation_maps_unthresh, analyze_clusters,\ plot_distance_from_mean, get_thresh_similarity from MetaAnalysis import get_thresholded_Z_maps from ThreshVoxelStatistics import get_thresh_voxel_stats,\ get_zstat_diagnostics from GetMeanSimilarity import get_similarity_summary # Use a fixed base dir so that we can # access the results as a circleci artifact @pytest.fixture(scope="session") def narps(): basedir = '/tmp/data' assert os.path.exists(basedir) narps = Narps(basedir) narps.load_data() narps.metadata = pandas.read_csv( os.path.join(narps.dirs.dirs['metadata'], 'all_metadata.csv')) return(narps) # tests # AnalyzeMaps def test_mk_overlap_maps(narps): # create maps showing overlap of thresholded images mk_overlap_maps(narps) def test_mk_range_maps(narps): mk_range_maps(narps) def test_mk_std_maps(narps): mk_std_maps(narps) def test_unthresh_correlation_analysis(narps): # conbine these into a single test # since they share data corr_type = 'spearman' dendrograms, membership = mk_correlation_maps_unthresh( narps, corr_type=corr_type) _ = analyze_clusters( narps, dendrograms, membership, corr_type=corr_type) def test_plot_distance_from_mean(narps): plot_distance_from_mean(narps) def test_get_thresh_similarity(narps): get_thresh_similarity(narps) # this was created for ALE but we do it earlier here def test_thresh_zmap(narps): # create thresholded versions of Z maps narps = get_thresholded_Z_maps( narps) def test_thresh_voxel_stats(narps): get_zstat_diagnostics(narps) get_thresh_voxel_stats(narps.basedir) def test_mean_similarity(narps): _ = get_similarity_summary(narps)
24.182927
70
0.753404
3017c55b6cc7146b6404407438f7cdac4217ef3c
2,529
py
Python
dtcwt/plotting.py
santosh653/dtcwt
01d9e87dc9abfa244a89c1f05aebf3dec6999f3a
[ "BSD-2-Clause" ]
61
2015-01-04T09:21:29.000Z
2022-03-07T16:25:02.000Z
dtcwt/plotting.py
santosh653/dtcwt
01d9e87dc9abfa244a89c1f05aebf3dec6999f3a
[ "BSD-2-Clause" ]
17
2015-04-02T13:37:07.000Z
2018-03-07T09:57:57.000Z
dtcwt/plotting.py
santosh653/dtcwt
01d9e87dc9abfa244a89c1f05aebf3dec6999f3a
[ "BSD-2-Clause" ]
26
2015-04-16T06:22:16.000Z
2021-12-07T09:17:44.000Z
""" Convenience functions for plotting DTCWT-related objects. """ from __future__ import absolute_import import numpy as np from matplotlib.pyplot import * __all__ = ( 'overlay_quiver', ) def overlay_quiver(image, vectorField, level, offset): """Overlays nicely coloured quiver plot of complex coefficients over original full-size image, providing a useful phase visualisation. :param image: array holding grayscale values on the interval [0, 255] to display :param vectorField: a single [MxNx6] numpy array of DTCWT coefficients :param level: the transform level (1-indexed) of *vectorField*. :param offset: Offset for DTCWT coefficients (typically 0.5) .. note:: The *level* parameter is 1-indexed meaning that the third level has index "3". This is unusual in Python but is kept for compatibility with similar MATLAB routines. Should also work with other types of complex arrays (e.g., SLP coefficients), as long as the format is the same. Usage example: .. plot:: :include-source: true import dtcwt import dtcwt.plotting as plotting mandrill = datasets.mandrill() transform2d = dtcwt.Transform2d() mandrill_t = transform2d.forward(mandrill, nlevels=5) plotting.overlay_quiver(mandrill*255, mandrill_t.highpasses[-1], 5, 0.5) .. codeauthor:: R. Anderson, 2005 (MATLAB) .. codeauthor:: S. C. 
Forshaw, 2014 (Python) """ # Make sure imshow() uses the full range of greyscale values imshow(image, cmap=cm.gray, clim=(0,255)) hold(True) # Set up the grid for the quiver plot g1 = np.kron(np.arange(0, vectorField[:,:,0].shape[0]).T, np.ones((1,vectorField[:,:,0].shape[1]))) g2 = np.kron(np.ones((vectorField[:,:,0].shape[0], 1)), np.arange(0, vectorField[:,:,0].shape[1])) # Choose a coloUrmap cmap = cm.spectral scalefactor = np.max(np.max(np.max(np.max(np.abs(vectorField))))) vectorField[-1,-1,:] = scalefactor for sb in range(0, vectorField.shape[2]): hold(True) thiscolour = cmap(sb / float(vectorField.shape[2])) # Select colour for this subband hq = quiver(g2*(2**level) + offset*(2**level), g1*(2**level) + offset*(2**level), np.real(vectorField[:,:,sb]), \ np.imag(vectorField[:,:,sb]), color=thiscolour, scale=scalefactor*2**level) quiverkey(hq, 1.05, 1.00-0.035*sb, 0, "subband " + np.str(sb), coordinates='axes', color=thiscolour, labelcolor=thiscolour, labelpos='E') hold(False) return hq
34.643836
145
0.670621
30183648f14231172bd2966dd1817ea75267e595
428
py
Python
src/grokcore/registries/tests/registries/global.py
zopefoundation/grokcore.registries
90c5221867e07c0194df36f13fa4dda5f3a4923c
[ "ZPL-2.1" ]
null
null
null
src/grokcore/registries/tests/registries/global.py
zopefoundation/grokcore.registries
90c5221867e07c0194df36f13fa4dda5f3a4923c
[ "ZPL-2.1" ]
null
null
null
src/grokcore/registries/tests/registries/global.py
zopefoundation/grokcore.registries
90c5221867e07c0194df36f13fa4dda5f3a4923c
[ "ZPL-2.1" ]
null
null
null
import grokcore.component as grok from grokcore.component.interfaces import IContext import grokcore.view as view from zope.interface import Interface from grokcore.registries.tests.registries.interfaces import IExample class MyExample(grok.GlobalUtility): grok.name('global') grok.implements(IExample) class Page(view.View): grok.context(IContext) def render(self): return u"I Am grabbed from GSM"
23.777778
68
0.773364
301a3402ce430cb702ae2f205a2ed74937b55dc4
2,481
py
Python
graphdata/loglog.py
whalenpt/graphdata
d169150f860551d2049342ecf310dc1783987266
[ "MIT" ]
null
null
null
graphdata/loglog.py
whalenpt/graphdata
d169150f860551d2049342ecf310dc1783987266
[ "MIT" ]
null
null
null
graphdata/loglog.py
whalenpt/graphdata
d169150f860551d2049342ecf310dc1783987266
[ "MIT" ]
null
null
null
from graphdata.shared.shared1D import AuxPlotLabelLL1D from graphdata.shared.shared1D import ProcessData1D from graphdata.shared.shared1D import LoadData1D from graphdata.shared.figsizes import LogLogSize from graphdata.shared.shared import ExtendDictionary from graphdata.shared.shared import ProcessComplex from graphdata import plt from graphdata import np from graphdata import configs def loglog(filename,figsize=None,decades=None,xlim=None,ylim=None,\ complex_op=None,overwrite=False,**kwargs): """ Loglog graph of 1D data file using Matplotlib plt.loglog INPUTS: filename: string name of file containing 1D data to be plotted figsize: tuple (width,height) size of figure to be displayed xlim: np.array x-axis limits of graph ylim: np.array x-axis limits of graph decades: int number of decades of data below maximum to plot overwrite: bool add lines to an existing plt.semilogy graph if it exists (default is False which will create graph on a new figure) **kwargs: dictionary (optional) arguments to be passed onto plt.loglog plot OUTPUTS: ax : matplotlib.axes.Axes Matplotlib axes object, allows for setting limits and other manipulation of the axes (e.g. 
ax.set_xlim([0,1]) would set the graph x-limits to be between 0 and 1) """ x,y,auxDict = LoadData1D(filename) if complex_op is not None: y = ProcessComplex(complex_op,y) if decades is None: decades = configs._G['decades'] if xlim is None: xlim = [x[0],x[-1]] if ylim is None: ylim = [np.min(y),np.max(y)] figsize = LogLogSize(figsize) ExtendDictionary(auxDict,figsize=figsize,decades=decades,\ xlim=xlim,ylim=ylim,overwrite=overwrite) x,y,auxDict = ProcessData1D(x,y,auxDict) figsize = LogLogSize(figsize) if overwrite: labs = plt.get_figlabels() if "LogLog" not in labs: configs.defaultLS() else: configs.toggleLS() plt.figure("LogLog",figsize=figsize) else: configs.defaultLS() plt.figure(figsize=figsize) fig = plt.loglog(x,y,configs.LS,**kwargs) plt.grid(True) AuxPlotLabelLL1D(auxDict) if xlim: plt.xlim(xlim) if ylim: plt.ylim(ylim) plt.ion() plt.show() return fig
29.535714
96
0.646514
301d726bb49a99005fdbccd8050406dd9847256c
5,387
py
Python
integration-testing/tests/suites/test_premium_account.py
pwei1018/bcrs-testing
318845dede6ce5994b74b976d01f36a503036551
[ "Apache-2.0" ]
2
2020-10-23T22:08:34.000Z
2021-10-19T19:37:21.000Z
integration-testing/tests/suites/test_premium_account.py
pwei1018/bcrs-testing
318845dede6ce5994b74b976d01f36a503036551
[ "Apache-2.0" ]
6
2020-09-29T23:05:34.000Z
2022-01-29T20:59:08.000Z
integration-testing/tests/suites/test_premium_account.py
pwei1018/bcrs-testing
318845dede6ce5994b74b976d01f36a503036551
[ "Apache-2.0" ]
10
2020-09-29T23:05:46.000Z
2021-11-29T23:07:10.000Z
import datetime import json import requests import pytest import random from tests.suites.test_payment import TestPayment from tests.utilities.settings import get_settings, get_test_data, setup_access_data @pytest.mark.incremental @pytest.mark.parametrize('login_session', setup_access_data('PREMIUM', ['BCSC']), indirect=True, scope='class') @pytest.mark.usefixtures('setup_data') class TestPremiumAccount: __test__ = True def test_get_user_profile(self, testing_config, logger): """Test get user profile. After login, the user should be created in db.""" response = requests.get(f'{testing_config.auth_api_url}/users/@me', headers={'Authorization': f'Bearer {testing_config.keycloak_token}'}) assert response.status_code == 200 response_json = response.json() testing_config.user_id = response_json.get('keycloakGuid') def test_get_last_terms(self, testing_config, logger): """Get last version of termofuse.""" response = requests.get(f'{testing_config.auth_api_url}/documents/termsofuse', headers={'Authorization': f'Bearer {testing_config.keycloak_token}'}) assert response.status_code == 200 response_json = response.json() testing_config.terms_version = response_json.get('versionId') def test_accept_terms(self, testing_config, logger): """Test accept termofuser.""" input_data = json.dumps({'termsversion': testing_config.terms_version, 'istermsaccepted': True}) response = requests.patch(f'{testing_config.auth_api_url}/users/@me', headers={'Authorization': f'Bearer {testing_config.keycloak_token}', 'Content-Type': 'application/json'}, data=input_data) assert response.status_code == 200 def test_get_user_profile(self, testing_config, logger): """Test get user profile.""" response = requests.get(f'{testing_config.auth_api_url}/users/@me', headers={'Authorization': f'Bearer {testing_config.keycloak_token}'}) assert response.status_code == 200 response_json = response.json() testing_config.user_id = response_json.get('keycloakGuid') @pytest.mark.skip_login_as('bcsc_member') def 
test_link_bcol_account(self, testing_config, logger): """Test link bcol account.""" load_data = random.sample(get_settings().BCOL_USERS, 1)[0] input_data = json.dumps({ 'userId': load_data.username, 'password': load_data.password }) response = requests.post(f'{testing_config.auth_api_url}/bcol-profiles', headers={'Authorization': f'Bearer {testing_config.keycloak_token}', 'Content-Type': 'application/json'}, data=input_data) assert response.status_code == 200 response_json = response.json() @pytest.mark.skip_login_as('bcsc_member') def test_create_account(self, testing_config, logger): """Test create account.""" input_data = json.dumps(get_test_data(testing_config.test_data['org'])) response = requests.post(f'{testing_config.auth_api_url}/orgs', headers={'Authorization': f'Bearer {testing_config.keycloak_token}', 'Content-Type': 'application/json'}, data=input_data) assert response.status_code == 201 response_json = response.json() testing_config.org_id = response_json.get('id') def test_create_user_profile(self, testing_config, logger): """Test create user profile (contact information).""" input_data = json.dumps(get_test_data(testing_config.test_data['user_profile'])) response = requests.post(f'{testing_config.auth_api_url}/users/contacts', headers={'Authorization': f'Bearer {testing_config.keycloak_token}', 'Content-Type': 'application/json'}, data=input_data) assert response.status_code == 201 def test_get_account(self, testing_config, logger): """Test get account.""" response = requests.get(f'{testing_config.auth_api_url}/orgs/{testing_config.org_id}', headers={'Authorization': f'Bearer {testing_config.keycloak_token}'}) assert response.status_code == 200 def test_get_user_settings(self, testing_config, logger): """Test get user settings.""" response = requests.get(f'{testing_config.auth_api_url}/users/{testing_config.user_id}/settings', headers={'Authorization': f'Bearer {testing_config.keycloak_token}'}) assert response.status_code == 200 def 
test_get_user_notifications(self, testing_config, logger): """Test get user notifications.""" response = requests.get(f'{testing_config.auth_api_url}/users/{testing_config.user_id}/org/{testing_config.org_id}/notifications', headers={'Authorization': f'Bearer {testing_config.keycloak_token}'}) assert response.status_code == 200
51.304762
138
0.634305
302088ce171e1ce5fee7f69b6466cc5d52936180
3,960
py
Python
dingtalk/python/alibabacloud_dingtalk/workrecord_1_0/client.py
yndu13/dingtalk-sdk
700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f
[ "Apache-2.0" ]
15
2020-08-27T04:10:26.000Z
2022-03-07T06:25:42.000Z
dingtalk/python/alibabacloud_dingtalk/workrecord_1_0/client.py
yndu13/dingtalk-sdk
700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f
[ "Apache-2.0" ]
1
2020-09-27T01:30:46.000Z
2021-12-29T09:15:34.000Z
dingtalk/python/alibabacloud_dingtalk/workrecord_1_0/client.py
yndu13/dingtalk-sdk
700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f
[ "Apache-2.0" ]
5
2020-08-27T04:07:44.000Z
2021-12-03T02:55:20.000Z
# -*- coding: utf-8 -*- # This file is auto-generated, don't edit it. Thanks. from Tea.core import TeaCore from alibabacloud_tea_openapi.client import Client as OpenApiClient from alibabacloud_tea_openapi import models as open_api_models from alibabacloud_tea_util.client import Client as UtilClient from alibabacloud_dingtalk.workrecord_1_0 import models as dingtalkworkrecord__1__0_models from alibabacloud_tea_util import models as util_models from alibabacloud_openapi_util.client import Client as OpenApiUtilClient class Client(OpenApiClient): """ *\ """ def __init__( self, config: open_api_models.Config, ): super().__init__(config) self._endpoint_rule = '' if UtilClient.empty(self._endpoint): self._endpoint = 'api.dingtalk.com' def count_work_record( self, request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest, ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse: runtime = util_models.RuntimeOptions() headers = dingtalkworkrecord__1__0_models.CountWorkRecordHeaders() return self.count_work_record_with_options(request, headers, runtime) async def count_work_record_async( self, request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest, ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse: runtime = util_models.RuntimeOptions() headers = dingtalkworkrecord__1__0_models.CountWorkRecordHeaders() return await self.count_work_record_with_options_async(request, headers, runtime) def count_work_record_with_options( self, request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest, headers: dingtalkworkrecord__1__0_models.CountWorkRecordHeaders, runtime: util_models.RuntimeOptions, ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.user_id): query['userId'] = request.user_id real_headers = {} if not UtilClient.is_unset(headers.common_headers): real_headers = headers.common_headers if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token): 
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token req = open_api_models.OpenApiRequest( headers=real_headers, query=OpenApiUtilClient.query(query) ) return TeaCore.from_map( dingtalkworkrecord__1__0_models.CountWorkRecordResponse(), self.do_roarequest('CountWorkRecord', 'workrecord_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/workrecord/counts', 'json', req, runtime) ) async def count_work_record_with_options_async( self, request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest, headers: dingtalkworkrecord__1__0_models.CountWorkRecordHeaders, runtime: util_models.RuntimeOptions, ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.user_id): query['userId'] = request.user_id real_headers = {} if not UtilClient.is_unset(headers.common_headers): real_headers = headers.common_headers if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token): real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token req = open_api_models.OpenApiRequest( headers=real_headers, query=OpenApiUtilClient.query(query) ) return TeaCore.from_map( dingtalkworkrecord__1__0_models.CountWorkRecordResponse(), await self.do_roarequest_async('CountWorkRecord', 'workrecord_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/workrecord/counts', 'json', req, runtime) )
44.494382
150
0.716162
30220c4e2730501b7f29fa25506bdd0fdf76716c
16,677
py
Python
ehr_ml/clmbr/__init__.py
som-shahlab/ehr_ml
4f83ac5b882916a175f0d242b38d914d00bf8a7c
[ "MIT" ]
4
2021-03-12T21:41:37.000Z
2021-06-25T16:49:52.000Z
ehr_ml/clmbr/__init__.py
som-shahlab/ehr_ml
4f83ac5b882916a175f0d242b38d914d00bf8a7c
[ "MIT" ]
22
2020-11-19T00:04:27.000Z
2022-03-02T18:16:08.000Z
ehr_ml/clmbr/__init__.py
som-shahlab/ehr_ml
4f83ac5b882916a175f0d242b38d914d00bf8a7c
[ "MIT" ]
2
2021-05-12T13:11:46.000Z
2021-10-15T18:30:14.000Z
from __future__ import annotations import argparse import pickle import numpy as np import json import logging import math import glob import random import os import sys import datetime import time from functools import partial from pathlib import Path from collections import defaultdict from shutil import copyfile from tqdm import tqdm import sklearn.model_selection import sklearn.metrics import torch from ..extension.clmbr import * from .. import timeline from .. import ontology from .. import labeler from .dataset import DataLoader, convert_patient_data from .prediction_model import CLMBR from .trainer import Trainer from .utils import read_config, read_info, device_from_config from ..featurizer import ColumnValue, Featurizer from ..splits import read_time_split from ..utils import OnlineStatistics, set_up_logging from .opt import OpenAIAdam from typing import Mapping, Any, Dict, Optional, Tuple def check_dir_for_overwrite(dirname: str) -> bool: return bool( glob.glob(os.path.join(dirname, "*.json")) or glob.glob(os.path.join(dirname, "checkpoints")) ) def create_info_program() -> None: parser = argparse.ArgumentParser( description="Precompute training data summary statistics etc for CLMBR experiments" ) parser.add_argument( "input_data_dir", type=str, help="Location of the dataset extract to be used for CLMBR training", ) parser.add_argument( "save_dir", type=str, help="Location where model info is to be saved", ) parser.add_argument( "train_end_date", type=str, help="The end date for training" ) parser.add_argument( "val_end_date", type=str, help="The end date for validation. Should be later than the end date for training", ) parser.add_argument( "--min_patient_count", type=int, default=100, help="Only keep statistics on codes/terms that appear for this many patients (default 100)", ) parser.add_argument( "--excluded_patient_file", type=str, help="A file containing a list of patients to exclude from training. 
" "Any patient ID you plan to use for finetuning / evaluation should be " "listed in this file. If not provided, exclude_patient_ratio must be specified.", default=None, ) parser.add_argument( "--exclude_patient_ratio", type=float, default=None, help="Ratio of patients to exclude from pre-training between 0 and 1." " If provided, excluded patient IDs will " "be randomly selected and written out to a file " '"excluded_patient_ids.txt" in the save directory. If not ' "provided, excluded_patient_file must be specified.", ) parser.add_argument( "--seed", type=int, default=3451235, help="Random seed (default 3451235)", ) args = parser.parse_args() if args.save_dir is None: print("Error - must specify save_dir", file=sys.stderr) exit(1) else: save_dir = args.save_dir os.makedirs(save_dir, exist_ok=True) set_up_logging(os.path.join(save_dir, "create_info.log")) logging.info("Args: %s", str(args)) if check_dir_for_overwrite(save_dir): print( "Fatal error - model dir {} is not empty".format(save_dir), file=sys.stderr, ) logging.info("Fatal error - model dir {} is not empty".format(save_dir)) exit(1) ontologies_path = os.path.join(args.input_data_dir, "ontology.db") timelines_path = os.path.join(args.input_data_dir, "extract.db") train_end_date = datetime.datetime.fromisoformat(args.train_end_date) val_end_date = datetime.datetime.fromisoformat(args.val_end_date) if train_end_date == val_end_date: logging.info("Could not creat info with the same train and validation end date") exit(1) result = json.loads( create_info( timelines_path, ontologies_path, train_end_date, val_end_date, args.min_patient_count, ) ) result["extract_dir"] = args.input_data_dir result["extract_file"] = "extract.db" result["train_start_date"] = "1900-01-01" result["train_end_date"] = args.train_end_date result["val_start_date"] = args.train_end_date result["val_end_date"] = args.val_end_date result["seed"] = args.seed result["min_patient_count"] = args.min_patient_count def remove_pids(a, x): return 
[(p, c) for p, c in a if p not in x] if args.excluded_patient_file is not None: with open(args.excluded_patient_file) as f: pids = {int(a) for a in f} result["train_patient_ids_with_length"] = remove_pids( result["train_patient_ids_with_length"], pids ) result["val_patient_ids_with_length"] = remove_pids( result["val_patient_ids_with_length"], pids ) logging.info( "Removed %d patient IDs from file %s" % (len(pids), args.excluded_patient_file) ) elif args.exclude_patient_ratio is not None: assert 0 < args.exclude_patient_ratio and args.exclude_patient_ratio < 1 train_pids = set([x[0] for x in result["train_patient_ids_with_length"]]) val_pids = set([x[0] for x in result["val_patient_ids_with_length"]]) all_pids = train_pids.union(val_pids) excluded_pids = set( random.sample( list(all_pids), int(round(len(all_pids) * args.exclude_patient_ratio)), ) ) result["train_patient_ids_with_length"] = remove_pids( result["train_patient_ids_with_length"], excluded_pids ) result["val_patient_ids_with_length"] = remove_pids( result["val_patient_ids_with_length"], excluded_pids ) with open( os.path.join(args.save_dir, "excluded_patient_ids.txt"), "w" ) as f: for pid in excluded_pids: f.write("%d\n" % pid) logging.info( "Removed %d patient IDs using ratio %f" % (len(excluded_pids), args.exclude_patient_ratio) ) def count_frequent_items(counts: Mapping[Any, int], threshold: int) -> int: return len( {item for item, count in counts.items() if count >= threshold} ) logging.info( "Codes with >= 10 {}".format( count_frequent_items(result["code_counts"], 10) ) ) logging.info( "Codes with >= 25 {}".format( count_frequent_items(result["code_counts"], 25) ) ) logging.info( "Codes with >= 50 {}".format( count_frequent_items(result["code_counts"], 50) ) ) logging.info( "Codes with >= 100 {}".format( count_frequent_items(result["code_counts"], 100) ) ) logging.info( "Codes with >= 1000 {}".format( count_frequent_items(result["code_counts"], 1000) ) ) logging.info("Number codes: 
{}".format(len(result["code_counts"]))) logging.info("Number valid codes: {}".format(len(result["valid_code_map"]))) with open(os.path.join(args.save_dir, "info.json"), "w") as fp: json.dump(result, fp) def train_model() -> None: parser = argparse.ArgumentParser( description="Representation Learning Experiments" ) # paths parser.add_argument( "model_dir", type=str, help="Location where model logs and weights should be saved", ) parser.add_argument( "info_dir", type=str, help="Location where `clmbr_create_info` results were saved", ) parser.add_argument( "--extract_dir", action="store_true", help="Use the doctorai task definition", ) # model specification parser.add_argument( "--size", default=768, type=int, help="Dimensionality of the output embeddings", ) parser.add_argument( "--encoder_type", default="gru", choices=["gru", "lstm", "transformer"], help='the sequence encoder module type (default "gru")', ) parser.add_argument("--no_tied_weights", default=False, action="store_true") parser.add_argument( "--rnn_layers", default=1, type=int, help='number of recurrent layers to use if encoder_type is "gru" or ' '"lstm" (default 1), not used if encoder_type is "transformer"', ) parser.add_argument( "--dropout", default=0, type=float, help="dropout percentage (default 0)", ) # optimization specification parser.add_argument( "--batch_size", type=int, default=500, help="Batch size (default 500)" ) parser.add_argument( "--eval_batch_size", type=int, default=2000, help="Batch size during evaluation (default 2000)", ) parser.add_argument( "--epochs", type=int, default=50, help="Number of training epochs (default 50)", ) parser.add_argument( "--warmup_epochs", type=int, default=2, help="Number of warmup epochs (default 2)", ) parser.add_argument( "--lr", type=float, default=0.01, help="learning rate (default 0.01)" ) parser.add_argument( "--l2", default=0.01, type=float, help="l2 regularization strength (default 0.01)", ) parser.add_argument( "--device", default="cpu", 
help='Specify whether the model should be run on CPU or GPU. Can specify a specific GPU, e.g. "cuda:0" (default "cpu")', ) parser.add_argument("--code_dropout", type=float, default=0.2) # Day dropout added in reference to Lawrence's comment, # although Ethan mentioned it should be removed from the API parser.add_argument("--day_dropout", type=float, default=0.2) args = parser.parse_args() model_dir = args.model_dir os.makedirs(model_dir, exist_ok=True) if check_dir_for_overwrite(model_dir): print( "Fatal error - model dir {} is not empty".format(model_dir), file=sys.stderr, ) logging.info( "Fatal error - model dir {} is not empty".format(model_dir) ) exit(1) # Try to load info.json file; see create_info above for details. info = read_info(os.path.join(args.info_dir, "info.json")) copyfile( os.path.join(args.info_dir, "info.json"), os.path.join(model_dir, "info.json"), ) first_too_small_index = float("inf") for code, index in info["valid_code_map"].items(): if info["code_counts"][code] < 10 * info["min_patient_count"]: first_too_small_index = min(first_too_small_index, index) print(len(info["valid_code_map"]), flush=True) # Create and save config dictionary config = { "batch_size": args.batch_size, "eval_batch_size": args.eval_batch_size, "num_first": first_too_small_index, "num_second": len(info["valid_code_map"]) - first_too_small_index, "size": args.size, "lr": args.lr, "dropout": args.dropout, "encoder_type": args.encoder_type, "rnn_layers": args.rnn_layers, "tied_weights": not args.no_tied_weights, "l2": args.l2, "b1": 0.9, "b2": 0.999, "e": 1e-8, "epochs_per_cycle": args.epochs, "warmup_epochs": args.warmup_epochs, "code_dropout": args.code_dropout, "day_dropout": args.day_dropout, "model_dir": os.path.abspath(model_dir), } with open(os.path.join(model_dir, "config.json"), "w") as outfile: json.dump(config, outfile) set_up_logging(os.path.join(model_dir, "train.log")) logging.info("Args: %s", str(args)) dataset = PatientTimelineDataset( 
os.path.join(info["extract_dir"], "extract.db"), os.path.join(info["extract_dir"], "ontology.db"), os.path.join(args.info_dir, "info.json"), ) random.seed(info["seed"]) model = CLMBR(config, info).to(torch.device(args.device)) trainer = Trainer(model) trainer.train(dataset, use_pbar=False) def debug_model() -> None: parser = argparse.ArgumentParser( description="Representation Learning Experiments" ) parser.add_argument( "--model_dir", type=str, help="Override where model is saved" ) args = parser.parse_args() model_dir = args.model_dir config = read_config(os.path.join(model_dir, "config.json")) info = read_info(os.path.join(model_dir, "info.json")) use_cuda = torch.cuda.is_available() model = CLMBR(config, info).to(device_from_config(use_cuda=use_cuda)) model_data = torch.load(os.path.join(model_dir, "best"), map_location="cpu") model.load_state_dict(model_data) loaded_data = PatientTimelineDataset( os.path.join(info["extract_dir"], "extract.db"), os.path.join(info["extract_dir"], "ontology.db"), os.path.join(model_dir, "info.json"), ) ontologies = ontology.OntologyReader( os.path.join(info["extract_dir"], "ontology.db") ) timelines = timeline.TimelineReader( os.path.join(info["extract_dir"], "extract.db") ) reverse_map = {} for b, a in info["valid_code_map"].items(): word = ontologies.get_dictionary().get_word(b) reverse_map[a] = word reverse_map[len(info["valid_code_map"])] = "None" with DataLoader( loaded_data, threshold=config["num_first"], is_val=True, batch_size=1, seed=info["seed"], day_dropout=0, code_dropout=0, ) as batches: for batch in batches: if batch["task"][0].size()[0] == 0: continue values, non_text_loss = model(batch) values = torch.sigmoid(values) patient_id = int(batch["pid"][0]) patient = timelines.get_patient(patient_id) original_day_indices = batch["day_index"][0] indices, targets, seen_before, _, _, _ = batch["task"] day_indices = indices[:, 0] word_indices = indices[:, 1] ( all_non_text_codes, all_non_text_offsets, all_non_text_codes1, 
all_non_text_offsets1, all_day_information, all_positional_encoding, all_lengths, ) = batch["rnn"] all_non_text_codes = list(all_non_text_codes) all_non_text_offsets = list(all_non_text_offsets) + [ len(all_non_text_codes) ] print(patient_id, batch["pid"], original_day_indices) all_seen = set() for i, index in enumerate(original_day_indices): day = patient.days[index] print("------------------") print(patient_id, i, index, day.age / 365, day.date) words = set() for code in day.observations: for subword in ontologies.get_subwords(code): words.add(ontologies.get_dictionary().get_word(subword)) all_seen.add( ontologies.get_dictionary().get_word(subword) ) print("Source", words) wordsA = set() if (i + 1) < len(all_non_text_offsets): for code in all_non_text_codes[ all_non_text_offsets[i] : all_non_text_offsets[i + 1] ]: wordsA.add(reverse_map[code.item()]) print("Given", wordsA) day_mask = day_indices == i w = word_indices[day_mask] p = values[day_mask] t = targets[day_mask] f = seen_before[day_mask] items = [ ( t_i.item(), reverse_map[w_i.item()], p_i.item(), reverse_map[w_i.item()] in all_seen, w_i.item(), f_i.item(), ) for p_i, t_i, w_i, f_i in zip(p, t, w, f) ] items.sort(key=lambda x: (-x[0], x[1])) for a in items: print(a)
32.009597
128
0.59651
30246a2f75b29a8ba062fda27b38c1e541c4ddd0
190
py
Python
exercicios nivel 4/06-PA.py
Carlosxc-dev/LINGUAGEM_PYTHON
5d512038d9c841dea96e4ccea18da76265318199
[ "MIT" ]
1
2021-11-25T00:29:07.000Z
2021-11-25T00:29:07.000Z
exercicios nivel 4/06-PA.py
Carlosxc-dev/LINGUAGEM_PYTHON
5d512038d9c841dea96e4ccea18da76265318199
[ "MIT" ]
null
null
null
exercicios nivel 4/06-PA.py
Carlosxc-dev/LINGUAGEM_PYTHON
5d512038d9c841dea96e4ccea18da76265318199
[ "MIT" ]
null
null
null
prim = int(input('primeiro termo: ')) raz = int(input('razao: ')) dec = prim + (10 - 1) * raz for c in range(prim, dec + raz, raz): print(' {}'.format(c,), end=' ->') print('acabou')
19
38
0.547368
302482d09ddf1f774c52862c7051a379cb2cfac1
8,215
py
Python
scenarios.py
Sanghyun-Hong/DeepSloth
92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6
[ "MIT" ]
9
2020-12-16T04:55:57.000Z
2022-01-13T08:28:11.000Z
scenarios.py
Sanghyun-Hong/DeepSloth
92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6
[ "MIT" ]
null
null
null
scenarios.py
Sanghyun-Hong/DeepSloth
92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6
[ "MIT" ]
1
2021-10-11T06:21:04.000Z
2021-10-11T06:21:04.000Z
""" A script that partitions the dataset for transferability scenarios """ # basics import numpy as np from PIL import Image # torch... import torch # custom libs import utils # ------------------------------------------------------------------------------ # Misc. functions # ------------------------------------------------------------------------------ def update_numpy(acc, term, func): if acc is None: acc = term else: acc = func((acc, term)) return acc def get_class_wise_lists(n_classes_cifar10, return_test=False): if not return_test: class_wise_dataset = [] for n_class in range(n_classes_cifar10): train_data, train_labels, _, _ = af.get_cifar10_class_data(n_class) # don't use class_wise_dataset.append((train_data, train_labels)) return class_wise_dataset else: class_wise_dataset = [] test_class_wise_dataset = [] for n_class in range(n_classes_cifar10): train_data, train_labels, test_data, test_labels = af.get_cifar10_class_data(n_class) # don't use class_wise_dataset.append((train_data, train_labels)) test_class_wise_dataset.append((test_data, test_labels)) return class_wise_dataset, test_class_wise_dataset # ------------------------------------------------------------------------------ # Scenario related... # ------------------------------------------------------------------------------ def scenario_1_split(int_percentages=None): np.random.seed(0) """ Scenario 1) Train CIFAR10 models that use 10%, 25%, 50% of the full training set. Chooses p% of data in each class (and corresponding labels) Parameter int_percentages contains percentages as integers, NOT FLOATS! 
Returns: - percent_loaders (dict): each key p% contains an af.ManualData object containing p% of dataset (p% from each label) * Loader data contains p% of images (p% of class 0, ..., p% of class 9) - consecutive * Loader labels (np.ndarray): contains p% of labels (p% 0s, ..., p% 9s) - consecutive """ if int_percentages is None: int_percentages = [10, 25, 50, 100] print('Running scenario_1_split\n') n_classes_cifar10 = 10 # get a list containing CIFAR10 data class by class (class k at index k) class_wise_dataset = get_class_wise_lists(n_classes_cifar10) percent_loaders = {} for p in int_percentages: subset_data = None subset_labels = None for n_class in range(n_classes_cifar10): crt_train_data, crt_train_labels = class_wise_dataset[n_class] count = crt_train_data.shape[0] how_many_2_choose = int(count * p / 100.0) indexes = np.random.choice(np.arange(count), how_many_2_choose, replace=False) subset_data = update_numpy(acc=subset_data, term=np.copy(crt_train_data[indexes]), func=np.vstack) subset_labels = update_numpy(acc=subset_labels, term=np.copy(crt_train_labels[indexes]), func=np.hstack) # end for n_class print(f'p={p}, data: {subset_data.shape}, labels: {subset_labels.shape}\n') percent_loaders[p] = af.ManualData(data=subset_data, labels=subset_labels) # end for p np.random.seed(af.get_random_seed()) return percent_loaders def scenario_2_split(int_classes=None): np.random.seed(0) """ Scenario 2) Split CIFAR10 training set into non-overlapping 5 classes - 5 classes, 6 - 6 and 7 - 7. 
Parameter int_classes_left: - each value c is used to generate the two datasets that contain c classes Returns: - percent_loaders (dict): each key c contains a pair of af.ManualData meaning ( Dataset w c classes, another dataset c classes) * Loader data contains p% of images (p% of class 0, ..., p% of class 9) - consecutive * Loader labels (np.ndarray): contains p% of labels (p% 0s, ..., p% 9s) - consecutive """ if int_classes is None: int_classes = [5, 6, 7] print('Running scenario_2_split\n') n_classes_cifar10 = 10 # get a list containing CIFAR10 data class by class (class k at index k) class_wise_dataset, test_class_wise_dataset = get_class_wise_lists(n_classes_cifar10, return_test=True) all_classes = np.arange(n_classes_cifar10) class_loaders = {} for classes in int_classes: num_class_overlap = 2*(classes - 5) class_indexes_overlap = np.random.choice(all_classes, num_class_overlap, replace=False) left_unique_classes = np.random.choice([x for x in all_classes if x not in class_indexes_overlap], classes-num_class_overlap, replace=False) right_unique_classes = [x for x in all_classes if (x not in class_indexes_overlap) and (x not in left_unique_classes)] class_indexes_left = np.array(list(left_unique_classes) + list(class_indexes_overlap)) class_indexes_right = np.array(list(right_unique_classes) + list(class_indexes_overlap)) print(class_indexes_left) print(class_indexes_right) subset_data_left, subset_labels_left = None, None subset_data_right, subset_labels_right = None, None subset_test_data_left, subset_test_labels_left = None, None subset_test_data_right, subset_test_labels_right = None, None label_left = 0 label_right = 0 for n_class in all_classes: crt_train_data, crt_train_labels = class_wise_dataset[n_class] crt_test_data, crt_test_labels = test_class_wise_dataset[n_class] if n_class in class_indexes_left: new_train_labels = np.ones(crt_train_labels.shape) * label_left # we have to relabel the dataset because pytorch expects labels as 0,1,2,3,... 
subset_data_left = update_numpy(acc=subset_data_left, term=np.copy(crt_train_data), func=np.vstack) subset_labels_left = update_numpy(acc=subset_labels_left, term=np.copy(new_train_labels), func=np.hstack) new_test_labels = np.ones(crt_test_labels.shape) * label_left subset_test_data_left = update_numpy(acc=subset_test_data_left, term=np.copy(crt_test_data), func=np.vstack) subset_test_labels_left = update_numpy(acc=subset_test_labels_left, term=np.copy(new_test_labels), func=np.hstack) label_left += 1 if n_class in class_indexes_right: new_train_labels = np.ones(crt_train_labels.shape) * label_right # we have to relabel the dataset because pytorch expects labels as 0,1,2,3,... subset_data_right = update_numpy(acc=subset_data_right, term=np.copy(crt_train_data), func=np.vstack) subset_labels_right = update_numpy(acc=subset_labels_right, term=np.copy(new_train_labels), func=np.hstack) new_test_labels = np.ones(crt_test_labels.shape) * label_right subset_test_data_right = update_numpy(acc=subset_test_data_right, term=np.copy(crt_test_data), func=np.vstack) subset_test_labels_right = update_numpy(acc=subset_test_labels_right, term=np.copy(new_test_labels), func=np.hstack) label_right += 1 # end for n_class print(f'{classes}: train - data-left: {subset_data_left.shape}, labels-left: {subset_labels_left.shape}, data-right: {subset_data_right.shape}, labels-right: {subset_labels_right.shape}\n') print(f'{classes}: test - data-left: {subset_test_data_left.shape}, labels-left: {subset_test_labels_left.shape}, data-right: {subset_test_data_right.shape}, labels-right: {subset_test_labels_right.shape}\n') loaders_left = (af.ManualData(data=subset_data_left, labels=subset_labels_left), af.ManualData(data=subset_test_data_left, labels=subset_test_labels_left)) loaders_right = (af.ManualData(data=subset_data_right, labels=subset_labels_right), af.ManualData(data=subset_test_data_right, labels=subset_test_labels_right)) class_loaders[classes] = (loaders_left, loaders_right) 
np.random.seed(af.get_random_seed()) # end for class_left, class_right return class_loaders
45.893855
216
0.667925
3024da0c8cff966a108eb9eabba60d6ffdc885bc
2,878
py
Python
Gmail.py
renauddahou/appointment_bot
f78c4242c6d0eb1dfc2edfdca8c4f37b22d9cf93
[ "MIT" ]
3
2021-05-12T22:27:08.000Z
2022-03-24T14:50:20.000Z
Gmail.py
renauddahou/appointment_bot
f78c4242c6d0eb1dfc2edfdca8c4f37b22d9cf93
[ "MIT" ]
3
2021-12-26T12:38:57.000Z
2022-03-25T19:34:31.000Z
Gmail.py
renauddahou/appointment_bot
f78c4242c6d0eb1dfc2edfdca8c4f37b22d9cf93
[ "MIT" ]
4
2021-07-30T16:41:30.000Z
2022-02-01T14:48:45.000Z
# -*- coding: utf-8 -*- """ Created on Mon Aug 10 17:49:07 2020 @author: Amir """ import email import imaplib from email.header import decode_header import re def gmail_ckeck(email_address, password): mail = imaplib.IMAP4_SSL('imap.gmail.com') (retcode, capabilities) = mail.login(email_address,password) mail.list() mail.select('inbox') flag = False while 1: opt_list = [] mail = imaplib.IMAP4_SSL('imap.gmail.com') (retcode, capabilities) = mail.login(email_address, password) mail.list() mail.select('inbox') status, messages = mail.search(None, '(UNSEEN)') if messages[0].decode("utf-8") == '': flag = False else: res = messages[0].decode("utf-8") messages = res.split(' ') for i in range(len(messages)): typ, data = mail.fetch(messages[i],'(RFC822)') for response in data: if isinstance(response, tuple): msg = email.message_from_bytes(response[1]) subject = decode_header(msg["Subject"])[0][0] if isinstance(subject, bytes): subject = subject.decode() if subject == 'One-time Password (OTP) Confirmation Email': flag = True if msg.is_multipart(): for part in msg.walk(): # iterate over email parts # extract content type of email content_type = part.get_content_type() try: # get the email body body = part.get_payload(decode=True).decode() except: pass if content_type == "text/plain": opt = re.findall(r'(\d{6})', body) opt_list.append(opt) else: content_type = msg.get_content_type() # extract content type of email body = msg.get_payload(decode=True).decode() if content_type == "text/plain": opt = re.findall(r'(\d{6})', body) opt_list.append(opt) if flag: break else: pass mail.logout() return opt_list[-1]
35.530864
105
0.407575
3026fa5fec5a07d66102ae71523732602b37bf87
6,637
py
Python
Multi-agent Transfer RL/Transfer across tasks/Bayes-ToMoP/games.py
TJU-DRL-LAB/transfer-and-multi-task-reinforcement-learning
2d8c12c2b5a4865c02934b63091945d3e2c92e90
[ "MIT" ]
null
null
null
Multi-agent Transfer RL/Transfer across tasks/Bayes-ToMoP/games.py
TJU-DRL-LAB/transfer-and-multi-task-reinforcement-learning
2d8c12c2b5a4865c02934b63091945d3e2c92e90
[ "MIT" ]
null
null
null
Multi-agent Transfer RL/Transfer across tasks/Bayes-ToMoP/games.py
TJU-DRL-LAB/transfer-and-multi-task-reinforcement-learning
2d8c12c2b5a4865c02934b63091945d3e2c92e90
[ "MIT" ]
null
null
null
# coding=utf-8 import numpy as np import imageio from gym import spaces import tkinter as tk from PIL import Image, ImageTk import matplotlib.pyplot as plt import time CELL, BLOCK, AGENT_GOAL, OPPONENT_GOAL, AGENT, OPPONENT = range(6) WIN, LOSE = 5, -5 UP, RIGHT, DOWN, LEFT, HOLD = range(5) UNIT = 40 class Soccer(tk.Tk, object): playground = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1] action_map = { UP: np.array([-1, 0]), RIGHT: np.array([0, 1]), DOWN: np.array([1, 0]), LEFT: np.array([0, -1]), HOLD: np.array([0, 0])} def __init__(self): super(Soccer, self).__init__() self.size = 7 self.agent = np.array([3, 1]) self.opponent = np.array([3, 5]) self.grids = np.array(self.playground).reshape(self.size, self.size) self.agent_keep_ball = False self.action_space = [UP, RIGHT, DOWN, LEFT, HOLD] self.n_actions = len(self.action_space) self.n_features = 5 self.visualize() # low high to observe #self.observation_space = spaces.Discrete(7 * 7 * 2) def step(self, act_a, act_o): new_pos_a = self.agent + self.action_map[act_a] new_pos_o = self.opponent + self.action_map[act_o] reward, done, s_ = 0, False, [] # opponent win if self.grids[tuple(new_pos_o)] == 3 and not self.agent_keep_ball: reward = LOSE done = True # agent win if self.grids[tuple(new_pos_a)] == 2 and self.agent_keep_ball: reward = WIN done = True # valid check for opponent and agent if self.grids[tuple(new_pos_a)] in (1, 2, 3): new_pos_a = self.agent if self.grids[tuple(new_pos_o)] in (1, 2, 3): new_pos_o = self.opponent # collision if np.array_equal(new_pos_a, new_pos_o) and self.grids[tuple(new_pos_a)] != 1: self.agent_keep_ball = not self.agent_keep_ball #print(self.canvas.coords(self.agent_rect)) self.agent = new_pos_a self.opponent = new_pos_o self.canvas.delete(self.agent_rect) self.canvas.delete(self.opp_rect) self.agent_rect = self.canvas.create_rectangle(self.agent[1] * UNIT, self.agent[0] * UNIT, 
(self.agent[1] + 1) * UNIT, (self.agent[0] + 1) * UNIT, fill='red') self.opp_rect = self.canvas.create_rectangle(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='blue') self.canvas.delete(self.ball_rect) if self.agent_keep_ball: self.ball_rect = self.canvas.create_oval((self.agent[1] * UNIT, self.agent[0] * UNIT, (self.agent[1] + 1) * UNIT, (self.agent[0] + 1) * UNIT), fill='white') else: self.ball_rect = self.canvas.create_oval(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='white') s_ = [self.agent[0], self.agent[1], self.opponent[0], self.opponent[1]] if self.agent_keep_ball: s_.append(0) else: s_.append(1) s_ = np.array(s_[:5])/ 10 return s_, reward, done # reset position and ball def reset(self): self.agent = np.array([3, 1]) self.opponent = np.array([3, 5]) self.agent_keep_ball = False self.update() s_ = [self.agent[0], self.agent[1], self.opponent[0], self.opponent[1]] if self.agent_keep_ball: s_.append(0) else: s_.append(1) s_ = np.array(s_[:5])/ 10 return s_ # render array def render(self): m = np.copy(self.grids) m[tuple(self.agent)] = 4 m[tuple(self.opponent)] = 5 if self.agent_keep_ball: m[tuple(self.agent)] += 2 else: m[tuple(self.opponent)] += 2 #print(m, end='\n\n') self.update() return m.reshape(49) # render img def visualize(self): self.canvas = tk.Canvas(self, bg='white', height=self.size * UNIT, width=self.size * UNIT) # create grids for c in range(0, self.size * UNIT, UNIT): x0, y0, x1, y1 = c, 0, c, self.size * UNIT self.canvas.create_line(x0, y0, x1, y1) for r in range(0, self.size * UNIT, UNIT): x0, y0, x1, y1 = 0, r, self.size * UNIT, r self.canvas.create_line(x0, y0, x1, y1) m = np.copy(self.grids) m[tuple(self.agent)] = 4 m[tuple(self.opponent)] = 5 #print(m) for j in range(self.size): for i in range(self.size): if m[j, i] == 1: self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * 
UNIT, fill='black') elif m[j, i] == 2 or m[j, i] == 3: self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='white') elif m[j, i] == 0 or m[j, i] == 4 or m[j, i] == 5: self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='green') self.agent_rect = self.canvas.create_rectangle(self.agent[1] * UNIT, self.agent[0] * UNIT, (self.agent[1] + 1) * UNIT, (self.agent[0] + 1) * UNIT, fill='red') self.opp_rect = self.canvas.create_rectangle(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='blue') if self.agent_keep_ball: self.ball_rect = self.canvas.create_oval((self.agent[0] * UNIT, self.agent[0] * UNIT, (self.agent[1] + 1) * UNIT, (self.agent[1] + 1) * UNIT), fill='white') else: self.ball_rect = self.canvas.create_oval(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='white') # pack all self.canvas.pack() if __name__ == '__main__': env = Soccer() env.reset() # agent strategy agent_actions = [RIGHT, RIGHT, UP, RIGHT, RIGHT, RIGHT] # opponent strategy, you can initialize it randomly opponent_actions = [UP, LEFT, LEFT, LEFT, LEFT, LEFT, LEFT] for a_a, a_o in zip(agent_actions, opponent_actions): env.render() env.step(a_a, a_o) time.sleep(1) #env.after(100, run_maze) #env.mainloop() # env.render()
39.041176
178
0.552509
302802899cbe877d306af7d92c456a506768688c
103
py
Python
utils/__init__.py
riven314/AL-MDN
5f843e2e7c99f3ae36e8139ec0d8aaa4a78d4962
[ "BSD-Source-Code" ]
59
2021-10-13T22:59:19.000Z
2022-03-26T20:44:47.000Z
utils/__init__.py
jwchoi384/AL-MDN
54aaae4405c69b33998e5b5306c7c645780d473c
[ "BSD-Source-Code" ]
10
2021-11-02T06:35:24.000Z
2022-03-26T21:08:06.000Z
utils/__init__.py
jwchoi384/AL-MDN
54aaae4405c69b33998e5b5306c7c645780d473c
[ "BSD-Source-Code" ]
9
2021-10-14T09:48:05.000Z
2022-03-27T06:04:32.000Z
# Originated from https://github.com/amdegroot/ssd.pytorch from .augmentations import SSDAugmentation
25.75
58
0.825243
30292b7c2f6979a04692b3c67d475af07d5a822b
184
py
Python
modules/moderation/__init__.py
H3xadecimal/Nest
4422cf5725a71603ff58ccbf9f261b28ff96f70e
[ "MIT" ]
10
2018-04-21T07:29:42.000Z
2019-02-01T20:46:48.000Z
modules/moderation/__init__.py
H3xadecimal/Nest
4422cf5725a71603ff58ccbf9f261b28ff96f70e
[ "MIT" ]
2
2018-09-10T00:58:40.000Z
2019-12-22T11:19:58.000Z
modules/moderation/__init__.py
H3xadecimal/Nest
4422cf5725a71603ff58ccbf9f261b28ff96f70e
[ "MIT" ]
2
2018-09-09T23:07:56.000Z
2019-10-19T15:26:56.000Z
""" Basic moderation utilities for Birb. """ from .staff import CheckMods from .actions import ModActions def setup(bot): bot.add_cog(CheckMods()) bot.add_cog(ModActions())
15.333333
36
0.717391
302af14bd5d8ff0171af453caa11c3079af12cfe
227
py
Python
docker_compose/tweet_collector/config.py
lenaromanenko/twitter_sentiment_analysis
ac642ee4a6c2af01bd413326ff0bae406c2e2efe
[ "MIT" ]
1
2021-03-23T10:27:06.000Z
2021-03-23T10:27:06.000Z
docker_compose/tweet_collector/config.py
lenaromanenko/twitter_sentiment_analysis
ac642ee4a6c2af01bd413326ff0bae406c2e2efe
[ "MIT" ]
null
null
null
docker_compose/tweet_collector/config.py
lenaromanenko/twitter_sentiment_analysis
ac642ee4a6c2af01bd413326ff0bae406c2e2efe
[ "MIT" ]
1
2021-05-31T15:39:30.000Z
2021-05-31T15:39:30.000Z
import os API_KEY = os.getenv('API_KEY') API_SECRET = os.getenv('API_SECRET') ACCESS_TOKEN = os.getenv('ACCESS_TOKEN') ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET') POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
25.222222
54
0.784141
302cdd3bbef43498903450f4d3f2b0902dce4b22
605
py
Python
opm/presentation/presentation.py
Open-Prose-Metrics/open_prose_metrics_app-core
9df65edfe9ee9af0a0731c3f2e21ea25bced250c
[ "MIT" ]
null
null
null
opm/presentation/presentation.py
Open-Prose-Metrics/open_prose_metrics_app-core
9df65edfe9ee9af0a0731c3f2e21ea25bced250c
[ "MIT" ]
4
2021-04-30T21:38:10.000Z
2022-01-13T03:32:33.000Z
opm/presentation/presentation.py
Open-Prose-Metrics/open_prose_metrics_app-core
9df65edfe9ee9af0a0731c3f2e21ea25bced250c
[ "MIT" ]
1
2021-03-21T14:08:28.000Z
2021-03-21T14:08:28.000Z
class FrontEnd(object): def __init__(self): self.theme = "slate" self.logo = "" # path from static/ self.app_name = "Open Prose Metrics" self.report_title = "Results" self.theme_cdn = self.bootswatch_url(self.theme) def bootswatch_url(self, label): if label == "cyborg": return "https://stackpath.bootstrapcdn.com/bootswatch/3.4.1/cyborg/bootstrap.min.css" elif label == "slate": return "https://stackpath.bootstrapcdn.com/bootswatch/3.4.1/slate/bootstrap.min.css" else: return ""
37.8125
98
0.596694
302e0d78b3644fab203f5a2140185b921dd4135b
7,140
py
Python
tests/test_jid.py
nicfit/vexmpp
e67070d2822da8356345976fb15d365935b550a6
[ "MIT" ]
null
null
null
tests/test_jid.py
nicfit/vexmpp
e67070d2822da8356345976fb15d365935b550a6
[ "MIT" ]
349
2017-02-18T22:48:17.000Z
2021-12-13T19:50:23.000Z
tests/test_jid.py
nicfit/vexmpp
e67070d2822da8356345976fb15d365935b550a6
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ test_jid ---------------------------------- Tests for `vexmpp.xmpp.jid` module. """ import unittest from vexmpp.jid import Jid, _parse, _prep, InvalidJidError, internJid class TestJidParsing(unittest.TestCase): def setUp(self): pass def test_basic(self): self.assertEqual(_parse("user@host/resource"), ("user", "host", "resource")) self.assertEqual(_parse("user@host"), ("user", "host", None)) self.assertEqual(_parse("host"), (None, "host", None)) self.assertEqual(_parse("host/resource"), (None, "host", "resource")) self.assertEqual(_parse("foo/bar@baz"), (None, "foo", "bar@baz")) self.assertEqual(_parse("boo@foo/bar@baz"), ("boo", "foo", "bar@baz")) self.assertEqual(_parse("boo@foo/bar/baz"), ("boo", "foo", "bar/baz")) self.assertEqual(_parse("boo/foo@bar@baz"), (None, "boo", "foo@bar@baz")) self.assertEqual(_parse("boo/foo/bar"), (None, "boo", "foo/bar")) self.assertEqual(_parse("boo//foo"), (None, "boo", "/foo")) def test_noHost(self): ''' Test for failure on no host part. ''' self.assertRaises(InvalidJidError, _parse, "user@") def test_doubleAt(self): """ Test for failure on double @ signs. This should fail because @ is not a valid character for the host part of the JID. """ self.assertRaises(UnicodeError, _parse, "user@@host") def test_multipleAt(self): """ Test for failure on two @ signs. This should fail because @ is not a valid character for the host part of the JID. """ self.assertRaises(UnicodeError, _parse, "user@host@host") # Basic tests for case mapping. These are fallback tests for the # prepping done in twisted.words.protocols.jabber.xmpp_stringprep def test_prepCaseMapUser(self): """ Test case mapping of the user part of the JID. """ self.assertEqual(_prep("UsEr", "host", "resource"), ("user", "host", "resource")) def test_prepCaseMapHost(self): """ Test case mapping of the host part of the JID. 
""" self.assertEqual(_prep("user", "hoST", "resource"), ("user", "host", "resource")) def test_prepNoCaseMapResource(self): """ Test no case mapping of the resourcce part of the JID. """ self.assertEqual(_prep("user", "hoST", "resource"), ("user", "host", "resource")) self.assertNotEqual(_prep("user", "host", "Resource"), ("user", "host", "resource")) def tearDown(self): pass class TestJidObject(unittest.TestCase): def setUp(self): pass def test_ctor_types(self): self.assertRaises(ValueError, Jid, b"bytes") self.assertRaises(ValueError, Jid, (b"user", "host", "rsrc")) self.assertRaises(ValueError, Jid, ("user", b"host", "rsrc")) self.assertRaises(ValueError, Jid, ("user", "host", b"rsrc")) def tearDown(self): pass def test_noneArguments(self): """ Test that using no arguments raises an exception. """ self.assertRaises(TypeError, Jid) def test_attributes(self): """ Test that the attributes correspond with the JID parts. """ j = Jid("user@host/resource") self.assertEqual(j.user, "user") self.assertEqual(j.host, "host") self.assertEqual(j.resource, "resource") def test_userhost(self): """ Test the extraction of the bare JID. """ j = Jid("user@host/resource") self.assertEqual("user@host", j.bare) def test_userhostOnlyHost(self): """ Test the extraction of the bare JID of the full form host/resource. """ j = Jid("host/resource") self.assertEqual("host", j.bare) def test_userhostJID(self): """ Test getting a JID object of the bare JID. """ j1 = Jid("user@host/resource") j2 = internJid("user@host") self.assertEqual(id(j2), id(j1.bare_jid)) def test_userhostJIDNoResource(self): """ Test getting a JID object of the bare JID when there was no resource. """ j = Jid("user@host") self.assertEqual(id(j), id(j.bare_jid)) def test_fullHost(self): """ Test giving a string representation of the JID with only a host part. 
""" j = Jid((None, 'host', None)) self.assertEqual('host', j.full) def test_fullHostResource(self): """ Test giving a string representation of the JID with host, resource. """ j = Jid((None, 'host', 'resource')) self.assertEqual('host/resource', j.full) def test_fullUserHost(self): """ Test giving a string representation of the JID with user, host. """ j = Jid(('user', 'host', None)) self.assertEqual('user@host', j.full) def test_fullAll(self): """ Test giving a string representation of the JID. """ j = Jid(('user', 'host', 'resource')) self.assertEqual('user@host/resource', j.full) def test_equality(self): """ Test JID equality. """ j1 = Jid("user@host/resource") j2 = Jid("user@host/resource") self.assertNotEqual(id(j1), id(j2)) self.assertEqual(j1, j2) def test_equalityWithNonJIDs(self): """ Test JID equality. """ j = Jid("user@host/resource") try: res = (j == "user@host/resource") except NotImplementedError: pass else: self.assertFalse("Jid and strings should not be comparable") def test_inequality(self): """ Test JID inequality. """ j1 = Jid("user1@host/resource") j2 = Jid("user2@host/resource") self.assertNotEqual(j1, j2) def test_inequalityWithNonJIDs(self): """ Test JID equality. """ j = Jid("user@host/resource") self.assertNotEqual(j, 'user@host/resource') def test_hashable(self): """ Test JID hashability. """ j1 = Jid("user@host/resource") j2 = Jid("user@host/resource") self.assertEqual(hash(j1), hash(j2)) def test_unicode(self): """ Test unicode representation of JIDs. """ j = Jid(('user', 'host', 'resource')) self.assertEqual("user@host/resource", j.full) def test_repr(self): """ Test representation of JID objects. """ j = Jid(('user', 'host', 'resource')) self.assertEqual("Jid('user@host/resource')", repr(j))
29.874477
77
0.539076
302e8f1be7a7ffb783af9f6bd1bdc7f3405e6a18
745
py
Python
brainutils/context.py
jimbuho/django-brain
201237266a64e49b5c37f3d373ff6913dfbd099e
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
brainutils/context.py
jimbuho/django-brain
201237266a64e49b5c37f3d373ff6913dfbd099e
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
brainutils/context.py
jimbuho/django-brain
201237266a64e49b5c37f3d373ff6913dfbd099e
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ .. module:: dbu - context :platform: Unix, Windows :synopsis: Contexto Principal por defecto .. moduleauthor:: Diego Gonzalez <dgonzalez.jim@gmail.com> """ from . import configuration from . import models def load_context(request): """ Load Context Description Carga las variables de contexto principales :param request: :return: """ IS_TEST_MODE = configuration.isTESTMode() IS_MAINTENANCE = configuration.isMaintenanceMode() try: LANGUAGES = models.Language.objects.get_active() except: LANGUAGES = [] return { 'IS_TEST_MODE' : IS_TEST_MODE, 'IS_MAINTENANCE' : IS_MAINTENANCE, 'LANGUAGES' : LANGUAGES }
19.102564
58
0.640268
302ff0768eb4d4a5356d5db0b329d974eeccb455
689
py
Python
tests/trainer/test_multi_trainer.py
michael-aloys/knodle
393e7ba0558036828fb228875511977c40000ed5
[ "Apache-2.0" ]
71
2021-04-26T10:39:56.000Z
2022-03-28T14:36:16.000Z
tests/trainer/test_multi_trainer.py
michael-aloys/knodle
393e7ba0558036828fb228875511977c40000ed5
[ "Apache-2.0" ]
92
2021-04-08T12:49:38.000Z
2022-02-03T14:24:05.000Z
tests/trainer/test_multi_trainer.py
michael-aloys/knodle
393e7ba0558036828fb228875511977c40000ed5
[ "Apache-2.0" ]
10
2021-07-08T06:49:28.000Z
2022-01-15T23:28:13.000Z
from tests.trainer.generic import std_trainer_input_1 from knodle.trainer.multi_trainer import MultiTrainer def test_auto_train(std_trainer_input_1): ( model, model_input_x, rule_matches_z, mapping_rules_labels_t, y_labels ) = std_trainer_input_1 trainers = ["majority", "snorkel", "knn", "snorkel_knn"] trainer = MultiTrainer( name=trainers, model=model, mapping_rules_labels_t=mapping_rules_labels_t, model_input_x=model_input_x, rule_matches_z=rule_matches_z, ) trainer.train() metrics = trainer.test(model_input_x, y_labels) # Check whether the code ran up to here assert 2 == 2
25.518519
62
0.69521
3030b091442a66a1758237c47ca2d3b1d6082b1e
368
py
Python
core/models.py
dilshodbekikromov/weatherapp
105ca8c7a142a4afdff9bff7c758896f0335610c
[ "PostgreSQL" ]
1
2021-12-17T05:17:22.000Z
2021-12-17T05:17:22.000Z
core/models.py
GiovannaK/Weather-app-django
3138f03b34baa5866ed63f52dc4b759be6eb7aa4
[ "MIT" ]
null
null
null
core/models.py
GiovannaK/Weather-app-django
3138f03b34baa5866ed63f52dc4b759be6eb7aa4
[ "MIT" ]
null
null
null
from django.db import models from django.contrib.auth.models import User class City(models.Model): user = models.ManyToManyField(User, default=None,) name = models.CharField(max_length=255, verbose_name="Nome da cidade") def __str__(self): return self.name class Meta: verbose_name = "Cidade" verbose_name_plural = 'Cidades'
26.285714
74
0.701087
30325bbf9ed4d06063912ef3759ddd07cefbb4f5
133
py
Python
python/numba_chol.py
sevagh/cholesky
bf5404bdaab497409c517312d5a234c6a5ffe9cf
[ "BSD-3-Clause" ]
null
null
null
python/numba_chol.py
sevagh/cholesky
bf5404bdaab497409c517312d5a234c6a5ffe9cf
[ "BSD-3-Clause" ]
null
null
null
python/numba_chol.py
sevagh/cholesky
bf5404bdaab497409c517312d5a234c6a5ffe9cf
[ "BSD-3-Clause" ]
null
null
null
import numpy as np from numba import jit @jit def cholesky(in_arr, out_arr, n): np.copyto(out_arr, np.linalg.cholesky(in_arr))
16.625
50
0.736842
30332c35e591fa52e355d5d29a0ebf609792b6f8
191
py
Python
doxieapi/__init__.py
Stealthii/doxieapi
bf6803a579b9a9f768e3e602e3cb2fb1eb0e8f3c
[ "0BSD" ]
null
null
null
doxieapi/__init__.py
Stealthii/doxieapi
bf6803a579b9a9f768e3e602e3cb2fb1eb0e8f3c
[ "0BSD" ]
null
null
null
doxieapi/__init__.py
Stealthii/doxieapi
bf6803a579b9a9f768e3e602e3cb2fb1eb0e8f3c
[ "0BSD" ]
null
null
null
# -*- coding: utf-8 -*- """ doxieapi ~~~~~~~~ A Python library for the developer API of the Doxie Go Wi-Fi document scanner. """ from .api import DoxieScanner __all__ = ['DoxieScanner']
13.642857
78
0.65445
3033df96925128ff7539e6a8f86e4620e486d85d
18,447
py
Python
iogt_content_migration/management/commands/load_v1_db.py
Albert-Jokelin/iogt
79b1b86c11df7d61ddbbd4ce16303dfe4a1b8465
[ "BSD-2-Clause" ]
null
null
null
iogt_content_migration/management/commands/load_v1_db.py
Albert-Jokelin/iogt
79b1b86c11df7d61ddbbd4ce16303dfe4a1b8465
[ "BSD-2-Clause" ]
null
null
null
iogt_content_migration/management/commands/load_v1_db.py
Albert-Jokelin/iogt
79b1b86c11df7d61ddbbd4ce16303dfe4a1b8465
[ "BSD-2-Clause" ]
null
null
null
from pathlib import Path from django.core.management.base import BaseCommand from wagtail.core.models import Page, Site, Locale from django.core.files.images import ImageFile from wagtail.images.models import Image from wagtail_localize.models import Translation from wagtail_localize.views.submit_translations import TranslationCreator import home.models as models import psycopg2 import psycopg2.extras import json class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument( '--host', default='0.0.0.0', help='IoGT V1 database host' ) parser.add_argument( '--port', default='5432', help='IoGT V1 database port' ) parser.add_argument( '--name', default='postgres', help='IoGT V1 database name' ) parser.add_argument( '--user', default='postgres', help='IoGT V1 database user' ) parser.add_argument( '--password', default='', help='IoGT V1 database password' ) parser.add_argument( '--media-dir', required=True, help='Path to IoGT v1 media directory' ) parser.add_argument( '--skip-locales', action='store_true', help='Skip data of locales other than default language' ) def handle(self, *args, **options): self.db_connect(options) self.media_dir = options.get('media_dir') self.skip_locales = options.get('skip_locales') self.image_map = {} self.page_translation_map = {} self.v1_to_v2_page_map = {} self.clear() self.stdout.write('Existing site structure cleared') root = Page.get_first_root_node() self.migrate(root) def clear(self): models.FooterPage.objects.all().delete() models.FooterIndexPage.objects.all().delete() models.BannerPage.objects.all().delete() models.BannerIndexPage.objects.all().delete() models.Article.objects.all().delete() models.Section.objects.all().delete() models.SectionIndexPage.objects.all().delete() models.HomePage.objects.all().delete() Site.objects.all().delete() Image.objects.all().delete() def db_connect(self, options): connection_string = self.create_connection_string(options) self.stdout.write(f'DB connection string created, 
string={connection_string}') self.v1_conn = psycopg2.connect(connection_string) self.stdout.write('Connected to v1 DB') def __del__(self): try: self.v1_conn.close() self.stdout.write('Closed connection to v1 DB') except AttributeError: pass def create_connection_string(self, options): host = options.get('host', '0.0.0.0') port = options.get('port', '5432') name = options.get('name', 'postgres') user = options.get('user', 'postgres') password = options.get('password', '') return f"host={host} port={port} dbname={name} user={user} password={password}" def db_query(self, q): cur = self.v1_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) cur.execute(q) return cur def migrate(self, root): self.migrate_images() self.load_page_translation_map() home = self.create_home_page(root) section_index_page, banner_index_page, footer_index_page = self.create_index_pages(home) self.migrate_sections(section_index_page) self.migrate_articles(section_index_page) self.migrate_banners(banner_index_page) self.migrate_footers(footer_index_page) self.stop_translations() Page.fix_tree() def create_home_page(self, root): sql = 'select * from core_main main join wagtailcore_page page on main.page_ptr_id = page.id' cur = self.db_query(sql) main = cur.fetchone() cur.close() home = None if main: home = models.HomePage( title=main['title'], draft_title=main['draft_title'], seo_title=main['seo_title'], slug=main['slug'], live=main['live'], latest_revision_created_at=main['latest_revision_created_at'], first_published_at=main['first_published_at'], last_published_at=main['last_published_at'], ) root.add_child(instance=home) else: raise Exception('Could not find a main page in v1 DB') cur.close() cur = self.db_query('select * from wagtailcore_site') v1_site = cur.fetchone() cur.close() if v1_site: Site.objects.create( hostname=v1_site['hostname'], port=v1_site['port'], root_page=home, is_default_site=True, site_name=v1_site['site_name'] if v1_site['site_name'] else 'Internet of Good 
Things', ) else: raise Exception('Could not find site in v1 DB') return home def create_index_pages(self, homepage): section_index_page = models.SectionIndexPage(title='Sections') homepage.add_child(instance=section_index_page) banner_index_page = models.BannerIndexPage(title='Banners') homepage.add_child(instance=banner_index_page) footer_footer_page = models.FooterIndexPage(title='Footers') homepage.add_child(instance=footer_footer_page) return section_index_page, banner_index_page, footer_footer_page def migrate_images(self): cur = self.db_query('select * from wagtailimages_image') content_type = self.find_content_type_id('wagtailimages', 'image') for row in cur: image_file = self.open_image_file(row['file']) if image_file: image = Image.objects.create( title=row['title'], file=ImageFile(image_file, name=row['file'].split('/')[-1]), focal_point_x=row['focal_point_x'], focal_point_y=row['focal_point_y'], focal_point_width=row['focal_point_width'], focal_point_height=row['focal_point_height'], # uploaded_by_user='', ) image.get_file_size() image.get_file_hash() tags = self.find_tags(content_type, row['id']) if tags: image.tags.add(*tags) self.image_map.update({ row['id']: image }) cur.close() self.stdout.write('Images migrated') def find_content_type_id(self, app_label, model): cur = self.db_query(f"select id from django_content_type where app_label = '{app_label}' and model = '{model}'") content_type = cur.fetchone() cur.close() return content_type.get('id') def open_image_file(self, file): file_path = Path(self.media_dir) / file try: return open(file_path, 'rb') except: self.stdout.write(f"Image file not found: {file_path}") def find_tags(self, content_type, object_id): tags_query = 'select t.name from taggit_tag t join taggit_taggeditem ti on t.id = ti.tag_id where ti.content_type_id = {} and ti.object_id = {}' cur = self.db_query(tags_query.format(content_type, object_id)) tags = [tag['name'] for tag in cur] cur.close() return tags def migrate_sections(self, 
section_index_page): sql = "select * " \ "from core_sectionpage csp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \ "where csp.page_ptr_id = wcp.id " \ "and wcp.id = clr.page_id " \ "and clr.language_id = csl.id " if self.skip_locales: sql += " and locale = 'en' " sql += 'order by wcp.path' cur = self.db_query(sql) section_page_translations = [] for row in cur: if row['page_ptr_id'] in self.page_translation_map: section_page_translations.append(row) else: self.create_section(section_index_page, row) else: for row in section_page_translations: section = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']]) locale, __ = Locale.objects.get_or_create(language_code=row['locale']) self.translate_page(locale=locale, page=section) translated_section = section.get_translation_or_none(locale) if translated_section: translated_section.title = row['title'] translated_section.draft_title = row['draft_title'] translated_section.live = row['live'] translated_section.save(update_fields=['title', 'draft_title', 'slug', 'live']) self.stdout.write(f"Translated section, title={row['title']}") cur.close() def create_section(self, section_index_page, row): section = models.Section( title=row['title'], draft_title=row['draft_title'], show_in_menus=True, font_color='1CABE2', slug=row['slug'], path=section_index_page.path + row['path'][12:], depth=row['depth'], numchild=row['numchild'], live=row['live'], ) section.save() self.v1_to_v2_page_map.update({ row['page_ptr_id']: section }) self.stdout.write(f"saved section, title={section.title}") def migrate_articles(self, section_index_page): sql = "select * " \ "from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \ "where cap.page_ptr_id = wcp.id " \ "and wcp.id = clr.page_id " \ "and clr.language_id = csl.id " if self.skip_locales: sql += "and locale = 'en' " sql += " and wcp.path like '000100010002%'order by wcp.path" cur = self.db_query(sql) 
article_page_translations = [] for row in cur: if row['page_ptr_id'] in self.page_translation_map: article_page_translations.append(row) else: self.create_article(section_index_page, row) else: for row in article_page_translations: article = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']]) locale, __ = Locale.objects.get_or_create(language_code=row['locale']) self.translate_page(locale=locale, page=article) translated_article = article.get_translation_or_none(locale) if translated_article: translated_article.lead_image = self.image_map.get(row['image_id']) translated_article.title = row['title'] translated_article.draft_title = row['draft_title'] translated_article.live = row['live'] translated_article.body = self.map_article_body(row['body']) translated_article.save(update_fields=['lead_image', 'title', 'draft_title', 'slug', 'live', 'body']) self.stdout.write(f"Translated article, title={row['title']}") cur.close() def create_article(self, section_index_page, row): article = models.Article( lead_image=self.image_map.get(row['image_id']), title=row['title'], draft_title=row['draft_title'], slug=row['slug'], path=section_index_page.path + row['path'][12:], depth=row['depth'], numchild=row['numchild'], live=row['live'], body=self.map_article_body(row['body']), ) try: article.save() self.v1_to_v2_page_map.update({ row['page_ptr_id']: article }) except Page.DoesNotExist: self.stdout.write(f"Skipping page with missing parent: title={row['title']}") return self.stdout.write(f"saved article, title={article.title}") def map_article_body(self, v1_body): v2_body = json.loads(v1_body) for block in v2_body: if block['type'] == 'paragraph': block['type'] = 'markdown' return json.dumps(v2_body) def migrate_banners(self, banner_index_page): sql = "select * " \ "from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \ "where cbp.page_ptr_id = wcp.id " \ "and wcp.id = clr.page_id " \ "and clr.language_id = csl.id 
" if self.skip_locales: sql += " and locale = 'en' " sql += ' order by wcp.path' cur = self.db_query(sql) banner_page_translations = [] for row in cur: if row['page_ptr_id'] in self.page_translation_map: banner_page_translations.append(row) else: self.create_banner(banner_index_page, row) else: for row in banner_page_translations: banner = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']]) locale, __ = Locale.objects.get_or_create(language_code=row['locale']) try: self.translate_page(locale=locale, page=banner) except: continue translated_banner = banner.get_translation_or_none(locale) if translated_banner: translated_banner.banner_image = self.image_map.get(row['banner_id']) translated_banner.banner_link_page = self.v1_to_v2_page_map.get(row['banner_link_page_id']) translated_banner.title = row['title'] translated_banner.draft_title = row['draft_title'] translated_banner.live = row['live'] translated_banner.save(update_fields=['banner_image', 'title', 'draft_title', 'slug', 'live']) self.stdout.write(f"Translated banner, title={row['title']}") cur.close() def create_banner(self, banner_index_page, row): banner = models.BannerPage( banner_image=self.image_map.get(row['banner_id']), banner_link_page=self.v1_to_v2_page_map.get(row['banner_link_page_id']), title=row['title'], draft_title=row['draft_title'], slug=row['slug'], path=banner_index_page.path + row['path'][12:], depth=row['depth'], numchild=row['numchild'], live=row['live'], banner_description='' ) banner.save() self.v1_to_v2_page_map.update({ row['page_ptr_id']: banner }) self.stdout.write(f"saved banner, title={banner.title}") def migrate_footers(self, footer_index_page): sql = "select * " \ "from core_footerpage cfp, core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \ "where cfp.articlepage_ptr_id = cap.page_ptr_id " \ "and cap.page_ptr_id = wcp.id " \ "and wcp.id = clr.page_id " \ "and clr.language_id = csl.id " if self.skip_locales: sql 
+= " and locale = 'en' " sql += ' order by wcp.path' cur = self.db_query(sql) footer_page_translations = [] for row in cur: if row['page_ptr_id'] in self.page_translation_map: footer_page_translations.append(row) else: self.create_footer(footer_index_page, row) else: for row in footer_page_translations: footer = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']]) locale, __ = Locale.objects.get_or_create(language_code=row['locale']) self.translate_page(locale=locale, page=footer) translated_footer = footer.get_translation_or_none(locale) if translated_footer: translated_footer.lead_image = self.image_map.get(row['image_id']) translated_footer.title = row['title'] translated_footer.draft_title = row['draft_title'] translated_footer.live = row['live'] translated_footer.body = self.map_article_body(row['body']) translated_footer.save(update_fields=['lead_image', 'title', 'draft_title', 'slug', 'live', 'body']) self.stdout.write(f"Translated footer, title={row['title']}") cur.close() def create_footer(self, footer_index_page, row): footer = models.FooterPage( lead_image=self.image_map.get(row['image_id']), title=row['title'], draft_title=row['draft_title'], slug=row['slug'], path=footer_index_page.path + row['path'][12:], depth=row['depth'], numchild=row['numchild'], live=row['live'], body=self.map_article_body(row['body']), ) footer.save() self.v1_to_v2_page_map.update({ row['page_ptr_id']: footer }) self.stdout.write(f"saved footer, title={footer.title}") def load_page_translation_map(self): sql = "select * " \ "from core_pagetranslation" cur = self.db_query(sql) for row in cur: self.page_translation_map.update({ row['translated_page_id']: row['page_id'], }) cur.close() self.stdout.write('Page translation map loaded.') def translate_page(self, locale, page): translator = TranslationCreator(user=None, target_locales=[locale]) translator.create_translations(page) def stop_translations(self): Translation.objects.update(enabled=False) 
self.stdout.write('Translations stopped.')
40.277293
152
0.58725
3033fd8167635b4a38bf68dbe82419686667a557
2,968
py
Python
run_files/cip_area_threshold/tissue_data/run_CIP_relaxation_times.py
jessiesrr/VTdyn
6f71ef94525d95221f5bd5e5290f4df10648cd18
[ "MIT" ]
null
null
null
run_files/cip_area_threshold/tissue_data/run_CIP_relaxation_times.py
jessiesrr/VTdyn
6f71ef94525d95221f5bd5e5290f4df10648cd18
[ "MIT" ]
null
null
null
run_files/cip_area_threshold/tissue_data/run_CIP_relaxation_times.py
jessiesrr/VTdyn
6f71ef94525d95221f5bd5e5290f4df10648cd18
[ "MIT" ]
null
null
null
import numpy as np import libs.contact_inhibition_lib as lib #library for simulation routines import libs.data as data import libs.plot as vplt #plotting library from structure.global_constants import * import structure.initialisation as init from structure.cell import Tissue, BasicSpringForceNoGrowth import matplotlib.pyplot as plt import os """run a single voronoi tessellation model simulation""" OUTDIR = "CIP_cell_division_relaxation_time2/" l = 10 # population size N=l*l timend = 30. # simulation time (hours) timestep = 1.0 # time intervals to save simulation history rand = np.random.RandomState() simulation = lib.simulation_contact_inhibition_area_dependent #simulation routine imported from lib threshold_area_fraction=1.0 DEATH_RATE = 1./12 rates = (DEATH_RATE,DEATH_RATE/0.4) #death_rate,division_rate domain_size_multiplier=0.980940 eta,mu,dt=1.,-250,0.001 T_m_init=0.1 def get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime): history = lib.run_simulation(simulation,l,timestep,timend,rand,progress_on=True, init_time=None,til_fix=False,save_areas=True,cycle_phase=None,eta=eta,mu=mu,dt=dt,T_m=T_m_init, return_events=False,save_cell_histories=True,domain_size_multiplier=domain_size_multiplier, rates=rates,threshold_area_fraction=threshold_area_fraction) tissue = lib.run_return_final_tissue(lib.simulation_no_division(history[-1],dt,200,rand,eta),200) division_ready = lib.check_area_threshold(tissue.mesh,threshold_area_fraction) mother = rand.choice(division_ready) tissue.add_daughter_cells(mother,rand) tissue.remove(mother,True) tissue.update(dt) init_tissues = [tissue.copy() for T_m in T_m_vals] for T_m,tissue in zip(T_m_vals,init_tissues): tissue.Force = BasicSpringForceNoGrowth(mu,T_m) histories = [lib.run(lib.simulation_no_division(tissue,dt,int(relaxtime/dt),rand,eta),int(relaxtime/dt),1) for tissue in init_tissues] for T_m,history in zip(T_m_vals,histories): cell1,cell2 = len(history[0])-2,len(history[0])-1 sibling_distance = 
get_sibling_distance(history,cell1,cell2) mean_area = np.array([np.mean(tissue.mesh.areas[-2:]) for tissue in history]) time = np.arange(0,relaxtime,dt) data = np.vstack((time,sibling_distance,mean_area)) try: np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,data) except IOError: os.makedirs(OUTDIR) np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,data) def narg(tissue,i,j): try: return np.where(tissue.mesh.neighbours[i]==j)[0][0] except IndexError: return np.nan def get_sibling_distance(history,cell1,cell2): return np.array([tissue.mesh.distances[cell1][narg(tissue,cell1,cell2)] if narg(tissue,cell1,cell2)<100 else np.nan for tissue in history]) relaxtime = 2.0 T_m_vals=[0.001,0.01,0.1,0.25,0.5,1.0,2.0] get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime)
44.298507
143
0.738544
30350c07d409b3ddee9b9e26e89145475e5ec90e
120
py
Python
tmp/admin.py
infiltratorXYZ/invoicer
9b3542040b0e8268b2f725eeee8b806f9ca61a6d
[ "MIT" ]
null
null
null
tmp/admin.py
infiltratorXYZ/invoicer
9b3542040b0e8268b2f725eeee8b806f9ca61a6d
[ "MIT" ]
null
null
null
tmp/admin.py
infiltratorXYZ/invoicer
9b3542040b0e8268b2f725eeee8b806f9ca61a6d
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Invoice # Register your models here. admin.site.register(Invoice)
20
32
0.808333
3035188b0c9611cb3581b6c9c987990a72ba1ab9
17,920
py
Python
TAPI_RI/flask_server/tapi_server/models/connection_end_point.py
bartoszm/Snowmass-ONFOpenTransport
874e7a3f311d915d692b27fcbd24032c89064f00
[ "Apache-2.0" ]
null
null
null
TAPI_RI/flask_server/tapi_server/models/connection_end_point.py
bartoszm/Snowmass-ONFOpenTransport
874e7a3f311d915d692b27fcbd24032c89064f00
[ "Apache-2.0" ]
null
null
null
TAPI_RI/flask_server/tapi_server/models/connection_end_point.py
bartoszm/Snowmass-ONFOpenTransport
874e7a3f311d915d692b27fcbd24032c89064f00
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from tapi_server.models.base_model_ import Model from tapi_server.models.name_and_value import NameAndValue # noqa: F401,E501 from tapi_server.models.operational_state_pac import OperationalStatePac # noqa: F401,E501 from tapi_server.models.resource_spec import ResourceSpec # noqa: F401,E501 from tapi_server.models.termination_pac import TerminationPac # noqa: F401,E501 from tapi_server import util class ConnectionEndPoint(Model): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, uuid: str=None, name: List[NameAndValue]=None, operational_state: str=None, lifecycle_state: str=None, termination_direction: str=None, termination_state: str=None, layer_protocol_name: str=None, connectivity_service_end_point: str=None, parent_node_edge_point: List[str]=None, client_node_edge_point: List[str]=None, connection_port_direction: str=None, connection_port_role: str=None): # noqa: E501 """ConnectionEndPoint - a model defined in Swagger :param uuid: The uuid of this ConnectionEndPoint. # noqa: E501 :type uuid: str :param name: The name of this ConnectionEndPoint. # noqa: E501 :type name: List[NameAndValue] :param operational_state: The operational_state of this ConnectionEndPoint. # noqa: E501 :type operational_state: str :param lifecycle_state: The lifecycle_state of this ConnectionEndPoint. # noqa: E501 :type lifecycle_state: str :param termination_direction: The termination_direction of this ConnectionEndPoint. # noqa: E501 :type termination_direction: str :param termination_state: The termination_state of this ConnectionEndPoint. # noqa: E501 :type termination_state: str :param layer_protocol_name: The layer_protocol_name of this ConnectionEndPoint. 
# noqa: E501 :type layer_protocol_name: str :param connectivity_service_end_point: The connectivity_service_end_point of this ConnectionEndPoint. # noqa: E501 :type connectivity_service_end_point: str :param parent_node_edge_point: The parent_node_edge_point of this ConnectionEndPoint. # noqa: E501 :type parent_node_edge_point: List[str] :param client_node_edge_point: The client_node_edge_point of this ConnectionEndPoint. # noqa: E501 :type client_node_edge_point: List[str] :param connection_port_direction: The connection_port_direction of this ConnectionEndPoint. # noqa: E501 :type connection_port_direction: str :param connection_port_role: The connection_port_role of this ConnectionEndPoint. # noqa: E501 :type connection_port_role: str """ self.swagger_types = { 'uuid': str, 'name': List[NameAndValue], 'operational_state': str, 'lifecycle_state': str, 'termination_direction': str, 'termination_state': str, 'layer_protocol_name': str, 'connectivity_service_end_point': str, 'parent_node_edge_point': List[str], 'client_node_edge_point': List[str], 'connection_port_direction': str, 'connection_port_role': str } self.attribute_map = { 'uuid': 'uuid', 'name': 'name', 'operational_state': 'operational-state', 'lifecycle_state': 'lifecycle-state', 'termination_direction': 'termination-direction', 'termination_state': 'termination-state', 'layer_protocol_name': 'layer-protocol-name', 'connectivity_service_end_point': 'connectivity-service-end-point', 'parent_node_edge_point': 'parent-node-edge-point', 'client_node_edge_point': 'client-node-edge-point', 'connection_port_direction': 'connection-port-direction', 'connection_port_role': 'connection-port-role' } self._uuid = uuid self._name = name self._operational_state = operational_state self._lifecycle_state = lifecycle_state self._termination_direction = termination_direction self._termination_state = termination_state self._layer_protocol_name = layer_protocol_name self._connectivity_service_end_point = 
connectivity_service_end_point self._parent_node_edge_point = parent_node_edge_point self._client_node_edge_point = client_node_edge_point self._connection_port_direction = connection_port_direction self._connection_port_role = connection_port_role @classmethod def from_dict(cls, dikt) -> 'ConnectionEndPoint': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The connection-end-point of this ConnectionEndPoint. # noqa: E501 :rtype: ConnectionEndPoint """ return util.deserialize_model(dikt, cls) @property def uuid(self) -> str: """Gets the uuid of this ConnectionEndPoint. UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501 :return: The uuid of this ConnectionEndPoint. :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid: str): """Sets the uuid of this ConnectionEndPoint. UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501 :param uuid: The uuid of this ConnectionEndPoint. :type uuid: str """ self._uuid = uuid @property def name(self) -> List[NameAndValue]: """Gets the name of this ConnectionEndPoint. 
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501 :return: The name of this ConnectionEndPoint. :rtype: List[NameAndValue] """ return self._name @name.setter def name(self, name: List[NameAndValue]): """Sets the name of this ConnectionEndPoint. List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501 :param name: The name of this ConnectionEndPoint. :type name: List[NameAndValue] """ self._name = name @property def operational_state(self) -> str: """Gets the operational_state of this ConnectionEndPoint. :return: The operational_state of this ConnectionEndPoint. :rtype: str """ return self._operational_state @operational_state.setter def operational_state(self, operational_state: str): """Sets the operational_state of this ConnectionEndPoint. :param operational_state: The operational_state of this ConnectionEndPoint. :type operational_state: str """ allowed_values = ["DISABLED", "ENABLED"] # noqa: E501 if operational_state not in allowed_values: raise ValueError( "Invalid value for `operational_state` ({0}), must be one of {1}" .format(operational_state, allowed_values) ) self._operational_state = operational_state @property def lifecycle_state(self) -> str: """Gets the lifecycle_state of this ConnectionEndPoint. :return: The lifecycle_state of this ConnectionEndPoint. :rtype: str """ return self._lifecycle_state @lifecycle_state.setter def lifecycle_state(self, lifecycle_state: str): """Sets the lifecycle_state of this ConnectionEndPoint. :param lifecycle_state: The lifecycle_state of this ConnectionEndPoint. 
:type lifecycle_state: str """ allowed_values = ["PLANNED", "POTENTIAL_AVAILABLE", "POTENTIAL_BUSY", "INSTALLED", "PENDING_REMOVAL"] # noqa: E501 if lifecycle_state not in allowed_values: raise ValueError( "Invalid value for `lifecycle_state` ({0}), must be one of {1}" .format(lifecycle_state, allowed_values) ) self._lifecycle_state = lifecycle_state @property def termination_direction(self) -> str: """Gets the termination_direction of this ConnectionEndPoint. The overall directionality of the LP. - A BIDIRECTIONAL LP will have some SINK and/or SOURCE flowss. - A SINK LP can only contain elements with SINK flows or CONTRA_DIRECTION_SOURCE flows - A SOURCE LP can only contain SOURCE flows or CONTRA_DIRECTION_SINK flows # noqa: E501 :return: The termination_direction of this ConnectionEndPoint. :rtype: str """ return self._termination_direction @termination_direction.setter def termination_direction(self, termination_direction: str): """Sets the termination_direction of this ConnectionEndPoint. The overall directionality of the LP. - A BIDIRECTIONAL LP will have some SINK and/or SOURCE flowss. - A SINK LP can only contain elements with SINK flows or CONTRA_DIRECTION_SOURCE flows - A SOURCE LP can only contain SOURCE flows or CONTRA_DIRECTION_SINK flows # noqa: E501 :param termination_direction: The termination_direction of this ConnectionEndPoint. :type termination_direction: str """ allowed_values = ["BIDIRECTIONAL", "SINK", "SOURCE", "UNDEFINED_OR_UNKNOWN"] # noqa: E501 if termination_direction not in allowed_values: raise ValueError( "Invalid value for `termination_direction` ({0}), must be one of {1}" .format(termination_direction, allowed_values) ) self._termination_direction = termination_direction @property def termination_state(self) -> str: """Gets the termination_state of this ConnectionEndPoint. Indicates whether the layer is terminated and if so how. # noqa: E501 :return: The termination_state of this ConnectionEndPoint. 
:rtype: str """ return self._termination_state @termination_state.setter def termination_state(self, termination_state: str): """Sets the termination_state of this ConnectionEndPoint. Indicates whether the layer is terminated and if so how. # noqa: E501 :param termination_state: The termination_state of this ConnectionEndPoint. :type termination_state: str """ allowed_values = ["LP_CAN_NEVER_TERMINATE", "LT_NOT_TERMINATED", "TERMINATED_SERVER_TO_CLIENT_FLOW", "TERMINATED_CLIENT_TO_SERVER_FLOW", "TERMINATED_BIDIRECTIONAL", "LT_PERMENANTLY_TERMINATED", "TERMINATION_STATE_UNKNOWN"] # noqa: E501 if termination_state not in allowed_values: raise ValueError( "Invalid value for `termination_state` ({0}), must be one of {1}" .format(termination_state, allowed_values) ) self._termination_state = termination_state @property def layer_protocol_name(self) -> str: """Gets the layer_protocol_name of this ConnectionEndPoint. :return: The layer_protocol_name of this ConnectionEndPoint. :rtype: str """ return self._layer_protocol_name @layer_protocol_name.setter def layer_protocol_name(self, layer_protocol_name: str): """Sets the layer_protocol_name of this ConnectionEndPoint. :param layer_protocol_name: The layer_protocol_name of this ConnectionEndPoint. :type layer_protocol_name: str """ allowed_values = ["OTSiA", "OCH", "OTU", "ODU", "ETH", "ETY", "DSR"] # noqa: E501 if layer_protocol_name not in allowed_values: raise ValueError( "Invalid value for `layer_protocol_name` ({0}), must be one of {1}" .format(layer_protocol_name, allowed_values) ) self._layer_protocol_name = layer_protocol_name @property def connectivity_service_end_point(self) -> str: """Gets the connectivity_service_end_point of this ConnectionEndPoint. :return: The connectivity_service_end_point of this ConnectionEndPoint. 
:rtype: str """ return self._connectivity_service_end_point @connectivity_service_end_point.setter def connectivity_service_end_point(self, connectivity_service_end_point: str): """Sets the connectivity_service_end_point of this ConnectionEndPoint. :param connectivity_service_end_point: The connectivity_service_end_point of this ConnectionEndPoint. :type connectivity_service_end_point: str """ self._connectivity_service_end_point = connectivity_service_end_point @property def parent_node_edge_point(self) -> List[str]: """Gets the parent_node_edge_point of this ConnectionEndPoint. :return: The parent_node_edge_point of this ConnectionEndPoint. :rtype: List[str] """ return self._parent_node_edge_point @parent_node_edge_point.setter def parent_node_edge_point(self, parent_node_edge_point: List[str]): """Sets the parent_node_edge_point of this ConnectionEndPoint. :param parent_node_edge_point: The parent_node_edge_point of this ConnectionEndPoint. :type parent_node_edge_point: List[str] """ self._parent_node_edge_point = parent_node_edge_point @property def client_node_edge_point(self) -> List[str]: """Gets the client_node_edge_point of this ConnectionEndPoint. :return: The client_node_edge_point of this ConnectionEndPoint. :rtype: List[str] """ return self._client_node_edge_point @client_node_edge_point.setter def client_node_edge_point(self, client_node_edge_point: List[str]): """Sets the client_node_edge_point of this ConnectionEndPoint. :param client_node_edge_point: The client_node_edge_point of this ConnectionEndPoint. :type client_node_edge_point: List[str] """ self._client_node_edge_point = client_node_edge_point @property def connection_port_direction(self) -> str: """Gets the connection_port_direction of this ConnectionEndPoint. The orientation of defined flow at the EndPoint. # noqa: E501 :return: The connection_port_direction of this ConnectionEndPoint. 
:rtype: str """ return self._connection_port_direction @connection_port_direction.setter def connection_port_direction(self, connection_port_direction: str): """Sets the connection_port_direction of this ConnectionEndPoint. The orientation of defined flow at the EndPoint. # noqa: E501 :param connection_port_direction: The connection_port_direction of this ConnectionEndPoint. :type connection_port_direction: str """ allowed_values = ["BIDIRECTIONAL", "INPUT", "OUTPUT", "UNIDENTIFIED_OR_UNKNOWN"] # noqa: E501 if connection_port_direction not in allowed_values: raise ValueError( "Invalid value for `connection_port_direction` ({0}), must be one of {1}" .format(connection_port_direction, allowed_values) ) self._connection_port_direction = connection_port_direction @property def connection_port_role(self) -> str: """Gets the connection_port_role of this ConnectionEndPoint. Each EP of the FC has a role (e.g., working, protection, protected, symmetric, hub, spoke, leaf, root) in the context of the FC with respect to the FC function. # noqa: E501 :return: The connection_port_role of this ConnectionEndPoint. :rtype: str """ return self._connection_port_role @connection_port_role.setter def connection_port_role(self, connection_port_role: str): """Sets the connection_port_role of this ConnectionEndPoint. Each EP of the FC has a role (e.g., working, protection, protected, symmetric, hub, spoke, leaf, root) in the context of the FC with respect to the FC function. # noqa: E501 :param connection_port_role: The connection_port_role of this ConnectionEndPoint. :type connection_port_role: str """ allowed_values = ["SYMMETRIC", "ROOT", "LEAF", "TRUNK", "UNKNOWN"] # noqa: E501 if connection_port_role not in allowed_values: raise ValueError( "Invalid value for `connection_port_role` ({0}), must be one of {1}" .format(connection_port_role, allowed_values) ) self._connection_port_role = connection_port_role
43.814181
536
0.69135
30357aaf0a514c1636520e15938cba9dc811ec7d
901
py
Python
idom_jupyter/jupyter_server_extension.py
idom-team/idom-jupyter
21037d41c51d4d9e23cca4486a850f2915f27d29
[ "MIT" ]
28
2020-09-12T19:59:27.000Z
2022-03-14T10:08:13.000Z
idom_jupyter/jupyter_server_extension.py
idom-team/idom-jupyter
21037d41c51d4d9e23cca4486a850f2915f27d29
[ "MIT" ]
11
2020-10-05T06:54:43.000Z
2022-02-19T21:16:31.000Z
idom_jupyter/jupyter_server_extension.py
idom-team/idom-jupyter
21037d41c51d4d9e23cca4486a850f2915f27d29
[ "MIT" ]
null
null
null
from urllib.parse import urljoin from appdirs import user_data_dir from notebook.notebookapp import NotebookApp from idom.config import IDOM_WED_MODULES_DIR from tornado.web import StaticFileHandler from tornado.web import Application IDOM_WED_MODULES_DIR.current = user_data_dir("idom-jupyter", "idom-team") def _load_jupyter_server_extension(notebook_app: NotebookApp): web_app: Application = notebook_app.web_app base_url = web_app.settings["base_url"] route_pattern = urljoin(base_url, rf"_idom_web_modules/(.*)") web_app.add_handlers( host_pattern=".*$", host_handlers=[ ( route_pattern, StaticFileHandler, {"path": str(IDOM_WED_MODULES_DIR.current.absolute())}, ), ], ) # compat for older versions of Jupyter load_jupyter_server_extension = _load_jupyter_server_extension
29.064516
73
0.712542
303647975ecbe068790f55c360ba681d772f2c4f
1,033
py
Python
dobot_gym/envs/real/dobot_env.py
sandipan1/dobot_gym
acea98da2506653d45d55e15a036da583415f31d
[ "MIT" ]
1
2020-11-22T11:07:01.000Z
2020-11-22T11:07:01.000Z
dobot_gym/envs/real/dobot_env.py
sandipan1/dobot_gym
acea98da2506653d45d55e15a036da583415f31d
[ "MIT" ]
null
null
null
dobot_gym/envs/real/dobot_env.py
sandipan1/dobot_gym
acea98da2506653d45d55e15a036da583415f31d
[ "MIT" ]
1
2021-01-10T09:36:25.000Z
2021-01-10T09:36:25.000Z
## common class for only dobot with cam import gym from gym import utils from glob import glob from dobot_gym.utils.dobot_controller import DobotController from gym.spaces import MultiDiscrete class DobotRealEnv(gym.Env, utils.EzPickle): def __init__(self): super().__init__() # Find the port on which dobot is connected available_ports = glob('/dev/tty*USB*') if len(available_ports) == 0: print('no port found for Dobot Magician') exit(1) def_port = available_ports[0] self.dobot = DobotController(port=def_port) self.observation_space = None self.action_space = MultiDiscrete([3, 3, 3]) def compute_reward(self): return 0 def step(self, action): real_action = action - 1 self.dobot.moveangleinc(*real_action, r=0, q=1) reward = self.compute_reward(image, centroid) poses = self.dobot.get_dobot_joint() done = False info =None return poses,reward, done, info
28.694444
60
0.648596
3037ce4b050caa1080564ccd27af84ba3f81c62a
1,609
py
Python
Calibration/LumiAlCaRecoProducers/test/crab3_raw_corrC.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
Calibration/LumiAlCaRecoProducers/test/crab3_raw_corrC.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
Calibration/LumiAlCaRecoProducers/test/crab3_raw_corrC.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
######################### #Author: Sam Higginbotham ######################## from WMCore.Configuration import Configuration config = Configuration() #name='Pt11to30' config.section_("General") config.General.requestName = 'PCC_Run2017E_Corrections' config.General.workArea = 'RawPCCZeroBias2017' config.section_("JobType") config.JobType.pluginName = 'Analysis' config.JobType.psetName = 'raw_corr_Random_cfg.py' config.JobType.allowUndistributedCMSSW = True config.JobType.outputFiles = ['rawPCC.csv'] config.JobType.inputFiles = ['c.db'] config.section_("Data") #config.Data.inputDataset = '/AlCaLumiPixels/Run2017E-AlCaPCCZeroBias-PromptReco-v1/ALCARECO' config.Data.userInputFiles=['/store/data/Run2017E/AlCaLumiPixels/ALCARECO/AlCaPCCRandom-PromptReco-v1/000/303/832/00000/E6B8ACA4-95A4-E711-9AA2-02163E014793.root'] #config.Data.lumiMask = '' #config.Data.runRange='303382'#,297283,297278,297280,297281,297271,297227,297230,297276,297261,297266' config.Data.ignoreLocality = True #useParent = True config.Data.inputDBS = 'global' #config.Data.splitting = 'LumiBased' config.Data.splitting = 'FileBased' config.Data.publication = False config.Data.unitsPerJob = 1000 #config.Data.totalUnits = -1 #config.Data.publishDbsUrl = 'test' config.Data.outputDatasetTag = 'PCC_AlCaLumiPixels_Run2017C_1kLS_NoZeroes' config.Data.outLFNDirBase = '/store/group/comm_luminosity/PCC/ForLumiComputations/2017/5Feb2018' config.section_("Site") config.Site.storageSite = 'T2_CH_CERN' config.Site.whitelist=['T2_FR_CCIN2P3','T2_IT_Pisa','T2_UK_London_IC','T2_HU_Budapest'] #config.Site.whitelist=['T2_FR_CCIN2P3']
36.568182
163
0.778745
3037ecccd158f87250f163febc7c04a882e857b4
6,819
py
Python
active_frame.py
binhhoangtieu/C3D-tensorflow
d85ef6156abc7fcdb4ab91e5b47a50c5ef5123c6
[ "MIT" ]
1
2019-02-11T15:47:52.000Z
2019-02-11T15:47:52.000Z
active_frame.py
binhhoangtieu/C3D-tensorflow
d85ef6156abc7fcdb4ab91e5b47a50c5ef5123c6
[ "MIT" ]
null
null
null
active_frame.py
binhhoangtieu/C3D-tensorflow
d85ef6156abc7fcdb4ab91e5b47a50c5ef5123c6
[ "MIT" ]
1
2018-12-04T04:55:19.000Z
2018-12-04T04:55:19.000Z
import cv2 import os import glob import numpy as np from operator import itemgetter # import matplotlib.pyplot as plt import math import scipy.stats as stats def main(): video_dir = './UCF-101' #./testdata result_dir = './UCF101-OF' #test-image loaddata(video_dir = video_dir, depth = 24, dest_forder=result_dir) def save_image_to_file(frame_array, folder): for i in range(np.size(frame_array,axis=0)): cv2.imwrite(folder +"/" + format(i,'05d')+'.jpg', frame_array[i]) def loaddata(video_dir, depth, dest_forder): #video_dir can contain sub_directory dirs = os.listdir(video_dir) class_number = -1 #pbar = tqdm(total=len(files)) for dir in dirs: path = os.path.join(video_dir, dir, '*.avi') files = sorted(glob.glob(path),key=lambda name: path ) for filename in files: print('Extracting file:',filename) # frame_array = video3d_overlap(filename, depth) # frame_array = video3d_selected_active_frame(filename, depth) # frame_array = full_selected_active_frame(filename, depth) frame_array = video3d_opticalflow(filename, depth) newdir = dir + "/" + os.path.splitext(os.path.basename(filename))[0] directory = os.path.join(dest_forder,newdir) if not os.path.exists(directory): os.makedirs(directory) save_image_to_file(frame_array, directory) def active_frames(frame_array): d=[] #euclid distance frames =[] for i in range(np.size(frame_array,axis=0)-1): d.append((np.linalg.norm(frame_array[i+1]-frame_array[i]),i,0)) #Sort d[i] accending under first column of di d.sort(key=itemgetter(0)) #get the order of active frame d = normal_distribution(d) #assign each d one value based on normal distribution d.sort(key=itemgetter(1)) #re_order frames.append(frame_array[0]) for i in range(1,np.size(d,axis=0)): temp_frame = frame_array[i] * d[i][2] frames.append(temp_frame) temp_frame = np.sum(frames, axis = 0) temp_frame = cv2.normalize(temp_frame, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U) return np.array(temp_frame) #This function select numbers of the most active frame in 
a segment def selected_active_frame(frame_array): max_euclidean_distance = 0 temp_frame = frame_array[0] #assign first frame max = 0 for i in range(np.size(frame_array,axis=0)-1): euclidean_distant = np.linalg.norm(frame_array[i+1]-frame_array[i]) if euclidean_distant > max_euclidean_distance: max_euclidean_distance = euclidean_distant temp_frame = frame_array[i+1] max = i+1 # print(max) return np.array(temp_frame) #this function get the most active frame def full_selected_active_frame(filename, depth): cap_images = read_video_from_file(filename) framearray = [] distance = [] for i in range(np.size(cap_images,axis=0)-1): distance.append((np.linalg.norm(cap_images[i+1]-cap_images[i]),i+1)) frames = [item[1] for item in sorted(distance,key = itemgetter(0))[-depth:]] frames.sort() for i in range(np.size(frames,axis=0)): framearray.append(cap_images[frames[i]]) # print(frames[i]) return framearray def video3d_selected_active_frame(filename, depth): cap_images = read_video_from_file(filename) framearray = [] flatten_framearray = [] nframe = np.size(cap_images,axis = 0) frames = [np.int(x * nframe / depth) for x in range(depth)] # print(nframe, frames) for i in range(np.size(frames,axis=0)): if i < np.size(frames,axis=0)-1: flatten_framearray = cap_images[frames[i]:frames[i+1]] # print(frames[i],frames[i+1]) else: #last frame flatten_framearray = cap_images[frames[i]:nframe] # print(frames[i], nframe) # newframe = selected_active_frame(flatten_framearray) # framearray.append(newframe) return np.array(framearray) def video3d_overlap(filename, depth = 16, overlap = 5, ): cap_images = read_video_from_file(filename) frame_array = [] flatten_framearray = [] nframe = np.size(cap_images,axis = 0) frames = [np.int(x * nframe / depth) for x in range(depth)] fromframe = 0 toframe = 0 for i in range(np.size(frames)): fromframe = frames[i] - overlap toframe = frames[i] + overlap if fromframe < 0: fromframe = 0 if toframe > nframe-1: toframe = nframe-1 flatten_framearray = 
cap_images[fromframe:toframe] frame = active_frames(flatten_framearray) frame_array.append(frame) return np.array(frame_array) def read_video_from_file(filename): video_cap = cv2.VideoCapture(filename) nframe = np.int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT)) frameWidth = np.int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) frameHeight = np.int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) j = 0 ret = True cap_images = np.empty((nframe, frameHeight, frameWidth, 3)) while (j < nframe and ret): ret, cap_images[j] = video_cap.read() if ret != True: cap_images = cap_images[0:j-1] break else: j += 1 return cap_images def normal_distribution(d): dmax = max(l[0] for l in d) dmin = min(l[0] for l in d) mean = (dmax - dmin)/2 sd = (mean - dmin)/3 for i in range(np.size(d,axis=0)): temp = list(d[i]) if dmax == dmin: #2frame is definitely the same temp[2] = 1 else: # temp[2] = 5*i+1 temp[2] = alpha(16,i) # temp[2] = normpdf(i,mean,sd) # temp[2] = stats.norm(mean,sd).pdf(i) d[i] = tuple(temp) return d def video3d_opticalflow(filename, depth): framearray = [] cap_images = read_video_from_file(filename) nframe = np.size(cap_images,axis = 0) frames = [np.int(x * nframe / depth) for x in range(depth)] fromframe = 0 toframe = 0 cap_images = np.asarray(cap_images, dtype=np.float32) for i in range(np.size(frames)): fromframe = frames[i] toframe = frames[i] + 1 if toframe > nframe-1: fromframe = nframe-2 toframe = nframe-1 prevframe = cv2.cvtColor(cap_images[fromframe],cv2.COLOR_BGR2GRAY) nextframe = cv2.cvtColor(cap_images[toframe],cv2.COLOR_BGR2GRAY) hsvImg = np.zeros((np.size(cap_images[fromframe],axis=0), np.size(cap_images[fromframe],axis=1),3)) hsvImg[..., 1] = 0 flow = cv2.calcOpticalFlowFarneback(prevframe, nextframe, None, 0.5, 3, 15, 3, 5, 1.2, 0) mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1]) hsvImg[..., 0] = 0.5 * ang * 180 / np.pi hsvImg[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX) hsvImg = np.asarray(hsvImg,dtype=np.float32) frame = cv2.cvtColor(hsvImg, 
cv2.COLOR_HSV2BGR) framearray.append(frame) return np.array(framearray) #https://en.wikipedia.org/wiki/Normal_distribution#Probability_density_function def normpdf(x, mean, sd): var = float(sd)**2 pi = 3.1415926 denom = (2*pi*var)**.5 num = math.exp(-(float(x)-float(mean))**2/(2*var)) return num/denom def alpha(T, t): return 2*(T-t+1)-(T+1)*(Harmonic_number(T)-Harmonic_number(t-1)) def Harmonic_number(n): if n==0: return 0 return sum(1.0/i for i in range(1,n+1)) if __name__ == '__main__': main()
31.569444
110
0.706555
303842a63bcf998cc97937b86976891ee5b81ac2
5,547
py
Python
lift-learn/metrics/_values.py
smn-ailab/lift-learn
158b8be554aad49033ee2eacf14fbbeb2418d6f7
[ "MIT" ]
1
2019-03-12T11:07:16.000Z
2019-03-12T11:07:16.000Z
lift-learn/metrics/_values.py
smn-ailab/lift-learn
158b8be554aad49033ee2eacf14fbbeb2418d6f7
[ "MIT" ]
1
2019-02-15T06:21:33.000Z
2019-02-20T01:03:54.000Z
lift-learn/metrics/_values.py
smn-ailab/lift-learn
158b8be554aad49033ee2eacf14fbbeb2418d6f7
[ "MIT" ]
null
null
null
"""Metrics to assess performance on ite prediction task.""" from typing import Optional import numpy as np import pandas as pd def expected_response(y: np.ndarray, w: np.ndarray, policy: np.ndarray, mu: Optional[np.ndarray]=None, ps: Optional[np.ndarray]=None) -> float: """Estimate expected response. Parameters ---------- y: array-like of shape = (n_samples) Observed target values. w: array-like of shape = shape = (n_samples) Treatment assignment variables. policy: array-like of shape = (n_samples) Estimated treatment policy. mu: array-like of shape = (n_samples, n_trts), optional Estimated potential outcomes. ps: array-like of shape = (n_samples, n_trts), optional Estimated propensity scores. Returns ------- expected_response: float Estimated expected_response. """ mu = np.zeros((w.shape[0], np.unique(w).shape[0])) if mu is None else mu ps = pd.get_dummies(w).mean(axis=0).values if ps is None else ps indicator = np.array(w == policy, dtype=int) expected_response = np.mean(mu[np.arange(w.shape[0]), policy] + (y - mu[np.arange(w.shape[0]), policy]) * indicator / ps[w]) return expected_response def ips_value(y: np.ndarray, w: np.ndarray, policy: np.ndarray, ps: Optional[np.ndarray]=None) -> float: """Decision Value Estimator based on Inverse Propensity Score Weighting method. Parameters ---------- y: array-like of shape = (n_samples) Observed target values. w: array-like of shape = shape = (n_samples) Treatment assignment indicators. policy: array-like of shape = (n_samples) Estimated decision model. ps: array-like of shape = (n_samples), optional Estimated propensity scores. Returns ------- decision_value: float Estimated decision value using Inverse Propensity Score Weighting method. References ---------- [1] Y. Zhao, X. Fang, D. S. Levi: Uplift modeling with multiple treatments and general response types, 2017. [2] A. Schuler, M. Baiocchi, R. Tibshirani, N. Shah: A comparison of methods for model selection when estimating individual treatment effects, 2018. 
""" if not isinstance(y, np.ndarray): raise TypeError("y must be a numpy.ndarray.") if not isinstance(w, np.ndarray): raise TypeError("w must be a numpy.ndarray.") if not isinstance(policy, np.ndarray): raise TypeError("policy must be a numpy.ndarray.") if ps is None: trts_probs = pd.get_dummies(w).mean(axis=0).values ps = np.ones((w.shape[0], np.unique(w).shape[0])) * np.expand_dims(trts_probs, axis=0) else: assert (np.max(ps) < 1) and (np.min(ps) > 0), "ps must be strictly between 0 and 1." treatment_matrix = pd.get_dummies(w).values if np.unique(policy).shape[0] == np.unique(w).shape[0]: policy = pd.get_dummies(policy).values else: diff = np.setdiff1d(np.unique(w), np.unique(policy)) policy = pd.get_dummies(policy).values for _diff in diff: policy = np.insert(policy, _diff, 0, axis=1) indicator_matrix = policy * treatment_matrix outcome_matrix = np.expand_dims(y, axis=1) * treatment_matrix decision_value = np.mean(np.sum(indicator_matrix * (outcome_matrix / ps), axis=1)) return decision_value def dr_value(y: np.ndarray, w: np.ndarray, policy: np.ndarray, mu: np.ndarray, ps: Optional[np.ndarray]=None) -> float: """Decision Value Estimator based on Doubly Robust method. Parameters ---------- y: array-like of shape = (n_samples) Observed target values. w: array-like of shape = (n_samples) Treatment assignment indicators. policy: array-like of shape = (n_samples) Estimated decision model. ps: array-like of shape = (n_samples) Estimated propensity scores. mu: array-like of shape = (n_samples, n_treatments), optional Estimated potential outcome for each treatment. Returns ------- decision_value: float Estimated decision value using Doubly Robust method. References ---------- [1] A. Schuler, M. Baiocchi, R. Tibshirani, N. Shah: A comparison of methods for model selection when estimating individual treatment effects, 2018. 
""" if not isinstance(y, np.ndarray): raise TypeError("y must be a numpy.ndarray.") if not isinstance(w, np.ndarray): raise TypeError("w must be a numpy.ndarray.") if not isinstance(policy, np.ndarray): raise TypeError("policy must be a numpy.ndarray.") if not isinstance(mu, np.ndarray): raise TypeError("mu must be a numpy.ndarray.") if ps is None: trts_probs = pd.get_dummies(w).mean(axis=0).values ps = np.ones((w.shape[0], np.unique(w).shape[0])) * np.expand_dims(trts_probs, axis=0) else: assert (np.max(ps) < 1) and (np.min(ps) > 0), "ps must be strictly between 0 and 1." treatment_matrix = pd.get_dummies(w).values policy = pd.get_dummies(policy).values diff = np.setdiff1d(np.unique(w), np.unique(policy)) for _diff in diff: policy = np.insert(policy, _diff, 0, axis=1) indicator_matrix = policy * treatment_matrix outcome_matrix = np.expand_dims(y, axis=1) * treatment_matrix decision_value = np.mean(np.sum(treatment_matrix * mu + indicator_matrix * (outcome_matrix - mu) / ps, axis=1)) return decision_value
36.493421
152
0.650442
30393f74ae0c5c36f82aebb7e0b95f7f112a1231
1,133
py
Python
data_parser.py
JanoHorvath/k-means-clustering
c84e858c3e2bb417ffea11d441797a15c659a7ee
[ "MIT" ]
null
null
null
data_parser.py
JanoHorvath/k-means-clustering
c84e858c3e2bb417ffea11d441797a15c659a7ee
[ "MIT" ]
null
null
null
data_parser.py
JanoHorvath/k-means-clustering
c84e858c3e2bb417ffea11d441797a15c659a7ee
[ "MIT" ]
null
null
null
from random import randint class Dataset: def get_mock_scattered_dataset(self, numberOf, x_upper_bound, y_upper_bound): """ Mock 2D dataset with scattered data points. """ points = [] for i in range(numberOf): point = [randint(0,x_upper_bound), randint(0,y_upper_bound), 'black'] points.append(point) return points def get_mock_dataset(self, numberOf, x_upper_bound, y_upper_bound): """ Mock 2D dataset with clustered data points. """ points = [] clusters = [] """ Creates between 2 to 10 cluster areas with random x/y values """ for i in range(randint(2, 10)): cluster = [randint(0, x_upper_bound), randint(0, y_upper_bound)] clusters.append(cluster) """ Creates numberOf points each randomly assigned to one cluster area and random x/y values near that area """ for i in range(numberOf): j = randint(0, len(clusters)-1) point = [randint(0,30)+clusters[j][0], randint(0,30)+clusters[j][1], 'black'] points.append(point) return points
33.323529
119
0.614298
303a0f7118c7766ed025bf5dc2723fe60db8bf3e
623
py
Python
backend/colleges/utils.py
cesko-digital/zacni-uc
281c56aec5509d5dd8bbbd60f054ffcd9156609e
[ "MIT" ]
4
2021-02-26T09:28:14.000Z
2021-07-08T19:21:57.000Z
backend/colleges/utils.py
cesko-digital/zacni-uc
281c56aec5509d5dd8bbbd60f054ffcd9156609e
[ "MIT" ]
35
2021-01-27T08:38:59.000Z
2021-12-13T19:42:38.000Z
backend/colleges/utils.py
cesko-digital/zacni-uc
281c56aec5509d5dd8bbbd60f054ffcd9156609e
[ "MIT" ]
5
2021-01-21T21:35:42.000Z
2022-01-06T10:07:58.000Z
from openpyxl import load_workbook def import_msmt_college_registry_xlsx(path, sheet_name): """ Import XLSX from https://regvssp.msmt.cz/registrvssp/cvslist.aspx (list of colleges and faculties). Parameters: path -- path to XLSX file sheet_name -- "ExportVS" or "ExportFakulty" """ workbook = load_workbook(path) sheet = workbook[sheet_name] out = [] columns = [k.value.strip() for k in sheet[1]] for i in range(2, sheet.max_row + 1): values = [i.value for i in sheet[i]] item = dict(zip(columns, values)) out.append(item) return out
25.958333
69
0.638844
303aea533163720c626b39e6c0bbce695a86963f
16,358
py
Python
tests/cases/matching_graph.py
nilsec/mtrack
76652c468417c7e3ac9903586c0127b884d6b032
[ "MIT" ]
null
null
null
tests/cases/matching_graph.py
nilsec/mtrack
76652c468417c7e3ac9903586c0127b884d6b032
[ "MIT" ]
null
null
null
tests/cases/matching_graph.py
nilsec/mtrack
76652c468417c7e3ac9903586c0127b884d6b032
[ "MIT" ]
null
null
null
import unittest import numpy as np from mtrack.graphs import G1 from mtrack.evaluation.matching_graph import MatchingGraph from mtrack.evaluation.voxel_skeleton import VoxelSkeleton from comatch import match_components import json test_data_dir = "./data" class ParallelLinesSetUp(unittest.TestCase): def setUp(self): """ o o | | | | | | | | | | o o | | | | | | | | o o | | | | . . . . . . """ self.gt_vertices = 10 self.rec_vertices = 10 self.gt = G1(self.gt_vertices) self.rec = G1(self.rec_vertices) z = 0 for v in self.gt.get_vertex_iterator(): self.gt.set_position(v, np.array([100,100,z])) self.gt.set_orientation(v, np.array([1,0,0])) z += 5 if int(v)<self.gt_vertices-1: self.gt.add_edge(int(v), int(v)+1) self.vs_gt = VoxelSkeleton(self.gt, voxel_size=[1.,1.,1.], verbose=True) # Different offset: z = 0 for v in self.rec.get_vertex_iterator(): self.rec.set_position(v, np.array([150,100,z])) self.rec.set_orientation(v, np.array([1,0,0])) z += 5 if int(v)<self.rec_vertices-1: self.rec.add_edge(int(v), int(v)+1) self.vs_rec = VoxelSkeleton(self.rec, voxel_size=[1.,1.,1.], verbose=True) self.groundtruth_skeletons = [self.vs_gt] self.reconstructed_skeletons = [self.vs_rec] self.skeletons = {"gt": self.vs_gt, "rec": self.vs_rec} self.distance_threshold = 50.1 self.voxel_size = [1.,1.,1.] 
class ErrorTestSetUpSameDistance(unittest.TestCase): def setUp(self): self.gt_vertices = 10 self.rec1_vertices = 5 self.rec2_vertices = 5 self.gt = G1(self.gt_vertices) self.rec1 = G1(self.rec1_vertices) self.rec2 = G1(self.rec2_vertices) z = 0 for v in self.gt.get_vertex_iterator(): self.gt.set_position(v, np.array([100,100,z])) self.gt.set_orientation(v, np.array([1,0,0])) z += 5 if int(v)<self.gt_vertices-1: self.gt.add_edge(int(v), int(v)+1) self.vs_gt = VoxelSkeleton(self.gt, voxel_size=[1.,1.,1.], verbose=True, subsample=5) z = 0 for v in self.rec1.get_vertex_iterator(): self.rec1.set_position(v, np.array([160,100, z])) self.rec1.set_orientation(v, np.array([1,0,0])) z += 5 if int(v)<self.rec1_vertices-1: self.rec1.add_edge(int(v), int(v)+1) z = 0 for v in self.rec2.get_vertex_iterator(): self.rec2.set_position(v, np.array([100,150, z])) self.rec2.set_orientation(v, np.array([1,0,0])) z += 5 if int(v)<self.rec2_vertices-1: self.rec2.add_edge(int(v), int(v)+1) self.vs_rec1 = VoxelSkeleton(self.rec1, voxel_size=[1.,1.,1.], verbose=True, subsample=5) self.vs_rec2 = VoxelSkeleton(self.rec2, voxel_size=[1.,1.,1.], verbose=True, subsample=5) self.groundtruth_skeletons = [self.vs_gt] self.reconstructed_skeletons = [self.vs_rec1, self.vs_rec2] self.skeletons = {"gt": [self.vs_gt], "rec": [self.vs_rec1, self.vs_rec2]} self.distance_threshold = 51 self.voxel_size = [1.,1.,1.] 
class MatchingGraphNoInitAllTestCase(ParallelLinesSetUp): def runTest(self): mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, initialize_all=False) self.assertTrue(mg.total_vertices ==\ self.vs_gt.get_graph().get_number_of_vertices() +\ self.vs_rec.get_graph().get_number_of_vertices()) class MatchingGraphInitializeTestCase(ParallelLinesSetUp): def runTest(self): mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, initialize_all=False) # Test private methods too as internals are complex: matching_graph, mappings, mv_to_v, v_to_mv = mg._MatchingGraph__initialize() self.assertTrue(matching_graph.get_number_of_vertices() ==\ mg._MatchingGraph__get_total_vertices()) self.assertTrue(matching_graph.get_number_of_edges()==0) for tag in ["gt", "rec"]: for graph in mg.graphs[tag]: for v in graph.get_vertex_iterator(): mv = v_to_mv[(graph, int(v))] pos_v = np.array(graph.get_position(v)) pos_mv = np.array(matching_graph.get_position(mv)) self.assertTrue(np.all(pos_v == pos_mv)) mv_ids_rec = mappings["rec"]["mv_ids"] mv_ids_gt = mappings["gt"]["mv_ids"] self.assertTrue(set(mv_ids_rec) & set(mv_ids_gt) == set([])) self.assertTrue(sorted(mv_ids_rec + mv_ids_gt) ==\ range(matching_graph.get_number_of_vertices())) for i in range(len(mv_ids_gt)): mv_id = mv_ids_gt[i] graph_pos = np.array(matching_graph.get_position(mv_id)) mapping_pos = mappings["gt"]["positions"][i] self.assertTrue(np.all(graph_pos == mapping_pos)) for i in range(len(mv_ids_rec)): mv_id = mv_ids_rec[i] graph_pos = np.array(matching_graph.get_position(mv_id)) mapping_pos = mappings["rec"]["positions"][i] self.assertTrue(np.all(graph_pos == mapping_pos)) class MatchingGraphAddSkeletonEdgesTestCase(ParallelLinesSetUp): def runTest(self): mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, 
verbose=True, initialize_all=False) matching_graph, mappings, mv_to_v, v_to_mv = mg._MatchingGraph__initialize() mg.matching_graph = matching_graph mg.mappings = mappings mg.mv_to_v = mv_to_v mg.v_to_mv = v_to_mv self.assertTrue(matching_graph.get_number_of_edges() == 0) mg._MatchingGraph__add_skeleton_edges() self.assertTrue(matching_graph.get_number_of_edges() ==\ self.vs_gt.get_graph().get_number_of_edges() +\ self.vs_rec.get_graph().get_number_of_edges()) # Check that all edges are attached to the correct vertices: for e in matching_graph.get_edge_iterator(): mv0 = e.source() mv1 = e.target() v0 = mv_to_v[mv0] v1 = mv_to_v[mv1] # Compare graphs self.assertTrue(v0[0] == v1[0]) edge = v0[0].get_edge(v0[1], v1[1]) # Raises value error if not there pos_v0 = np.array(v0[0].get_position(v0[1])) pos_v1 = np.array(v0[0].get_position(v1[1])) pos_mv0 = np.array(matching_graph.get_position(mv0)) pos_mv1 = np.array(matching_graph.get_position(mv1)) self.assertTrue(np.all(pos_v0 == pos_mv0)) self.assertTrue(np.all(pos_v1 == pos_mv1)) class MatchingGraphAddMatchingEdgesTestCase(ParallelLinesSetUp): def runTest(self): mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, initialize_all=False) matching_graph, mappings, mv_to_v, v_to_mv = mg._MatchingGraph__initialize() mg.matching_graph = matching_graph mg.mappings = mappings mg.mv_to_v = mv_to_v mg.v_to_mv = v_to_mv mg._MatchingGraph__add_skeleton_edges() edges_pre_add = mg.matching_graph.get_number_of_edges() mg._MatchingGraph__add_matching_edges(self.distance_threshold, self.voxel_size) edges_post_add = mg.matching_graph.get_number_of_edges() self.assertTrue(edges_post_add > edges_pre_add) mg.mask_skeleton_edges() edges_post_masking = mg.matching_graph.get_number_of_edges() self.assertTrue(edges_post_masking == edges_pre_add) for e in mg.matching_graph.get_edge_iterator(): self.assertTrue(mg.get_edge_type(e) == "skeleton") mg.clear_edge_masks() 
self.assertTrue(edges_post_add == mg.matching_graph.get_number_of_edges()) mg.mask_matching_edges() self.assertTrue(edges_post_add - edges_pre_add ==\ mg.matching_graph.get_number_of_edges()) for e in mg.matching_graph.get_edge_iterator(): self.assertTrue(mg.get_edge_type(e) == "matching") v0_gt = mg.is_groundtruth_mv(e.source()) v1_gt = mg.is_groundtruth_mv(e.target()) self.assertTrue(int(v0_gt) != int(v1_gt)) mg.clear_edge_masks() mg.to_nml(test_data_dir + "/matching_graph.nml") class MatchingGraphExportToComatch(ParallelLinesSetUp): def runTest(self): mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, initialize_all=True) nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch() for v_gt in nodes_gt: self.assertTrue(mg.is_groundtruth_mv(v_gt)) for v_rec in nodes_rec: self.assertFalse(mg.is_groundtruth_mv(v_rec)) mg.mask_matching_edges() self.assertTrue(len(edges_gt_rec) == mg.get_number_of_edges()) mg.clear_edge_masks() self.assertTrue(len(nodes_gt) + len(nodes_rec) == mg.get_number_of_vertices()) class MatchingGraphImportMatches(ParallelLinesSetUp): def runTest(self): print "Import matches" mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, initialize_all=True) nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch() label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec) matches = node_matches # Everything is matched self.assertTrue(len(matches) == mg.get_number_of_vertices()/2) mg.import_matches(matches) for v in mg.get_vertex_iterator(): self.assertTrue(mg.is_tp(v)) self.assertFalse(mg.is_fp(v)) self.assertFalse(mg.is_fn(v)) for e in mg.get_edge_iterator(): 
self.assertFalse(mg.is_split(e)) self.assertFalse(mg.is_merge(e)) class MatchingGraphExportToComatch(ParallelLinesSetUp): def runTest(self): mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, initialize_all=True) nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch() for v_gt in nodes_gt: self.assertTrue(mg.is_groundtruth_mv(v_gt)) for v_rec in nodes_rec: self.assertFalse(mg.is_groundtruth_mv(v_rec)) mg.mask_matching_edges() self.assertTrue(len(edges_gt_rec) == mg.get_number_of_edges()) mg.clear_edge_masks() self.assertTrue(len(nodes_gt) + len(nodes_rec) == mg.get_number_of_vertices()) class TestOneToOne(ErrorTestSetUpSameDistance): def runTest(self): print "OneToOne" mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, distance_cost=True, initialize_all=True) nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch() try: # Quadmatch label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_conflicts=edge_conflicts, max_edges=1, edge_costs=edge_costs) except TypeError: # Comatch label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, allow_many_to_many=False, edge_costs=edge_costs, no_match_costs=1000.) 
print "label matches:", label_matches print "node_matches:", node_matches comatch_errors = {"splits": num_splits, "num_merges": num_merges, "num_fps": num_fps, "num_fns": num_fns} print comatch_errors mg.import_matches(node_matches) output_dir = test_data_dir + "/MatchingOnetoOne" mg.export_all(output_dir) with open(output_dir + "/macro_errors.json", "w+") as f: json.dump(comatch_errors, f) class TestManyToMany(ErrorTestSetUpSameDistance): def runTest(self): print "ManyToMany" mg = MatchingGraph(self.groundtruth_skeletons, self.reconstructed_skeletons, self.distance_threshold, self.voxel_size, verbose=True, distance_cost=True, initialize_all=True) nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch() try: # Quadmatch label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_conflicts=edge_conflicts, max_edges=10, edge_costs=edge_costs) except TypeError: # Comatch label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, allow_many_to_many=True, edge_costs=edge_costs, no_match_costs=1000.) print "label matches:", label_matches print "node_matches:", node_matches comatch_errors = {"splits": num_splits, "num_merges": num_merges, "num_fps": num_fps, "num_fns": num_fns} print comatch_errors mg.import_matches(node_matches) output_dir = test_data_dir + "/MatchingManytoMany" mg.export_all(output_dir) with open(output_dir + "/macro_errors.json", "w+") as f: json.dump(comatch_errors, f) if __name__ == "__main__": unittest.main()
38.399061
183
0.578127
303b9455bfc79c1d55b5b456a3c9a97a10ddaa26
5,706
py
Python
act/qc/arm.py
jrobrien91/ACT
604b93d75366d23029f89d88df9053d52825c214
[ "BSD-3-Clause" ]
9
2019-03-11T19:41:34.000Z
2019-09-17T08:34:19.000Z
act/qc/arm.py
jrobrien91/ACT
604b93d75366d23029f89d88df9053d52825c214
[ "BSD-3-Clause" ]
127
2019-03-18T12:24:17.000Z
2020-01-06T20:53:06.000Z
act/qc/arm.py
jrobrien91/ACT
604b93d75366d23029f89d88df9053d52825c214
[ "BSD-3-Clause" ]
15
2019-03-11T15:30:56.000Z
2019-11-01T19:10:11.000Z
""" Functions specifically for working with QC/DQRs from the Atmospheric Radiation Measurement Program (ARM). """ import datetime as dt import numpy as np import requests from act.config import DEFAULT_DATASTREAM_NAME def add_dqr_to_qc( obj, variable=None, assessment='incorrect,suspect', exclude=None, include=None, normalize_assessment=True, cleanup_qc=True, ): """ Function to query the ARM DQR web service for reports and add as a new quality control test to ancillary quality control variable. If no anicllary quality control variable exist a new one will be created and lined to the data variable through ancillary_variables attribure. See online documentation from ARM Data Quality Office on the use of the DQR web service. https://code.arm.gov/docs/dqrws-examples/wikis/home Information about the DQR web-service avaible at https://adc.arm.gov/dqrws/ Parameters ---------- obj : xarray Dataset Data object variable : string, or list of str, or None Variables to check DQR web service. If set to None will attempt to update all variables. assessment : string assessment type to get DQRs. Current options include 'missing', 'suspect', 'incorrect' or any combination separated by a comma. exclude : list of strings DQR IDs to exclude from adding into QC include : list of strings List of DQR IDs to include in flagging of data. Any other DQR IDs will be ignored. normalize_assessment : boolean The DQR assessment term is different than the embedded QC term. Embedded QC uses "Bad" and "Indeterminate" while DQRs use "Incorrect" and "Suspect". Setting this will ensure the same terms are used for both. cleanup_qc : boolean Call clean.cleanup() method to convert to standardized ancillary quality control variables. Has a little bit of overhead so if the Dataset has already been cleaned up, no need to run. Returns ------- obj : xarray Dataset Data object Examples -------- .. 
code-block:: python from act.qc.arm import add_dqr_to_qc obj = add_dqr_to_qc(obj, variable=['temp_mean', 'atmos_pressure']) """ # DQR Webservice goes off datastreams, pull from object if 'datastream' in obj.attrs: datastream = obj.attrs['datastream'] elif '_datastream' in obj.attrs: datastream = obj.attrs['_datastream'] else: raise ValueError('Object does not have datastream attribute') if datastream == DEFAULT_DATASTREAM_NAME: raise ValueError("'datastream' name required for DQR service set to default value " f"{datastream}. Unable to perform DQR service query.") # Clean up QC to conform to CF conventions if cleanup_qc: obj.clean.cleanup() # In order to properly flag data, get all variables if None. Exclude QC variables. if variable is None: variable = list(set(obj.data_vars) - set(obj.clean.matched_qc_variables)) # Check to ensure variable is list if not isinstance(variable, (list, tuple)): variable = [variable] # Loop through each variable and call web service for that variable for var_name in variable: # Create URL url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream=' url += datastream url += '&varname=' + var_name url += ''.join( [ '&searchmetric=', assessment, '&dqrfields=dqrid,starttime,endtime,metric,subject', ] ) # Call web service req = requests.get(url) # Check status values and raise error if not successful status = req.status_code if status == 400: raise ValueError('Check parameters') if status == 500: raise ValueError('DQR Webservice Temporarily Down') # Get data and run through each dqr dqrs = req.text.splitlines() time = obj['time'].values dqr_results = {} for line in dqrs: line = line.split('|') dqr_no = line[0] # Exclude DQRs if in list if exclude is not None and dqr_no in exclude: continue # Only include if in include list if include is not None and dqr_no not in include: continue starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1]))) endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2]))) ind = np.where((time >= 
starttime) & (time <= endtime)) if ind[0].size == 0: continue if dqr_no in dqr_results.keys(): dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind) else: dqr_results[dqr_no] = { 'index': ind, 'test_assessment': line[3], 'test_meaning': ': '.join([dqr_no, line[-1]]), } for key, value in dqr_results.items(): try: obj.qcfilter.add_test( var_name, index=value['index'], test_meaning=value['test_meaning'], test_assessment=value['test_assessment'], ) except IndexError: print(f"Skipping '{var_name}' DQR application because of IndexError") if normalize_assessment: obj.clean.normalize_assessment(variables=var_name) return obj
33.174419
91
0.608132
303cbb53a5783485cb968c141cc21be11daa907d
1,458
py
Python
tests/test_main_page.py
litovsky2/shop_test
a618f23debfb2efa6f1fceb7eff6443a10f0b11f
[ "Apache-2.0" ]
1
2020-05-03T21:32:49.000Z
2020-05-03T21:32:49.000Z
tests/test_main_page.py
litovsky2/shop_test
a618f23debfb2efa6f1fceb7eff6443a10f0b11f
[ "Apache-2.0" ]
1
2020-07-01T11:12:02.000Z
2020-07-01T11:12:02.000Z
tests/test_main_page.py
litovsky2/shop_test
a618f23debfb2efa6f1fceb7eff6443a10f0b11f
[ "Apache-2.0" ]
2
2021-03-08T14:46:09.000Z
2021-08-30T13:12:21.000Z
import allure from common.constans import PrintedDress, PrintedSummerDress, Colors @allure.step('Product Card') def test_open_product_card(app, login): """ 1. Open page 2. Choose product 3. Open product card 4. Check product info """ app.page.select_woman_category() app.page.open_product('Printed Dress') assert app.page.product_name() == PrintedDress.name assert app.page.product_model() == PrintedDress.model assert app.page.product_description() == PrintedDress.description assert app.page.product_price() == PrintedDress.price @allure.step('Color Test') def test_color_dress(app, login): """ 1. Open page 2. Choose product 3. Open product card 4. Check product colors """ app.page.select_woman_category() app.page.open_product('Printed Summer Dress') assert app.page.product_name() == PrintedSummerDress.name assert app.page.product_model() == PrintedSummerDress.model assert app.page.product_description() == PrintedSummerDress.description assert app.page.product_price() == PrintedSummerDress.price assert app.page.get_color(app.page.black_color()) == Colors.black assert app.page.get_color(app.page.orange_color()) == Colors.orange assert app.page.get_color(app.page.blue_color()) == Colors.blue assert app.page.get_color(app.page.yellow_color()) == Colors.yellow
37.384615
76
0.688615