id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5148442 | <gh_stars>0
"""For a detailed guide on all the features of the Circuit Playground Express (cpx) library:
https://adafru.it/cp-made-easy-on-cpx"""
import time
import math
import random
# from threading import Thread
import microcontroller
from adafruit_circuitplayground.express import cpx
# Set TONE_PIANO to True to enable a tone piano on the touch pads!
TONE_PIANO = True
# Set this as a float from 0 to 1 to change the brightness. The decimal represents a percentage.
# So, 0.3 means 30% brightness!
cpx.pixels.brightness = 0.01
# Changes to NeoPixel state will not happen without explicitly calling show()
cpx.pixels.auto_write = False
# Digi-Key colors: red and white!
digi_key_colors = ((255, 0, 0), (180, 180, 150))
# Python colors: blue and yellow!
python_colors = ((32, 64, 255), (255, 180, 20))
color_index = 0
pixel_number = 0
# time.monotonic() allows for non-blocking LED animations!
start = time.monotonic()
def color_wheel(pos):
    """Translate a position in 0-255 into an (r, g, b) tuple.

    The wheel fades red -> green -> blue -> back to red; positions
    outside 0-255 yield black.
    """
    if not 0 <= pos <= 255:
        return (0, 0, 0)
    if pos < 85:
        ramp = pos * 3
        return (int(255 - ramp), int(ramp), 0)
    if pos < 170:
        ramp = (pos - 85) * 3
        return (0, int(255 - ramp), int(ramp))
    ramp = (pos - 170) * 3
    return (int(ramp), 0, int(255 - ramp))
def piano():
    """Touch-pad tone piano (enabled via the TONE_PIANO flag above).

    Pads are polled lazily in A1..A7 order; the first touched pad picks
    the tone, and the tone stops when no pad is touched.
    """
    if TONE_PIANO:
        for pad_name, frequency in (("touch_A1", 262), ("touch_A2", 294),
                                    ("touch_A3", 330), ("touch_A4", 349),
                                    ("touch_A5", 392), ("touch_A6", 440),
                                    ("touch_A7", 494)):
            if getattr(cpx, pad_name):
                cpx.start_tone(frequency)
                break
        else:
            cpx.stop_tone()
def play_wavs():
    """Play a WAV clip when one of the two push buttons is pressed.

    The files are expected to live next to this script on the board.
    """
    # Press the buttons to play sounds!
    if cpx.button_a:
        cpx.play_file("drama.wav")
    elif cpx.button_b:
        cpx.play_file("low_fade.wav")
def show_colors():
    """Advance the NeoPixel animation by one non-blocking step.

    ``board_id`` selects the animation: 0 flashes Digi-Key colors,
    1 flashes Python colors, 2 draws a red-comet rainbow swirl.
    State between calls is kept in the module-level ``start``,
    ``color_index`` and ``pixel_number`` counters.
    """
    global start, color_index, pixel_number
    now = time.monotonic()
    # Startup behavior is based on your board's unique ID!
    # uid returns a bytearray. The individual numbers are summed then modulo by 3.
    # NOTE(review): the uid-based selection is commented out, pinning the swirl
    # animation (board_id == 2) — presumably a debugging leftover; confirm.
    board_id = 2 #sum(microcontroller.cpu.uid) % 3
    if board_id == 0:
        # Flash Digi-Key colors!
        if now - start > 0.5:
            # Toggle between the two palette entries every half second.
            color_index = (color_index + 1) % len(digi_key_colors)
            cpx.pixels.fill(digi_key_colors[color_index])
            cpx.pixels.show()
            start = now
    elif board_id == 1:
        # Flash Python colors!
        if now - start > 0.5:
            color_index = (color_index + 1) % len(python_colors)
            cpx.pixels.fill(python_colors[color_index])
            cpx.pixels.show()
            start = now
    elif board_id == 2:
        # Red-comet rainbow swirl!
        pixel_number = (pixel_number + 1) % 10
        for p in range(10):
            color = color_wheel(25 * ((pixel_number + p) % 10))
            # Dim each pixel by its position along the swirl to form the comet tail.
            cpx.pixels[p] = tuple([int(c * (10 - (pixel_number + p) % 10) / 10.0) for c in color])
        cpx.pixels.show()
# Main loop: mirror the slide switch onto the red LED and poll the piano.
# NOTE(review): show_colors() and play_wavs() are defined above but never
# called here — confirm whether the LED animation should run in this loop.
while True:
    # If the switch is to the left, it returns True!
    cpx.red_led = cpx.switch
    # play_wavs()
    piano()
| StarcoderdataPython |
6448720 | import hikari
import tanjun
import spotipy
import sqlite3
import json
import requests
import typing
from tanjun.abc import SlashContext
# Open (or create) the local SQLite store mapping users to their playlists.
conn = sqlite3.connect('database.db')
c = conn.cursor()
# One-time schema bootstrap, kept commented out for reference:
#c.execute("""CREATE TABLE database (
#users_id integer,
#playlist_id BLOB,
#playlist_link BLOB)""")
component = tanjun.Component()
@component.with_command
@tanjun.with_str_slash_option("track", "The Spotify link to the song.")
@tanjun.as_slash_command("add", "Add tracks to your playlist.")
async def interactive_post(
    ctx: SlashContext, track: typing.Optional[str],
    bot: hikari.GatewayBot = tanjun.injected(type=hikari.GatewayBot),
    client: tanjun.Client = tanjun.injected(type=tanjun.Client)
) -> None:
    """Slash command: add a Spotify track to the caller's personal playlist.

    Looks the caller up in the local SQLite database; if they have no
    registered playlist they are told to run `begin` first, otherwise the
    track id is sliced out of the pasted link and appended via spotipy.
    """
    # Refresh the access token on every call — it may have expired.
    get_token()
    spotify_object = spotipy.Spotify(get_token.variable)
    user_id = ctx.author.id
    c.execute("SELECT * FROM database WHERE users_id = ?", (user_id,))
    if not c.fetchall():
        embed = hikari.Embed(title="", colour=0x00ffd5,description="You need a Personal Playlist first to use this command. \nUse the `begin` command to create one. \nUse the `add-common` command to add to the common playlist.")
        await ctx.respond(embed=embed)
        return
    # A track share link looks like
    #   https://open.spotify.com/track/<22-char id>?si=...
    # The prefix is 31 characters, so the base62 track id occupies
    # characters 31..52 of the link. (The original built this same slice
    # one character at a time via a single-element set — replaced with a
    # direct slice, identical result.)
    track_id = track[31:53]
    uri = 'spotify:track:' + track_id
    c.execute("SELECT playlist_id FROM database WHERE users_id = ?", (user_id,))
    rows = c.fetchall()
    playlist_id = str(rows[0][0])
    spotify_object.playlist_add_items(playlist_id=playlist_id, items=[uri])
    embed = hikari.Embed(title="", colour=0x00ffd5,description="Track added!")
    await ctx.respond(embed=embed)
def get_token():
    """Refresh the Spotify OAuth access token.

    Stores the fresh token on the function object itself
    (``get_token.variable``) for the command above to pick up.
    """
    # SECURITY(review): the Basic auth header embeds the client id/secret in
    # source control, and the refresh token is a "<KEY>" placeholder that must
    # be filled in before this can work — move both to configuration.
    headers = {'Authorization': 'Basic ZjUxZTBhNTE1NjlmNGQ0NjkxZTc1ZTFiYzU2MzE0YjU6NzQ0YmUwYzc2ZWQxNGNjOTk4Y2JiYzAwNmFhYmNjMTg=', }
    data = {
        'grant_type': 'refresh_token',
        'refresh_token': "<KEY>",
    }
    response = requests.post(
        'https://accounts.spotify.com/api/token', headers=headers, data=data)
    x = json.loads(response.text)
    # Cached on the function object: acts as a crude module-level global.
    get_token.variable = x['access_token']
@ tanjun.as_loader
def load(client: tanjun.abc.Client) -> None:
    """Tanjun entry point: register a copy of this module's component."""
    client.add_component(component.copy())
| StarcoderdataPython |
5037772 | <reponame>miffyrcee/tf
import numpy as np
import tensorflow as tf
import tensorflow_probability.python as tfp
tfd = tfp.distributions
num_schools = 8 # number of schools
treatment_effects = np.array([28, 8, -3, 7, -1, 1, 18, 12],
dtype=np.float32) # treatment effects
treatment_stddevs = np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32)
# Eight-schools hierarchical model, non-centered parameterization.
# NOTE: JointDistributionSequential passes previously sampled values to the
# final lambda in *reverse* declaration order, so the arguments bind as
# school_effects_standard <- Independent(...), avg_stddev <- "log_tau",
# avg_effect <- "mu".
model = tfd.JointDistributionSequential([
    tfd.Normal(loc=0., scale=10., name="mu"),  # `mu` above
    tfd.Normal(loc=5., scale=1., name="log_tau"),  # `log(tau)` above
    tfd.Independent(
        tfd.Normal(loc=tf.zeros(num_schools),
                   scale=tf.ones(num_schools),
                   name="school_effects_standard"),  # `theta_prime`
        reinterpreted_batch_ndims=1),
    lambda school_effects_standard, avg_stddev, avg_effect: (
        tfd.Independent(
            tfd.Normal(
                loc=(avg_effect[..., tf.newaxis] + tf.exp(avg_stddev[
                    ..., tf.newaxis]) * school_effects_standard
                ),  # `theta` above
                scale=treatment_stddevs),
            name="treatment_effects",  # `y` above
            reinterpreted_batch_ndims=1))
])
def target_log_prob_fn(avg_effect, avg_stddev, school_effects_standard):
    """Unnormalized target density as a function of states."""
    # Pin the observed treatment effects and score the latent states under
    # the joint model defined above.
    return model.log_prob(
        (avg_effect, avg_stddev, school_effects_standard, treatment_effects))
num_results = 5000
num_burnin_steps = 3000
@tf.function(autograph=False)
def do_sampling():
    """Run HMC for `num_results` draws after `num_burnin_steps` burn-in.

    Returns (states, kernel_results), where states is a list of chains for
    [avg_effect, avg_stddev, school_effects_standard].
    """
    return tfp.mcmc.sample_chain(num_results=num_results,
                                 num_burnin_steps=num_burnin_steps,
                                 current_state=[
                                     tf.zeros([], name='init_avg_effect'),
                                     tf.zeros([], name='init_avg_stddev'),
                                     tf.ones(
                                         [num_schools],
                                         name='init_school_effect_standard')
                                 ],
                                 kernel=tfp.mcmc.HamiltonianMonteCarlo(
                                     target_log_prob_fn=target_log_prob_fn,
                                     step_size=0.4,
                                     num_leapfrog_steps=3))
# Draw the samples and unpack the three chains by name.
states, kernel_results = do_sampling()
avg_effect, avg_stddev, school_effects_standard = states
| StarcoderdataPython |
6579732 | <filename>crypto.py<gh_stars>1-10
# Version/prefix bytes used by the Base58Check serializations below.
SUFFIX_PRIVKEY_COMPRESSED = 0x01
PREFIX_P2PKH = 0x00  # Public Key Hash
PREFIX_PUBKEY_EVEN = 0x02
PREFIX_PUBKEY_ODD = 0x03
PREFIX_PUBKEY_FULL = 0x04
PREFIX_P2SH = 0x05  # https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch07.asciidoc#pay-to-script-hash-p2sh
PREFIX_TESTNET_P2PKH = 0x6F
PREFIX_TESTNEY_P2SH = 0xc4  # NOTE(review): "TESTNEY" looks like a typo for TESTNET; renaming would break importers.
PREFIX_PRIVKEY = 0x80
PREFIX_ENCPRIVKEY = 0x0142  # BIP-38
PREFIX_EXTPUBKEY = 0x0488B21E  # BIP-32
# TODO: SEGWIT https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch07.asciidoc#segregated-witness
# Python 2/3 string compatibility shims: on Python 2 ``bytes`` is ``str``,
# so the conversions are identities; on Python 3 we convert through latin-1
# so every byte value 0-255 round-trips unchanged.
if bytes == str:  # python2
    str2bytes = lambda s: s
    bytes2str = lambda b: b
    str2list = lambda s: [ord(c) for c in s]
else:  # python3
    str2bytes = lambda s: s.encode('latin-1')
    bytes2str = lambda b: ''.join(map(chr, b))
    str2list = lambda s: [c for c in s]
def seed2bin(seed, nonce=0):
    """Hash a textual seed (plus a 32-bit nonce) into key material.

    The nonce is packed big-endian and prepended, then the data is run
    through blake256 -> keccak256 -> sha256 (innermost first).
    """
    from hash import sha256, keccak256, blake256
    from struct import pack
    data = pack(">L", nonce) + str2bytes(seed)
    return sha256(keccak256(blake256(data)))
def bin2privkey(data):
    """Clamp 32 bytes of key material into a usable private key.

    Clearing the three low bits of the first byte makes the scalar a
    multiple of the cofactor (prevents small-subgroup attacks); forcing
    the top two bits of the last byte to 0b01 guards against Montgomery
    ladder implementations that mishandle a zero high bit.
    """
    key = bytearray(data)
    key[0] &= 0xF8    # clear the three lowest bits (248 == 0b11111000)
    key[31] &= 0x7F   # clear the most significant bit (127 == 0b01111111)
    key[31] |= 0x40   # set the second-highest bit  (64 == 0b01000000)
    return bytes(key)
def seed2privkey(seed, nonce=0):
    """Derive a clamped private key directly from a textual seed."""
    return bin2privkey(seed2bin(seed, nonce))
def privkey2privwif(privkey, compressed=True):
    """Encode a raw private key in Wallet Import Format (Base58Check).

    When `compressed` is set, a trailing 0x01 marks that the matching
    public key should be serialized in compressed form.
    """
    from base58check import base58CheckEncode
    if compressed:
        privkey += bytes([SUFFIX_PRIVKEY_COMPRESSED])  # https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch04.asciidoc#comp_priv
    return base58CheckEncode(bytes([PREFIX_PRIVKEY]) + privkey)
def privwif2privkey(privwif):
    """Decode a WIF string back to (raw_private_key, compressed_flag).

    Raises a generic Exception when the version byte is not the mainnet
    private-key prefix (0x80).
    """
    from base58check import base58CheckDecode
    privkey = base58CheckDecode(privwif)
    if privkey[0] != PREFIX_PRIVKEY:
        raise Exception('prefix missmatch')
    else:
        privkey = privkey[1:]
    # A 33rd trailing 0x01 byte flags a compressed public key.
    if len(privkey) == 33 and privkey[-1] == SUFFIX_PRIVKEY_COMPRESSED:
        return (privkey[:-1], True)  # https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch04.asciidoc#comp_priv
    return (privkey, False)
def privkey2pubkey(privkey):
    """Derive the raw 64-byte (x||y) public key on secp256k1."""
    import ecdsa
    sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1)
    return sk.verifying_key.to_string()
def sign_hash(privkey, datahash):
    """ECDSA-sign a precomputed digest; returns a canonicalized DER signature."""
    import ecdsa
    sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1)
    return sk.sign_digest(datahash, sigencode=ecdsa.util.sigencode_der_canonize)
def sign_data(privkey, data):
    """Sign arbitrary data Bitcoin-style: double-SHA256, then ECDSA."""
    from hash import sha256
    datahash = sha256(sha256(data))
    return sign_hash(privkey=privkey, datahash=datahash)
def pubkey2pubwif(pubkey, compressed=True):
    """Serialize a 64-byte (x||y) public key for hashing/encoding.

    Uncompressed keys get the 0x04 prefix and keep both coordinates;
    compressed keys keep only x, prefixed 0x02/0x03 by the parity of y.
    """
    if not compressed:
        return bytes([PREFIX_PUBKEY_FULL]) + pubkey
    x_coord, y_coord = pubkey[:32], pubkey[32:]
    parity_prefix = PREFIX_PUBKEY_ODD if y_coord[-1] & 1 else PREFIX_PUBKEY_EVEN
    return bytes([parity_prefix]) + x_coord
def pubkey2addr(pubkey, compressed=True):
    """Turn a public key into a mainnet P2PKH address (Base58Check of hash160)."""
    from base58check import base58CheckEncode
    from hash import hash160
    pubwif = pubkey2pubwif(pubkey, compressed)
    return base58CheckEncode(bytes([PREFIX_P2PKH]) + hash160(pubwif))
def privkey2addr(privkey, compressed=True):
    """Convenience: derive the P2PKH address straight from a private key."""
    return pubkey2addr(privkey2pubkey(privkey), compressed)
"""
>>> p = 115792089237316195423570985008687907853269984665640564039457584007908834671663
>>> x = 55066263022277343669578718895168534326250603453777594175500187360389116729240
>>> y = 32670510020758816978083085130507043184471273380659243275938904335757337482424
>>> (x ** 3 + 7) % p == y**2 % p
"""
| StarcoderdataPython |
239077 | from PySide2.QtWidgets import (QWidgetAction, QWidget, QPushButton, QGridLayout, QMenu, QButtonGroup)
from PySide2.QtCore import Qt, QPoint, Signal
from PySide2.QtGui import (QColor, QPixmap, QPainter, QIcon)
COLOR_PALETTE = [
QColor(179,102,102),QColor(255,128,128),QColor(255,0,0),QColor(191,0,0),QColor(128,0,0),QColor(0,0,0),
QColor(179,141,102),QColor(225,192,128),QColor(255,128,0),QColor(191,95,0),QColor(128,64,0),QColor(23,23,23),
QColor(179,179,102),QColor(225,225,128),QColor(255,225,0),QColor(191,191,0),QColor(128,128,0),QColor(46,46,46),
QColor(141,179,102),QColor(192,225,128),QColor(128,225,0),QColor(95,191,0),QColor(94,128,0),QColor(69,69,69),
QColor(102,179,102),QColor(128,225,128),QColor(0,225,0),QColor(0,191,0),QColor(0,128,0),QColor(93,93,93),
QColor(102,179,141),QColor(128,225,192),QColor(0,225,128),QColor(0,191,95),QColor(0,128,64),QColor(116,116,116),
QColor(102,179,179),QColor(128,225,255),QColor(0,225,255),QColor(0,191,191),QColor(0,128,128),QColor(139,139,139),
QColor(102,141,179),QColor(128,192,255),QColor(0,128,255),QColor(0,95,191),QColor(0,64,128),QColor(162,162,162),
QColor(102,102,179),QColor(128,128,255),QColor(0,0,255),QColor(0,0,191),QColor(0,0,128),QColor(186,186,186),
QColor(141,102,179),QColor(192,128,255),QColor(128,0,255),QColor(95,0,191),QColor(64,0,128),QColor(209,209,209),
QColor(179,102,179),QColor(255,128,255),QColor(255,0,255),QColor(191,0,191),QColor(128,0,128),QColor(232,232,232),
QColor(179,102,141),QColor(255,128,192),QColor(255,0,128),QColor(191,0,95),QColor(128,0,64),QColor(255,255,255)
]
BW_PALETTE = [QColor(0,0,0),QColor(255,255,255)]
class QPushColorButton(QPushButton):
    """Small button that shows the current colour as a swatch and pops up a
    palette grid menu when left-clicked.

    Emits ``colorSelected(QColor)`` whenever a palette entry is picked.
    """
    colorSelected = Signal(QColor)

    def __init__(self, parent=None, columns=int(0), rows=int(0), palette=list):
        # NOTE(review): ``palette=list`` uses the *type* as the default value;
        # callers are expected to always pass a real list of QColor.
        super(QPushColorButton, self).__init__(parent, "")
        self.setMaximumWidth(25)
        self.setMaximumHeight(25)
        self.currentColor = QColor(255,255,255)
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.palette = palette
        self._columns = columns
        self._rows = rows

    def paintEvent(self,event):
        """Paint the button, then draw the current colour as a centred 13x13 swatch."""
        super(QPushColorButton, self).paintEvent(event)
        size = 13
        height = (self.height() - size)/2
        width = (self.width() - size)/2
        qp = QPainter(self)
        # qp.begin(self)
        qp.setPen(Qt.NoPen)
        qp.setBrush(self.currentColor)
        qp.drawRect(width, height, size, size)

    def color_menu(self, QPos=list):
        """Show the colour palette menu next to the button.

        Parameters
        ----------
        QPos:
            Offset from the button's global top-left corner; at runtime this
            is the QPoint of the mouse click (despite the ``list`` default).
        """
        self.mainMenu = QMenu()
        self.mainMenu.setStyleSheet("QMenu {background-color: #222222;}")
        colorAction = ColorAction(self.mainMenu, self._columns, self._rows, self.palette)
        colorAction.colorSelected.connect(self.handleColorSelected)
        self.mainMenu.addAction(colorAction)
        pos = self.mapToGlobal(QPoint(0,0))
        self.mainMenu.move(pos + QPos)
        self.mainMenu.show()

    # Getter/setter pairs kept for backward compatibility with existing
    # callers; the trailing property objects expose them attribute-style.
    def get_palette(self):
        return self.palette

    def set_palette(self, value=list):
        self.palette = value
    Palette= property(get_palette, set_palette)

    def get_columns(self):
        return self._columns

    def set_columns(self, value=int):
        self._columns = value
    Columns = property(get_columns,set_columns)

    def get_rows(self):
        return self._rows

    def set_rows(self, value=int):
        self._rows = value
    Rows = property(get_rows,set_rows)

    def handleColorSelected(self, color=QColor):
        """Store the picked colour and re-emit it on colorSelected."""
        self.currentColor = color
        self.colorSelected.emit(color)

    def mousePressEvent(self, event):
        """Open the palette menu on a left click, then defer to the base class."""
        if event.button() == Qt.LeftButton:
            self.color_menu(event.pos())
        super(QPushColorButton, self).mousePressEvent(event)

    def get_current_color(self):
        return self.currentColor

    def set_current_color(self, color=QColor):
        # update() triggers a repaint so the swatch reflects the new colour.
        self.currentColor = color
        self.update()
    CurrentColor = property(get_current_color,set_current_color)
class ColorAction(QWidgetAction):
    """Menu action embedding a grid of colour swatch buttons.

    Emits ``colorSelected(QColor)`` with the colour of the clicked swatch
    and hides the owning menu.
    """
    colorSelected = Signal(QColor)

    def __init__(self, parent=None, columns=int(0), rows=int(0), palette=list):
        # QWidgetAction.__init__(self, parent)
        super(ColorAction, self).__init__(parent)
        self._columns = columns
        self._rows = rows
        # Zero (or negative) means "auto": size the grid to the palette length.
        if columns < 1:
            self._columns = len(palette)
        if rows < 1:
            self._rows = len(palette)
        self.palette = palette
        self.init()

    def init(self):
        """Build the swatch grid and install it as the action's default widget."""
        widget = QWidget()
        layout = QGridLayout(widget)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)
        num = 0
        self.ColorDict = dict()
        self.ButtonList = QButtonGroup()
        # Fill column-major: the outer loop walks columns, the inner rows.
        for column in range(self._columns):
            for row in range(self._rows):
                if num < len(self.palette):
                    newColor = self.palette[num]
                    button = QPushButton('')
                    button.setContentsMargins(0,0,0,0)
                    button.setStyleSheet("padding: 0px;margin: 0px;")
                    button.setFixedSize(20,20)
                    # Map each button back to its colour for the click handler.
                    self.ColorDict[button] = self.palette[num]
                    self.ButtonList.addButton(button)
                    pixmap = QPixmap(20, 20)
                    pixmap.fill(newColor)
                    button.setIcon(QIcon(pixmap))
                    layout.addWidget(button, row, column)
                    num+=1
                else:
                    break
        self.ButtonList.buttonClicked.connect(self.handleButton)
        self.setDefaultWidget(widget)

    def handleButton(self, buttonID=QPushButton):
        """Close the menu and emit the colour mapped to the clicked button."""
        self.parent().hide()
        self.colorSelected.emit(self.ColorDict[buttonID])
| StarcoderdataPython |
5112209 | <filename>tests/test_verilog_preproc.py
import unittest
from os import path
try:
from tests.basic_tc import TEST_DIR
except ImportError:
from basic_tc import TEST_DIR
from hdlConvertor import ParseException, HdlConvertor
from hdlConvertor.language import Language
SV = Language.SYSTEM_VERILOG_2012
def _test_run(test_file, golden_file):
    """Preprocess `test_file` with the SystemVerilog preprocessor.

    Returns (actual_output, expected_output_read_from_golden_file).
    """
    c = HdlConvertor()
    incdirs = ['.', '..', path.join('sv_pp', 'src')]
    test_result = c.verilog_pp(
        test_file, incdirs, SV)
    with open(golden_file) as myfile:
        test_golden = myfile.read()
    return test_result, test_golden
def _test_run_rel(test_file, golden_file):
    """Like _test_run, with paths resolved relative to the test directory
    (inputs under sv_pp/src, golden outputs under sv_pp/expected)."""
    return _test_run(
        path.join(TEST_DIR, 'sv_pp', 'src', test_file),
        path.join(TEST_DIR, 'sv_pp', 'expected', golden_file)
    )
class VerilogPreprocTC(unittest.TestCase):
    """Tests of the SystemVerilog preprocessor against IEEE 1800-2012
    examples; file names reference page numbers in the specification."""

    def assertPPWorks(self, test_file):
        """Preprocess the input and compare against its golden output."""
        golden_file = test_file  # same name but will get a different folder
        result, ref = _test_run_rel(test_file, golden_file)
        self.assertEqual(result, ref)

    def test_2012_p641(self):
        self.assertPPWorks('2012_p641.txt')

    def test_2012_p642(self):
        self.assertPPWorks('2012_p642.txt')

    def test_2012_p642_2(self):
        self.assertPPWorks('2012_p642_2.txt')

    def test_2012_p643(self):
        self.assertPPWorks('2012_p643.txt')

    def test_2012_p643_2(self):
        self.assertPPWorks('2012_p643_2.txt')

    def test_2012_p643_3(self):
        self.assertPPWorks('2012_p643_3.txt')

    def test_2012_p644(self):
        self.assertPPWorks('2012_p644.txt')

    def assertPPError(self, file, err_msg):
        """Assert that preprocessing `file` raises ParseException with exactly `err_msg`."""
        with self.assertRaises(ParseException) as context:
            f = path.join(TEST_DIR, 'sv_pp', 'src', file)
            c = HdlConvertor()
            c.verilog_pp(
                f,
                ['.', '..', path.join('sv_pp', 'src')],
                SV
            )
        self.assertEqual(err_msg, context.exception.__str__())

    def test_2012_p644_2(self):
        # [TODO] platform dependent path
        self.assertPPError(
            '2012_p644_2.txt',
            '/home/mydir/myfile was not found in include directories\n'
        )

    def test_2012_p641_il1(self):
        self.assertPPError(
            '2012_p641_il1.txt',
            'Macro D missing value for parameter y'
            ' (2 arguments expected but 1 provided).'
        )

    def test_2012_p641_il2(self):
        self.assertPPError(
            '2012_p641_il2.txt',
            'Macro D missing value for parameter x and for parameters'
            ' after (2 arguments expected but 0 provided).'
        )

    def test_2012_p641_il3(self):
        self.assertPPError(
            '2012_p641_il3.txt',
            'Macro D expected 2 arguments but 3 provided.'
        )

    def test_2012_p642_il1(self):
        self.assertPPError(
            '2012_p642_il1.txt',
            'Macro MACRO1 missing value for parameter c'
            ' (1 to 3 arguments expected but 2 provided).'
        )

    # `MACRO3 must have parentesis according to the SV specification.
    def test_2012_p642_il2(self):
        self.assertPPError(
            '2012_p642_il2.txt',
            'Macro MACRO3 requires braces and expects (0 to 3 arguments).'
        )

    # No check that string are not split
    @unittest.expectedFailure
    def test_2012_p642_il3(self):
        self.assertPPError(
            '2012_p642_il3.txt',
            'an error message'
        )

    def test_FILE_LINE(self):
        """`__FILE__`/`__LINE__` must expand to the source path and line number."""
        c = HdlConvertor()
        f = path.join(path.dirname(__file__), 'sv_pp', 'src', 'test_FILE_LINE.sv')
        incdirs = ['.', '..', path.join('sv_pp', 'src')]
        test_result = c.verilog_pp(f, incdirs, SV)
        expected_val = path.join(path.dirname(__file__),
                                 'sv_pp', 'src', 'test_FILE_LINE.sv'
                                 )
        test_golden = ("module tb();\n\ninitial\n\t$display("
                       "\"Internal error: null handle at %s, line %d.\",\n")
        test_golden += "\"" + expected_val + "\", 5);\n\n\nendmodule\n"
        self.assertEqual(test_result, test_golden)
# Manual runner: build the suite explicitly so a single test can be
# re-enabled by uncommenting the addTest line.
if __name__ == "__main__":
    suite = unittest.TestSuite()
    # suite.addTest(VerilogPreprocTC('test_2012_p641'))
    suite.addTest(unittest.makeSuite(VerilogPreprocTC))
    runner = unittest.TextTestRunner(verbosity=3)
    runner.run(suite)
| StarcoderdataPython |
1914793 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-11 11:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.5) schema migration: updates Meta options
    (ordering and verbose names) on the attendee, event and eventtoken
    models. Applied migrations are schema history — do not edit by hand."""

    dependencies = [
        ('events', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='attendee',
            options={'ordering': ('event', 'fullname', 'email'), 'verbose_name': 'Attendee', 'verbose_name_plural': 'Attendees'},
        ),
        migrations.AlterModelOptions(
            name='event',
            options={'ordering': ('account', 'title', 'date_begin'), 'verbose_name': 'Event', 'verbose_name_plural': 'Events'},
        ),
        migrations.AlterModelOptions(
            name='eventtoken',
            options={'verbose_name': 'EventToken', 'verbose_name_plural': 'EventTokens'},
        ),
    ]
| StarcoderdataPython |
9785056 | <reponame>luqidndx/PyWake
import json
import os
from os.path import dirname
from os.path import join as pjoin
import re
import ssl
import sys
class Notebook():
    """Thin wrapper around a Jupyter .ipynb JSON document.

    Provides cell access/insertion helpers plus sanity checks that the
    notebook's code executes and its markdown links resolve.
    """

    def __init__(self, filename):
        self.filename = filename
        try:
            self.nb = self.load_notebook(self.filename)
        except Exception as e:
            # Re-raise with the offending path so batch runs are debuggable.
            raise Exception('Error in ', os.path.relpath(filename)) from e

    def load_notebook(self, filename):
        """Parse the .ipynb file (JSON) and return the raw dict."""
        with open(filename, encoding='utf-8') as fid:
            nb = json.load(fid)
        return nb

    def save(self, filename=None):
        """Write the (possibly modified) notebook back to disk.

        NOTE(review): opens without encoding='utf-8', unlike load_notebook —
        confirm non-ASCII content round-trips on all platforms.
        """
        filename = filename or self.filename
        with open(filename, 'w') as fid:
            json.dump(self.nb, fid, indent=4)

    def __getitem__(self, key):
        return self.nb[key]

    def __setitem__(self, key, value):
        self.nb[key] = value

    def __getattribute__(self, name):
        # Fall back to top-level notebook keys (e.g. self.cells) when a
        # normal attribute lookup fails.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            if name in self.nb.keys():
                return self.nb[name]
            raise

    def insert_markdown_cell(self, index, text):
        """Insert a markdown cell at `index`, one source entry per text line."""
        self.cells.insert(index, {"cell_type": "markdown",
                                  "metadata": {},
                                  "source": [l + "\n" for l in text.split("\n")]
                                  })

    def insert_code_cell(self, index, code):
        """Insert a code cell at `index` with empty outputs."""
        self.cells.insert(index,
                          {"cell_type": "code",
                           "execution_count": 0,
                           "metadata": {},
                           "outputs": [],
                           "source": [l + "\n" for l in code.split("\n")]
                           })

    def replace_include_tag(self):
        """Return the cell list with every `%%include <file>` code cell replaced
        by the (recursively expanded) cells of the referenced notebook."""
        cells = []
        for cell in self.nb['cells']:
            if cell['cell_type'] == 'code' and len(cell['source']) > 0 and '%%include' in cell['source'][0]:
                filename = pjoin(dirname(self.filename), cell['source'][0].replace('%%include', '').strip())
                nb = Notebook(filename)
                nb.replace_include_tag()
                cells.extend(nb.cells)
            else:
                cells.append(cell)
        return cells

    def get_code(self):
        """Return the non-empty code-cell sources as a list of strings."""
        code = []
        for cell in self.cells:
            if cell['cell_type'] == "code":
                if "".join(cell['source']).strip() != "":
                    code.append("".join(cell['source']))
        return code

    def get_text(self):
        """Return the non-empty markdown-cell sources as a list of strings."""
        txt = []
        for cell in self.cells:
            if cell['cell_type'] == "markdown":
                if "".join(cell['source']).strip() != "":
                    txt.append("".join(cell['source']))
        return txt

    def check_code(self):
        """Exec all code cells inside a throwaway function; raise on error.

        IPython magic/shell lines (%..., !...) are neutralized first, and
        stdout/stderr are suppressed while the cells run.
        """
        code = "\n".join(self.get_code())

        def fix(line):
            for p in ['%', '!']:
                if line.strip().startswith(p):
                    line = line.replace(p, "pass #")
            return line
        lines = [fix(l) for l in code.split("\n")]
        if len(lines) == 1 and lines[0] == '':
            return
        try:
            import contextlib
            with contextlib.redirect_stdout(None):
                with contextlib.redirect_stderr(None):
                    exec("def test():\n " + "\n ".join(lines) + "\ntest()", {}, {})
        except Exception as e:
            raise type(e)("Code error in %s\n%s\n" % (self.filename, str(e))).with_traceback(sys.exc_info()[2])

    def check_links(self):
        """Best-effort validation of markdown links `[label](url)`.

        Local anchors/attachments are skipped; `../_static` paths must exist
        on disk; remote URLs must answer HTTP 200 (certificate verification
        is disabled). Broken remote links are printed, not raised.
        """
        txt = "\n".join(self.get_text())
        for link in re.finditer(r"\[([^]]*)]\(([^)]*)\)", txt):
            label, url = link.groups()
            # print(label)
            # print(url)
            if url.startswith('attachment') or url.startswith("#"):
                continue
            if url.startswith("../_static"):
                assert os.path.isfile(os.path.join(os.path.dirname(self.filename), url))
                # NOTE(review): this `return` aborts checking after the first
                # _static link — `continue` looks intended; confirm.
                return
            try:
                import urllib.request
                context = ssl._create_unverified_context()
                assert urllib.request.urlopen(url, context=context).getcode() == 200
            except Exception as e:
                print("%s broken in %s\n%s" % (url, self.filename, str(e)))
                # traceback.print_exc()
                # print(txt)
# Ad-hoc smoke test: validate one notebook's code and links.
if __name__ == '__main__':
    nb = Notebook('elements/v80.ipynb')
    nb.check_code()
    nb.check_links()
| StarcoderdataPython |
9610768 | <reponame>meawoppl/numba
"""
>>> temp_string_var()
hellohello0
>>> temp_string()
hellohello0
>>> temp_string2()
hellohello0
>>> temp_string3()
hellohello0
hellohello1
hellohello2
>>> eq("foo", "foo")
True
>>> eq("foo", "bar")
False
>>> ne("foo", "foo")
False
>>> ne("foo", "bar")
True
>>> lt("foo", "foo")
False
>>> lt("foo", "bar")
False
>>> lt("bar", "foo")
True
>>> interpolate("%s and %s", "ham", "eggs")
'ham and eggs'
>>> autojit(string_len)("hello")
5
>>> autojit(nopython=True)(string_len)("hello")
5
"""
import sys
from numba import *
def get_string(i=0):
    """Return the doubled greeting with `i` appended, e.g. 'hellohello0'."""
    greeting = "hello"
    return "{}{}".format(greeting * 2, i)
# Demo/doctest targets exercising numba's C-string support; the expected
# console output is pinned by the module docstring above.
@autojit(backend='ast', locals=dict(s=c_string_type))
def temp_string_var():
    # `s` is declared as a C string via the locals= typing.
    s = get_string()
    print(s)

@autojit(backend='ast', locals=dict(s=c_string_type))
def temp_string():
    s = c_string_type(get_string())
    print(s)

@autojit(backend='ast')
def temp_string2():
    # Same conversion, but the temporary is printed directly.
    print((c_string_type(get_string())))

@autojit(backend='ast', locals=dict(s=c_string_type))
def temp_string3():
    # Exercises reassignment of a typed C string inside a loop.
    for i in range(3):
        s = c_string_type(get_string(i))
        print(s)

@autojit(backend='ast')
def test():
    return object()

@jit(void())
def string_constant():
    print("hello world")

# String comparison and formatting primitives, typed explicitly.
@jit(bool_(c_string_type, c_string_type))
def eq(s1, s2):
    return s1 == s2

@jit(bool_(c_string_type, c_string_type))
def ne(s1, s2):
    return s1 != s2

@jit(bool_(c_string_type, c_string_type))
def lt(s1, s2):
    return s1 < s2

@jit(c_string_type(c_string_type, c_string_type))
def concat(s1, s2):
    return s1 + s2

@jit(c_string_type(c_string_type, c_string_type, c_string_type))
def interpolate(s, s1, s2):
    return s % (s1, s2)

def string_len(s):
    # Left un-jitted: the docstring compiles it with autojit both ways.
    return len(s)

if __name__ == '__main__':
    import numba
    numba.testing.testmod()
| StarcoderdataPython |
62319 | <reponame>mustard-seed/SparseNN_training
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.quantization import QuantStub, DeQuantStub
import torch.optim as optim
import horovod.torch as hvd
from torchvision import datasets, transforms
import torch.utils.data.distributed as distributed
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import shutil
from custom_modules.custom_modules import ConvBNReLU, LinearReLU, ConvReLU
import pruning.pruning as custom_pruning
import experiment.experiment as experiment
from utils.meters import ClassificationMeter, TimeMeter
from experiment.experiment import experimentBase, globalActivationDict, globalWeightDict, hook_activation
class LeNet(nn.Module):
    """LeNet-style CNN for MNIST with quant/dequant stubs so the model can be
    statically quantized, plus a fuse_model() step for Conv+BN+ReLU fusion."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.convBN1 = ConvBNReLU(1, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.convBN2 = ConvBNReLU(6, 16, 5)
        self.fc1 = LinearReLU(16 * 7 * 7, 120)
        self.fc2 = LinearReLU(120, 84)
        self.fc3 = nn.Linear(84, 10)
        # Quantization entry/exit points for torch static quantization.
        self.quant = QuantStub()
        self.deQuant = DeQuantStub()
        # weight and BN parameter initialization
        # BN: set gamma (a.k.a weight) to 1, and bias to zero
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
        # BN statistics initialization

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        x = self.quant(x)
        x = self.pool(self.convBN1(x))
        x = self.pool(self.convBN2(x))
        x = x.view(-1, 16 * 7 * 7)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        x = self.deQuant(x)
        # NOTE(review): F.log_softmax without an explicit dim= relies on the
        # deprecated implicit-dim behavior; confirm dim=1 is intended.
        return F.log_softmax(x)

    # Fuse convBNReLU prior to quantization
    def fuse_model(self):
        for m in self.modules():
            if type(m) == ConvBNReLU:
                # Fuse the layers in ConvBNReLU module, which is derived from nn.Sequential
                # Use the default fuser function
                torch.quantization.fuse_modules(m, ['0', '1', '2'], inplace=True)
            elif type(m) == LinearReLU:
                torch.quantization.fuse_modules(m, ['0', '1'], inplace=True)
class experimentLeNet(experimentBase):
    """MNIST/LeNet experiment: wires up the model, (optionally Horovod
    distributed) data loaders, TensorBoard meters, activation hooks, and
    balanced pruning on the prunable layers."""

    def __init__(self, configFile, multiprocessing=False):
        super().__init__(configFile, multiprocessing)
        self.model = LeNet()
        # Dataset rootdir relative to the python script
        # dataKwargs = {'num_workers': 1, 'pin_memory': True}
        # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent
        # issues with Infiniband implementations that are not fork-safe
        # if (dataKwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context') and
        #         mp._supports_context and 'forkserver' in mp.get_all_start_methods()):
        #     dataKwargs['multiprocessing_context'] = 'forkserver'
        datasetDir = self.config.dataTrainDir
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        self.trainDataSet = datasets.MNIST(datasetDir, train=True, download=False,
                                           transform=transform)
        # Shard the data across Horovod workers when multiprocessing.
        self.trainDataSampler = distributed.DistributedSampler(
            self.trainDataSet, num_replicas=hvd.size(), rank=hvd.rank()
        ) if multiprocessing is True \
            else None
        self.trainDataLoader = DataLoader(
            self.trainDataSet,
            batch_size=self.config.batchSizePerWorker,
            sampler=self.trainDataSampler,
            shuffle=True if self.trainDataSampler is None else False
            #,**dataKwargs
        )
        self.valDataSet = datasets.MNIST(datasetDir, train=False, download=False,
                                         transform=transform)
        self.valDataSampler = distributed.DistributedSampler(
            self.valDataSet, num_replicas=hvd.size(), rank=hvd.rank()
        ) if multiprocessing is True \
            else None
        self.valDataLoader = DataLoader(
            self.valDataSet,
            batch_size=self.config.batchSizePerWorker,
            sampler=self.valDataSampler,
            shuffle=True if self.valDataSampler is None else False
            #,**dataKwargs
        )
        # Only rank 0 (or the single process) owns the TensorBoard writer
        # and the meters that log through it.
        if (multiprocessing is True and hvd.rank() == 0) or multiprocessing is False:
            if not os.path.exists(self.config.logDir):
                os.makedirs(self.config.logDir)
            self.logWriter = SummaryWriter(self.config.logDir)
            self.trainMeter = ClassificationMeter(
                multiprocessing,
                self.logWriter,
                logPrefix='Train'
            )
            self.valMeter = ClassificationMeter(
                multiprocessing,
                self.logWriter,
                logPrefix='Validation'
            )
            self.trainTimeMeter = TimeMeter(
                multiprocessing,
                self.logWriter,
                logPrefix='Train'
            )
    # End of __init__

    def evaluate_loss(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Negative log-likelihood loss over the model's log-softmax output."""
        return F.nll_loss(output, target)

    def apply_hook_activation(self, module: torch.nn.Module, prefix=None) -> dict:
        """Register activation-capture forward hooks on the monitored layers.

        Returns a name -> hook-handle dict so the hooks can be removed later.
        """
        pruneDict = {'convBN1': self.model.convBN1,
                     'convBN2': self.model.convBN2,
                     'convBN3' : self.model.fc1,
                     'convBN4' : self.model.fc2}
        forwardHookHandlesDict = {}
        for name, m in pruneDict.items():
            handle = m.register_forward_hook(hook_activation)
            forwardHookHandlesDict[name] = handle
        return forwardHookHandlesDict

    def extract_weight(self, module: torch.nn.Module) -> None:
        super().extract_weight(module)

    def prune_network(self, sparsityTarget) -> None:
        """Apply balanced pruning to every prunable layer at the given sparsity.

        Fused wrapper modules expose the prunable layer as their first child;
        the bare nn.Linear (fc3) is pruned directly.
        """
        pruneList = [self.model.convBN1, self.model.convBN2, self.model.fc1,
                     self.model.fc2, self.model.fc3]
        for m in pruneList:
            if isinstance(m, (ConvBNReLU, ConvReLU, LinearReLU)):
                layer = list(m.children())[0]
                custom_pruning.applyBalancedPruning(layer,
                                                    "weight",
                                                    clusterSize=self.config.pruneCluster,
                                                    pruneRangeInCluster=self.config.pruneRangeInCluster,
                                                    sparsity=sparsityTarget)
            elif isinstance(m, nn.Linear):
                layer = m
                custom_pruning.applyBalancedPruning(layer,
                                                    "weight",
                                                    clusterSize=self.config.pruneCluster,
                                                    pruneRangeInCluster=self.config.pruneRangeInCluster,
                                                    sparsity=sparsityTarget)
# CLI entry point: parse arguments, optionally init Horovod, build the
# experiment, optionally restore a checkpoint, then dispatch on --mode.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="LeNet experiment")
    # NOTE(review): 'print_model' and 'trace_model' are handled below but are
    # not listed in choices=, so argparse rejects them — confirm intent.
    parser.add_argument('--mode', type=str, choices=['train', 'evaluate_sparsity'], default='train',
                        help='Mode. Valid choices are train, and evaluate_sparsity')
    parser.add_argument('--config_file', type=str, required=True,
                        help='Path to the experiment configuration file. Required')
    parser.add_argument('--load_checkpoint', type=int, choices=[0, 1, 2], default=0,
                        help='Load experiment from checkpoint. Default: 0. 0: start from scratch; 1: load full experiment; 2: load model only')
    parser.add_argument('--multiprocessing', action='store_true',
                        help='Enable multiprocessing (using Horovod as backend). Default: False')
    parser.add_argument('--checkpoint_path', type=str,
                        help='Path to the checkpoint to be loaded. Required if --load_checkpoint is set as 1 or 2')
    parser.add_argument('--override_cluster_size', type=int,
                        help='Override the cluster size in the experiment config when performing sparsity evaluation')
    args = parser.parse_args()
    if args.multiprocessing is True:
        hvd.init()
    experiment = experimentLeNet(configFile=args.config_file,
                                 multiprocessing=args.multiprocessing)
    if args.load_checkpoint == 1 or args.load_checkpoint == 2:
        assert args.checkpoint_path is not None, 'Experiment is required to load from an existing checkpoint, but no path to checkpoint is provided!'
        loadModelOnly = True if args.load_checkpoint == 2 else False
        experiment.restore_experiment_from_checkpoint(checkpoint=args.checkpoint_path,
                                                      loadModelOnly=loadModelOnly)
    if args.mode == 'train':
        experiment.train()
        # Copy the config file into the log directory
        logPath = experiment.config.checkpointSaveDir
        configFileName = os.path.basename(args.config_file)
        newConfigFilePath = os.path.join(logPath, configFileName)
        shutil.copy(args.config_file, newConfigFilePath)
    elif args.mode == 'evaluate_sparsity':
        experiment.save_sparsity_stats(args.override_cluster_size, numBatches=100)
    elif args.mode == 'print_model':
        experiment.print_model()
    elif args.mode == 'trace_model':
        experiment.trace_model(dirnameOverride=os.getcwd(), numMemoryRegions=3, modelName='resnet50_imagenet',
                               foldBN=True)
| StarcoderdataPython |
1902062 | <reponame>adamtorres/unit-convert
import unittest
import unit_convert
class TestVolume(unittest.TestCase):
    """Round-trip conversion tests for unit_convert volume units."""

    def test_ounce_and_cup(self):
        # (ounces, cups) pairs — exact conversions, 8 fluid ounces per cup.
        tests = [
            (16, 2),
            (1, 0.125),
            (0.5, 0.0625),
            (128, 16),
            (124, 15.5),
        ]
        for t in tests:
            with self.subTest(t=t):
                # Conversion must be symmetric in both directions.
                self.assertEqual(unit_convert.UnitConvert(ounces=t[0]).cups, t[1])
                self.assertEqual(unit_convert.UnitConvert(cups=t[1]).ounces, t[0])

    def test_tsp_and_gallon(self):
        # (teaspoons, gallons) pairs — 768 teaspoons per US gallon.
        tests = [
            (768, 1),
            (384, 0.5),
            (192, 0.25),
            (12, 0.015625),
        ]
        for t in tests:
            with self.subTest(t=t):
                self.assertEqual(unit_convert.UnitConvert(teaspoons=t[0]).gal, t[1])
                self.assertEqual(unit_convert.UnitConvert(gal=t[1]).tsp, t[0])

    def test_gallon_and_kiloliter(self):
        # (gallons, kiloliters) — metric/imperial conversion is inexact.
        tests = [
            (264.172, 1),
        ]
        for t in tests:
            with self.subTest(t=t):
                # Using assertAlmostEqual as the conversions give a pile of decimal places. Rounding to two decimal
                # places for the tests.
                self.assertAlmostEqual(unit_convert.UnitConvert(gallons=t[0]).kiloliters, t[1], 2)
                self.assertAlmostEqual(unit_convert.UnitConvert(kiloliters=t[1]).gal, t[0], 2)

    def test_ml_and_ounce(self):
        # (milliliters, ounces) — also inexact, compared to 2 decimal places.
        tests = [
            (500, 16.907),
            (750, 25.3605),
            (7640614, 258359.89416)
        ]
        for t in tests:
            with self.subTest(t=t):
                # Using assertAlmostEqual as the conversions give a pile of decimal places. Rounding to two decimal
                # places for the tests.
                self.assertAlmostEqual(unit_convert.UnitConvert(ml=t[0]).oz, t[1], 2)
                self.assertAlmostEqual(unit_convert.UnitConvert(oz=t[1]).ml, t[0], 2)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6464724 | # encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QProxyModel(__PySide_QtCore.QAbstractItemModel):
    """Auto-generated IDE stub for PySide's QProxyModel.

    Every method body is a placeholder (`pass`); the real implementations
    live in the compiled QtGui binary.  Do not call these stubs at runtime —
    they exist only so IDEs can offer code completion and type information.
    """
    def columnCount(self, *args, **kwargs): # real signature unknown
        pass

    def connectToModel(self, *args, **kwargs): # real signature unknown
        pass

    def data(self, *args, **kwargs): # real signature unknown
        pass

    def disconnectFromModel(self, *args, **kwargs): # real signature unknown
        pass

    def dropMimeData(self, *args, **kwargs): # real signature unknown
        pass

    def fetchMore(self, *args, **kwargs): # real signature unknown
        pass

    def flags(self, *args, **kwargs): # real signature unknown
        pass

    def hasChildren(self, *args, **kwargs): # real signature unknown
        pass

    def headerData(self, *args, **kwargs): # real signature unknown
        pass

    def index(self, *args, **kwargs): # real signature unknown
        pass

    def insertColumns(self, *args, **kwargs): # real signature unknown
        pass

    def insertRows(self, *args, **kwargs): # real signature unknown
        pass

    def match(self, *args, **kwargs): # real signature unknown
        pass

    def mimeData(self, *args, **kwargs): # real signature unknown
        pass

    def mimeTypes(self, *args, **kwargs): # real signature unknown
        pass

    def model(self, *args, **kwargs): # real signature unknown
        pass

    def parent(self, *args, **kwargs): # real signature unknown
        pass

    def revert(self, *args, **kwargs): # real signature unknown
        pass

    def rowCount(self, *args, **kwargs): # real signature unknown
        pass

    def setData(self, *args, **kwargs): # real signature unknown
        pass

    def setHeaderData(self, *args, **kwargs): # real signature unknown
        pass

    def setModel(self, *args, **kwargs): # real signature unknown
        pass

    def setProxyModel(self, *args, **kwargs): # real signature unknown
        pass

    def setSourceModel(self, *args, **kwargs): # real signature unknown
        pass

    def sort(self, *args, **kwargs): # real signature unknown
        pass

    def span(self, *args, **kwargs): # real signature unknown
        pass

    def submit(self, *args, **kwargs): # real signature unknown
        pass

    def supportedDropActions(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    staticMetaObject = None # (!) real value is '<PySide.QtCore.QMetaObject object at 0x0000000004C2C1C8>'
| StarcoderdataPython |
6469400 | <reponame>osoco/better-ways-of-thinking-about-software
# lint-amnesty, pylint: disable=missing-module-docstring
import datetime
from unittest.mock import patch
import pytest
from openedx.core.djangoapps.schedules.content_highlights import (
course_has_highlights_from_store,
get_all_course_highlights,
get_next_section_highlights,
get_week_highlights
)
from openedx.core.djangoapps.schedules.exceptions import CourseUpdateDoesNotExist
from openedx.core.djangolib.testing.utils import skip_unless_lms
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@skip_unless_lms
class TestContentHighlights(ModuleStoreTestCase):  # lint-amnesty, pylint: disable=missing-class-docstring
    """Tests for the schedules content-highlight helpers.

    Exercises get_week_highlights / get_next_section_highlights /
    get_all_course_highlights / course_has_highlights_from_store against a
    modulestore-backed course whose chapters carry `highlights` lists.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    def setUp(self):
        super().setUp()
        self._setup_course()
        self._setup_user()

    def _setup_course(self):
        # Highlights messaging must be enabled for the helpers to return data.
        self.course = CourseFactory.create(
            highlights_enabled_for_messaging=True
        )
        self.course_key = self.course.id

    def _setup_user(self):
        # Helpers are evaluated in the context of an enrolled user.
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course_key)

    def _create_chapter(self, **kwargs):
        # Each chapter represents one "week" of highlights.
        ItemFactory.create(
            parent=self.course,
            category='chapter',
            **kwargs
        )

    def test_non_existent_course_raises_exception(self):
        nonexistent_course_key = self.course_key.replace(run='no_such_run')
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, nonexistent_course_key, week_num=1)

    def test_empty_course_raises_exception(self):
        # A course with no chapters has no highlights at all.
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=1)

    def test_happy_path(self):
        highlights = ['highlights']
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=highlights)
        assert course_has_highlights_from_store(self.course_key)
        assert get_week_highlights(self.user, self.course_key, week_num=1) == highlights

    def test_get_all_course_highlights(self):
        # One inner list per chapter, empty lists included, in chapter order.
        all_highlights = [["week1highlight1", "week1highlight2"], ["week1highlight1", "week1highlight2"], [], []]
        with self.store.bulk_operations(self.course_key):
            for week_highlights in all_highlights:
                self._create_chapter(highlights=week_highlights)
        assert get_all_course_highlights(self.course_key) == all_highlights

    def test_highlights_disabled_for_messaging(self):
        # Highlights exist on the chapter but messaging is switched off,
        # so the helpers must behave as if there were none.
        highlights = ['A test highlight.']
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=highlights)
        self.course.highlights_enabled_for_messaging = False
        self.store.update_item(self.course, self.user.id)
        assert not course_has_highlights_from_store(self.course_key)
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(
                self.user,
                self.course_key,
                week_num=1,
            )

    def test_course_with_no_highlights(self):
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(display_name="Week 1")
            self._create_chapter(display_name="Week 2")
        self.course = self.store.get_course(self.course_key)  # lint-amnesty, pylint: disable=attribute-defined-outside-init
        assert len(self.course.get_children()) == 2
        assert not course_has_highlights_from_store(self.course_key)
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=1)

    def test_course_with_highlights(self):
        # Chapters with empty highlight lists are skipped when numbering weeks.
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=['a', 'b', 'á'])
            self._create_chapter(highlights=[])
            self._create_chapter(highlights=['skipped a week'])
        assert course_has_highlights_from_store(self.course_key)
        assert get_week_highlights(self.user, self.course_key, week_num=1) == ['a', 'b', 'á']
        assert get_week_highlights(self.user, self.course_key, week_num=2) == ['skipped a week']
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=3)

    def test_staff_only(self):
        # Staff-only chapters count as "has highlights" at the store level but
        # must not be surfaced to a regular enrolled learner.
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(
                highlights=["I'm a secret!"],
                visible_to_staff_only=True,
            )
        assert course_has_highlights_from_store(self.course_key)
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=1)

    @patch('openedx.core.djangoapps.course_date_signals.utils.get_expected_duration')
    def test_get_next_section_highlights(self, mock_duration):
        # All of the dates chosen here are to make things easy and clean to calculate with date offsets
        # It only goes up to 6 days because we are using two_days_ago as our reference point
        # so 6 + 2 = 8 days for the duration of the course
        mock_duration.return_value = datetime.timedelta(days=8)
        today = datetime.datetime.utcnow()
        two_days_ago = today - datetime.timedelta(days=2)
        two_days = today + datetime.timedelta(days=2)
        three_days = today + datetime.timedelta(days=3)
        four_days = today + datetime.timedelta(days=4)
        six_days = today + datetime.timedelta(days=6)
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(  # Week 1
                highlights=['a', 'b', 'á'],
            )
            self._create_chapter(  # Week 2
                highlights=['skipped a week'],
            )
            self._create_chapter(  # Week 3
                highlights=[]
            )
            self._create_chapter(  # Week 4
                highlights=['final week!']
            )
        assert get_next_section_highlights(self.user, self.course_key, two_days_ago, today.date()) ==\
            (['skipped a week'], 2)
        exception_message = 'Next section [{}] has no highlights for {}'.format(  # pylint: disable=unused-variable
            'chapter 3', self.course_key
        )
        with pytest.raises(CourseUpdateDoesNotExist):
            get_next_section_highlights(self.user, self.course_key, two_days_ago, two_days.date())
        # Returns None, None if the target date does not match any due dates. This is caused by
        # making the mock_duration 8 days and there being only 4 chapters so any odd day will
        # fail to match.
        assert get_next_section_highlights(self.user, self.course_key, two_days_ago, three_days.date()) == (None, None)
        assert get_next_section_highlights(self.user, self.course_key, two_days_ago, four_days.date()) ==\
            (['final week!'], 4)
        exception_message = f'Last section was reached. There are no more highlights for {self.course_key}'
        with pytest.raises(CourseUpdateDoesNotExist):
            get_next_section_highlights(self.user, self.course_key, two_days_ago, six_days.date())

    @patch('lms.djangoapps.courseware.module_render.get_module_for_descriptor')
    def test_get_highlights_without_module(self, mock_get_module):
        # Simulate the course module failing to load for the user.
        mock_get_module.return_value = None
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=['Test highlight'])
        with self.assertRaisesRegex(CourseUpdateDoesNotExist, 'Course module .* not found'):
            get_week_highlights(self.user, self.course_key, 1)
        yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
        today = datetime.datetime.utcnow()
        with self.assertRaisesRegex(CourseUpdateDoesNotExist, 'Course module .* not found'):
            get_next_section_highlights(self.user, self.course_key, yesterday, today.date())
| StarcoderdataPython |
5104538 | <filename>solver.py
#Dependencies: None
"""
/***************************************************************************
Author :<NAME>
Email :cdbrissac (at) gmail.com
License :Apache 2.0
***************************************************************************/
"""
import copy
import random
N=64
visc=0.0
diff=0.0
dt=0.1
def print_array(x):
    """Print the interior (N x N) cells of grid field *x*, one row per line.

    The original code used a Python 2 trailing comma after a print() call
    (`print(...),`), which under Python 3 is a no-op tuple expression and
    puts every value on its own line.  `end=""` restores the intended
    space-separated row layout.
    """
    for i in range(1, N + 1):
        for j in range(1, N + 1):
            print("%f " % x[IX(i, j)], end="")
        print('')
    print('')
def IX(i, j):
    """Map 2-D grid coordinates (i, j) to the flat index of the (N+2)^2 array."""
    return i + (N + 2) * j
def SWAP(x0, x):
    """Return the two references exchanged (Python port of the C SWAP macro).

    Callers use it as ``a, b = SWAP(a, b)``.
    """
    return x, x0
def set_bnd(b,x):
    """Enforce boundary conditions on the (N+2)x(N+2) grid field *x*.

    *b* selects the field type: 0 = scalar (edge cells copy their interior
    neighbour), 1 = horizontal velocity (negated at the left/right walls),
    2 = vertical velocity (negated at the top/bottom walls).
    """
    for i in range(1,N+1):
        if b==0:
            # scalar field: continue the interior value onto the border
            x[IX(0 ,i)] = x[IX(1,i)]
            x[IX(N+1,i)] = x[IX(N,i)]
            x[IX(i,0 )] = x[IX(i,1)]
            x[IX(i,N+1)] = x[IX(i,N)]
        elif b==1:
            # horizontal velocity: reflect at vertical walls, copy elsewhere
            x[IX(0 ,i)]=-x[IX(1,i)]
            x[IX(N+1,i)]=-x[IX(N,i)]
            x[IX(i,0 )]=x[IX(i,1)]
            x[IX(i,N+1)]=x[IX(i,N)]
        elif b==2:
            # vertical velocity: reflect at horizontal walls, copy elsewhere
            x[IX(0 ,i)]=x[IX(1,i)]
            x[IX(N+1,i)]=x[IX(N,i)]
            x[IX(i,0 )]=-x[IX(i,1)]
            x[IX(i,N+1)]=-x[IX(i,N)]
    # corner cells: average of the two adjacent edge cells
    x[IX(0 ,0 )] = 0.5*(x[IX(1,0 )]+x[IX(0 ,1)]);
    x[IX(0 ,N+1)] = 0.5*(x[IX(1,N+1)]+x[IX(0 ,N)]);
    x[IX(N+1,0 )] = 0.5*(x[IX(N,0 )]+x[IX(N+1,1)]);
    x[IX(N+1,N+1)] = 0.5*(x[IX(N,N+1)]+x[IX(N+1,N)]);
def add_source(x,s):
    """Add the dt-scaled source field *s* into *x*, cell by cell (in place)."""
    size=(N+2)*(N+2)
    for i in range(size):
        x[i] += dt*s[i];
def lin_solve(b,x,x0,a,c):
    """Solve the implicit linear system with 20 Gauss-Seidel iterations.

    Updates *x* in place using neighbour values (in-place updates are part of
    the Gauss-Seidel scheme — the statement order matters), re-applying the
    *b*-type boundary conditions after every sweep.
    """
    for k in range(20):
        for i in range(1,N+1):
            for j in range(1,N+1):
                x[IX(i,j)] = (x0[IX(i,j)] + a*(x[IX(i-1,j)]+x[IX(i+1,j)]+x[IX(i,j-1)]+x[IX(i,j+1)]))/c;
        set_bnd ( b, x );
def diffuse(b,x,x0):
    """Diffuse field *x0* into *x* with rate `diff` via an implicit solve."""
    a=dt*diff*N*N;
    lin_solve ( b, x, x0, a, 1+4*a )
def advect(b,d,d0,u,v):
    """Semi-Lagrangian advection: transport field *d0* through velocity (u, v)
    into *d*, then apply the *b*-type boundary conditions.
    """
    dt0 = dt*N
    for i in range(1,N+1):
        for j in range(1,N+1):
            # trace the cell centre backwards through the velocity field
            x=i-dt0*u[IX(i,j)]
            y=j-dt0*v[IX(i,j)]
            # clamp the backtraced position inside the grid
            if x<0.5:x=0.5
            if x>N+0.5:x=N+0.5
            i0=int(x)
            i1=i0+1
            if y<0.5:y=0.5
            if y>N+0.5:y=N+0.5
            j0=int(y)
            j1=j0+1
            # bilinear interpolation weights between the 4 surrounding cells
            s1=x-i0
            s0=1-s1
            t1=y-j0
            t0=1-t1
            d[IX(i,j)]=s0*(t0*d0[IX(i0,j0)]+t1*d0[IX(i0,j1)])+s1*(t0*d0[IX(i1,j0)]+t1*d0[IX(i1,j1)])
    set_bnd ( b, d )
def project(u,v,p,div):
    """Make the velocity field (u, v) mass-conserving (divergence-free).

    *p* and *div* are scratch fields: divergence is computed into *div*,
    a pressure-like field is solved into *p*, and its gradient is then
    subtracted from the velocity.
    """
    for i in range(1,N+1):
        for j in range(1,N+1):
            # central-difference divergence of the velocity field
            div[IX(i,j)] = -0.5*(u[IX(i+1,j)]-u[IX(i-1,j)]+v[IX(i,j+1)]-v[IX(i,j-1)])/N;
            p[IX(i,j)] = 0.;
    set_bnd( 0, div )
    set_bnd( 0, p )
    # solve the Poisson equation for p
    lin_solve( 0, p, div, 1, 4 )
    for i in range(1,N+1):
        for j in range(1,N+1):
            # subtract the pressure gradient to remove divergence
            u[IX(i,j)] -= 0.5*N*(p[IX(i+1,j)]-p[IX(i-1,j)])
            v[IX(i,j)] -= 0.5*N*(p[IX(i,j+1)]-p[IX(i,j-1)])
    set_bnd( 1, u )
    set_bnd( 2, v )
def velocity_step( u, v, u0, v0 ):
    """Advance the velocity field one time step.

    Order: inject sources, (optionally) diffuse + project, advect the
    velocity through itself, project again.  Returns all four arrays
    because SWAP exchanges the references.
    """
    #u0[IX(33,33)]=5.-10.*random.random()
    # constant upward force injected at the grid centre every step
    v0[IX(int(N/2+1),int(N/2+1))]=5.
    add_source ( u, u0 )
    add_source ( v, v0 )
    #print_array(u)
    # NOTE(review): the diffusion pass (and its projection) only runs when
    # diff > 0; with the module default diff == 0.0 it is skipped entirely.
    if diff>0:
        u0,u=SWAP(u0,u)
        diffuse( 1, u, u0 )
        v0,v=SWAP(v0,v)
        diffuse( 2, v, v0 )
        project( u, v, u0, v0 )
    #print_array(u)
    # swap so the just-updated fields become the advection sources
    u0,u=SWAP(u0,u)
    v0,v=SWAP(v0,v)
    advect( 1, u, u0, u0, v0 )
    advect( 2, v, v0, u0, v0 )
    project( u, v, u0, v0 )
    return u,v,u0,v0
| StarcoderdataPython |
6436977 | """I'm a different package, but I'm in demopackage.__all__!"""
x = 42
| StarcoderdataPython |
308883 | <gh_stars>1-10
from django.conf.urls import patterns, url, include
from example.forms import AutoCompleteOrderedItemForm, OrderedItemForm, ContactFormset, MaxFiveContactsFormset, EmptyContactFormset, EventFormset
from example.forms import AutoCompleteSelectFieldForm
# Base URL routes available on every supported Django version.
urlpatterns = patterns('example.views',
    url(r'^stacked/$', 'formset', {'formset_class': ContactFormset, 'template': 'example/formset-stacked.html'}, name='example_stacked'),
    url(r'^table/$', 'formset', {'formset_class': ContactFormset, 'template': 'example/formset-table.html'}, name='example_table'),
    url(r'^form-template/$', 'formset_with_template', {'formset_class': EmptyContactFormset, 'template': 'example/form-template.html'}, name='example_form_template'),
    url(r'^admin-widget/$', 'formset', {'formset_class': EventFormset, 'template': 'example/formset-admin-widget.html'}, name='example_admin_widget'),
    url(r'^multiple-formsets/$', 'multiple_formsets', {'template': 'example/formset-multiple-formsets.html'}, name='example_multiple_formsets'),
    url(r'^inline-formset/$', 'inline_formset',
        {'form_class': OrderedItemForm, 'template': 'example/inline-formset.html'}, name='example_inline_formset'),
    url(r'^inline-formset-autocomplete/$', 'inline_formset',
        {'form_class': AutoCompleteOrderedItemForm, 'template': 'example/inline-formset-autocomplete.html'}, name='example_inline_autocomplete'),
    url(r'^inline-formset-ajax-selects/$', 'inline_formset',
        {'form_class': AutoCompleteSelectFieldForm, 'template': 'example/inline-formset-django-ajax-select.html'}, name='example_inline_ajax_selects'),
    url(r'^autocomplete-products/$', 'autocomplete_products', name='example_autocomplete_products')
)
import django
major, minor = django.VERSION[:2]
# NOTE(review): these checks compare major AND minor independently, so e.g.
# Django 2.0 (major=2, minor=0) would fail `minor >= 2` — fine for the 1.x
# series this example targets, but not future-proof.
if major >= 1 and minor >= 2:
    # These examples require Django 1.2 and above:
    urlpatterns += patterns('example.views',
        url(r'^max-forms/$', 'formset', {'formset_class': MaxFiveContactsFormset, 'template': 'example/max-forms.html'}, name='example_max_forms'),
        url(r'^empty-form/$', 'formset', {'formset_class': EmptyContactFormset, 'template': 'example/empty-form.html'}, name='example_empty_form'),
    )
if major >=1 and minor >= 7:
    from example.forms import MinTwoContactsFormset
    # These examples require Django 1.7 and above:
    urlpatterns += patterns('example.views',
        url(r'^min-forms/$', 'formset', {'formset_class': MinTwoContactsFormset, 'template': 'example/min-forms.html'}, name='example_min_forms'),
    )
12865605 | # ------------------------------------------
# Description: This python script will update AWS Thing Shadow for a Device/Thing
# ------------------------------------------
# Import package
import paho.mqtt.client as mqtt
import ssl, time, sys
# =======================================================
# Set Following Variables
# AWS IoT Endpoint
MQTT_HOST = "your aws iot endpoint"
# CA Root Certificate File Path
CA_ROOT_CERT_FILE = "path for the aws root certificate file"
# AWS IoT Thing Name
THING_NAME = "your thing name"
# AWS IoT Thing Certificate File Path
THING_CERT_FILE = "path for your device certificate file"
# AWS IoT Thing Private Key File Path
THING_PRIVATE_KEY_FILE = "path for your device private key"
# =======================================================

# =======================================================
# No need to change following variables
MQTT_PORT = 8883  # standard MQTT-over-TLS port
MQTT_KEEPALIVE_INTERVAL = 45  # seconds
# Device-shadow topics for this thing (see AWS IoT shadow topic scheme)
SHADOW_UPDATE_TOPIC = "$aws/things/" + THING_NAME + "/shadow/update"
SHADOW_UPDATE_ACCEPTED_TOPIC = "$aws/things/" + THING_NAME + "/shadow/update/accepted"
SHADOW_UPDATE_REJECTED_TOPIC = "$aws/things/" + THING_NAME + "/shadow/update/rejected"
# Desired-state documents published to toggle the LED
SHADOW_STATE_DOC_LED_ON = """{"state" : {"desired" : {"LED" : "ON"}}}"""
SHADOW_STATE_DOC_LED_OFF = """{"state" : {"desired" : {"LED" : "OFF"}}}"""
# Set by on_message() once AWS IoT has answered; polled by the main loop.
RESPONSE_RECEIVED = False
# =======================================================

# Initiate MQTT Client
mqttc = mqtt.Client("led_switch_client")
def on_connect(mosq, obj, rc, another):
    # Once connected, subscribe (QoS 1) to the shadow accepted/rejected
    # topics so on_message() can report the outcome of our update.
    mqttc.subscribe(SHADOW_UPDATE_ACCEPTED_TOPIC, 1)
    mqttc.subscribe(SHADOW_UPDATE_REJECTED_TOPIC, 1)
# Define on_message event function.
# This function will be invoked every time
# a new message arrives for the subscribed topic
def on_message(mosq, obj, msg):
    """Report the shadow-update outcome, then disconnect and signal the main loop.

    The original body used Python 2 `print` statements (a SyntaxError on
    Python 3); single-argument parenthesized `print(...)` calls produce the
    same output on both interpreters.
    """
    global RESPONSE_RECEIVED
    if str(msg.topic) == SHADOW_UPDATE_ACCEPTED_TOPIC:
        print("\n---SUCCESS---\nShadow State Doc Accepted by AWS IoT.")
        print("Response JSON:\n" + str(msg.payload))
    elif str(msg.topic) == SHADOW_UPDATE_REJECTED_TOPIC:
        print("\n---FAILED---\nShadow State Doc Rejected by AWS IoT.")
        print("Error Response JSON:\n" + str(msg.payload))
    else:
        # Unexpected topic: dump everything for debugging.
        print("AWS Response Topic: " + str(msg.topic))
        print("QoS: " + str(msg.qos))
        print("Payload: " + str(msg.payload))
    # Disconnect from MQTT_Broker — one response is all this script waits for.
    mqttc.disconnect()
    RESPONSE_RECEIVED = True
# NOTE(review): this section is Python 2 only (`print` statements, raw_input).
# Register callback functions
mqttc.on_message = on_message
mqttc.on_connect = on_connect
# Configure TLS Set — mutual TLS with the thing's certificate and key
mqttc.tls_set(CA_ROOT_CERT_FILE, certfile=THING_CERT_FILE, keyfile=THING_PRIVATE_KEY_FILE, cert_reqs=ssl.CERT_REQUIRED,
              tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
# Connect with MQTT Broker and run the network loop in a background thread
mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
mqttc.loop_start()
print "Enter 1 to Turn On the LED"
print "Enter 2 to Turn OFF the LED"
print "Enter 3 to exit"
data = raw_input("Select an option:")
if data == "1":
    mqttc.publish(SHADOW_UPDATE_TOPIC, SHADOW_STATE_DOC_LED_ON, qos=1)
elif data == "2":
    mqttc.publish(SHADOW_UPDATE_TOPIC, SHADOW_STATE_DOC_LED_OFF, qos=1)
elif data == "3":
    sys.exit()
else:
    print("Invalid input try again...")
    sys.exit()
# Wait for Response — RESPONSE_RECEIVED is set by on_message()
# NOTE(review): this is a busy-wait with no timeout; the commented-out code
# below suggests a bounded retry loop was intended but never re-enabled.
Counter = 1
while True:
    if RESPONSE_RECEIVED == True:
        break
print "I have finished my work!!!"
# time.sleep(1)
# if Counter == 10:
#     print "No response from AWS IoT. Check your Settings."
#     break
# elif RESPONSE_RECEIVED == True:
#     break
| StarcoderdataPython |
4969208 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 by nils_2 <<EMAIL>>
#
# Display size of current logfile in item-bar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# 2019-07-12: nils_2 (freenode.#weechat)
# 0.4 : option "display" is evaluated
# : use hook_process("wc") to not stall weechat anymore
# : new function: refresh only on buffer/window switch
# : make script compatible with Python 3.x
# 2013-01-07: nils_2 (freenode.#weechat)
# 0.3 : missing logfile caused a crash (thanks swimmer)
# : add support of more than one window
# : two new options "log_disabled" and "file_not_found"
# 2012-11-22: nils_2 (freenode.#weechat)
# 0.2 : bug on first startup removed (thanks swimmer)
# 2012-01-14: nils_2 (freenode.#weechat)
# 0.1 : initial release
#
# How to use:
# add item "logsize" to option "weechat.bar.status.items"
#
# Development is currently hosted at
# https://github.com/weechatter/weechat-scripts
from __future__ import print_function
from builtins import str
try:
import weechat, re
except Exception:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: https://weechat.org")
quit()
SCRIPT_NAME = "logsize"
SCRIPT_AUTHOR = "nils_2 <<EMAIL>>"
SCRIPT_VERSION = "0.4"
SCRIPT_LICENSE = "GPL"
SCRIPT_DESC = "display size of current logfile in item-bar"
OPTIONS = { "refresh" : ("0","refresh timer (in seconds). 0 = refresh only on buffer or window switch. this is the default setting"),
"size" : ("KB","display length in KB/MB/GB/TB. Leave option empty for byte"),
"display" : ("%L","possible item: %W = words, %L = lines or %F = file length. (content is evaluated, e.g. you can use colors with format \"${color:xxx}\", see /help eval)"),
"log_disabled" : ("","displays a text in item, when logger is disabled for buffer"),
"file_not_found": ("","displays a text in item, when logfile wasn't found"),
}
hooks = { "timer": "", "bar_item": "" }
hook_process_out = []
output = ""
# regexp to match ${color} tags
regex_color=re.compile('\$\{([^\{\}]+)\}')
# regexp to match ${optional string} tags
regex_optional_tags=re.compile('%\{[^\{\}]+\}')
# ================================[ size ]===============================
def sizecheck(filesize):
    """Format *filesize* (bytes) according to the plugin "size" option.

    Returns e.g. "2.00K" / "3.50M" / "1.25G" / "0.01T" for the KB/MB/GB/TB
    settings, or "<n>b" (whole bytes) for any other value of the option.
    Replaces the original repetitive if/elif chain with a unit table.
    """
    filesize = int(filesize)
    # unit option (lowercased) -> (power of 1024 to divide by, suffix)
    units = {"kb": (1, "K"), "mb": (2, "M"), "gb": (3, "G"), "tb": (4, "T")}
    unit = OPTIONS["size"].lower()
    if unit in units:
        power, suffix = units[unit]
        return "%.2f%s" % (filesize / (1024 ** power), suffix)
    # empty/unknown option: raw byte count
    return "%.0fb" % filesize
# ================================[ weechat item ]===============================
def show_item (data, item, window):
    # Bar-item callback: just return the cached text built by
    # get_file_information().
    global output
    return output
def get_file_information(ptr_buffer):
    """Rebuild the bar-item text (lines/words/size of the buffer's logfile).

    Stores the result in the module-level `output`, which show_item() returns.
    """
    global hook_process_out, output
    (logfile,log_enabled) = get_logfile(ptr_buffer)
    if not log_enabled:
        output = OPTIONS["log_disabled"]
        return
    output = ''
    if logfile != '':
        # "wc" output is: newline / word / bytes / filename
        # NOTE(review): hook_process is asynchronous — my_hook_process_cb fills
        # hook_process_out later, so the block below formats the result of a
        # PREVIOUS invocation (first call shows nothing). Confirm intended.
        weechat.hook_process("wc %s" % logfile, 50000, "my_hook_process_cb", "")
        if hook_process_out:
            lines = hook_process_out[0]
            words = hook_process_out[1]
            flength = sizecheck(hook_process_out[2])
            # substitution table for the user-configurable "display" format
            tags = {'%L': str(lines),
                    '%W': str(words),
                    '%F': str(flength)}
            output = substitute_colors(OPTIONS['display'])
            # replace mandatory tags
            for tag in list(tags.keys()):
            # for tag in tags.keys():
                output = output.replace(tag, tags[tag])
    weechat.bar_item_update(SCRIPT_NAME)
    return
def substitute_colors(text):
    """Expand ${color:...} tags in *text* using the best available API.

    WeeChat >= 0.4.2 can evaluate the whole expression natively; older
    versions fall back to a regex that replaces ${...} with color codes.
    """
    if int(version) >= 0x00040200:
        return weechat.string_eval_expression(text,{},{},{})
    # substitute colors in output
    return re.sub(regex_color, lambda match: weechat.color(match.group(1)), text)
def get_logfile(ptr_buffer):
    """Return (log_filename, log_enabled) for the given buffer pointer.

    Scans the "logger_buffer" infolist for the entry matching *ptr_buffer*;
    returns ("", 0) when the buffer has no logger entry.
    """
    log_filename = ""
    log_enabled = 0
    infolist = weechat.infolist_get('logger_buffer','','')
    while weechat.infolist_next(infolist):
        bpointer = weechat.infolist_pointer(infolist, 'buffer')
        if ptr_buffer == bpointer:
            log_filename = weechat.infolist_string(infolist, 'log_filename')
            log_enabled = weechat.infolist_integer(infolist, 'log_enabled')
            # NOTE(review): log_level is read but never used or returned.
            log_level = weechat.infolist_integer(infolist, 'log_level')
    weechat.infolist_free(infolist) # free infolist()
    return (log_filename,log_enabled)
def item_update(data, remaining_calls):
    # Timer callback: simply trigger a bar-item refresh.
    global hooks
    weechat.bar_item_update(SCRIPT_NAME)
    return weechat.WEECHAT_RC_OK
# ================================[ hook process]===============================
def my_hook_process_cb(data, command, return_code, out, err):
    """hook_process callback: stash the split "wc" output in hook_process_out."""
    global hook_process_out
    if return_code == weechat.WEECHAT_HOOK_PROCESS_ERROR:
        weechat.prnt("", "Error with command '%s'" % command)
        return weechat.WEECHAT_RC_OK
#    if return_code >= 0:
#        weechat.prnt("", "return_code = %d" % return_code)
    if out != "":
        # wc prints: lines words bytes filename
        hook_process_out = out.split()
    if err != "":
        weechat.prnt("", "stderr: %s" % err)
    return weechat.WEECHAT_RC_OK
# ================================[ weechat hook ]===============================
def window_switch_cb(data, signal, signal_data):
    # Signal callback for "window_switch": refresh the item for the buffer
    # shown in the (now) current window.
    # NOTE(review): the first assignment below is immediately overwritten.
    window = signal_data
    window = weechat.current_window()
    ptr_buffer = weechat.window_get_pointer(window,"buffer")
    get_file_information(ptr_buffer)
    return weechat.WEECHAT_RC_OK
def buffer_switch_cb(data, signal, signal_data):
    # Signal callback for "buffer_switch": refresh the item for the buffer
    # now displayed in the current window.
    # NOTE(review): signal_data is overwritten by the current-window lookup.
    ptr_buffer = signal_data
    window = weechat.current_window()
    ptr_buffer = weechat.window_get_pointer(window,'buffer')
    if ptr_buffer == '':
        return ''
    get_file_information(ptr_buffer)
    return weechat.WEECHAT_RC_OK
def hook_timer_refresh_item_cb(data, remaining_calls):
    # Timer callback: trigger a bar-item refresh.
    weechat.bar_item_update(SCRIPT_NAME)
    return weechat.WEECHAT_RC_OK
def unhook_timer():
    """Remove the refresh timer hook and its bar item, if installed.

    Bug fix: the original last line was a bare ``hooks["bar_item"]``
    expression (a no-op), leaving a stale pointer in the hooks dict; it is
    now reset to "" like hooks["timer"].
    """
    global hooks
    if hooks["timer"] != "":
        weechat.bar_item_remove(hooks["bar_item"])
        weechat.unhook(hooks["timer"])
        hooks["timer"] = ""
        hooks["bar_item"] = ""
def hook_timer():
    """Install the refresh timer and bar item; return 1 on success, 0 on failure."""
    global hooks
    hooks["timer"] = weechat.hook_timer(int(OPTIONS["refresh"]) * 1000, 0, 0, 'item_update', '')
    hooks["bar_item"] = weechat.bar_item_new(SCRIPT_NAME, 'show_item','')
    if hooks["timer"] == 0:
        # timer hook failed: roll back the bar item as well
        weechat.prnt('',"%s: can't enable %s, hook failed" % (weechat.prefix("error"), SCRIPT_NAME))
        weechat.bar_item_remove(hooks["bar_item"])
        hooks["bar_item"] = ""
        return 0
    weechat.bar_item_update(SCRIPT_NAME)
    return 1
# ================================[ weechat options and description ]===============================
def toggle_refresh(pointer, name, value):
    """Config-change callback: cache the new option value and, for "refresh",
    (re)install or remove the timer hook accordingly.
    """
    global hooks
    option = name[len('plugins.var.python.' + SCRIPT_NAME + '.'):]        # get optionname
    OPTIONS[option] = value                                               # save new value
    if option == 'refresh':                                               # option "refresh" changed by user?
        if hooks["timer"] != "":                                          # timer currently running?
            if OPTIONS['refresh'] != "0":                                 # new user setting not zero?
                # restart timer with the new interval
                unhook_timer()
                hook_timer()
            else:
                unhook_timer()                                            # user switched timer off
        elif hooks["timer"] == "":                                        # hook is empty
            if OPTIONS['refresh'] != "0":                                 # option is not zero!
                hook_timer()                                              # install hook
    weechat.bar_item_update(SCRIPT_NAME)
    return weechat.WEECHAT_RC_OK
def init_options():
    """Register option descriptions and load current values into OPTIONS.

    Missing options are created with their defaults; existing ones keep the
    user's configured value.
    """
    global OPTIONS
    for option, value in list(OPTIONS.items()):
        weechat.config_set_desc_plugin(option, '%s (default: "%s")' % (value[1], value[0]))
        if not weechat.config_is_set_plugin(option):
            weechat.config_set_plugin(option, value[0])
            OPTIONS[option] = value[0]
        else:
            OPTIONS[option] = weechat.config_get_plugin(option)
# ================================[ main ]===============================
if __name__ == "__main__":
    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
        version = weechat.info_get("version_number", "") or 0
        init_options()
        if OPTIONS["refresh"] != "0":
            # periodic refresh via timer
            hook_timer()
        else:
            # refresh only on buffer/window switches (default behaviour)
            weechat.hook_signal("buffer_switch","buffer_switch_cb","")
            weechat.hook_signal("window_switch","window_switch_cb","")
            weechat.bar_item_new(SCRIPT_NAME, 'show_item','')
            weechat.bar_item_update(SCRIPT_NAME)
        # react to option changes made with /set
        weechat.hook_config( 'plugins.var.python.' + SCRIPT_NAME + '.*', 'toggle_refresh', '' )
| StarcoderdataPython |
139054 | <gh_stars>1-10
from mrhttp import app
@app.route('/')
def hello(r):
    """Handle file uploads: return the uploaded file's name, or a notice."""
    if r.file == None:
        return "No file uploaded"
    #for f in r.files:
        #print(f)
    # multipart upload metadata: original filename, MIME type, raw contents
    name = r.file['name']
    typ = r.file['type']
    body = r.file['body']
    return name

# Serve with 4 worker processes.
app.run(cores=4)
# curl -i -X POST -F "data=@14_upload.py" http://localhost:8080/
| StarcoderdataPython |
3402275 | #!/usr/bin/env python3
# frequency.py
# word frequency analysis script
# i love dictionaries
import json
import string
from collections import Counter
def alphanumericize(source_text):
    """Lowercase *source_text*, turn dashes into spaces, and delete punctuation.

    Dashes become word separators (Dostoevsky loves his dashes) before ASCII
    punctuation and the Unicode curly quotes are stripped out.
    """
    lowered = source_text.lower().replace('-', ' ')
    strip_chars = string.punctuation + '\u201c\u201d\u2018\u2019'
    return lowered.translate(str.maketrans('', '', strip_chars))
def histogram(source_text):
    """Return a {word: count} histogram of *source_text*.

    Text is lowercased and stripped of punctuation (via alphanumericize)
    before counting.  Uses collections.Counter instead of the manual
    accumulate loop; the result is converted back to a plain dict so the
    return type is unchanged.
    """
    words = alphanumericize(source_text).split()
    return dict(Counter(words))
def unique_words(histogram):
    """Return the number of distinct words in *histogram*.

    Works for both dict and list histograms; `len()` is taken directly
    instead of materialising `list(histogram)` first.
    """
    return len(histogram)
def frequency(word, histogram):
    """Return how many times *word* (case-insensitive) occurs in *histogram*.

    Accepts either a dict histogram ({word: count}) or a list histogram of
    [word, count] pairs / (word, count) tuples.  Returns 0 for unknown
    words in both representations — the original dict branch raised
    KeyError for a missing word while the list branch returned 0.
    """
    unique_word = word.lower()
    if isinstance(histogram, dict):
        return histogram.get(unique_word, 0)
    if isinstance(histogram, (list, tuple)):
        for entry in histogram:
            if entry[0] == unique_word:
                return entry[1]
    return 0
def histogram_lists(source_text):
    """Histogram of *source_text* as [word, count] lists.

    Output order matches the original implementation: descending
    alphabetical (the old code consumed a sorted word list from the end).
    Replaces the quadratic del-from-list loop with a single Counter pass.
    """
    counts = Counter(alphanumericize(source_text).split())
    return [[word, counts[word]] for word in sorted(counts, reverse=True)]
def histogram_tuples(source_text):
    """Histogram of *source_text* as (word, count) tuples.

    Output order matches the original implementation: ascending
    alphabetical.  Robustness fix: the original indexed text[0] and so
    raised IndexError on empty input — this version returns [].
    """
    counts = Counter(alphanumericize(source_text).split())
    return [(word, counts[word]) for word in sorted(counts)]
def histogram_counts(source_text):
    """Build an *inverted* histogram: words grouped by their count.

    Returns a list of [count, [words...]] entries, e.g.
    [[1, ['fish', 'blue']], [2, ['red', 'dinosaur']], ...], sorted by
    list comparison (count first, then the word lists).  Within each group
    the words appear in descending alphabetical order, matching the
    original algorithm's back-to-front scan.

    Fixes: the original shadowed the function name with a local variable
    of the same name, and located each count group with a linear scan of
    the result list (O(k) per word); a count->index dict makes it O(1).
    """
    from collections import Counter  # local import: file-level import block is outside this view
    source = alphanumericize(source_text)
    words = source.split()
    counts = Counter(words)
    grouped = {}  # count -> list of words, insertion order == first-seen order
    for word in sorted(counts, reverse=True):
        grouped.setdefault(counts[word], []).append(word)
    result = [[count, group] for count, group in grouped.items()]
    result.sort()
    return result
def save_histogram(histogram):
    """Serialise *histogram* to 'histogram.txt' in the working directory as JSON."""
    with open('histogram.txt', 'w') as out_file:
        json.dump(histogram, out_file)
if __name__ == '__main__':
    # load the complete source text into memory
    with open('crime_and_punishment.txt', 'r') as file:
        source = file.read()
    # build the histogram and count the distinct words
    source_histogram = histogram_tuples(source_text = source)
    source_unique_words = unique_words(histogram = source_histogram)
    # sample some known words of "Crime and Punishment".  '14' is the
    # alphabetically first token in the text and 'éternelle' the last, so
    # checking both confirms the algorithms cover every word.
    sample_words = ('14', 'mystery', 'Sonia', 'murder', '\u00e9ternelle')
    frequencies = [frequency(word = w, histogram = source_histogram) for w in sample_words]
    # report the results
    print('{} unique words. The word \'14\' appears {} times, the word \'mystery\' appears \n{} times, the word \'Sonia\' {} times, the word \'murder\' {} times, and the \nword \'\u00e9ternelle\' {} times.'.format(source_unique_words, *frequencies))
    # persist the histogram for later runs
    save_histogram(histogram = source_histogram)
    print('Histogram saved as histogram.txt.')
| StarcoderdataPython |
5151215 | <reponame>hashberg-io/dag-cbor<filename>dag_cbor/utils.py
"""
Error classes and utility functions.
Errors raised because of CBOR codec specifications are instances of :class:`CBORError`, while errors
raised because of additional restrictions imposed by the DAG-CBOR codec are instances of :class:`DAGCBORError`,
a subclass of :class:`CBORError`. Both kind of errors are then further specialised into encoding and decoding errors,
depending on where they are raised.
There are two utility functions dealing with dictionary keys:
- :func:`check_key_compliance` enforces that dictionary keys myst be :obj:`str` instances and unique
- :func:`canonical_order_dict` applies the above and then sorts the dictionary keys by the lexicographic ordering
of the corresponding UTF-8 bytestrings (according to DAG-CBOR specification)
"""
from typing import Any, Dict
from typing_validation import validate
class CBORError(Exception):
    """
    Base class for all errors mandated by the CBOR specification.
    """
class CBOREncodingError(CBORError):
    """
    Raised on encoding failures required by the CBOR specification.
    """
class CBORDecodingError(CBORError):
    """
    Raised on decoding failures required by the CBOR specification.
    """
class DAGCBORError(CBORError):
    """
    Base class for errors arising from the extra restrictions of DAG-CBOR.
    """
class DAGCBOREncodingError(CBOREncodingError, DAGCBORError):
    """
    Raised on encoding failures specific to the DAG-CBOR specification.
    """
class DAGCBORDecodingError(CBORDecodingError, DAGCBORError):
    """
    Raised on decoding failures specific to the DAG-CBOR specification.
    """
def _canonical_order_dict(value: Dict[str, Any]) -> Dict[str, Any]:
# try:
# utf8key_key_val_pairs = [(k.encode("utf-8", errors="strict"), k, v) for k, v in value.items()]
# except UnicodeError as e:
# raise CBOREncodingError("Strings must be valid utf-8 strings.") from e
# # as far as I understand, the above should never raise UnicodeError on "utf-8" encoding
utf8key_key_val_pairs = [(k.encode("utf-8", errors="strict"), k, v) for k, v in value.items()]
sorted_utf8key_key_val_pairs = sorted(utf8key_key_val_pairs, key=lambda i: (len(i[0]), i[0]))
return {k: v for _, k, v in sorted_utf8key_key_val_pairs}
def _check_key_compliance(value: Dict[str, Any]) -> None:
    """ Check keys for DAG-CBOR compliance (all map keys must be strings). """
    # key uniqueness needs no check: Python dict keys are unique by construction
    for key in value.keys():
        if not isinstance(key, str):
            raise DAGCBOREncodingError("Keys for maps must be strings.")
def check_key_compliance(value: Dict[str, Any]) -> None:
    """ Validate that *value* is a string-keyed mapping, then check the keys
    for DAG-CBOR compliance (raises DAGCBOREncodingError on non-string keys). """
    # runtime type validation first (typing_validation), then the internal check
    validate(value, Dict[str, Any])
    _check_key_compliance(value)
def canonical_order_dict(value: Dict[str, Any]) -> Dict[str, Any]:
    """
    Returns a dictionary with canonically ordered keys, according to the DAG-CBOR
    specification.  Keys are sorted by their UTF-8 bytestring encodings — note the
    implementation orders by (length, byte value), i.e. length-first.

    Raises if *value* fails runtime type validation or key compliance.
    """
    validate(value, Dict[str, Any])
    _check_key_compliance(value)
    # sort keys canonically
    return _canonical_order_dict(value)
| StarcoderdataPython |
9624790 | import fu
from inspect import isclass
##################################################
# classes for navigating through types dynamically
class naviter(object):
    '''Generic navigator over an object's iterable property (default co_consts).

    Attribute access and item access are both forwarded to the wrapped
    object's `attribute` property; down() wraps every element of that
    property whose exact type matches `self.type`.
    '''
    attribute = 'co_consts'
    type = None
    object = None

    def __init__(self, obj, up=None, type=None):
        super(naviter, self).__init__()
        self.object = obj
        self.parent = up
        # only override the class-level type when one was supplied
        if type:
            self.type = type

    def __getattr__(self, attrib):
        # forward unknown attributes to the wrapped iterable property
        target = getattr(self.object, self.attribute)
        return getattr(target, attrib)

    def __getitem__(self, k):
        # item access is just attribute access in disguise
        return getattr(self, k)

    def __repr__(self):
        base = super(naviter, self).__repr__()
        return '%s -> %s' % (base, repr(self.object))

    def up(self):
        '''Return the parent navigator (None at the root).'''
        return self.parent

    def down(self):
        '''Wrap each child of matching type in a new navigator.'''
        children = []
        for child in getattr(self.object, self.attribute):
            if type(child) == self.type:
                children.append(naviter(child, up=self, type=self.type))
        return children
########
class navt(naviter):
    '''Base class: every navigable type must derive from this for navi() to find it.'''
    def __str__(self):
        # str() falls back to the repr form
        return repr(self)
def navi(object):
    '''Return a navigator wrapping *object*, picking the navt subclass whose
    `type` matches type(object); a throwaway subclass is built otherwise.'''
    target_type = type(object)
    # first registered navt subclass (from module globals) that handles this type wins
    for candidate in globals().values():
        if not isclass(candidate):
            continue
        if candidate is navt or not issubclass(candidate, navt):
            continue
        if candidate.type == target_type:
            return candidate(object)
    # no registered handler: dynamically create a class for this type
    class _navi(navt):
        type = target_type
    _navi.__name__ = 'navi__%s' % _navi.type.__name__
    return _navi(object)
### how to represent each attribute by its type
class navcode(navt):
    '''Navigator for code objects (the concrete code type is taken from fu).'''
    type = fu.code.getclass()
## merged in from aaron's asm.py
def findByName(navObj, nodeName):
    '''Search navObj and all of its descendants for a code object named nodeName.

    The module-level code object is named "?" and is matched by filename
    instead of name.  Raises KeyError when nothing matches.
    '''
    ## top level object
    if navObj.object.co_name == "?":
        if navObj.object.co_filename == nodeName:
            return navObj.object
    ## found somethin
    elif navObj.object.co_name == nodeName:
        return navObj.object
    # fix: the original reassigned `children = child.down()` on every miss,
    # discarding all unexplored siblings; keep one depth-first stack instead
    stack = navObj.down()
    while stack:
        child = stack.pop()
        if child.object.co_name == nodeName:
            return child.object
        stack.extend(child.down())
    raise KeyError('"%s" not found in %s'%( nodeName, repr(navObj) ))
# the function type; fix: type(lambda: True) is equivalent to the original
# type(eval("lambda:True")) without a pointless eval()
function = type(lambda: True)
class navfunc(navt):
    '''Navigator for plain function objects.'''
    type = function
instancemethod = type(AssertionError.__str__)  # NOTE(review): under Python 2 this was the instancemethod type; under Python 3 this expression yields a slot-wrapper type instead -- confirm which is intended
class navinstancemethod(navt):
    '''Navigator for instance-method objects (see `instancemethod` above).'''
    type = instancemethod
class navlist(navt):
    '''Navigator for plain list objects.'''
    type = list
if __name__ == '__main__':
    def dump_navi(nav):
        # print a navigator's identity, wrapped object, parent and children
        res = []
        res.append('self: %s'% repr(nav))
        res.append('object: %s'% repr(nav.object))
        res.append('up: %s'% repr(nav.up()))
        res.append('down: %s'% repr(nav.down()))
        print('\n'.join(res))
    ######################################
    # tests for closures and variable scope
    def example1():
        scope_1 = True
        def a():
            # closure reads scope_1 from the enclosing function
            print(scope_1)
            scope_2 = True
            print(scope_1)
        a()
        return True
    print(fu.function.repr(example1))
    # NOTE(review): func_code is Python 2 only; on Python 3 this is __code__ -- confirm target interpreter
    cobj = example1.func_code
    ##
    print(fu.code.repr(cobj))
    print(fu.code.repr(cobj.co_consts[1]))
    ##
    # navigate from the outer code object down to the nested one
    nav = navi(cobj)
    dump_navi(nav)
    nav = nav.down()[0]
    dump_navi(nav)
    def example2():
        scope_1 = True
        def a():
            scope_2a = True
            # NOTE(review): scope_1 is assigned below, so this read appears
            # intended to demonstrate the local-before-assignment error -- confirm
            print(scope_1)
            scope_1 = False
            def b():
                scope_2b = True
                print(scope_1)
        a()
        # NOTE(review): b is local to a(), so this call is not in scope here -- confirm intent
        b()
        return True
    ##
    print(fu.function.repr(example2))
    cobj = example2.func_code
    ##
    print(fu.code.repr(cobj))
    print(fu.code.repr(cobj.co_consts[1]))
    print(fu.code.repr(cobj.co_consts[2]))
    ##
    # walk down each nested code object in turn
    nav = navi(cobj)
    dump_navi(nav)
    nav = nav.down()[0]
    dump_navi(nav)
    nav = nav.up()
    nav = nav.down()[1]
    dump_navi(nav)
    def example3():
        scope_1 = True
        def a():
            scope_2 = True
            def aa():
                scope_3= True
                pass
            pass
    ##
    print(fu.function.repr(example3))
    cobj = example3.func_code
    ##
    print(fu.code.repr(cobj))
    print(fu.code.repr(cobj.co_consts[1]))
    print(fu.code.repr(cobj.co_consts[1].co_consts[1]))
    ##
    # doubly nested descent: module code -> a -> aa
    nav = navi(cobj)
    dump_navi(nav)
    nav = nav.down()[0]
    dump_navi(nav)
    nav = nav.down()[0]
    dump_navi(nav)
| StarcoderdataPython |
4890888 | <filename>shShellcode.py
#!/usr/bin/python
### /bin/sh shellcode of length 28 bytes
### /bin/sh shellcode of length 28 bytes
# bytes spell "/bin//sh" pushed on the stack, then int 0x80 syscalls
shellcode = "\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x89\xc1\x89\xc2\xb0\x0b\xcd\x80\x31\xc0\x40\xcd\x80"
# fix: parenthesised print runs under both Python 2 and Python 3
# (the bare `print x` statement form is Python 2 only)
print(len(shellcode))
print(shellcode)
| StarcoderdataPython |
9638803 | '''
View for managing social properties of view
'''
from django.template import Template, Context
from django.conf import settings
class SocialView(object):
    """
    Mixin that injects social-sharing metadata into a view's template context.

    Subclasses override the class attributes below; each value is rendered as
    a Django template against the view's own context, so e.g.::

        share_title = "{{title}}"

    picks up the view's ``title`` variable.  Inheritance works as usual.
    """
    share_image = ""
    share_site_name = ""
    # fix: share_image_alt was declared twice; the duplicate is removed
    share_image_alt = ""
    share_description = ""
    share_title = ""
    share_twitter = ""
    share_url = ""
    twitter_share_image = ""
    page_title = ""

    def extra_params(self, context):
        """Extend the cooperating superclass' params with social settings.

        Assumes a cooperating ``extra_params`` exists further up the MRO.
        """
        params = super(SocialView, self).extra_params(context)
        if hasattr(settings, "SITE_ROOT"):
            params["SITE_ROOT"] = settings.SITE_ROOT
        extra = {"social_settings": self.social_settings(params),
                 "page_title": self._page_title(params)}
        params.update(extra)
        return params

    def _page_title(self, context):
        """Render the class-level page_title template against *context*."""
        c_context = Context(context)
        return Template(self.__class__.page_title).render(c_context)

    def social_settings(self, context):
        """
        Run the class-level social settings through the template engine and
        return them as a plain dict.  twitter falls back to the generic
        share image when no twitter-specific image is set.
        """
        cls = self.__class__
        c_context = Context(context)
        process = lambda x: Template(x).render(c_context)
        if cls.twitter_share_image:
            twitter_img = cls.twitter_share_image
        else:
            twitter_img = cls.share_image
        # fix: the 'share_image_alt' key appeared twice; the duplicate is removed
        di = {'share_site_name': process(cls.share_site_name),
              'share_image': process(cls.share_image),
              'twitter_share_image': process(twitter_img),
              'share_image_alt': process(cls.share_image_alt),
              'share_description': process(cls.share_description),
              'share_title': process(cls.share_title),
              'url': process(cls.share_url),
              }
        return di
| StarcoderdataPython |
1747656 | <filename>python_experiments/generate_inputs/generate_bin_files.py<gh_stars>1-10
import os
if __name__ == '__main__':
    small_data_set_lst = ['ca-GrQc', 'ca-HepTh', 'p2p-Gnutella06', 'wiki-Vote']
    data_set_lst = [
        # 'email-Enron', 'email-EuAll',
        # 'web-NotreDame', 'web-Stanford', 'web-BerkStan', 'web-Google', 'cit-Patents',
        # 'soc-LiveJournal1',
        # 'wiki-Link',
        'digg-friends',
        'flickr-growth',
    ]
    # absolute path of the pre-built bin_converter utility
    generate_exec_path = '/homes/ywangby/workspace/yche/new-git-repos-yche/SimRank/LPMC-Profile/build/util/bin_converter'
    for data_set in small_data_set_lst + data_set_lst:
        # run "<converter> <dataset>" for every selected dataset
        cmd = ' '.join(map(str, [generate_exec_path, data_set]))
        # fix: the bare Python 2 `print cmd` statement is a syntax error on
        # Python 3; the parenthesised call works on both interpreters
        print(cmd)
        os.system(cmd)
| StarcoderdataPython |
5152494 | # splines in 1d
# We use the cherry blossom daa from sec 4.5 of "Statistical Rethinking"
# We use temperature as the target variable, to match a draft version of the book,
# https://github.com/Booleans/statistical-rethinking/blob/master/Statistical%20Rethinking%202nd%20Edition.pdf
# The published version uses day of year as target, which is less visually interesting.
# This an MLE version of the Bayesian numpyro code from
# https://fehiepsi.github.io/rethinking-numpyro/04-geocentric-models.html
import numpy as np
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
import math
import os
import warnings
import pandas as pd
from scipy.interpolate import BSpline
from scipy import stats
from patsy import bs, dmatrix
import sklearn
from sklearn.linear_model import LinearRegression, Ridge
#https://stackoverflow.com/questions/61807542/generate-a-b-spline-basis-in-scipy-like-bs-in-r
def make_splines_scipy(x, num_knots, degree=3):
    """Build a B-spline basis matrix for *x* with scipy.

    Interior knots are placed at quantiles of x and the boundary knot is
    repeated three times on each side, so for degree 3 the returned array
    has shape (len(x), num_knots + 2): per scipy's BSpline docs, degree k
    with n + k + 1 knots gives n coefficients, and here the padded knot
    vector has num_knots + 6 entries.
    """
    interior_knots = np.quantile(x, q=np.linspace(0, 1, num=num_knots))
    padded_knots = np.pad(interior_knots, (3, 3), mode="edge")
    basis = BSpline(padded_knots, np.identity(num_knots + 2), k=degree)(x)
    return basis
def make_splines_patsy(x, num_knots, degree=3):
    """Build a B-spline basis matrix for *x* with patsy's bs().

    Passing df=num_knots lets patsy place the knots at quantiles itself.
    Fix: the original also computed an explicit np.quantile knot list that
    was never used (a leftover from the knots= variant); it is removed.
    """
    return bs(x, df=num_knots, degree=degree)
def plot_basis(x, B, w=None):
    """Plot every (optionally weighted) basis column of B against x.

    Returns the matplotlib axes so callers can draw more on top.
    """
    weights = np.ones((B.shape[1])) if w is None else w
    fig, ax = plt.subplots()
    ax.set_xlim(np.min(x), np.max(x))
    for col in range(B.shape[1]):
        ax.plot(x, (weights[col] * B[:, col]), "k", alpha=0.5)
    return ax
def plot_basis_with_vertical_line(x, B, xstar):
    """Plot the basis, dot every positive column value at x == xstar, and
    draw a vertical line there.  Assumes xstar occurs exactly in x.
    """
    ax = plot_basis(x, B)
    row = np.where(x == xstar)[0][0]
    for col in range(B.shape[1]):
        value = B[row, col]
        if value > 0:
            ax.scatter(xstar, value, s=40)
    ax.axvline(x=xstar)
    return ax
def plot_pred(mu, x, y):
    """Scatter the raw (x, y) data and overlay the fitted mean `mu` as a thick black line."""
    plt.figure()
    plt.scatter(x, y, alpha=0.5)
    plt.plot(x, mu, 'k-', linewidth=4)
def main():
    """Fit a ridge-regularised B-spline to the cherry blossom temperatures
    and save basis / prediction figures (requires network access)."""
    url = 'https://raw.githubusercontent.com/fehiepsi/rethinking-numpyro/master/data/cherry_blossoms.csv'
    cherry_blossoms = pd.read_csv(url, sep=';')
    df = cherry_blossoms
    # fix: display() is an IPython/notebook builtin and raises NameError in a
    # plain script (no IPython import exists in this file); print() instead
    print(df.sample(n=5, random_state=1))
    print(df.describe())
    df2 = df[df.temp.notna()]  # complete cases only
    x = df2.year.values.astype(float)
    y = df2.temp.values.astype(float)
    xlabel = 'year'
    ylabel = 'temp'
    nknots = 15
    #B = make_splines_scipy(x, nknots)
    B = make_splines_patsy(x, nknots)
    print(B.shape)
    plot_basis_with_vertical_line(x, B, 1200)
    plt.tight_layout()
    plt.savefig(f'../figures/splines_basis_vertical_MLE_{nknots}_{ylabel}.pdf', dpi=300)
    # Ridge keeps the spline weights stable (plain LinearRegression was tried first)
    reg = Ridge().fit(B, y)
    w = reg.coef_
    a = reg.intercept_
    print(w)
    print(a)
    plot_basis(x, B, w)
    plt.tight_layout()
    plt.savefig(f'../figures/splines_basis_weighted_MLE_{nknots}_{ylabel}.pdf', dpi=300)
    mu = a + B @ w
    plot_pred(mu, x, y)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.tight_layout()
    plt.savefig(f'../figures/splines_point_pred_MLE_{nknots}_{ylabel}.pdf', dpi=300)
main()  # NOTE(review): runs at import time; consider guarding with `if __name__ == '__main__':`
| StarcoderdataPython |
6630624 | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_api_client.configuration import Configuration
class CertificateSigningRequestData(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # attribute name -> attribute type
    swagger_types = {
        "organization": "str",
        "organization_unit": "str",
        "city_locality": "str",
        "state_province": "str",
        "country": "str",
        "email_address": "str",
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        "organization": "organization",
        "organization_unit": "organizationUnit",
        "city_locality": "cityLocality",
        "state_province": "stateProvince",
        "country": "country",
        "email_address": "emailAddress",
    }

    def __init__(self, organization=None, organization_unit=None,
                 city_locality=None, state_province=None, country=None,
                 email_address=None, _configuration=None):  # noqa: E501
        """CertificateSigningRequestData - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # private storage slots behind the properties
        self._organization = None
        self._organization_unit = None
        self._city_locality = None
        self._state_province = None
        self._country = None
        self._email_address = None
        self.discriminator = None
        # assign through the property setters only when a value was supplied
        supplied = (
            ("organization", organization),
            ("organization_unit", organization_unit),
            ("city_locality", city_locality),
            ("state_province", state_province),
            ("country", country),
            ("email_address", email_address),
        )
        for name, value in supplied:
            if value is not None:
                setattr(self, name, value)

    def _accessor(name):
        """Build the property pair (getter/setter over the '_name' slot)."""
        def _get(self):
            return getattr(self, "_" + name)
        def _set(self, value):
            setattr(self, "_" + name, value)
        _get.__doc__ = "Gets the %s of this CertificateSigningRequestData." % name
        _set.__doc__ = "Sets the %s of this CertificateSigningRequestData." % name
        return property(_get, _set)

    organization = _accessor("organization")
    organization_unit = _accessor("organization_unit")
    city_locality = _accessor("city_locality")
    state_province = _accessor("state_province")
    country = _accessor("country")
    email_address = _accessor("email_address")
    del _accessor

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(CertificateSigningRequestData, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CertificateSigningRequestData):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, CertificateSigningRequestData):
            return True
        return self.to_dict() != other.to_dict()
| StarcoderdataPython |
5112760 | <filename>notedrive/others/magnet/utils.py
class FailedToFetchException(Exception):
    """Unable to fetch torrent exception"""
| StarcoderdataPython |
6491708 | from flask_restful import Resource,reqparse
from flask import jsonify, request
import pandas as pd
import os
import numpy as np
import datetime
# parse parameters
# NOTE(review): this parser is defined but never used below -- StockAPI.get
# reads request.args directly; confirm whether the reqparse setup is still needed
parser = reqparse.RequestParser()
parser.add_argument('stockid', type=str)
parser.add_argument('start', type=str)
parser.add_argument('end', type=str)
# NOTE(review): original comment said "receive front-end audio" -- looks like a
# leftover from another project; this resource serves stock quote data
class StockAPI(Resource):
    """GET endpoint returning OHLCV rows for the requested date range."""
    # example: http://127.0.0.1:5000/api/stock/?stockid=<value>&start=<value>&end=<value>
    # query parameters are passed as {"key": "value"}
    def get(self):
        """Parse stockid/start/end from the query string and return matching rows.

        Dates must be in YYYY/MM/DD form; any failure (bad date, missing data
        file, ...) is collapsed into a generic JSON error payload.
        """
        try:
            stockid = request.args.get("stockid","")  # NOTE(review): ignored by searchFromDB -- confirm
            start = request.args.get("start","")
            start = datetime.datetime.strptime(start,"%Y/%m/%d")
            end = request.args.get("end","")
            end = datetime.datetime.strptime(end,"%Y/%m/%d")
            data = searchFromDB(stockid,start,end)
            return jsonify(data)
        except Exception:
            # broad catch so the client always receives JSON rather than a 500
            return jsonify({"error":"error"})
def searchFromDB(stockid, start, end):
    """Load the gold-futures sheet and return [Date, Open, Close, Low, High,
    Volume] rows with start < Date < end (exclusive bounds) as a list of lists.

    *stockid* is currently ignored -- the data source is the fixed xlsx file.
    """
    dir_name = os.path.dirname(os.path.dirname(__file__))
    filePath = os.path.join(dir_name, "Data", "goldFutures.xlsx")
    data = pd.read_excel(filePath)
    # fix: .copy() makes the selection an independent frame, so the astype
    # assignment below no longer triggers pandas' SettingWithCopyWarning
    selected = data.loc[(data["Date"] > start) & (data["Date"] < end),
                        ["Date", "Open", "Close", "Low", "High", "Volume"]].dropna().copy()
    selected["Date"] = selected["Date"].astype(str)  # dates serialised as strings for JSON
    return selected.values.tolist()
1764771 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class QueryPriceForRenewEcsRequest(RpcRequest):
    """RPC request for the EMR 'QueryPriceForRenewEcs' API (version 2016-04-08).

    Each getter/setter pair reads or writes one query parameter of the request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Emr', '2016-04-08', 'QueryPriceForRenewEcs')

    def get_ResourceOwnerId(self):
        """Return the ResourceOwnerId query parameter (None if unset)."""
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        """Set the ResourceOwnerId query parameter."""
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ClusterId(self):
        """Return the ClusterId query parameter (None if unset)."""
        return self.get_query_params().get('ClusterId')

    def set_ClusterId(self, ClusterId):
        """Set the ClusterId query parameter."""
        self.add_query_param('ClusterId', ClusterId)

    def get_EcsId(self):
        """Return the EcsId query parameter (None if unset)."""
        return self.get_query_params().get('EcsId')

    def set_EcsId(self, EcsId):
        """Set the EcsId query parameter."""
        self.add_query_param('EcsId', EcsId)

    def get_EcsPeriod(self):
        """Return the EcsPeriod query parameter (None if unset)."""
        return self.get_query_params().get('EcsPeriod')

    def set_EcsPeriod(self, EcsPeriod):
        """Set the EcsPeriod query parameter."""
        self.add_query_param('EcsPeriod', EcsPeriod)

    def get_EmrPeriod(self):
        """Return the EmrPeriod query parameter (None if unset)."""
        return self.get_query_params().get('EmrPeriod')

    def set_EmrPeriod(self, EmrPeriod):
        """Set the EmrPeriod query parameter."""
        self.add_query_param('EmrPeriod', EmrPeriod)
5079666 | ##Data Structure
# List: mutable -- its contents can be changed in place
my_list = [1, 2, 3]          # list literal with specific values
my_list = [1, 2, [3, 4]]     # lists can also nest
print('Before appending anything:',my_list)
my_list.append(5)            # grow the list in place
print('After appending \'5\' to the list:',my_list)
# indexing (the values are only read here, not stored anywhere)
my_list[0]   # first element: 1
my_list[3]   # fourth element: 5
# Dictionary: key/value mapping, similar in shape to a JSON object
d = {'k1': 'val1', 'k2': 'val2'}
d['k1']      # keys are used to retrieve values: 'val1'
# Tuple: immutable -- its contents cannot be changed after creation
my_tuple = (1, 2, 3)
my_tuple[0]
# my_tuple[0] = 'someValue'  # would raise an error: tuples reject item assignment
# Set: duplicates are removed and items cannot be replaced by index
my_set = {1, 2, 2, 2, 2, 2, 3}
print(my_set)#1,2,3
| StarcoderdataPython |
14793 | <filename>example.py<gh_stars>0
#!/usr/bin/env python3
# pylint: disable=unused-wildcard-import
from py_derive_cmd import *
import cmd
class MyCmd(cmd.Cmd):
    # empty subclass: commands are attached dynamically via py_derive_cmd below
    pass
# shared Settings object: commands are registered onto MyCmd; warnings suppressed
s = Settings(MyCmd, print_warnings=False)
@make_command(s, 'Test for the decorator', aliases=['d'])
def test_decorator(shell: MyCmd, req_arg: str, opt_arg: str = None):
    # echoes a marker plus both arguments; opt_arg is None when omitted
    print('Decorator works')
    print(req_arg)
    print(opt_arg)
def test_register(shell: MyCmd, raw_arg: str):
    # registered below via CommandInfo with raw_arg=True: raw_arg receives
    # the unparsed remainder of the command line
    print('Register works')
    print(raw_arg)
@make_command(s, 'Exit the shell by pressing Ctrl-D')
def EOF(shell: MyCmd) -> bool:
    # returning True from a cmd.Cmd command terminates cmdloop()
    return True
# register test_register under two aliases with raw (unparsed) argument handling
CommandInfo(s, test_register, ['registered', 'r'], 'Test for register', raw_arg=True).register()
shell = MyCmd()
shell.cmdloop() | StarcoderdataPython |
1649058 | #!/usr/bin/env python3
import logging
from logging import config
import os
from os import path
import sys
import ntpath
import smtplib
import email
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# filesystem locations for logs and scratch data
LOG_PATH = '/data/script_logs/'
DATA_PATH = '/data/script_data/'
# outgoing mail configuration (internal relay container)
SENDER_EMAIL = '<EMAIL>'
MAIL_SERVER_HOSTNAME = 'mail'
# per-stage counter/checkpoint files used by the processing scripts
ARTIST_RESULT_FILE = "/data/script_counters/artist_result.txt"
ARTIST_ID_FILE = "/data/script_counters/artist_id.txt"
MISSING_SONG_ATTRIBUTES_FILE = "/data/script_counters/missing_song_attributes.txt"
MISSING_SONG_FOLLOWERS_FILE = "/data/script_counters/missing_song_followers.txt"
TWITTER_USER_QUEUE_FILE = "/data/script_counters/twitter_user_queue.txt"
SPOTIFY_MISSING_TWITTER_FILE = "/data/script_data/spotify_missing_twitter_file.csv"
SPOTIFY_MISSING_TWITTER_FILE_2 = "/data/script_data/spotify_missing_twitter_file_2.csv"
SECOND_TIER_USER_FILE = "/data/script_counters/second_tier_user.txt"
SECOND_TIER_FOLLOWERS_FILE = "/data/script_counters/second_tier_followers.txt"
class Config(object):
    """Application settings resolved from environment variables at import time."""
    TWITTER_BEARER = os.environ.get('TWITTER_BEARER')
    SPOTIFY_ID = os.environ.get('SPOTIFY_ID')
    SPOTIFY_SECRET = os.environ.get('SPOTIFY_SECRET')
    # postgres DSN; host "db" is presumably a docker-compose service name -- confirm
    SQLALCHEMY_DATABASE_URI = 'postgresql://{}:{}@db/{}'\
        .format(os.environ.get('POSTGRES_USER'),
                os.environ.get('PGPASSWORD'),
                os.environ.get('POSTGRES_DB'))
    RECEIVER_EMAIL = os.environ.get('RECEIVER_EMAIL')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    ALERT_LEVEL = os.environ.get('ALERT_LEVEL')
class LoggerWrapper:
    """Thin facade over the app's named loggers (twitter/spotify/db/mgr/email).

    Constructing an instance loads logging.conf from this file's directory.
    """
    twitter_logger = None
    spotify_logger = None
    db_logger = None
    mgr_logger = None
    email_logger = None

    def __init__(self):
        # fix: removed a stray os.getcwd() call whose result was discarded
        config_file_path = path.join(path.dirname(path.abspath(__file__)), "logging.conf")
        config.fileConfig(config_file_path)
        self.mgr_logger = logging.getLogger("mgr")
        self.twitter_logger = logging.getLogger("twitter")
        self.spotify_logger = logging.getLogger("spotify")
        self.db_logger = logging.getLogger("db")
        self.email_logger = logging.getLogger("email")

    # --- twitter logger delegations ---
    def twitter_info(self, message):
        self.twitter_logger.info(message)

    def twitter_debug(self, message):
        self.twitter_logger.debug(message)

    def twitter_warn(self, message):
        self.twitter_logger.warning(message)

    # --- spotify logger delegations ---
    def spotify_info(self, message):
        self.spotify_logger.info(message)

    def spotify_debug(self, message):
        self.spotify_logger.debug(message)

    def spotify_warn(self, message):
        self.spotify_logger.warning(message)

    # --- db logger delegations ---
    def db_info(self, message):
        self.db_logger.info(message)

    def db_debug(self, message):
        self.db_logger.debug(message)

    def db_warn(self, message):
        self.db_logger.warning(message)

    # --- manager logger delegations ---
    def manager_info(self, message):
        self.mgr_logger.info(message)

    def manager_debug(self, message):
        self.mgr_logger.debug(message)

    def manager_warn(self, message):
        self.mgr_logger.warning(message)

    # --- email logger delegation ---
    def email_warn(self, message):
        self.email_logger.warning(message)
class EmailWrapper:
    """Sends alert mail through the internal relay (port 25, no auth)."""

    @staticmethod
    def sendEmail(body, subject="Twitifynd Alert", attachment=""):
        """Build a MIME message, optionally with one file attachment, and send it."""
        smtp_server = MAIL_SERVER_HOSTNAME
        sender_email = SENDER_EMAIL
        receiver_email = Config.RECEIVER_EMAIL
        message = MIMEMultipart()
        message["From"] = sender_email
        message["To"] = receiver_email
        message["Subject"] = subject
        message.attach(MIMEText(body, "plain"))
        if attachment != "":
            # attach the file as base64 octet-stream, named by its basename
            _, attachment_name = ntpath.split(attachment)
            with open(attachment, "rb") as attachment_data:
                part = MIMEBase("application", "octet-stream")
                part.set_payload(attachment_data.read())
                encoders.encode_base64(part)
                part.add_header("Content-Disposition", f"attachment; filename= {attachment_name}")
                message.attach(part)
        payload = message.as_string()
        with smtplib.SMTP(smtp_server, 25) as server:
            server.sendmail(sender_email, receiver_email, payload)
class FileWrapper():
    """Small static helpers for reading/writing the counter files."""

    @staticmethod
    def writeValToFile(filepath, contents):
        """Overwrite *filepath* with str(contents)."""
        with open(filepath, "w") as f:
            f.write(str(contents))

    @staticmethod
    def readFromFile(filepath):
        """Return the full contents of *filepath*.

        Fix: the original never closed the file handle (resource leak).
        """
        with open(filepath, "r") as f:
            return f.read()

    @staticmethod
    def appendToFile(filepath, contents):
        """Append str(contents) plus a newline to *filepath*."""
        with open(filepath, "a") as f:
            f.write(str(contents) + "\n")

    @staticmethod
    def getMostRecentDir(filepath):
        """Return the most recently modified 'log_*' subdirectory under *filepath*.

        Fix: the original never updated max_mtime after finding a newer
        directory, so it returned the *last* matching directory walked
        rather than the newest one.  Returns "" when nothing matches.
        """
        max_mtime = 0
        max_subdr = ""
        for dirname, subdirs, files in os.walk(filepath):
            for subdir in subdirs:
                if "log_" in subdir:
                    full_path = os.path.join(dirname, subdir)
                    mtime = os.stat(full_path).st_mtime
                    if mtime > max_mtime:
                        max_mtime = mtime
                        max_subdr = full_path
        return max_subdr
if __name__ == "__main__":
    logger = LoggerWrapper()
    # logger.manager_info("Default run of utils")
    # optional CLI arguments: current counter values reported in the startup email
    artist_result_line = sys.argv[1] if len(sys.argv) > 1 else "Missing"
    artist_id = sys.argv[2] if len(sys.argv) > 2 else "Missing"
    missing_song_attributes = sys.argv[3] if len(sys.argv) > 3 else "Missing"
    twitter_user_queue = sys.argv[4] if len(sys.argv) > 4 else "Missing"
    body = "Twitifynd starting up.\nArtist Result Line: {:}\nArtist ID: {:}\nMissing Song Attributes: {:}\nTwitter User Queue: {:}".format(artist_result_line, artist_id, missing_song_attributes, twitter_user_queue)
    # routed through the "email" logger -- presumably its WARNING handler sends mail; confirm logging.conf
    logger.email_warn(body)
    # FileWrapper.writeValToFile(ARTIST_RESULT_FILE, "-1")
    # filename = path.join(path.dirname(path.abspath(__file__)), "logging.conf")
    # EmailWrapper.sendEmail("Test Email", attachment=filename)
150175 | <filename>openleadr/enums.py
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Contributors to OpenLEADR
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of useful enumerations that you can use to construct or
interpret OpenADR messages. Can also be useful during testing.
"""
from openleadr.objects import Measurement, PowerAttributes
class Enum(type):
    """Metaclass that turns a plain class into a lightweight enumeration.

    Classes created with ``metaclass=Enum`` gain item access
    (``CLS["NAME"]``), a sorted ``members`` list of constant names, and a
    matching ``values`` list.
    """

    def __getitem__(self, item):
        # CLS["NAME"] is sugar for attribute lookup on the class itself.
        return getattr(self, item)

    @property
    def members(self):
        # Names defined on the enumeration class but not inherited from the
        # metaclass machinery, excluding private names, alphabetically sorted.
        own_names = set(dir(self)) - set(dir(Enum))
        return sorted(name for name in own_names if not name.startswith("_"))

    @property
    def values(self):
        # Values in the same order as ``members``.
        return [getattr(self, name) for name in self.members]
class EVENT_STATUS(metaclass=Enum):
    """Lifecycle states of an OpenADR event."""
    NONE = "none"
    FAR = "far"
    NEAR = "near"
    ACTIVE = "active"
    COMPLETED = "completed"
    CANCELLED = "cancelled"
class SIGNAL_TYPE(metaclass=Enum):
    """Valid values for an OpenADR signal's signal type."""
    DELTA = "delta"
    LEVEL = "level"
    MULTIPLIER = "multiplier"
    PRICE = "price"
    PRICE_MULTIPLIER = "priceMultiplier"
    PRICE_RELATIVE = "priceRelative"
    SETPOINT = "setpoint"
    X_LOAD_CONTROL_CAPACITY = "x-loadControlCapacity"
    X_LOAD_CONTROL_LEVEL_OFFSET = "x-loadControlLevelOffset"
    # NOTE(review): "Porcent" looks like a typo for "Percent", but this string
    # goes on the wire — confirm against the OpenADR 2.0b enumeration before
    # changing it.
    X_LOAD_CONTROL_PERCENT_OFFSET = "x-loadControlPorcentOffset"
    X_LOAD_CONTROL_SETPOINT = "x-loadControlSetpoint"
class SIGNAL_NAME(metaclass=Enum):
    """Well-known OpenADR signal names (both casings of "simple" are provided)."""
    SIMPLE = "SIMPLE"
    simple = "simple"
    ELECTRICITY_PRICE = "ELECTRICITY_PRICE"
    ENERGY_PRICE = "ENERGY_PRICE"
    DEMAND_CHARGE = "DEMAND_CHARGE"
    BID_PRICE = "BID_PRICE"
    BID_LOAD = "BID_LOAD"
    BID_ENERGY = "BID_ENERGY"
    CHARGE_STATE = "CHARGE_STATE"
    LOAD_DISPATCH = "LOAD_DISPATCH"
    LOAD_CONTROL = "LOAD_CONTROL"
class SI_SCALE_CODE(metaclass=Enum):
    """SI prefix codes used to scale measurement values."""
    p = "p"
    n = "n"
    micro = "micro"
    m = "m"
    c = "c"
    d = "d"
    k = "k"
    M = "M"
    G = "G"
    T = "T"
    none = "none"
class OPT(metaclass=Enum):
    """Opt-in / opt-out decision values."""
    OPT_IN = "optIn"
    OPT_OUT = "optOut"
class OPT_REASON(metaclass=Enum):
    """Reasons that may accompany an opt-in / opt-out decision."""
    ECONOMIC = "economic"
    EMERGENCY = "emergency"
    MUST_RUN = "mustRun"
    NOT_PARTICIPATING = "notParticipating"
    OUTAGE_RUN_STATUS = "outageRunStatus"
    OVERRIDE_STATUS = "overrideStatus"
    PARTICIPATING = "participating"
    X_SCHEDULE = "x-schedule"
class READING_TYPE(metaclass=Enum):
    """Ways in which a report reading may have been obtained."""
    DIRECT_READ = "Direct Read"
    NET = "Net"
    ALLOCATED = "Allocated"
    ESTIMATED = "Estimated"
    SUMMED = "Summed"
    DERIVED = "Derived"
    MEAN = "Mean"
    PEAK = "Peak"
    HYBRID = "Hybrid"
    CONTRACT = "Contract"
    PROJECTED = "Projected"
    X_RMS = "x-RMS"
    X_NOT_APPLICABLE = "x-notApplicable"
class REPORT_TYPE(metaclass=Enum):
    """Types of quantities an OpenADR report can carry."""
    READING = "reading"
    USAGE = "usage"
    DEMAND = "demand"
    SET_POINT = "setPoint"
    DELTA_USAGE = "deltaUsage"
    DELTA_SET_POINT = "deltaSetPoint"
    DELTA_DEMAND = "deltaDemand"
    BASELINE = "baseline"
    DEVIATION = "deviation"
    AVG_USAGE = "avgUsage"
    AVG_DEMAND = "avgDemand"
    OPERATING_STATE = "operatingState"
    UP_REGULATION_CAPACITY_AVAILABLE = "upRegulationCapacityAvailable"
    DOWN_REGULATION_CAPACITY_AVAILABLE = "downRegulationCapacityAvailable"
    REGULATION_SETPOINT = "regulationSetpoint"
    STORED_ENERGY = "storedEnergy"
    TARGET_ENERGY_STORAGE = "targetEnergyStorage"
    AVAILABLE_ENERGY_STORAGE = "availableEnergyStorage"
    PRICE = "price"
    LEVEL = "level"
    POWER_FACTOR = "powerFactor"
    PERCENT_USAGE = "percentUsage"
    PERCENT_DEMAND = "percentDemand"
    X_RESOURCE_STATUS = "x-resourceStatus"
class REPORT_NAME(metaclass=Enum):
    """Standard OpenADR report names."""
    METADATA_HISTORY_USAGE = "METADATA_HISTORY_USAGE"
    HISTORY_USAGE = "HISTORY_USAGE"
    METADATA_HISTORY_GREENBUTTON = "METADATA_HISTORY_GREENBUTTON"
    HISTORY_GREENBUTTON = "HISTORY_GREENBUTTON"
    METADATA_TELEMETRY_USAGE = "METADATA_TELEMETRY_USAGE"
    TELEMETRY_USAGE = "TELEMETRY_USAGE"
    METADATA_TELEMETRY_STATUS = "METADATA_TELEMETRY_STATUS"
    TELEMETRY_STATUS = "TELEMETRY_STATUS"
class STATUS_CODES(metaclass=Enum):
    """OpenADR application-level response status codes.

    Note: values are integers, unlike the string values of the other
    enumerations in this module.
    """
    OUT_OF_SEQUENCE = 450
    NOT_ALLOWED = 451
    INVALID_ID = 452
    NOT_RECOGNIZED = 453
    INVALID_DATA = 454
    COMPLIANCE_ERROR = 459
    SIGNAL_NOT_SUPPORTED = 460
    REPORT_NOT_SUPPORTED = 461
    TARGET_MISMATCH = 462
    NOT_REGISTERED_OR_AUTHORIZED = 463
    DEPLOYMENT_ERROR_OR_OTHER_ERROR = 469
class SECURITY_LEVEL:
    """OpenADR security profile levels.

    NOTE(review): unlike the other enumerations in this module, this class does
    not use ``metaclass=Enum`` and therefore lacks ``members`` / ``values`` /
    item access — confirm whether that is intentional.
    """
    STANDARD = 'STANDARD'
    HIGH = 'HIGH'
# ISO 4217 currency codes accepted as monetary units for the currency-based
# measurements below.
# NOTE(review): the tuple contains duplicates ("MAD" appears twice, "XPF" three
# times) — harmless for membership tests, but worth deduplicating upstream.
_CURRENCIES = ("AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", "BAM",
               "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL",
               "BSD", "BTN", "BWP", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF", "CHW",
               "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
               "DJF", "DKK", "DOP", "DZD", "EEK", "EGP", "ERN", "ETB", "EUR", "FJD",
               "FKP", "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GWP", "GYD",
               "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "INR", "IQD", "IRR",
               "ISK", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRW",
               "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LTL", "LVL",
               "LYD", "MAD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO",
               "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", "NAD", "NGN", "NIO",
               "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR", "PLN",
               "PYG", "QAR", "RON", "RSD", "RUB", "RWF", "SAR", "SBD", "SCR", "SDG",
               "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "STD", "SVC", "SYP", "SZL",
               "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", "UAH",
               "UGX", "USD", "USN", "USS", "UYI", "UYU", "UZS", "VEF", "VND", "VUV",
               "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR",
               "XFU", "XOF", "XPD", "XPF", "XPF", "XPF", "XPT", "XTS", "XXX", "YER",
               "ZAR", "ZMK", "ZWL")
# Maps each measurement name to the tuple of units that are valid for it.
# Monetary measurements accept any ISO 4217 code from _CURRENCIES; the first
# entry of each tuple is used as the default unit by MEASUREMENTS below.
_ACCEPTABLE_UNITS = {'currency': _CURRENCIES,
                     'currencyPerKW': _CURRENCIES,
                     'currencyPerKWh': _CURRENCIES,
                     'currencyPerThm': _CURRENCIES,
                     'current': ('A',),
                     'energyApparent': ('VAh',),
                     'energyReactive': ('VARh',),
                     'energyReal': ('Wh',),
                     'frequency': ('Hz',),
                     'powerApparent': ('VA',),
                     'powerReactive': ('VAR',),
                     'powerReal': ('W',),
                     'pulseCount': ('count',),
                     'temperature': ('celsius', 'fahrenheit'),
                     'Therm': ('thm',),
                     'voltage': ('V',)}
# Maps each measurement name to the human-readable description string used when
# constructing the Measurement objects below.
_MEASUREMENT_DESCRIPTIONS = {'currency': 'currency',
                             'currencyPerKW': 'currencyPerKW',
                             'currencyPerKWh': 'currencyPerKWh',
                             # NOTE(review): the other currency* keys use their
                             # own name as the description — confirm 'currency'
                             # here is intended rather than 'currencyPerThm'.
                             'currencyPerThm': 'currency',
                             'current': 'Current',
                             'energyApparent': 'ApparentEnergy',
                             'energyReactive': 'ReactiveEnergy',
                             'energyReal': 'RealEnergy',
                             'frequency': 'Frequency',
                             'powerApparent': 'ApparentPower',
                             'powerReactive': 'ReactivePower',
                             'powerReal': 'RealPower',
                             'pulseCount': 'pulse count',
                             'temperature': 'temperature',
                             'Therm': 'Therm',
                             'voltage': 'Voltage'}
# Maps each measurement name to the XML namespace prefix ("oadr" or "power")
# used when the measurement is serialized.
_MEASUREMENT_NAMESPACES = {'currency': 'oadr',
                           # BUG FIX: this key was misspelled 'currencyPerWK'.
                           # Every other table in this module (_ACCEPTABLE_UNITS,
                           # _MEASUREMENT_DESCRIPTIONS, MEASUREMENTS) spells it
                           # 'currencyPerKW', so namespace lookups for that
                           # measurement raised KeyError.
                           'currencyPerKW': 'oadr',
                           'currencyPerKWh': 'oadr',
                           'currencyPerThm': 'oadr',
                           'current': 'oadr',
                           'energyApparent': 'power',
                           'energyReactive': 'power',
                           'energyReal': 'power',
                           'frequency': 'oadr',
                           'powerApparent': 'power',
                           'powerReactive': 'power',
                           'powerReal': 'power',
                           'pulseCount': 'oadr',
                           'temperature': 'oadr',
                           'Therm': 'oadr',
                           'voltage': 'power',
                           'customUnit': 'oadr'}
class MEASUREMENTS(metaclass=Enum):
    """Ready-made ``Measurement`` instances for the common OpenADR item types.

    Several names are aliases for the same underlying measurement (for example
    ENERGY_REAL / REAL_ENERGY / ACTIVE_ENERGY).  The power measurements embed
    hard-coded ``PowerAttributes`` of 50 Hz / 230 V AC — callers needing other
    mains characteristics must build their own ``Measurement``.
    """
    VOLTAGE = Measurement(name='voltage',
                          description=_MEASUREMENT_DESCRIPTIONS['voltage'],
                          unit=_ACCEPTABLE_UNITS['voltage'][0],
                          acceptable_units=_ACCEPTABLE_UNITS['voltage'],
                          scale='none')
    CURRENT = Measurement(name='current',
                          description=_MEASUREMENT_DESCRIPTIONS['current'],
                          unit=_ACCEPTABLE_UNITS['current'][0],
                          acceptable_units=_ACCEPTABLE_UNITS['current'],
                          scale='none')
    # ENERGY_REAL, REAL_ENERGY and ACTIVE_ENERGY are aliases.
    ENERGY_REAL = Measurement(name='energyReal',
                              description=_MEASUREMENT_DESCRIPTIONS['energyReal'],
                              unit=_ACCEPTABLE_UNITS['energyReal'][0],
                              acceptable_units=_ACCEPTABLE_UNITS['energyReal'],
                              scale='none')
    REAL_ENERGY = Measurement(name='energyReal',
                              description=_MEASUREMENT_DESCRIPTIONS['energyReal'],
                              unit=_ACCEPTABLE_UNITS['energyReal'][0],
                              acceptable_units=_ACCEPTABLE_UNITS['energyReal'],
                              scale='none')
    ACTIVE_ENERGY = Measurement(name='energyReal',
                                description=_MEASUREMENT_DESCRIPTIONS['energyReal'],
                                unit=_ACCEPTABLE_UNITS['energyReal'][0],
                                acceptable_units=_ACCEPTABLE_UNITS['energyReal'],
                                scale='none')
    # ENERGY_REACTIVE and REACTIVE_ENERGY are aliases.
    ENERGY_REACTIVE = Measurement(name='energyReactive',
                                  description=_MEASUREMENT_DESCRIPTIONS['energyReactive'],
                                  unit=_ACCEPTABLE_UNITS['energyReactive'][0],
                                  acceptable_units=_ACCEPTABLE_UNITS['energyReactive'],
                                  scale='none')
    REACTIVE_ENERGY = Measurement(name='energyReactive',
                                  description=_MEASUREMENT_DESCRIPTIONS['energyReactive'],
                                  unit=_ACCEPTABLE_UNITS['energyReactive'][0],
                                  acceptable_units=_ACCEPTABLE_UNITS['energyReactive'],
                                  scale='none')
    # ENERGY_APPARENT and APPARENT_ENERGY are aliases.
    ENERGY_APPARENT = Measurement(name='energyApparent',
                                  description=_MEASUREMENT_DESCRIPTIONS['energyApparent'],
                                  unit=_ACCEPTABLE_UNITS['energyApparent'][0],
                                  acceptable_units=_ACCEPTABLE_UNITS['energyApparent'],
                                  scale='none')
    APPARENT_ENERGY = Measurement(name='energyApparent',
                                  description=_MEASUREMENT_DESCRIPTIONS['energyApparent'],
                                  unit=_ACCEPTABLE_UNITS['energyApparent'][0],
                                  acceptable_units=_ACCEPTABLE_UNITS['energyApparent'],
                                  scale='none')
    # ACTIVE_POWER, REAL_POWER and POWER_REAL are aliases (50 Hz / 230 V AC).
    ACTIVE_POWER = Measurement(name='powerReal',
                               description=_MEASUREMENT_DESCRIPTIONS['powerReal'],
                               unit=_ACCEPTABLE_UNITS['powerReal'][0],
                               acceptable_units=_ACCEPTABLE_UNITS['powerReal'],
                               scale='none',
                               power_attributes=PowerAttributes(hertz=50,
                                                                voltage=230,
                                                                ac=True))
    REAL_POWER = Measurement(name='powerReal',
                             description=_MEASUREMENT_DESCRIPTIONS['powerReal'],
                             unit=_ACCEPTABLE_UNITS['powerReal'][0],
                             acceptable_units=_ACCEPTABLE_UNITS['powerReal'],
                             scale='none',
                             power_attributes=PowerAttributes(hertz=50,
                                                              voltage=230,
                                                              ac=True))
    POWER_REAL = Measurement(name='powerReal',
                             description=_MEASUREMENT_DESCRIPTIONS['powerReal'],
                             unit=_ACCEPTABLE_UNITS['powerReal'][0],
                             acceptable_units=_ACCEPTABLE_UNITS['powerReal'],
                             scale='none',
                             power_attributes=PowerAttributes(hertz=50,
                                                              voltage=230,
                                                              ac=True))
    # REACTIVE_POWER and POWER_REACTIVE are aliases.
    REACTIVE_POWER = Measurement(name='powerReactive',
                                 description=_MEASUREMENT_DESCRIPTIONS['powerReactive'],
                                 unit=_ACCEPTABLE_UNITS['powerReactive'][0],
                                 acceptable_units=_ACCEPTABLE_UNITS['powerReactive'],
                                 scale='none',
                                 power_attributes=PowerAttributes(hertz=50,
                                                                  voltage=230,
                                                                  ac=True))
    POWER_REACTIVE = Measurement(name='powerReactive',
                                 description=_MEASUREMENT_DESCRIPTIONS['powerReactive'],
                                 unit=_ACCEPTABLE_UNITS['powerReactive'][0],
                                 acceptable_units=_ACCEPTABLE_UNITS['powerReactive'],
                                 scale='none',
                                 power_attributes=PowerAttributes(hertz=50,
                                                                  voltage=230,
                                                                  ac=True))
    # APPARENT_POWER and POWER_APPARENT are aliases.
    APPARENT_POWER = Measurement(name='powerApparent',
                                 description=_MEASUREMENT_DESCRIPTIONS['powerApparent'],
                                 unit=_ACCEPTABLE_UNITS['powerApparent'][0],
                                 acceptable_units=_ACCEPTABLE_UNITS['powerApparent'],
                                 scale='none',
                                 power_attributes=PowerAttributes(hertz=50,
                                                                  voltage=230,
                                                                  ac=True))
    POWER_APPARENT = Measurement(name='powerApparent',
                                 description=_MEASUREMENT_DESCRIPTIONS['powerApparent'],
                                 unit=_ACCEPTABLE_UNITS['powerApparent'][0],
                                 acceptable_units=_ACCEPTABLE_UNITS['powerApparent'],
                                 scale='none',
                                 power_attributes=PowerAttributes(hertz=50,
                                                                  voltage=230,
                                                                  ac=True))
    FREQUENCY = Measurement(name='frequency',
                            description=_MEASUREMENT_DESCRIPTIONS['frequency'],
                            unit=_ACCEPTABLE_UNITS['frequency'][0],
                            acceptable_units=_ACCEPTABLE_UNITS['frequency'],
                            scale='none')
    # PULSE_COUNT carries a pulse_factor instead of an SI scale.
    PULSE_COUNT = Measurement(name='pulseCount',
                              description=_MEASUREMENT_DESCRIPTIONS['pulseCount'],
                              unit=_ACCEPTABLE_UNITS['pulseCount'][0],
                              acceptable_units=_ACCEPTABLE_UNITS['pulseCount'],
                              pulse_factor=1000)
    TEMPERATURE = Measurement(name='temperature',
                              description=_MEASUREMENT_DESCRIPTIONS['temperature'],
                              unit=_ACCEPTABLE_UNITS['temperature'][0],
                              acceptable_units=_ACCEPTABLE_UNITS['temperature'],
                              scale='none')
    THERM = Measurement(name='Therm',
                        description=_MEASUREMENT_DESCRIPTIONS['Therm'],
                        unit=_ACCEPTABLE_UNITS['Therm'][0],
                        acceptable_units=_ACCEPTABLE_UNITS['Therm'],
                        scale='none')
    # Monetary measurements default to the first ISO 4217 code in _CURRENCIES.
    CURRENCY = Measurement(name='currency',
                           description=_MEASUREMENT_DESCRIPTIONS['currency'],
                           unit=_CURRENCIES[0],
                           acceptable_units=_CURRENCIES,
                           scale='none')
    CURRENCY_PER_KW = Measurement(name='currencyPerKW',
                                  description=_MEASUREMENT_DESCRIPTIONS['currencyPerKW'],
                                  unit=_CURRENCIES[0],
                                  acceptable_units=_CURRENCIES,
                                  scale='none')
    CURRENCY_PER_KWH = Measurement(name='currencyPerKWh',
                                   description=_MEASUREMENT_DESCRIPTIONS['currencyPerKWh'],
                                   unit=_CURRENCIES[0],
                                   acceptable_units=_CURRENCIES,
                                   scale='none')
    CURRENCY_PER_THM = Measurement(name='currencyPerThm',
                                   description=_MEASUREMENT_DESCRIPTIONS['currencyPerThm'],
                                   unit=_CURRENCIES[0],
                                   acceptable_units=_CURRENCIES,
                                   scale='none')
| StarcoderdataPython |
5197464 | <filename>draw/__init__.py
# Public API of the draw package: a Canvas plus the basic diagram shapes.
__all__ = [
    "Canvas", "Rect", "Circle", "Line"
]
from .canvas import Canvas
from .diagrams import Rect, Circle, Line
| StarcoderdataPython |
3285121 | <filename>src/dispatch/plugins/kandbox_planner/planner_engine/optimizer_shared_jobs_in_slots.py
from dispatch.plugins.kandbox_planner.env.env_models import ActionDict
from dispatch.plugins.kandbox_planner.env.env_enums import OptimizerSolutionStatus, ActionType
from datetime import datetime, timedelta
import pandas as pd
from ortools.sat.python import cp_model
import collections
from pprint import pprint
import sys
from dispatch import config
import dispatch.plugins.kandbox_planner.util.kandbox_date_util as date_util
from dispatch.plugins.kandbox_planner.travel_time_plugin import (
MultiLayerCacheTravelTime as TravelTime,
)
from dispatch.plugins.bases.kandbox_planner import KandboxBatchOptimizerPlugin
import logging
log = logging.getLogger("OptimizerJobsInSlots")
# log = logging.getLogger(__name__)
class OptimizerJobsInSlots:
    """CP-SAT based optimizer that re-sequences jobs inside working time slots.

    Given a list of working time slots (where, per ``dispatch_jobs_in_slots``,
    the final slot carries the shared job being planned), it builds a routing
    model over each technician's assigned jobs and minimizes total travel
    time, returning new start times for the jobs that moved.
    """

    # Plugin metadata consumed by the Kandbox planner plugin registry.
    # NOTE(review): title/slug say "opti1day" although the class optimizes
    # jobs inside slots — confirm the metadata is intentional.
    title = "Kandbox Plugin - Batch Optimizer - opti1day"
    slug = "kandbox_opti1day"
    author = "Kandbox"
    author_url = "https://github.com/alibaba/easydispatch"
    description = "Batch Optimizer - opti1day."
    version = "0.1.0"
    default_config = {"log_search_progress": False, "max_exec_seconds": 5}
    config_form_spec = {
        "type": "object",
        "properties": {},
    }

    def __init__(self, env, config=None):
        """Keep a reference to the planner env and its travel-time router.

        :param env: planner environment providing ``jobs_dict`` and
            ``travel_router``.
        :param config: optional dict overriding ``default_config``.
        """
        self.env = env
        self.travel_router = env.travel_router  # TravelTime(travel_speed=25)
        # Fall back to the class-level defaults when no config is supplied.
        self.config = config or self.default_config
        # self.kandbox_env = kandbox_env
        # self.kandbox_env.reset()
        # if max_exec_seconds is not None:
        #     self.max_exec_seconds = max_exec_seconds

    def _get_travel_time_from_location_to_job(self, location, job_code_2):
        """Travel minutes from a raw [lon, lat] ``location`` to the site of
        job ``job_code_2``, truncated to an int.

        When the router reports a time above the configured warning level, a
        fallback value (config.TRAVEL_MINUTES_WARNING_RESULT) is returned
        instead.
        """
        # y_1,x_1= site1.split(':')
        # y_2,x_2= site2.split(':')
        site2 = self.env.jobs_dict[job_code_2]
        new_time = self.travel_router.get_travel_minutes_2locations(
            location,
            [site2.location.geo_longitude, site2.location.geo_latitude],
        )
        # try:
        #     print(f"{ location.geo_longitude, location.geo_latitude}")
        # except:
        #     print("ok")
        if new_time > config.TRAVEL_MINUTES_WARNING_LEVEL:
            log.debug(
                f"JOB:{job_code_2}:LOCATION:{ location[0], location[1]}:very long travel time: {new_time} minutes. ")
            return config.TRAVEL_MINUTES_WARNING_RESULT
        # "/ 1" is a no-op kept from the original; int() truncates.
        return int(new_time / 1)

    def _get_travel_time_2_sites(self, job_code_1, job_code_2):
        """Travel minutes between the sites of two jobs, truncated to an int.

        NOTE(review): unlike ``_get_travel_time_from_location_to_job``, this
        logs long travel times but still returns the raw value — confirm the
        asymmetry is intentional.
        """
        # y_1,x_1= site1.split(':')
        # y_2,x_2= site2.split(':')
        site1 = self.env.jobs_dict[job_code_1]
        site2 = self.env.jobs_dict[job_code_2]
        new_time = self.travel_router.get_travel_minutes_2locations(
            [site1.location.geo_longitude, site1.location.geo_latitude],
            [site2.location.geo_longitude, site2.location.geo_latitude],
        )
        if new_time > config.TRAVEL_MINUTES_WARNING_LEVEL:
            log.debug(
                f"JOB:{job_code_1}:JOB:{job_code_2}: very long travel time: {new_time} minutes")
        return int(new_time / 1)

    def dispatch_jobs_in_slots(self, working_time_slots: list):
        """Re-sequence the jobs already assigned to the given slots.

        Builds a CP-SAT model with one routing chain per slot (source ->
        jobs -> sink) and minimizes the total travel time across all slots.

        Returns a dict with "status" (OptimizerSolutionStatus) and, on
        success, "changed_action_dict_by_job_code", "not_changed_job_codes"
        and "slots" (per-slot job sequences sorted by start time).
        """
        num_slots = len(working_time_slots)
        # All durations are in minutes.
        all_start_minutes = [s.start_minutes - s.start_overtime_minutes for s in working_time_slots]
        all_end_minutes = [s.end_minutes + s.end_overtime_minutes for s in working_time_slots]
        # MAX_MINUTE = 24 * 60 # 24 hours in one day.
        # NOTE(review): the last two slots appear to be special entries — the
        # final slot carries the shared job being planned; confirm the [:-2]
        # convention with callers.
        SCHEDULED_WORKER_CODES = [s.worker_id for s in working_time_slots[:-2]]
        CURRENT_JOB_CODE = working_time_slots[-1].assigned_job_codes[0]
        # Duration of the shared job is re-encoded for the number of workers
        # participating; all other jobs keep their scheduled duration.
        all_job_durations = {
            CURRENT_JOB_CODE: self.env.get_encode_shared_duration_by_planning_efficiency_factor(
                self.env.jobs_dict[CURRENT_JOB_CODE].requested_duration_minutes,
                len(SCHEDULED_WORKER_CODES))
        }
        for a_slot in working_time_slots:
            for a_jc in a_slot.assigned_job_codes:
                if a_jc not in all_job_durations.keys():
                    if self.env.jobs_dict[a_jc].planning_status == 'U':
                        log.error(
                            f"dispatch_jobs_in_slots: self.env.jobs_dict[a_jc].planning_status == 'U', job_code = {a_jc}")
                    all_job_durations[a_jc] = self.env.jobs_dict[a_jc].scheduled_duration_minutes
        MIN_MINUTE = max(all_start_minutes)  # latest start across all slots
        MAX_MINUTE = min(all_end_minutes)  # earliest end across all slots
        MAX_TRAVEL_MINUTES = MAX_MINUTE - MIN_MINUTE
        # TODO: Missing shared ===
        # model.NewIntVar(min_num_workers, min_num_workers * 5, 'num_slots')
        # Computed data.
        # We are going to build a flow from a the start of the day to the end
        # of the day.
        #
        # Along the path, we will accumulate travel time
        model = cp_model.CpModel()
        # Per node info
        #incoming_literals = collections.defaultdict(list)
        #outgoing_literals = collections.defaultdict(list)
        outgoing_other_job_index = []
        workers_assigned2_job_literals = collections.defaultdict(
            list
        )  # emp_job_literals [job] = [worker_n_lit, ....]
        travel_time_per_emp_sum_dict = {}  # TODO
        # incoming_sink_literals = []
        # outgoing_source_literals = []
        # new_start_time = []
        # Duan
        # Create all the shift variables before iterating on the transitions
        # between these shifts.
        total_travel_until_emp = {}
        shift_start_time_dict = {}
        travel_time_until_job = {}
        source_lit_dict = {}
        sink_lit_dict = {}
        # Sentinel entry so the chaining below works for slot 0.
        total_travel_until_emp[-1] = model.NewIntVar(
            0, 0, "total_travel until emp {}".format("init")
        )
        all_worker_codes_for_each_job_dict = {}
        all_jobs_in_slots = []
        # Resolve job codes to job objects and record, for each job code,
        # every worker (slot) it is assigned to.
        for slot_i in range(num_slots):
            all_jobs_in_slots.append([
                self.env.jobs_dict[jc] for jc in working_time_slots[slot_i].assigned_job_codes
            ])
            for ajob in all_jobs_in_slots[slot_i]:
                if ajob.job_code not in all_worker_codes_for_each_job_dict.keys():
                    all_worker_codes_for_each_job_dict[ajob.job_code] = [
                        working_time_slots[slot_i].worker_id]
                else:
                    all_worker_codes_for_each_job_dict[ajob.job_code].append(
                        working_time_slots[slot_i].worker_id)
        # --- Per-slot travel accumulators, chained across slots. ---
        for slot_i in range(num_slots):
            incoming_literals_per_slot = {}
            outgoing_literals_per_slot = {}
            shift_start_time_dict[slot_i] = {}
            source_lit_dict[slot_i] = {}
            sink_lit_dict[slot_i] = {}
            travel_time_per_emp_sum_dict[slot_i] = model.NewIntVar(
                0, MAX_TRAVEL_MINUTES, "total_travel time for emp {}".format(slot_i)
            )
            total_travel_until_emp[slot_i] = model.NewIntVar(
                0, MAX_TRAVEL_MINUTES * (slot_i + 1), "total_travel for emp {}".format(slot_i)
            )
            # To chain and accumulate all travel times for each employee
            model.Add(
                total_travel_until_emp[slot_i] ==
                total_travel_until_emp[slot_i - 1] + travel_time_per_emp_sum_dict[slot_i]
            )
        # total traval (travel_time_final_total) is the last one
        travel_time_final_total = model.NewIntVar(
            0, num_slots * int(MAX_TRAVEL_MINUTES), "total_travel for - {}".format("all")
        )
        model.Add(travel_time_final_total == total_travel_until_emp[num_slots - 1])
        # Not sure why sum() does not work!!! sum(travel_time_per_emp_sum_dict)
        """
        model.Add(travel_time_final_total == travel_time_per_emp_sum_dict[0] + travel_time_per_emp_sum_dict[1] \
            + travel_time_per_emp_sum_dict[2] + travel_time_per_emp_sum_dict[3] \
            + travel_time_per_emp_sum_dict[4] + travel_time_per_emp_sum_dict[5] \
            )
        """
        # shared_job_lits[job_code] = [(slot_i, start_var), ...]; used to tie
        # the start time of a job that appears in several slots.
        shared_job_lits = {}
        job_edge_dict = {}  # only for tracking
        # other_start_time_dict = {}
        # --- Decision variables and routing constraints, slot by slot. ---
        for slot_i in range(num_slots):
            outgoing_other_job_index.append([])
            num_jobs = len(working_time_slots[slot_i].assigned_job_codes)
            incoming_literals_per_slot[slot_i] = collections.defaultdict(list)
            outgoing_literals_per_slot[slot_i] = collections.defaultdict(list)
            # fmt: off
            for shift in range(num_jobs):
                shift_start_time_dict[slot_i][shift] = model.NewIntVar(
                    MIN_MINUTE, MAX_MINUTE, "start_time_shift_%i" % shift)
                travel_time_until_job[shift] = model.NewIntVar(
                    0, MAX_TRAVEL_MINUTES, "travel_time_until_shift_%i" % shift)
                job_code = working_time_slots[slot_i].assigned_job_codes[shift]
                if job_code in shared_job_lits.keys():
                    # The same job appears in another slot: all appearances
                    # must start at the same time.
                    for existing_lits in shared_job_lits[job_code]:
                        if existing_lits[0] == slot_i:
                            log.error(
                                f"Duplicated job ({job_code}) code in same slot({working_time_slots[slot_i]})")
                            res_dict = {"status": OptimizerSolutionStatus.INFEASIBLE}
                            return res_dict
                            # raise ValueError(f"Error, duplicated job ({job_code}) code in same slot")
                    # Not necessary for M*N
                    # model.Add( existing_lits[1] == shift_start_time_dict[slot_i][shift] )
                    # Link only the last slot_i literal to this new literal in new slot, for same job
                    last_lit = shared_job_lits[job_code][-1][1]
                    model.Add(last_lit == shift_start_time_dict[slot_i][shift])
                    shared_job_lits[job_code].append((slot_i, shift_start_time_dict[slot_i][shift]))
                else:
                    shared_job_lits[job_code] = [(slot_i, shift_start_time_dict[slot_i][shift])]
            for shift in range(num_jobs):
                #
                job_code = working_time_slots[slot_i].assigned_job_codes[shift]
                #
                # job_duration = self.env. get_encode_shared_duration_by_planning_efficiency_factor (
                #     self.env.jobs_dict[job_code].requested_duration_minutes,
                #     NBR_OF_WORKERS)
                """
                if working_time_slots[slot_i].assigned_job_codes[shift]["mandatory_minutes_minmax_flag"] == 1:
                    shift_start_time_dict[slot_i][shift] = model.NewIntVar(
                        working_time_slots[slot_i].assigned_job_codes[shift]["requested_start_min_minutes"],
                        working_time_slots[slot_i].assigned_job_codes[shift]["requested_start_max_minutes"],
                        "start_time_shift_%i" % shift,
                    )
                else:
                """
                source_lit_dict[slot_i][shift] = model.NewBoolVar(
                    "Source Emp {} to job {}".format(slot_i, shift))
                # Arc from source worker to this job
                incoming_literals_per_slot[slot_i][shift].append(source_lit_dict[slot_i][shift])
                # If this job[shift] is first job for worker[slot_i], travel time on this job is from home to job.
                model.Add(
                    travel_time_until_job[shift] == self._get_travel_time_from_location_to_job(
                        working_time_slots[slot_i].start_location,
                        working_time_slots[slot_i].assigned_job_codes[shift]
                    )
                ).OnlyEnforceIf(source_lit_dict[slot_i][shift])
                sink_lit_dict[slot_i][shift] = model.NewBoolVar(
                    "Sink job {} to emp {} ".format(shift, slot_i)
                )
                # Arc from job to sinking_worker.
                outgoing_literals_per_slot[slot_i][shift].append(sink_lit_dict[slot_i][shift])
                this_job = self.env.jobs_dict[job_code]
                # If this job[shift] is the last job for worker[slot_i], travel_time_per_emp_sum_dict (total) is the travel time on this job is from job to home.
                model.Add(
                    travel_time_per_emp_sum_dict[slot_i] ==
                    travel_time_until_job[shift] +
                    self._get_travel_time_from_location_to_job(
                        working_time_slots[slot_i].end_location, working_time_slots[slot_i].assigned_job_codes[shift])
                ).OnlyEnforceIf(
                    sink_lit_dict[slot_i][shift]
                )  # from sink_
                # job must obey Start time for worker, if assigned to this worker
                # TODO, only 1 day for now, [0]
                try:
                    model.Add(
                        shift_start_time_dict[slot_i][shift] >= int(
                            working_time_slots[slot_i].start_minutes -
                            working_time_slots[slot_i].start_overtime_minutes +
                            self._get_travel_time_from_location_to_job(
                                working_time_slots[slot_i].start_location,
                                working_time_slots[slot_i].assigned_job_codes[shift]))
                    ).OnlyEnforceIf(source_lit_dict[slot_i][shift])
                except TypeError:
                    log.error(str('internal - int vs float?'))
                #
                # job must obey end time for worker, if assigned to this worker
                model.Add(
                    shift_start_time_dict[slot_i][shift] <= int(
                        working_time_slots[slot_i].end_minutes +
                        working_time_slots[slot_i].end_overtime_minutes -
                        all_job_durations[job_code] - self._get_travel_time_from_location_to_job(
                            working_time_slots[slot_i].end_location,
                            working_time_slots[slot_i].assigned_job_codes[shift]))
                ).OnlyEnforceIf(sink_lit_dict[slot_i][shift])
                # Pairwise sequencing arcs between jobs in the same slot.
                for other in range(num_jobs):
                    if shift == other:
                        continue
                    other_job_code = working_time_slots[slot_i].assigned_job_codes[other]
                    other_duration = self.env.jobs_dict[other_job_code].requested_duration_minutes
                    lit = model.NewBoolVar("job path from %i to %i" % (shift, other))
                    job_edge_dict[(slot_i, shift, other)] = lit
                    # constraint for start time by duan 2019-10-09 16:58:42 #### + working_time_slots[slot_i].assigned_job_codes[shift]['requested_duration_minutes'] + min_delay_between_shifts
                    model.Add(
                        shift_start_time_dict[slot_i][shift] + int(all_job_durations[job_code]) + self._get_travel_time_2_sites(working_time_slots[slot_i].assigned_job_codes[shift], working_time_slots[slot_i].assigned_job_codes[other]) <
                        shift_start_time_dict[slot_i][other]
                    ).OnlyEnforceIf(
                        lit
                    )
                    # Increase travel time
                    model.Add(
                        travel_time_until_job[other] ==
                        travel_time_until_job[shift] +
                        self._get_travel_time_2_sites(
                            working_time_slots[slot_i].assigned_job_codes[shift], working_time_slots[slot_i].assigned_job_codes[other])
                    ).OnlyEnforceIf(lit)
                    # Add arc
                    outgoing_literals_per_slot[slot_i][shift].append(lit)
                    incoming_literals_per_slot[slot_i][other].append(lit)
            """
            model.Add(sum( ( outgoing_literals_per_slot[slot_i][s_i] for s_i in range(num_jobs) )
                ) == 1)
            model.Add(sum( [incoming_literals_per_slot[slot_i][s_i] for s_i in range(num_jobs) ]
                ) == 1)
            """
            # fmt: on
        # Create dag constraint.
        # Exactly one source arc, one sink arc, and one in/out arc per job,
        # which forces a single Hamiltonian chain per slot.
        for slot_i in range(num_slots):
            num_jobs = len(working_time_slots[slot_i].assigned_job_codes)
            model.Add(sum((
                sink_lit_dict[slot_i][s_i] for s_i in range(num_jobs)
            )) == 1)
            model.Add(sum((
                source_lit_dict[slot_i][s_i] for s_i in range(num_jobs)
            )) == 1)
            for shift in range(num_jobs):
                model.Add(sum((outgoing_literals_per_slot[slot_i][shift][s_i] for s_i in range(num_jobs))
                              ) == 1)
                model.Add(sum((incoming_literals_per_slot[slot_i][shift][s_i] for s_i in range(num_jobs))
                              ) == 1)
        """
        """
        # model.Add(sum(incoming_sink_literals) == num_slots)
        # model.Add(sum(outgoing_source_literals) == num_slots)
        model.Minimize(travel_time_final_total)
        # Solve model.
        solver = cp_model.CpSolver()
        solver.parameters.log_search_progress = self.config["log_search_progress"]
        # solver.parameters.num_search_workers = 4
        # https://developers.google.com/optimization/cp/cp_tasks
        solver.parameters.max_time_in_seconds = self.config["max_exec_seconds"]  # two minutes
        status = solver.Solve(model)
        if status != cp_model.OPTIMAL and status != cp_model.FEASIBLE:
            log.debug(f"Solver Status = {solver.StatusName(status)}, Not FEASIBLE, failed!")
            res_dict = {"status": OptimizerSolutionStatus.INFEASIBLE}
            return res_dict
        optimal_travel_time = int(solver.ObjectiveValue())
        log.debug(
            f"Solver Status = {solver.StatusName(status)}, optimal_travel_time = {optimal_travel_time} minutes")
        # s_printer = OneSolutionPrinter(__variables,solver)
        # return s_printer.print_solution()
        # return optimal_num_workers
        all_following_tasks = {}
        emp_following_tasks = {}
        """
        for slot_i in range(num_slots):
            print("w_{}_{}: {}, hasta {}".format(
                slot_i,
                workers[slot_i]['worker_code'],
                solver.Value( travel_time_per_emp_sum_dict[slot_i]),
                solver.Value( total_travel_until_emp[slot_i])
                )
            )
        for slot_i in range(num_jobs):
            print("j_{} : {}, travel {}, start: {}".format(
                slot_i,
                working_time_slots[slot_i].assigned_job_codes[slot_i]['requested_duration_minutes'],
                solver.Value(travel_time_until_job[slot_i] ) ,
                solver.Value( shift_start_time_dict[slot_i] [slot_i])
                )
            )
        """
        # j_file.write('')
        # --- Extract the solution into the result dictionary. ---
        to_print_json_list = []
        final_result = {"status": OptimizerSolutionStatus.SUCCESS,
                        "changed_action_dict_by_job_code": {}, "not_changed_job_codes": []}
        for slot_i in range(num_slots):
            to_print_json_list.append([])
            num_jobs = len(working_time_slots[slot_i].assigned_job_codes)
            # for LI in range(num_jobs):
            #     for LJ in range(num_jobs):
            #         if LI != LJ:
            #             print(f"slot-i = {slot_i}, edge: {LI}-{LJ} == {solver.BooleanValue(job_edge_dict[(slot_i,LI,LJ)])}")
            # TODO , to align jobs to job_side, or beginning
            # TODO , add lunch break as objective.
            for shift in range(num_jobs):
                # A job "changed" when the solver moved its start time.
                if all_jobs_in_slots[slot_i][shift].scheduled_start_minutes == solver.Value(shift_start_time_dict[slot_i][shift]):
                    changed_flag = False
                else:
                    changed_flag = True
                if changed_flag:
                    this_job_code = all_jobs_in_slots[slot_i][shift].job_code
                    one_job_action_dict = ActionDict(
                        is_forced_action=False,
                        job_code=this_job_code,
                        action_type=ActionType.JOB_FIXED,
                        # self._tmp_get_worker_code_list_by_id_n_ids(primary_id = )
                        scheduled_worker_codes=all_worker_codes_for_each_job_dict[this_job_code],
                        scheduled_start_minutes=solver.Value(shift_start_time_dict[slot_i][shift]),
                        scheduled_duration_minutes=all_jobs_in_slots[slot_i][shift].scheduled_duration_minutes,
                        # slot_code_list =
                    )
                    final_result["changed_action_dict_by_job_code"][all_jobs_in_slots[slot_i]
                                                                    [shift].job_code] = one_job_action_dict
                else:
                    final_result["not_changed_job_codes"].append(
                        all_jobs_in_slots[slot_i][shift].job_code)
                one_job_result = [
                    solver.Value(shift_start_time_dict[slot_i][shift]),
                    all_jobs_in_slots[slot_i][shift].job_code,
                    shift,
                    changed_flag
                ]
                to_print_json_list[slot_i].append(one_job_result)
                # final_result["changed_action_dict_by_job_code"][all_jobs_in_slots [slot_i][shift].job_code] = one_job_result
        # Per-slot job sequences, ordered by solved start time.
        res_slots = []
        for job_list in to_print_json_list:
            res_slots.append(sorted(
                job_list,
                key=lambda item: item[0],
                reverse=False,
            ))
        final_result["slots"] = res_slots
        log.debug(final_result)
        return final_result
if __name__ == "__main__":
    # Ad-hoc debugging harness: replay a previously pickled set of working
    # time slots through the optimizer.
    import pickle
    slots = pickle.load(open("/tmp/working_time_slots.p", "rb"))
    # NOTE(review): OptimizerJobsInSlots.__init__ requires an ``env`` argument
    # but none is passed here, so this harness raises TypeError as written —
    # confirm and fix before relying on it.
    opti_slot = OptimizerJobsInSlots()
    res = opti_slot.dispatch_jobs_in_slots(slots)
    print(res)
| StarcoderdataPython |
6433176 | from rest_framework import generics, status
from localground.apps.site.api import serializers, filters
from localground.apps.site.api.views.abstract_views \
import QueryableListCreateAPIView
from localground.apps.site import models
from localground.apps.site.api.permissions import CheckProjectPermissions
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from localground.apps.site.api.permissions import \
CheckProjectPermissions, CheckUserCanPostToProject
class MapList(QueryableListCreateAPIView):
    """REST endpoint that lists and creates ``StyledMap`` objects."""

    # NOTE(review): class-level mutable attributes are shared by every
    # instance (i.e. every request) of this view — confirm that
    # error_messages / warnings are reset per request by whoever mutates them.
    error_messages = {}
    warnings = []

    def get_serializer_class(self):
        # Reads (GET) use the lightweight list serializer; writes use the
        # serializer that knows how to create a map.
        method = self.get_serializer_context().get('request').method
        if method == 'GET':
            return serializers.MapSerializerList
        else:
            return serializers.MapSerializerPost

    # Declarative DRF configuration.
    filter_backends = (filters.SQLFilterBackend, filters.RequiredProjectFilter)
    permission_classes = (CheckProjectPermissions, CheckUserCanPostToProject)
    model = models.StyledMap
    paginate_by = 100

    def get_queryset(self):
        # Authenticated users see the maps they can access; anonymous users
        # only see public maps matching the supplied access_key.
        if self.request.user.is_authenticated():
            return self.model.objects.get_objects(self.request.user)
        else:
            return self.model.objects.get_objects_public(
                access_key=self.request.GET.get('access_key')
            )

    def create(self, request, *args, **kwargs):
        """Create a map, attaching accumulated warnings to the response and
        replacing the payload with error messages (HTTP 400) when present."""
        response = super(MapList, self).create(request, *args, **kwargs)
        if len(self.warnings) > 0:
            response.data.update({'warnings': self.warnings})
        if self.error_messages:
            response.data = self.error_messages
            response.status = status.HTTP_400_BAD_REQUEST
        return response
class MapInstance(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve / update / delete endpoint for a single ``StyledMap``."""

    # NOTE(review): class-level mutable attributes shared across requests —
    # same caveat as MapList.
    error_messages = {}
    warnings = []
    queryset = models.StyledMap.objects.select_related('owner').all()
    serializer_class = serializers.MapSerializerDetail
    model = models.StyledMap

    def update(self, request, *args, **kwargs):
        """Update the map, attaching accumulated warnings to the response and
        replacing the payload with error messages (HTTP 400) when present."""
        response = super(MapInstance, self).update(request, *args, **kwargs)
        if len(self.warnings) > 0:
            response.data.update({'warnings': self.warnings})
        if self.error_messages:
            response.data = self.error_messages
            response.status = status.HTTP_400_BAD_REQUEST
        return response
class MapInstanceSlug(MapInstance):
    """Same as MapInstance but addressed by slug instead of the default pk."""
    serializer_class = serializers.MapSerializerDetailSlug
    lookup_field = 'slug'
| StarcoderdataPython |
5118233 | #!/usr/bin/python
from __future__ import print_function
import os
import sys
import errno
import argparse
import tempfile
import subprocess
import shlex
import shutil
import stat
import json
cmdLog = None
# Derived from http://stackoverflow.com/a/4417735
def execute(command):
    """Run *command*, streaming its combined stdout/stderr into the global
    ``cmdLog`` file, and return the captured lines.

    Prints 'SUCCESS' on a zero exit status; raises ValueError otherwise.
    Derived from http://stackoverflow.com/a/4417735
    """
    cmdLog.write(command + '\n')
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    captured = []
    for raw_line in iter(proc.stdout.readline, b""):
        captured.append(raw_line.rstrip('\r\n'))
        cmdLog.write(raw_line)
    cmdLog.write('\n')
    if proc.wait() != 0:
        raise ValueError('Command returned with non-zero value.')
    print('SUCCESS')
    return captured
# Derived from http://stackoverflow.com/a/22331852
def copytree(src, dst, symlinks = False, ignore = None):
    """Recursively copy the tree at *src* into *dst*.

    Unlike shutil.copytree, *dst* may already exist; its stats are refreshed
    from *src*.  *ignore* follows the shutil.copytree callback convention
    (called with (dir, names), returns names to exclude).  When *symlinks* is
    true, symbolic links are recreated rather than followed.
    Derived from http://stackoverflow.com/a/22331852
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
    shutil.copystat(src, dst)
    lst = os.listdir(src)
    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]
    for item in lst:
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if symlinks and os.path.islink(s):
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            try:
                st = os.lstat(s)
                mode = stat.S_IMODE(st.st_mode)
                os.lchmod(d, mode)
            except (AttributeError, NotImplementedError, OSError):
                # BUG FIX: was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt.  os.lchmod is unavailable on
                # most non-BSD platforms; the copy is usable without the
                # link's mode bits.
                pass
        elif os.path.isdir(s):
            copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
# # Derived from http://stackoverflow.com/a/1889686
def remove_readonly(func, path, excinfo):
    """``shutil.rmtree`` onerror hook: make *path* writable, then retry *func*.

    Derived from http://stackoverflow.com/a/1889686
    """
    writable = stat.S_IWRITE
    os.chmod(path, writable)
    func(path)
# Wrap os.makedirs to not throw exception when directory already exists
def makedirs(path):
    """os.makedirs that tolerates an already-existing directory.

    Any other OSError (permissions, bad path, ...) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Swallow the error only when the directory is really there.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
class getsource:
    """Build step that fetches a test's source code (e.g. via git clone)."""

    def __init__(self, globalargs, testname, actiondict):
        # Initialize attributes
        self.globalargs = globalargs
        self.testname = testname
        # Defaults, so a missing key raises the intended ValueError below
        # instead of an AttributeError (matches the msbuild step's style).
        self.cmd = None
        self.output = None
        for k, v in actiondict.items():
            setattr(self, k, v)
        # Check for important values
        if (not self.cmd):
            raise ValueError(testname + 'test does not specify a getsource command.')
        if (not self.output):
            raise ValueError(testname + 'test does not specify an output directory.')

    def execute(self, clean):
        """Fetch the source (re-fetch when *clean*) and chdir into it."""
        os.chdir(self.globalargs.srcdir)
        # A clean run throws away the previous checkout first.
        if (os.path.isdir(self.output) and clean):
            print('Removing ' + self.testname + ' source directory... ', end='')
            shutil.rmtree(self.output, onerror=remove_readonly)
            print('SUCCESS')
        if (not os.path.isdir(self.output)):
            print('Acquiring ' + self.testname + ' source code... ', end='')
            execute(self.cmd)
        print('Entering the ' + self.testname + ' repository directory... ', end='')
        os.chdir(self.output)
        print('SUCCESS')
class vsimport:
    """Build step that runs vsimporter.exe to generate VS projects."""

    def __init__(self, globalargs, testname, actiondict):
        # Initialize attributes
        self.globalargs = globalargs
        self.testname = testname
        self.args = ''
        # Default the optional working directory so a missing 'dir' key in
        # actiondict no longer raises AttributeError below (the msbuild step
        # already defaults it this way).
        self.dir = ''
        for k, v in actiondict.items():
            setattr(self, k, v)
        if (self.dir):
            self.dir = os.path.join(globalargs.srcdir, self.dir)

    def execute(self, clean):
        """Generate VS projects and copy test signing certificates."""
        if (self.dir):
            print('Entering the ' + self.testname + ' import directory... ', end='')
            os.chdir(self.dir)
            print('SUCCESS')
        print('Generating ' + self.testname + ' VS projects... ', end='')
        vsimporter = os.path.join(sys.path[0], '..\\..\\bin\\vsimporter.exe')
        output = execute(vsimporter + ' ' + self.args)
        print('Copying test signing certificates...', end='')
        for line in output:
            # vsimporter reports each generated project as
            # "Generated <path>.vcxproj"; skip every other output line.
            if not (line.startswith('Generated ') and line.endswith('.vcxproj')):
                continue
            projectDir = os.path.dirname(line[10:])
            projectName = os.path.splitext(os.path.basename(line))[0]
            if os.path.isfile(os.path.join(projectDir, 'Package.appxmanifest')):
                shutil.copyfile(os.path.join(sys.path[0], 'TemporaryKey.pfx'), os.path.join(projectDir, projectName + '_TemporaryKey.pfx'))
        print('SUCCESS')
class msbuild:
    """Build step that runs MSBuild and harvests the generated AppX packages."""

    def __init__(self, globalargs, testname, actiondict):
        # Initialize attributes
        self.globalargs = globalargs
        self.testname = testname
        self.args = ''
        self.dir = ''
        for k, v in actiondict.items():
            setattr(self, k, v)
        # An optional working directory is taken relative to the source dir.
        if (self.dir):
            self.dir = os.path.join(globalargs.srcdir, self.dir)

    def execute(self, clean):
        """Run MSBuild, then copy each produced AppX package folder to appxdir."""
        if (self.dir):
            print('Entering the ' + self.testname + ' build directory... ', end='')
            os.chdir(self.dir)
            print('SUCCESS')
        print('Building ' + self.testname + ' projects... ', end='')
        execute('MSBuild.exe ' + self.args)
        print('Copying ' + self.testname + ' AppX packages... ', end='')
        for root, subFolders, files in os.walk('AppPackages'):
            for file in files:
                if file.endswith('.appx'):
                    # Copy the whole package folder; clearing subFolders then
                    # stops os.walk from descending below it again.
                    copytree(root, os.path.join(self.globalargs.appxdir, os.path.basename(root)))
                    subFolders[:] = []
                    break;
        print('SUCCESS')
def main(argv):
    """Parse arguments, run each test's build steps, and report a summary."""
    # Get PATH from the environment
    # NOTE(review): envPath is currently unused.
    envPath = os.environ.get("PATH", os.defpath)
    # Set up argument parsing
    parser = argparse.ArgumentParser(description = 'Run WinObjC tests.')
    parser.add_argument('--testfile', default = os.path.join(sys.path[0], 'AutoAppBuild-tests.json'), type = argparse.FileType('r'), help = 'Test descriptions file.')
    parser.add_argument('--clean', default = False, action='store_true', help = 'Clean git repositories before buidling.')
    parser.add_argument('--srcdir', default = "src", help = 'Directory where tests will be cloned and built.')
    parser.add_argument('--appxdir', default = "appx", help = 'Destination directory for AppX packages.')
    args = parser.parse_args()
    # Create build directory
    args.srcdir = os.path.abspath(args.srcdir)
    makedirs(args.srcdir)
    # Create AppX directory
    args.appxdir = os.path.abspath(args.appxdir)
    makedirs(args.appxdir)
    # Open log file (line-buffered, so it stays readable during long builds)
    global cmdLog
    cmdLog = open(os.path.join(args.srcdir, 'CmdLog.txt'), 'wb', 1)
    # Read the JSON test descriptions
    tests = json.load(args.testfile)
    # Print info
    print('Test file:', args.testfile.name)
    print('Build directory: ', args.srcdir)
    print('AppX directory: ', args.appxdir)
    # Iterate over tests
    successCount = 0
    totalCount = 0
    for test in tests:
        # Deserialize build steps into objects.
        # NOTE(review): the step's 'action' string is looked up in globals(),
        # so it must name one of the step classes defined in this module.
        actionObjects = []
        try:
            for step in test['buildSteps']:
                actionObj = globals()[step['action']](args, test['name'], step)
                actionObjects.append(actionObj)
            totalCount += 1
        except Exception as e:
            print('Failed to parse test description: ' + str(e))
            continue
        # Execute build steps
        print()
        try:
            for action in actionObjects:
                action.execute(args.clean)
            successCount += 1
        except Exception as e:
            print('FAILURE')
            cmdLog.write(str(e))
    # Print results
    print()
    print('Results: ' + str(successCount) + '/' + str(totalCount))
if __name__ == "__main__":
main(sys.argv) | StarcoderdataPython |
12862101 | <gh_stars>0
from lesson12_projects.house3.data.const import E_TURNED_KNOB, MSG_TURN_KNOB, E_FAILED
class OutState:
    """State machine state: the player is standing outside the house."""

    def update(self, req):
        """Run one step of this state; returns the transition event."""
        self.on_entry(req)
        # Read the player's input.
        msg = self.on_trigger(req)
        # We are outside; sending the message 'Turn knob' is the correct move.
        if msg == MSG_TURN_KNOB:
            self.on_turned_knob(req)
            return E_TURNED_KNOB
        else:
            self.on_failed(req)
            return E_FAILED

    def on_entry(self, req):
        # Describe the scene to the connected client socket.
        req.c_sock.send(
            """You can see the house.
You can see the close knob.""".encode()
        )

    def on_trigger(self, req):
        # Wait for and return the client's next message.
        return req.pull_trigger()

    def on_turned_knob(self, req):
        # Hook for a correct 'Turn knob' input; no-op by default.
        pass

    def on_failed(self, req):
        # Hook for any other input; no-op by default.
        pass
| StarcoderdataPython |
3375945 | # -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
"""core user interface
:mod:`pcapkit.interface.core` defines core user-oriented
interfaces, variables, and etc., which wraps around the
foundation classes from :mod:`pcapkit.foundation`.
"""
import io
import sys
from pcapkit.foundation.analysis import analyse as analyse2
from pcapkit.foundation.extraction import Extractor
from pcapkit.foundation.traceflow import TraceFlow
from pcapkit.protocols.protocol import Protocol
from pcapkit.reassembly.ipv4 import IPv4_Reassembly
from pcapkit.reassembly.ipv6 import IPv6_Reassembly
from pcapkit.reassembly.tcp import TCP_Reassembly
from pcapkit.utilities.exceptions import FormatError
from pcapkit.utilities.validations import bool_check, int_check, io_check, str_check
__all__ = [
'extract', 'analyse', 'reassemble', 'trace', # interface functions
'TREE', 'JSON', 'PLIST', 'PCAP', # format macros
'LINK', 'INET', 'TRANS', 'APP', 'RAW', # layer macros
'DPKT', 'Scapy', 'PyShark', 'MPServer', 'MPPipeline', 'PCAPKit',
# engine macros
]
# output file formats
TREE = 'tree'
JSON = 'json'
PLIST = 'plist'
PCAP = 'pcap'
# layer thresholds
RAW = 'None'
LINK = 'Link'
INET = 'Internet'
TRANS = 'Transport'
APP = 'Application'
# extraction engines
DPKT = 'dpkt'
Scapy = 'scapy'
PCAPKit = 'default'
PyShark = 'pyshark'
MPServer = 'server'
MPPipeline = 'pipeline'
def extract(fin=None, fout=None, format=None,  # basic settings # pylint: disable=redefined-builtin
            auto=True, extension=True, store=True,  # internal settings
            files=False, nofile=False, verbose=False,  # output settings
            engine=None, layer=None, protocol=None,  # extraction settings
            ip=False, ipv4=False, ipv6=False, tcp=False, strict=True,  # reassembly settings
            trace=False, trace_fout=None, trace_format=None,  # trace settings # pylint: disable=redefined-outer-name
            trace_byteorder=sys.byteorder, trace_nanosecond=False):  # trace settings
    """Extract a PCAP file.

    Arguments:
        fin (Optiona[str]): file name to be read; if file not exist, raise :exc:`FileNotFound`
        fout (Optiona[str]): file name to be written
        format (Optional[Literal['plist', 'json', 'tree']]): file format of output
        auto (bool): if automatically run till EOF
        extension (bool): if check and append extensions to output file
        store (bool): if store extracted packet info
        files (bool): if split each frame into different files
        nofile (bool): if no output file is to be dumped
        verbose (bool): if print verbose output information
        engine (Optional[Literal['default', 'pcapkit', 'dpkt', 'scapy', 'pyshark', 'server', 'pipeline']]):
            extraction engine to be used
        layer (Optional[Literal['Link', 'Internet', 'Transport', 'Application']]): extract til which layer
        protocol (Optional[Union[str, Tuple[str], Type[Protocol]]]): extract til which protocol
        ip (bool): if record data for IPv4 & IPv6 reassembly
        ipv4 (bool): if perform IPv4 reassembly
        ipv6 (bool): if perform IPv6 reassembly
        tcp (bool): if perform TCP reassembly
        strict (bool): if set strict flag for reassembly
        trace (bool): if trace TCP traffic flows
        trace_fout (Optional[str]): path name for flow tracer if necessary
        trace_format (Optional[Literal['plist', 'json', 'tree', 'pcap']]): output file
            format of flow tracer
        trace_byteorder (Literal['little', 'big']): output file byte order
        trace_nanosecond (bool): output nanosecond-resolution file flag

    Returns:
        Extractor -- an :class:`~pcapkit.foundation.extraction.Extractor` object
    """
    # A Protocol subclass may be passed for layer/protocol; normalise both
    # to the string forms the Extractor expects.
    if isinstance(layer, type) and issubclass(layer, Protocol):
        layer = layer.__layer__
    if isinstance(protocol, type) and issubclass(protocol, Protocol):
        protocol = protocol.id()
    # Validate argument types up front (None is massaged into a dummy value
    # so the checkers only ever see the expected type).
    str_check(fin or '', fout or '', format or '',
              trace_fout or '', trace_format or '',
              engine or '', layer or '', *(protocol or ''))
    bool_check(files, nofile, verbose, auto, extension, store,
               ip, ipv4, ipv6, tcp, strict, trace)
    return Extractor(fin=fin, fout=fout, format=format,
                     store=store, files=files, nofile=nofile,
                     auto=auto, verbose=verbose, extension=extension,
                     engine=engine, layer=layer, protocol=protocol,
                     ip=ip, ipv4=ipv4, ipv6=ipv6, tcp=tcp, strict=strict,
                     trace=trace, trace_fout=trace_fout, trace_format=trace_format,
                     trace_byteorder=trace_byteorder, trace_nanosecond=trace_nanosecond)
def analyse(file, length=None):
    """Analyse application layer packets.

    Arguments:
        file (Union[bytes, io.BytesIO]): packet to be analysed
        length (Optional[int]): length of the analysing packet

    Returns:
        Analysis: an :class:`~pcapkit.foundation.analysis.Analysis` object
    """
    # Accept raw bytes as a convenience; wrap them in a stream.
    stream = io.BytesIO(file) if isinstance(file, bytes) else file
    io_check(stream)
    int_check(length or sys.maxsize)
    return analyse2(stream, length)
def reassemble(protocol, strict=False):
    """Reassemble fragmented datagrams.

    Arguments:
        protocol (Union[str, Type[Protocol]]) protocol to be reassembled
        strict (bool): if return all datagrams (including those not implemented) when submit

    Returns:
        Union[IPv4_Reassembly, IPv6_Reassembly, TCP_Reassembly]: a :class:`~pcapkit.reassembly.reassembly.Reassembly`
        object of corresponding protocol

    Raises:
        FormatError: If ``protocol`` is **NOT** any of IPv4, IPv6 or TCP.
    """
    # A Protocol subclass may be passed; normalise to its string name.
    if isinstance(protocol, type) and issubclass(protocol, Protocol):
        protocol = protocol.id()
    str_check(protocol)
    bool_check(strict)
    # Dispatch table instead of an if-chain.
    engines = {
        'IPv4': IPv4_Reassembly,
        'IPv6': IPv6_Reassembly,
        'TCP': TCP_Reassembly,
    }
    engine = engines.get(protocol)
    if engine is None:
        raise FormatError(f'Unsupported reassembly protocol: {protocol}')
    return engine(strict=strict)
def trace(fout=None, format=None, byteorder=sys.byteorder, nanosecond=False):  # pylint: disable=redefined-builtin
    """Trace TCP flows.

    Arguments:
        fout (str): output path
        format (Optional[str]): output format
        byteorder (str): output file byte order
        nanosecond (bool): output nanosecond-resolution file flag

    Returns:
        TraceFlow: a :class:`~pcapkit.foundation.traceflow.TraceFlow` object
    """
    # Validate the string-ish arguments (None becomes a dummy '').
    str_check(fout or '', format or '')
    return TraceFlow(fout=fout, format=format,
                     byteorder=byteorder, nanosecond=nanosecond)
| StarcoderdataPython |
186848 | # -*- coding: utf-8 -*-
# IPC: A python library for interprocess communication via standard streams.
#
# $Id$
#
# License: MIT
# Copyright 2015-2017 <NAME> (https://github.com/UncleRus)
# Copyright 2017 <NAME> (https://github.com/oleg-golovanov)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import re
import os
from setuptools import setup
# Directory containing this setup script; sibling files are resolved from it.
DIR = os.path.dirname(__file__)
# Single-source the version: read __version__ straight out of ipc.py.
with open(os.path.join(DIR, 'ipc.py')) as f:
    version = re.search(r'__version__\s+=\s+[\'\"]+(.*)[\'\"]+', f.read()).group(1)
setup(
    name='ipc',
    version=version,
    py_modules=['ipc'],
    data_files=[('', ['LICENSE', 'README.md'])],
    description='Interprocess communication via standard streams.',
    zip_safe=False,
    platforms='any',
    # The README doubles as the long description shown on PyPI.
    long_description=open(os.path.join(DIR, 'README.md')).read(),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| StarcoderdataPython |
6674627 | <filename>lib/workflow/func/tag.py<gh_stars>0
from .func import FuncInsert, FuncUpdate
class FuncTagInsert(FuncInsert):
    """Insert-style tag function: applies a single rule to the content
    assembled from the configured key(s)."""

    def __init__(self, key, rule):
        # key: a single field name (str) or a list of field names.
        self._key = key
        self._rule = rule

    def get_fields(self):
        """Return the list of source fields this func reads."""
        # isinstance instead of `type(...) ==` so subclasses of str/list
        # are accepted as well (the type() comparison rejected them).
        if isinstance(self._key, str):
            return [self._key]
        elif isinstance(self._key, list):
            return self._key
        else:
            raise Exception("func key is error!")

    def get_keys(self):
        """Output keys produced by the underlying rule."""
        return self._rule.get_keys()

    def insert(self, item):
        """Build content from *item* and run the rule; None when empty."""
        # _get_content is inherited from the FuncInsert base class.
        content = self._get_content(keys=self._key, item=item)
        if content == '':
            return None
        return self._rule.tag_insert(content=content)
class FuncTagUpdate(FuncUpdate):
    """Update-style tag function: merges the outputs of several rules."""

    def __init__(self, key, rules):
        self._rules = rules
        self._key = key

    def get_fields(self):
        """Single source field this func reads."""
        return [self._key]

    def update(self, item):
        """Run every rule over item[key]; None when the key is absent."""
        if self._key not in item:
            return None
        content = item[self._key]
        merged = dict()
        for rule in self._rules:
            partial = rule.tag_update(content=content)
            if partial:
                merged.update(partial)
        return merged
class FuncTagInsertTogether(FuncInsert):
    """Insert-style tag function: concatenates the tag rows of all rules."""

    def __init__(self, key, rules, insert_keys):
        self._key = key
        self._rules = rules
        self._insertKeys = insert_keys

    def get_keys(self):
        """Explicitly configured output keys."""
        return self._insertKeys

    def get_fields(self):
        """Single source field this func reads."""
        return [self._key]

    def insert(self, item):
        """Collect tag rows from every rule; None when the key is absent."""
        if self._key not in item:
            return None
        content = item[self._key]
        collected = []
        for rule in self._rules:
            rows = rule.tag_insert(content=content)
            if rows:
                collected.extend(rows)
        return collected
class FuncTagsInsertSideBySide(FuncInsert):
    """Insert-style tag function whose rules contribute columns side by side:
    the result rows are the cross product of every rule's tag tuples."""

    def __init__(self, key, rules):
        self._key = key
        self._rules = rules
        # Flattened list of every rule's output column names, in rule order.
        self._tags_keys = []
        # Per-rule placeholder tuples of empty strings, substituted when a
        # rule matches nothing so its columns still appear in every row.
        self._rules_null_tags = []
        for rule in self._rules:
            self._tags_keys.extend(rule.get_keys())
            self._rules_null_tags.append(('',) * len(rule.get_keys()))

    def get_keys(self):
        """All output columns, in rule order."""
        return self._tags_keys

    def get_fields(self):
        """Single source field this func reads."""
        return [self._key]

    def insert(self, item):
        """Return the cross product of each rule's tag tuples for item[key];
        None when the key is absent from *item*."""
        if self._key not in item:
            return None
        content = item[self._key]
        all_tags_items = []
        tags_items = []
        for rule_index, rule in enumerate(self._rules):
            all_tags_items = []
            rule_tags_items = rule.tag_insert(content=content)
            if not rule_tags_items:
                # No match: use the empty-string placeholder for this rule.
                rule_tags_items = [self._rules_null_tags[rule_index]]
            if tags_items:
                # Extend every partial row with every tuple from this rule
                # (tuple concatenation lays the columns side by side).
                for all_tags_item_index, all_tags_item in enumerate(tags_items):
                    for rule_tag_item_index, rule_tag_item in enumerate(rule_tags_items):
                        all_tags_items.append(all_tags_item + rule_tag_item)
            else:
                all_tags_items = rule_tags_items
            tags_items = all_tags_items.copy()
        return all_tags_items
| StarcoderdataPython |
1922307 | <reponame>anajulijapreseren/Najbolj-e-knjige-vseh-asov<gh_stars>0
import csv
import os
import requests
import re
import math
import os.path
import json
import sys
#--------------------------------------------------------------------------------------------------
#Definiramo konstante za število knjig in poimenujemo spletne strani ter datoteke in direktorije,
#v katere bomo shranjevali podatke
SKUPNO_ST_KNJIG = 5000
ST_KNJIG_NA_STRAN = 100 #konstantno, ne moremo spremeniti
ST_STRANI = math.ceil(SKUPNO_ST_KNJIG / ST_KNJIG_NA_STRAN)
#URL glavne strani goodreads z najboljšimi knjigami
book_frontpage_url = 'https://www.goodreads.com/list/show/1.Best_Books_Ever?page={}'
#URL podstrani posamezne knjige
book_subpage_url = 'https://www.goodreads.com/book/show/{}'
# mapa, v katero bomo shranili podatke
book_directory = 'Pridobivanje_podatkov'
# ime datoteke, v katero bomo shranili glavno stran
frontpage_filename = 'best_books.html'
#ime datoteke, v katero bomo shranili podstran posamezne knjige
subpage_filename = 'book_subpage.html'
# ime CSV datoteke v katero bomo shranili podatke
csv_filename = 'best_books.csv'
#--------------------------------------------------------------------------------------------------
#REGEX VZORCI:
#vzorci, ki jih uporabimo na glavni strani(frontpage):
#url naslove poberemo, da lahko obiščemo podstran vsake knjige in pridobimo ostale podatke
vzorec_url = r'href=[\'"]?\/book\/show\/([^\'" >]+)'
vzorec_id_avtor = r'author\/show\/(\d+)'
vzorec_score = r'score: (.+)<\/a>'
vzorec_st_glasov = r'false;">(.+) people voted<\/a>'
#vzorci, ki jih uporabimo na podstrani(subpage):
vzorec_avtor = r'<span itemprop="name">(?P<avtor>[^<]+)<+?'
vzorec_knjiga = r'<h1 id="bookTitle".*itemprop="name">\s*(?P<knjiga>.*)\s*</h1>'
vzorec_serija = r'<div class="infoBoxRowTitle">(Series)</div>'
vzorec_povprecna_ocena = r'<span itemprop="ratingValue">\n*\s*(?P<povprecna_ocena>.+)\n*</span>'
vzorec_zalozba = r'<div class="row">\s*Published\s*.*\d{4}\s*by (?P<zalozba>.*)\s+'
vzorec_st_ocen = r'<meta itemprop="ratingCount" content="(?P<ratings>\d*)"\s*/>'
vzorec_st_reviewov = r'<meta itemprop="reviewCount" content="(?P<reviews>\d*)"\s*/>'
vzorec_zanr = r'people shelved this book as '(?P<zanr>.+)''
vzorec_st_ocen_zanra = r'(?P<st_ocen_zanra>\d+) people shelved this book as ''
vzorec_nagrade = r'award\/show\/.*?>(?P<nagrade>.+?)(?:\(|<)'
vzorec_leto_nagrade = r'award/show/.*?>.+?\((?P<leto_nagrade>\d*)\)</a>'
#ta vzorec je za prvo leto izida (first published in _)saj so nekatere knjige izdali večkrat
vzorec_leto1 = r'<div class="row">\s*Published\s*<nobr class="greyText">\s*\(.*(?P<leto>\d{4})\)\s*<\/nobr>'
#splošen vzorec za leto izida
vzorec_leto2 = r'<div class="row">\s*Published\s*.*(?P<leto>\d{4})\s*by'
#Opisi knjig so različno dolgi, zato so nekateri v celoti zapisani v okvirčku, pri drugih pa moramo klikniti "more"
#če uporabimo le vzorec za krajše besedilo, na koncu daljšega zapisa dobimo "..."
#če uporabimo le vzorec za daljše besedilo, pa pri krajšem zapisu preskoči v komentarje in kot opis zajame
#prvi komentar
#Zato z zgornjima regularnima izrazoma preverimo ali je opis dolg ali kratek,
#nato pa uporabimo ustreznega od spodnjih opisov.
vzorec_st_opisa = r'<span id="freeTextContainer(\d+)">'
vzorec_podrobnega_opisa = '<span id="freeText{}" style=".*">'
vzorec_opis_navaden = r'<span id="freeTextContainer\d+">(.*?)</span>'
vzorec_opis_podroben = r'<span id="freeText\d+" style=".*?">(?:Alternate.*?<br \/>)*(.*?)<\/span>'
#--------------------------------------------------------------------------------------------------
#FUNKCIJE za pripravo/prenos/zapis datotek:
#Funkcije: pripravi_imenik, zapisi_json in zapisi_csv so vzete iz
#https://github.com/matijapretnar/programiranje-1/tree/master/02-zajem-podatkov/predavanja iz datoteke orodja.py.
def pripravi_imenik(ime_datoteke):
    """Make sure the parent directory of *ime_datoteke* exists."""
    mapa = os.path.dirname(ime_datoteke)
    if not mapa:
        # Plain filename with no directory component: nothing to create.
        return
    os.makedirs(mapa, exist_ok=True)
def zapisi_json(objekt, ime_datoteke):
    """Serialise *objekt* into a pretty-printed UTF-8 JSON file."""
    pripravi_imenik(ime_datoteke)
    with open(ime_datoteke, 'w', encoding='utf-8') as izhod:
        json.dump(objekt, izhod, indent=4, ensure_ascii=False)
def download_url_to_string(url):
    """Fetch *url* and return the page body as text, or None on failure.

    Connection problems and non-OK HTTP statuses are reported on stdout
    instead of raising.
    """
    try:
        response = requests.get(url)
    except requests.exceptions.ConnectionError as err:
        # Network-level failure: report and bail out.
        print("Prislo je do napake pri povezovanju")
        print(err)
        return None
    if response.status_code == requests.codes.ok:
        return response.text
    # Server answered, but not with 200 OK.
    print("Težava pri vsebini strani")
    return None
def save_string_to_file(text, directory, filename):
    """Write *text* to "directory"/"filename", overwriting any existing file.

    When *directory* is an empty string the file is created in the current
    working directory, as the original docstring promised; previously
    os.makedirs('') raised FileNotFoundError in that case.
    """
    if directory:
        os.makedirs(directory, exist_ok=True)
    path = os.path.join(directory, filename)
    with open(path, 'w', encoding='utf-8') as file_out:
        file_out.write(text)
    return None
def save_frontpage(page, directory, filename):
    """Download *page* and store its HTML under "directory"/"filename".

    Returns:
        bool: True when the page was downloaded and saved, False when the
        download failed (download_url_to_string already printed the error).
    """
    html = download_url_to_string(page)
    if html:  # neither "" nor None
        save_string_to_file(html, directory, filename)
        return True
    # Previously this fell through to `raise NotImplementedError()`, turning
    # an ordinary download failure into a crash; report it via the return
    # value instead (callers in main() ignore the result either way).
    return False
def zapisi_csv(slovarji, imena_polj, ime_datoteke):
    """Write the given list of dicts to a CSV file with a header row."""
    pripravi_imenik(ime_datoteke)
    with open(ime_datoteke, 'w', encoding='utf-8') as izhod:
        pisec = csv.DictWriter(izhod, fieldnames=imena_polj)
        pisec.writeheader()
        for vrstica in slovarji:
            pisec.writerow(vrstica)
#--------------------------------------------------------------------------------------------------
#FUNKCIJE za zajem opisa in olepšavo/popravek zajetega besedila:
#funkcija,ki ugotovi ali moramo vzeti izraz za kratek ali dolg opis
def izloci_opis(html_text):
    """Return the raw book description from a subpage's HTML.

    Picks the long-form regex when the page hides the full text behind a
    "more" toggle, otherwise the short-form one.
    """
    stevilka = re.findall(vzorec_st_opisa, html_text)[0]
    ima_podrobnega = re.findall(vzorec_podrobnega_opisa.format(stevilka), html_text)
    if ima_podrobnega:
        # The page has a hidden, complete description: take that one.
        return re.findall(vzorec_opis_podroben, html_text)[0]
    return re.findall(vzorec_opis_navaden, html_text)[0]
#spletna stran pri opisu knjige včasno vključi tudi podatke o naslovici
#(npr. alternative cover, You can find the redesigned cover of this edition HERE, ISBN...)
#poleg tega vpis vkljucuje tudi html oznake za presledke med vrsticami, poševno pisavo...
#čim več teh "nepravilnosti" želimo odpraviti z naslednjo funkcijo
#(seveda pa ne moremo odpraviti vseh, saj ne moremo le z regularnimi izrazi vedeti ali zadnji stavek "You can also read part 2"
# še opisuje knjigo ali ne)
def predelaj_opis(opis, naslov):
    """Clean up a scraped description for presentation.

    Strips simple HTML tags and boilerplate, removes italic spans (which on
    the site usually hold cover/edition notes), and — when that removal eats
    a leading italicised title — glues *naslov* back on the front.
    """
    # Line breaks become spaces; other markup and boilerplate is dropped.
    for presledek in ("<br>", "<br />", "<br/>"):
        opis = opis.replace(presledek, " ")
    for smet in ("<em>", "</em>", "<b>", "</b>", "<a>", "</a>",
                 "\"", "<p>", "</p>", "(back cover)"):
        opis = opis.replace(smet, "")
    # Cut out each <i>...</i> span entirely.
    while "<i>" in opis:
        od = opis.find("<i>")
        do = opis.find("</i>")
        opis = opis[:od] + opis[do + 4:]
    # A stray closing tag can survive the loop above.
    opis = opis.replace("</i>", "")
    # If the removal ate an italicised title at the start (the text now
    # begins mid-sentence), prepend the title again.
    if opis.startswith((" is", " was", " has", " have")):
        opis = naslov + opis
    return opis
def seznam_kljucev(slovar):
    """Return the dict's keys as a list (used as CSV column names)."""
    # Iterating a dict yields its keys; no need for a manual append loop.
    return list(slovar)
def spremeni_v_apostrof(seznam):
    """Replace the HTML apostrophe entity in each item with a literal '.

    Some award names carry an apostrophe encoded as the HTML entity
    &#39;; decode it.  (The previous body replaced "'" with "'" — a no-op,
    apparently the entity got decoded away in the source text itself.)
    """
    return [niz.replace("&#39;", "'") for niz in seznam]
#--------------------------------------------------------------------------------------------------
#GLAVNA FUNKCIJA:
def main(redownload=True, reparse=True):
    """Scrape the Goodreads "Best Books Ever" list page by page and dump the
    harvested data into per-page CSV files.

    NOTE(review): the redownload/reparse parameters are currently unused.
    """
    # The site occasionally serves a broken book subpage
    # (e.g. https://www.goodreads.com/book/show/18765.I_Claudius).
    # Note: at the last check that page worked again, but it does no harm to
    # keep checking for such failures.
    # Broken books must be skipped (detected when the book has no title).
    # First store one of the main list pages into a local file.
    for i in range(1,ST_STRANI + 1):
        print("koncal {} stran".format(i-1))
        # All book data (kept in per-book dicts) is gathered into lists:
        # list used for the json file
        vse_knjige = []
        # lists used for the csv files
        knjige = []
        zanri = []
        nagrade = []
        id = (i-1)*ST_KNJIG_NA_STRAN + 1
        # save one of the list pages that has to be analysed
        save_frontpage(book_frontpage_url.format(i), book_directory, frontpage_filename)
        # From this page collect the data missing from the per-book subpages
        # plus the url of every book's subpage (source of the rest of the
        # data).  Once all subpages are harvested, the next list page is
        # downloaded over the old file, which is no longer needed.
        with open(r"C:\Users\<NAME>\Documents\Najbolj-e-knjige-vseh-asov\Pridobivanje_podatkov\best_books.html", "r", encoding="utf-8") as s:
            string = s.read()
        # author id, score and vote count are needed for the csv files:
        id_avtor, score, st_glasov = [], [], []
        id_avtor.extend(re.findall(vzorec_id_avtor, string))
        score.extend(re.findall(vzorec_score, string))
        st_glasov.extend(re.findall(vzorec_st_glasov, string))
        # every subpage url appears twice, hence the step of 2 further below
        urlji = re.findall(vzorec_url, string)
        # now download every book's subpage: save it to a file, extract the
        # data, then download the next subpage over the same file
        k=0
        for j in range(0,len(urlji),2):
            slovar_knjige = {}
            save_frontpage(book_subpage_url.format(urlji[j]), book_directory, subpage_filename)
            with open(r"C:\Users\<NAME>\Documents\Najbolj-e-knjige-vseh-asov\Pridobivanje_podatkov\book_subpage.html", "r", encoding="utf-8") as sub:
                besedilo = sub.read()
            # store the frontpage-derived values (id_avtor, score, st_glasov)
            # into the book's dict together with the subpage data
            # BOOKS
            knjiga = re.findall(vzorec_knjiga, besedilo)
            if knjiga != []:#a missing title marks a broken subpage: skip it
                slovar_knjige["knjiga"] = knjiga[0]
                slovar_knjige["id_knjige"] = id
                slovar_knjige["avtor"] = re.findall(vzorec_avtor, besedilo)[0]
                slovar_knjige["id_avtor"] = int(id_avtor[k])
                slovar_knjige["serija"] = re.findall(vzorec_serija, besedilo) != []
                slovar_knjige["opis"] = predelaj_opis(izloci_opis(besedilo),slovar_knjige["knjiga"])
                leto = re.findall(vzorec_leto1, besedilo)
                if leto != []:
                    slovar_knjige["leto"] = int(leto[0])
                else:
                    leto = re.findall(vzorec_leto2, besedilo)
                    if leto != []:
                        slovar_knjige["leto"] = int(leto[0])
                    else:
                        # NOTE(review): storing the string "unknown" instead
                        # of None made later work with the year column
                        # awkward (the whole column becomes strings).
                        slovar_knjige["leto"] = "unknown"
                        # better:
                        #slovar_knjige["leto"] = None
                zalozba = re.findall(vzorec_zalozba, besedilo)
                if zalozba != []:
                    slovar_knjige["zalozba"] = zalozba[0]
                else:
                    # here too None would arguably be the better choice
                    slovar_knjige["zalozba"] = "unknown"
                    #slovar_knjige["zalozba"] = None
                slovar_knjige["povprecna_ocena"] = float(re.findall(vzorec_povprecna_ocena, besedilo)[0])
                slovar_knjige["score"] = int(score[k].replace(",", ""))
                slovar_knjige["st_glasov"] = int(st_glasov[k].replace(",", ""))
                slovar_knjige["st_ocen"] = int(re.findall(vzorec_st_ocen, besedilo)[0].replace(",", ""))
                slovar_knjige["st_reviewov"] = int(re.findall(vzorec_st_reviewov, besedilo)[0].replace(",", ""))
                slovar_knjige["nagrade"] = spremeni_v_apostrof(re.findall(vzorec_nagrade, besedilo))
                slovar_knjige["zanri"] = re.findall(vzorec_zanr, besedilo)[:3]
                vse_knjige.append(slovar_knjige)
                ima_nagrade = (slovar_knjige["nagrade"] != [])
                # dict of books without genres and awards (used when writing
                # the csv file); both are multi-valued lists, hence excluded
                knjige_bzn = slovar_knjige.copy()
                knjige_bzn.pop("zanri")
                knjige_bzn.pop("nagrade")
                knjige.append(knjige_bzn)
                # add a boolean "nagrade" column so the analysis can separate
                # awarded from non-awarded books immediately
                knjige_bzn["nagrade"]=ima_nagrade
                # GENRES
                for zanr in slovar_knjige["zanri"]:
                    zanri.append({"id_knjige": id, "zanr" : zanr})
                # AWARDS
                for nagrada in slovar_knjige["nagrade"]:
                    nagrade.append({"id_knjige": id, "nagrada" : nagrada})
            id += 1
            k += 1
        # make a json file (json files were not used in the end; the code is
        # kept in case it is needed later)
        #zapisi_json(vse_knjige, 'PODATKI/vse_knjige{}.json'.format(i))
        # MAKE THE CSV FILES
        # Every hundred books the data is flushed to csv files, so the
        # process does not keep everything in memory — and the site stops
        # serving pages after too many downloads.  When that happens, stop,
        # change the 1 in "for i in range(1,ST_STRANI + 1):" above to the
        # page where you left off (visible from the csv suffixes) and
        # continue.  All csv files can later be merged with "zdruzi_csv.py".
        # the "main" file knjige holds everything except genres and awards
        # (both are multi-valued lists)
        zapisi_csv(knjige, seznam_kljucev(knjige[0]), 'PODATKI/knjige{}.csv'.format(i))
        # separate files mapping book id -> genre and book id -> award
        zapisi_csv(zanri, seznam_kljucev(zanri[0]), 'PODATKI/zanri{}.csv'.format(i))
        zapisi_csv(nagrade, ["id_knjige", "nagrada"], 'PODATKI/nagrade{}.csv'.format(i))
if __name__ == '__main__':
main() | StarcoderdataPython |
11200460 | import sys
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
class Command(BaseCommand):
help = "Shows all available migrations for the current project"
    def add_arguments(self, parser):
        """Register the command's CLI options on *parser*."""
        parser.add_argument(
            'app_label', nargs='*',
            help='App labels of applications to limit the output to.',
        )
        parser.add_argument(
            '--database', default=DEFAULT_DB_ALIAS,
            help='Nominates a database to synchronize. Defaults to the "default" database.',
        )
        # --list and --plan are mutually exclusive output modes; both store
        # into options['format'].
        formats = parser.add_mutually_exclusive_group()
        formats.add_argument(
            '--list', '-l', action='store_const', dest='format', const='list',
            help='Shows a list of all migrations and which are applied.',
        )
        formats.add_argument(
            '--plan', '-p', action='store_const', dest='format', const='plan',
            help=(
                'Shows all migrations in the order they will be applied. '
                'With a verbosity level of 2 or above all direct migration dependencies '
                'and reverse dependencies (run_before) will be included.'
            )
        )
        # Default output mode when neither flag is given.
        parser.set_defaults(format='list')
def handle(self, *args, **options):
self.verbosity = options['verbosity']
# Get the database we're operating from
db = options['database']
connection = connections[db]
if options['format'] == "plan":
return self.show_plan(connection, options['app_label'])
else:
return self.show_list(connection, options['app_label'])
def _validate_app_names(self, loader, app_names):
has_bad_names = False
for app_name in app_names:
try:
apps.get_app_config(app_name)
except LookupError as err:
self.stderr.write(str(err))
has_bad_names = True
if has_bad_names:
sys.exit(2)
def show_list(self, connection, app_names=None):
"""
Show a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
self._validate_app_names(loader, app_names)
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
# If we didn't print anything, then a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.ERROR)
def show_plan(self, connection, app_names=None):
"""
Show all known migrations (or only those of the specified app_names)
in the order they will be applied.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
if app_names:
self._validate_app_names(loader, app_names)
targets = [key for key in graph.leaf_nodes() if key[0] in app_names]
else:
targets = graph.leaf_nodes()
plan = []
seen = set()
# Generate the plan
for target in targets:
for migration in graph.forwards_plan(target):
if migration not in seen:
node = graph.node_map[migration]
plan.append(node)
seen.add(migration)
# Output
def print_deps(node):
out = []
for parent in sorted(node.parents):
out.append("%s.%s" % parent.key)
if out:
return " ... (%s)" % ", ".join(out)
return ""
for node in plan:
deps = ""
if self.verbosity >= 2:
deps = print_deps(node)
if node.key in loader.applied_migrations:
self.stdout.write("[X] %s.%s%s" % (node.key[0], node.key[1], deps))
else:
self.stdout.write("[ ] %s.%s%s" % (node.key[0], node.key[1], deps))
if not plan:
self.stdout.write('(no migrations)', self.style.ERROR)
| StarcoderdataPython |
6551430 | # Copyright (c) 2016 Boulder Real Time Technologies, Inc.
#
# Written by <NAME>
#
# This software may be used freely in any way as long as
# the copyright statement above is not removed.
from __main__ import * # _et all the libraries from parent
class DbMoment(Station):
    '''
    Main class for calculating moment tensors of events

    This class is responsible for importing all modules needed for the
    calculation of the MT. The main PF file "dbmoment.pf" will specify
    the names of the modules that we want and will have the main parameters
    for the inversion.
    '''
    def __init__( self, database=None, orid=None, options=None,
            inv=None, data=None, event=None, synth=None ):
        '''
        Load the event/data/synthetics/inversion helper modules and cache
        the run-time configuration taken from the command-line options.

        NOTE: the original used the Python-2-only "except Exception,e"
        syntax; this version uses "except ... as e" (valid since 2.6),
        which keeps the module importable under Python 3 as well.
        '''
        elog.debug( "DbMoment.__init__(%s)" % (database) )
        # Instantiate Origin Class
        try:
            elog.debug( 'event_obj.Origin(%s)' % database )
            self.my_event = event.Origin( database )
        except Exception as e:
            elog.error("EVENT module init error: [%s]" % e)
        # Instantiate Data Class
        try:
            elog.debug( 'data_obj.Data(%s,%s)' % (options.wave_db, options.allowed_segtype) )
            self.my_data = data.Waveforms(options.wave_db, options.allowed_segtype)
        except Exception as e:
            elog.error("DATA module init error: [%s]" % e)
        # Instantiate Synthetics Class
        try:
            elog.debug( 'synty_obj.Synthetics(%s,%s)' % (options.synth_db_folder, options.tmp_folder) )
            self.my_synth = synth.Synthetics( options.synth_db_folder, options.tmp_folder)
        except Exception as e:
            elog.error("Synthetics module Init Error: [%s]" % e)
        # Instantiate Inversion Classes. Dreger's code wrapper.
        try:
            elog.debug( 'inv_obj.MomentTensor()' )
            self.my_inv = inv.MomentTensor()
        except Exception as e:
            elog.error("Inversion Module Init Error: [%s]" % e)
        # Cache run-time configuration.
        self.orid = orid
        self.model = None
        self.stations = {}
        self.verbose = options.verbose
        self.debug = options.debug
        self.debug_synth = options.debug_synth
        self.debug_real = options.debug_real
        self.tmp_folder = options.tmp_folder
        self.pf = open_verify_pf(options.pf)
        self.select = options.select
        self.reject = options.reject
        self.sta_min = int(options.sta_min)
        self.sta_max = int(options.sta_max)
        self.filter = options.filter
        self.depth_min = options.depth_min
        self.depth_max = options.depth_max
        self.recursive = options.recursive
        self.timewindow = int(options.timewindow)
        self.chan_to_use = options.chan_to_use
        self.min_quality = int(options.min_quality)
        self.arrivals_only = options.arrivals_only
        # Overwrites to PF files
        self.mindistance = options.mindistance
        self.maxdistance = options.maxdistance
        if options.min_variance:
            self.min_variance = float( options.min_variance )
        else:
            self.min_variance = 0.0
        # Extract event information from database
        elog.notify( "Get orid [%s]" % orid )
        self.my_event.get_origin( orid , self.select, self.reject)
        self.my_inv.set_depth( self.my_event.depth )
        self.my_inv.set_folder( self.tmp_folder )
    def mt(self, model):
        '''
        Main method to control the inversion.
        Need to run with any origin from the database and will return all results
        for that inversion.
        '''
        self.model = model
        elog.debug( "mt(model=%s)" % (self.model) )
        self.my_synth.set_model( self.model )
        self.my_event.set_distance_step( self.model['distance_step'] )
        # Save short name of model in object
        self.my_event.model = self.my_synth.model_short_name
        ## Verify we have a valid event
        if self.my_event.depth >= self.depth_min and self.my_event.depth <= self.depth_max:
            elog.info("Event depth [%s] within our limits." % (self.my_event.depth))
        else:
            elog.error("Event depth [%s] out of limits.[%s,%s]" % \
                    (self.my_event.depth,self.depth_min,self.depth_max), 5)
        # sorted by distance
        total_stations = self.my_event.station_list()
        elog.info( 'Total list of stations: %s' % total_stations )
        # Keep every station that passes the distance limits and has usable
        # data for at least one of the configured filters.
        for sta in total_stations:
            elog.info('Test station [%s]' % sta)
            distance = self.my_event.distance(sta)
            if self.mindistance and distance < self.mindistance:
                elog.warning( '%s => %s too close for this config [min:%s]' % \
                        (sta, distance, self.mindistance) )
                continue
            elif distance < int(self.model['distance_min']):
                elog.warning( '%s => %s too close for this model [min:%s]' % \
                        (sta, distance, self.model['distance_min']) )
                continue
            if self.maxdistance and distance > self.maxdistance:
                elog.warning( '%s => %s too far for this config [max:%s]' % \
                        (sta, distance, self.maxdistance) )
                continue
            elif distance > int(self.model['distance_max']):
                elog.warning( '%s => %s too far for this model [max:%s]' % \
                        (sta, distance, self.model['distance_max']) )
                continue
            # Test different filters here.
            test_site = self.add_filtered_station( sta )
            if test_site:
                # Need to put the data into disk in the temporary folder
                # ****** FILES ARE SAVED TO DISK ON THESE 2 LINES. ********
                elog.debug('Real traces saved to %s' % test_site.to_file('real') )
                elog.debug('Synthetic traces saved to %s' % test_site.to_file('synth') )
                # add to list of stations that we want to use
                self.stations[sta] = test_site
            else:
                elog.warning('No data for station %s' % sta )
                continue
        elog.info( 'Valid stations for inversion: %s ' % ', '.join(sorted(self.stations.keys()) ) )
        # Verify that we have min number of stations
        if len(self.stations.keys()) < self.sta_min:
            elog.error('NOT enough stations [%s] for this event. Need [%s]' % \
                    (len(self.stations), self.sta_min))
        # Maybe we have too many sites. Let's keep the best ones.
        while len(self.stations) > ( self.sta_max * 2 ):
            worst = sorted(self.stations.keys(),
                    key=lambda x: float( self.stations[x].vr ) )[0]
            elog.info('Too many stations. To be removed: [%s] [%s]' % \
                    (worst, self.stations[worst].vr) )
            del( self.stations[worst] )
        elog.info('Now we have [%s] sites' % len(self.stations) )
        '''
        We have list of all possible sites. Now invert.
        '''
        elog.debug('Loop for inversion and removal of stations')
        while True:
            elog.debug('determine_mt_solution()')
            self.results = self.my_inv.invert(self.stations)
            elog.notify('INVERSION: Quality:[%s] VR:[%0.1f]' % \
                    ( self.results['Quality'], self.results['VarRed']) )
            ## If recursive not set then stop here
            if not self.recursive:
                elog.debug('Not running recursive. Stop.')
                break
            # Verify number or sites in inversion
            if len(self.stations) <= self.sta_min:
                elog.debug('Minimum number of stations [%s]. Stop.' % len(self.stations) )
                break
            # jackknife variance calculation: re-run the inversion once per
            # station, each time leaving that station out, to find the site
            # whose removal helps (or hurts) the variance reduction the most.
            best_vr = None
            avoid_site = None
            keep_results = None
            worst_vr = None
            keep_site = None
            elog.debug('RecursiveTest: original station list: %s)' % self.stations.keys() )
            for test in sorted(self.stations.keys()):
                elog.debug('\tRecursiveTest: TEST ON REJECTING: %s)' % test )
                elog.debug('\tdetermine_mt_solution(RecursiveTest:%s)' % test)
                results = self.my_inv.invert(self.stations, ignore_sta=test )
                if not results:
                    worst_vr = 0.0
                    avoid_site = test
                    elog.info('\tRecursiveTest [%s] Q:[-] VR:[-]' % test )
                    continue
                elog.info('\tRecursiveTest [%s] Q:[%s] VR:[%0.1f]' % \
                        ( test, results['Quality'], results['VarRed']) )
                if not worst_vr or results['VarRed'] < worst_vr:
                    worst_vr = results['VarRed']
                    keep_site = test
                if not best_vr or results['VarRed'] > best_vr:
                    best_vr = results['VarRed']
                    avoid_site = test
                    keep_results = results
                elog.info('\tTEMP: Best contributor [%s] ' % keep_site )
                elog.info('\tTEMP: Worst contributor [%s] ' % avoid_site)
            elog.info('\tBest Contributor [%s] ' % keep_site )
            elog.info('\tWorst Contributor [%s] ' % avoid_site)
            elog.info('\tRecursiveTest Best option is avoiding [%s] for Q:[%s] VR:[%0.1f]' % \
                    ( avoid_site, keep_results['Quality'], keep_results['VarRed'] ) )
            # IF WE CAN INCREASE QUALITY SIGNIFICANTLY THEN DO IT!!!!
            # VERIFY IF THE VR IS BETTER THAN 1.2 TIMES THE ORIGINAL!!!!
            if self._good_fit(keep_results['VarRed'], self.min_variance) and \
                    keep_results['VarRed'] > self.results['VarRed'] * 1.2:
                elog.notify('Remove %s to improve VR %s=>%0.1f' % \
                        (avoid_site, self.results['VarRed'], keep_results['VarRed']) )
                self.results = keep_results
                self.stations[avoid_site].clean()
                del( self.stations[ avoid_site ] )
                continue
            elif len( self.stations.keys() ) > self.sta_max:
                elog.notify('Remove %s. Too many sites. VR %s=>%0.1f' % \
                        (avoid_site, self.results['VarRed'], keep_results['VarRed']) )
                self.results = keep_results
                self.stations[avoid_site].clean()
                del( self.stations[ avoid_site ] )
                continue
            else:
                elog.notify( 'No significant improvement from RecursiveTest. Continue.' )
            # Might need to review final solution for individual VarRed
            # VERIFY INDIVIDUAL SITES IN THIS POSSIBLE SOLUTION
            if len( self.stations.keys() ) > self.sta_min:
                # only get here if no station was removed on last loop
                worst = sorted(self.results['variance'],
                        key=lambda x: float(self.results['variance'][x]) )[0]
                if self.results['variance'][ worst ] < self.min_variance:
                    elog.notify('Remove %s for bad VR:%0.1f' % (worst, self.results['variance'][ worst ]) )
                    self.results = keep_results
                    self.stations[worst].clean()
                    del self.stations[worst]
                    continue
                else:
                    elog.info('All stations above minimum variance reduction [%s]' % self.min_variance)
            # If nothing changed then stop here.
            break
        # Verify that we have minimum quality
        if int(self.results['Quality']) < int(self.min_quality):
            elog.warning('Quality[%s] Minimum required [%s]' % \
                    (self.results['Quality'], self.min_quality))
            elog.warning('NO RESULTS PRODUCED WITH VALID PARAMETERS.')
            return {}
        self.results['event'] = self.my_event
        self.results['stations'] = self.stations
        elog.debug(self.results)
        return self.results
    def _good_fit(self, test, min_val=0.0):
        '''
        Sanity-check a variance-reduction value: reject NaN (test != test),
        +/-Inf, implausibly large values (>150) and values below min_val.
        '''
        if test != test: return False
        if float(test) == float('Inf'): return False
        if float(test) == float('-Inf'): return False
        if float(test) > 150.0: return False
        if float(test) < float(min_val): return False
        return True
    def add_filtered_station(self, sta):
        '''
        Method that will try to bring a new station to the process.
        We take care of pulling the data from the archive and the
        matching synthetics.
        Also verify which is the best filter for the site.
        '''
        # Skip this station if we want arrivals only
        if self.arrivals_only and not self.my_event.has_arrival(sta):
            elog.warning('%s No arrivals on database. Skip station.' % sta)
            return None
        # Calculate some values for station.
        # Then it's simpler to handle in the rest of the loop.
        distance = self.my_event.distance(sta)
        seaz = self.my_event.seaz(sta)
        esaz = self.my_event.esaz(sta)
        time = self.my_event.time
        depth = self.my_event.depth
        # Get the waveforms for this station
        elog.debug( 'Get Waveforms for %s' % sta )
        real = self.my_data.get_waveforms( sta, self.chan_to_use, time, esaz=esaz,
                seaz=seaz, tw=self.timewindow, filters=self.filter,
                debug_plot=self.debug_real)
        # Verify that we have good information for this site
        if not real:
            elog.warning('Problems during get_waveforms() for %s' % sta )
            return None
        elog.debug( '%s Get SYNTH at %s km' % \
                (sta, distance) )
        synth = self.my_synth.get_synth(depth=depth,
                distance=distance, tw=self.timewindow,
                filters=self.filter, response=real[ self.filter[-1] ].response,
                debug_plot=self.debug_synth)
        # Verify that we have good synthetics for this site
        if not synth:
            elog.warning('%s Problems during get_synth(depth=%s,distance=%s)' % \
                    (sta, depth, distance) )
            return None
        # Save station-event metadata to my object
        results = Station(sta,self.tmp_folder)
        results.depth = self.my_event.depth
        results.time = self.my_event.time
        results.distance = self.my_event.distance(sta)
        results.realdistance = self.my_event.realdistance(sta)
        results.azimuth = self.my_event.esaz(sta)
        results.timewindow = self.timewindow
        # Verify which filter provides the best
        # variance reduction
        variance_results = {}
        zcor_results = {}
        for fltr in self.filter:
            elog.debug('%s Test VR for [%s]' % (sta,fltr) )
            # Save temp data to object
            results.real_data( real[fltr] )
            results.synth_data( synth[fltr] )
            # Save traces to tmp files
            file_for_real = results.to_file('real')
            file_for_synth = results.to_file('synth')
            elog.debug('\tTEMP: Real traces saved to %s' % file_for_real )
            elog.debug('\tTEMP: Synthetic traces saved to %s' % file_for_synth )
            # We have the information for this station. Let's try the fit alone.
            # FIX: initialize before the try block -- the original left `temp`
            # unbound (NameError on the next line) if invert() raised and
            # elog.error() returned instead of terminating.
            temp = None
            try:
                temp = self.my_inv.invert( {sta:results} )
                results.clean()
            except Exception as e:
                elog.warning('%s %s' % (Exception,e) )
                elog.error('Inversion on {0} failed!'.format(sta) )
            if not temp:
                continue
            variance_results[ fltr ] = temp['variance'][sta]
            zcor_results[ fltr ] = temp['zcor'][sta]
            elog.info( '\tTEMP: VR:%s ZCOR:%s Filter:[%s]' % \
                    (variance_results[ fltr ], zcor_results[ fltr ], fltr) )
        elog.debug( variance_results )
        elog.debug( zcor_results )
        if not variance_results:
            elog.warning('No VR results for station {0}!'.format(sta) )
            return None
        # Pick the filter with the highest variance reduction.
        best = sorted(variance_results.items(), key=operator.itemgetter(1))[-1]
        best_filter = best[0]
        best_vr = best[1]
        best_zcor = zcor_results[ best_filter]
        elog.info( '%s Best VR is [%0.1f] for [%s]' % ( sta, best_vr, best_filter) )
        if best_vr < self.min_variance:
            elog.warning( '%s Best VR [%0.1f] lower than allowed minimal [%s]' % \
                    (sta, best_vr, self.min_variance) )
            return None
        # Save data to object
        results.real_data( real[best_filter] )
        results.synth_data( synth[best_filter] )
        results.filter = best_filter
        results.zcor = best_zcor
        results.vr = best_vr
        return results
# Guard: this module is an Antelope dbmoment plug-in and must be imported, never executed directly.
if __name__ == "__main__": raise ImportError( "\n\n\tAntelope's dbmoment module. Not to run directly!!!! **\n" )
| StarcoderdataPython |
5077252 |
import os
import json
class User(object):
    """A user account backed by ``<dirPath>/data.json``.

    The JSON file must exist and contain at least ``{"version": 1}``;
    otherwise the constructor raises.  An optional ``"privileges"`` list
    describes what the user may do.
    """

    def __init__(self, userName:str, dirPath:str):
        assert isinstance(userName, str)
        # FIX: the original only assigned `dataFilePath` inside the
        # `dirPath is not None` branch, so passing dirPath=None raised an
        # unrelated NameError below instead of the intended Exception.
        dataFilePath = None
        if dirPath is not None:
            assert isinstance(dirPath, str)
            dataFilePath = os.path.join(dirPath, "data.json")
        self.__name = userName
        self.__dirPath = dirPath
        self.__dataFilePath = dataFilePath
        if (dataFilePath is not None) and os.path.isfile(dataFilePath):
            with open(dataFilePath, "r") as f:
                self.__data = json.load(f)
            # Only schema version 1 is supported.
            assert self.__data["version"] == 1
        else:
            raise Exception()

    @property
    def dirPath(self) -> str:
        # Directory that holds this user's data.json (may be None).
        return self.__dirPath

    @property
    def hasKeyPair(self) -> bool:
        # Key pairs are not implemented yet; always False.
        return False

    @property
    def privileges(self) -> tuple:
        """Return the user's privileges as an immutable tuple."""
        ret = self.__data.get("privileges", [])
        assert isinstance(ret, list)
        return tuple(ret)

    @property
    def name(self) -> str:
        return self.__name

    def __str__(self):
        return self.__name

    def hasPrivilege(self, privilege:str) -> bool:
        """Return True if `privilege` appears in the user's privilege list."""
        assert isinstance(privilege, str)
        ret = self.__data.get("privileges", [])
        assert isinstance(ret, list)
        return privilege in ret

    def __eq__(self, value):
        if isinstance(value, User):
            return value.__name == self.__name
        # FIX: return NotImplemented (not None) for foreign types so Python
        # can fall back to the other operand's comparison / identity check.
        return NotImplemented
| StarcoderdataPython |
230204 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from cms.models import CMSPlugin
from .conf import settings
class TabGroupPlugin(CMSPlugin):
    """django CMS plugin model for a group of tabs."""
    # Required display name for the whole tab group.
    tab_group_name = models.CharField(max_length=200, blank=False)
    # Template used to render the group; choices come from project settings.
    layout_template = models.CharField(
        verbose_name=_('layout'),
        max_length=200,
        choices=settings.ALDRYN_TABNAV_TEMPLATES,
        default=settings.ALDRYN_TABNAV_DEFAULT_TEMPLATE,
    )
    def __unicode__(self):
        # Python 2 string representation; falls back to the pk when unnamed.
        return self.tab_group_name or unicode(self.pk)
class TabPlugin(CMSPlugin):
    """django CMS plugin model for a single tab inside a tab group."""
    tab_title = models.CharField(verbose_name=_('title'), max_length=127)
    # Optional icon; choices come from project settings.
    tab_extra_icon = models.CharField(
        verbose_name=_('icon css class'),
        max_length=63,
        choices=settings.ALDRYN_TABNAV_ICONS,
        blank=True,
    )
    def __unicode__(self):
        return self.tab_title
    def tab_html_id(self):
        # Slug used as the HTML anchor id; pk prefix keeps it unique.
        html_id = '%d-%s' % (self.pk, self.tab_title)
        return slugify(html_id)
    def tab_icon_class(self):
        # CSS class string for the configured icon.
        icon_class = 'icon icon-{0}'
        return icon_class.format(self.tab_extra_icon)
class TabDropDownPlugin(CMSPlugin):
    """django CMS plugin model for a drop-down style tab."""
    # used in template to distinguish between normal and dropdown tabs
    is_dropdown = True
    tab_title = models.CharField(verbose_name=_('title'), max_length=127)
    def __unicode__(self):
        return self.tab_title
    def tab_html_id(self):
        # Slug used as the HTML anchor id; pk prefix keeps it unique.
        html_id = '%d-%s' % (self.pk, self.tab_title)
        return slugify(html_id)
| StarcoderdataPython |
8103859 | #!/usr/bin/python
import nfldb
import nflgame
from nflgame import *
import json
import sys
import urllib
import urllib2
import re
#db = nfldb.connect()
#q.game(season_year=2014, season_type='Regular')
#for pp in q.sort('passing_yds').limit(10).as_aggregate():
# print pp.player, pp.passing_yds
#q.game(season_year=2014, week=1)
#for p in q.sort('passing_yds').limit(40).as_aggregate():
# print p.player, p.player.team, p.player_id, p.player.profile_id
def print_players(team):
    # Print full name and profile URL for every player on `team` (nfldb).
    # NOTE(review): relies on a module-level `db` (nfldb connection) whose
    # creation is commented out above -- confirm `db` is defined before calling.
    q = nfldb.Query(db)
    q.player(team=team)
    for pp in q.as_players():
        print pp.full_name, pp.profile_url
def http_request(url):
    """Fetch `url` over HTTP and return the response body, or "" on failure."""
    try:
        req = urllib2.Request(url)
        response = urllib2.urlopen(req)
        return response.read()
    except Exception:
        # Narrowed from the original bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate; any network/HTTP failure yields "".
        return ""
def get_esb_id_profile_url(url):
    """Scrape an NFL player profile page and return his 9-character ESB ID.

    Returns "" when the page cannot be fetched, the ESB ID is missing, or
    it does not have the expected length.

    FIX: the original also extracted the GSIS ID and called ``len(gid)``
    unconditionally; whenever the GSIS ID was absent that raised TypeError,
    which the bare ``except`` turned into "" even if a valid ESB ID had
    been found.  The unused GSIS extraction is dropped and the length check
    is guarded against None.
    """
    try:
        content = http_request(url)
        match = re.search(r'ESB\s+ID:\s+([A-Z]{3}[0-9]+)', content)
        if match is None:
            return ""
        esb = match.group(1).strip()
        # ESB ids are exactly 9 characters; discard anything else.
        if len(esb) != 9:
            return ""
        return esb
    except Exception:
        return ""
def build_roster():
    # Build {team: [player-dict, ...]} for every NFL team, writing each
    # team's list to ./data/<TEAM>.json as soon as it is complete so partial
    # progress survives a crash.
    roster = {}
    # NOTE(review): `img` is assigned but never used.
    img = ''
    for x in nflgame.teams:
        roster[x[0]] = []
        print x[0]
        players_pool = nflgame.players.itervalues()
        for p in players_pool:
            if p.team == x[0]:
                # Enrich the player's attribute dict with his scraped ESB id.
                esb=get_esb_id_profile_url(p.profile_url)
                details = p.__dict__
                print details['name']
                details['esb_id'] = esb
                roster[x[0]].append(details)
        ptr = open("./data/"+x[0]+".json", "w+")
        ptr.write(json.dumps(roster[x[0]]))
        ptr.close()
    return roster
def build_mapping():
    # For every NFL team, write ./data/<TEAM>.mapping: a JSON object keyed by
    # gsis_id, mapping to the player's attribute dict (plus scraped ESB id).
    # NOTE(review): `img` is unused, and the function returns only the LAST
    # team's roster dict -- the files on disk are the real output.
    img = ''
    for x in nflgame.teams:
        roster = {}
        print x[0]
        players_pool = nflgame.players.itervalues()
        for p in players_pool:
            if p.team == x[0]:
                esb=get_esb_id_profile_url(p.profile_url)
                details = p.__dict__
                details['esb_id'] = esb
                roster[details['gsis_id']] = details
        ptr = open("./data/"+x[0]+".mapping", "w+")
        ptr.write(json.dumps(roster))
        ptr.close()
    return roster
#build_roster()
# Entry point: regenerate the per-team gsis_id -> player mapping files.
build_mapping()
#print_players("NYG")
| StarcoderdataPython |
9634186 | <filename>breakdown_utilization_experiments/asynchronous_task_generation.py
import functools
from cProfile import run
from math import floor, inf
import pickle
from random import choice, randint
import matplotlib.pyplot as plt
from multiprocessing import cpu_count, Pool
from priority_functions import *
from schedule_plotting import *
from task_scheduling import *
from task_systems import *
from time import time
import sys
# discrete time model with 1 time unit = 1 microsecond
MS = 1000
# Candidate task periods: powers of two from 8 ms up to 256 ms.
_POSSIBLE_PERIODS = [MS * 2 ** k for k in range(3, 9)]
def random_task(id=None):
    """Draw one random PeriodicTask.

    The period comes from _POSSIBLE_PERIODS; phase and cost are sampled
    inside the period, and the relative deadline lies in [cost, period]
    so every task is at least nominally schedulable.
    """
    p = choice(_POSSIBLE_PERIODS)
    offset = randint(0, p - 1)          # release phase within the period
    exec_cost = randint(1, p - 1)       # worst-case execution time
    deadline = randint(exec_cost, p)    # constrained deadline
    return PeriodicTask(phase=offset, period=p, cost=exec_cost,
                        relative_deadline=deadline, id=id)
def random_task_system(num_tasks):
    """Build a PeriodicTaskSystem of `num_tasks` random tasks with ids 0..n-1."""
    generated = []
    for task_id in range(num_tasks):
        generated.append(random_task(id=task_id))
    return PeriodicTaskSystem(generated)
| StarcoderdataPython |
class Player:
    """A game participant, remembering whose turn it takes and its marker."""

    def __init__(self, turn, character):
        # Marker drawn on the board and the turn order this player owns.
        self.character = character
        self.turn = turn
class Bot:
    """A computer-controlled participant; mirrors Player's attributes."""

    def __init__(self, turn, character):
        # Marker drawn on the board and the turn order this bot owns.
        self.character = character
        self.turn = turn
| StarcoderdataPython |
1616453 | import json
import logging
import os
import re
import requests
import retrying
import shakedown
from dcos import marathon
log = logging.getLogger(__name__)
logging.basicConfig(format='[%(levelname)s] %(message)s', level='INFO')
def get_json(file_name):
    """Load and return the JSON app definition stored at `file_name`
    (used for both Docker and UCR backend definitions).
    """
    with open(file_name) as handle:
        return json.load(handle)
def find_app_port(config, app_name):
    """Find the haproxy frontend port for `app_name` in a _haproxy_getconfig dump.

    The app's frontend section is located via regex and the port digits of
    its `bind` line are captured directly.

    FIX: the original returned ``pattern.group()[-5:]`` -- the last five
    characters of the whole match -- which silently produced garbage such
    as ":8080" for any port shorter than five digits.  Capturing the digit
    group returns the correct port regardless of its length.

    Raises AttributeError when no matching section exists, preserving the
    original failure mode relied upon by retrying callers.
    """
    match = re.search(r'{0}(.+?)\n  bind .+:(\d+)'.format(app_name), config)
    return match.group(2)
@retrying.retry(stop_max_delay=10000)
def get_app_port(app_name, ip):
    """Return the haproxy port `app_name` is bound to, querying the
    _haproxy_getconfig endpoint on `ip` (retried for up to 10 seconds).
    """
    config_response = requests.get('http://{}:9090/_haproxy_getconfig'.format(ip))
    return find_app_port(config_response.content.decode("utf-8"), app_name)
@retrying.retry(stop_max_delay=10000)
def get_app_content(app_port, ip):
    """Fetch the app served at `ip`:`app_port` and return a
    (stripped-body-text, http-status-code) tuple (retried for up to 10s).
    """
    reply = requests.get('http://{}:{}'.format(ip, app_port))
    body_text = reply.content.decode("utf-8").rstrip()
    return (body_text, reply.status_code)
def test_backends():
    """ Tests Marathon-lb against a number of Docker and UCR backends.
    All backends are defined in backends/ & backends_1.9/.
    The test retrieves the port to which each app is bound on.
    This is done through retrieving the port from _haproxy_getconfig.
    Each app is configured to display its id as content if launched healthy.
    The test asserts whether the text response matches the expected response.
    """
    public_ip = os.environ['PUBLIC_AGENT_IP']
    # DC/OS 1.9 uses its own app definitions (older Marathon app schema).
    if os.environ['DCOS_VERSION'] == '1.9':
        app_defs = [get_json('backends_1.9/' + filename)
                    for filename in os.listdir('backends_1.9/')]
    else:
        app_defs = [get_json('backends/' + filename)
                    for filename in os.listdir('backends/')]
    for app_def in app_defs:
        app_id = app_def['id']
        # Marathon ids are absolute ("/name"); haproxy sections drop the slash.
        app_name = app_id[1:] if app_id[0] == '/' else app_id
        print(app_name)
        log.info('{} is being tested.'.format(app_name))
        client = marathon.create_client()
        client.add_app(app_def)
        shakedown.deployment_wait(app_id=app_id)
        # All requested instances must be running before probing the app.
        app = client.get_app(app_id)
        tasks = app['tasksRunning']
        instances = app_def['instances']
        assert tasks == instances, ("Number of tasks is {}, {} were expected."
                                    .format(tasks, instances))
        log.info('Number of tasks for {} is {}'.format(app_name, tasks))
        # The app must be bound to the port declared in its HAPROXY_0_PORT label.
        port = get_app_port(app_name, public_ip)
        expected_port = app_def["labels"]["HAPROXY_0_PORT"]
        msg = "{} bound to {}, not {}.".format(app_name, port, expected_port)
        assert port == expected_port, msg
        log.info('{} is bound to port {}.'.format(app_name, port))
        # A healthy app serves its own name as the response body.
        text_response, status_code = get_app_content(port, public_ip)
        expected_response = app_name
        msg = "Response is {}, not {}".format(text_response, expected_response)
        if status_code == 200:
            assert text_response == expected_response, msg
            log.info('Text response is {}.'.format(text_response))
| StarcoderdataPython |
6412072 | import numpy as np
r=int(input('Rate:'))
p=int(input('Principle:'))
n=int(input('Months:'))
print(np.pmt(r,n,p))
| StarcoderdataPython |
11296414 | #!/usr/bin/env python3
#
# Copyright (c) 2022 <NAME>
#
# MIT License - See LICENSE file accompanying this package.
#
"""Wrapper for "pulumi destroy" command"""
from typing import (Any, Dict, List, Optional, Union, Set, Type)
from copy import deepcopy
from lib2to3.pgen2.token import OP
import os
import sys
import json
import subprocess
from ...backend import XPulumiBackend
from ...project import XPulumiProject
from ...base_context import XPulumiContextBase
from ...exceptions import XPulumiError
from ...internal_types import JsonableTypes
from ..help_metadata import (
PulumiMetadata,
ParsedPulumiCmd,
)
from ...stack import XPulumiStack
from ..wrapper import (
CmdExitError,
PulumiCommandHandler,
PulumiWrapper,
PosStackArgPulumiCommandHandler,
PrecreatePosStackArgPulumiCommandHandler,
PrecreatePulumiCommandHandler,
Fore,
Back,
Style,
)
class PulumiCmdHandlerDestroy(PulumiCommandHandler):
full_subcmd = "destroy"
_recursive: bool
@classmethod
def modify_metadata(cls, wrapper: PulumiWrapper, metadata: PulumiMetadata):
topic = metadata.topic_by_full_name[cls.full_subcmd]
topic.add_option([ '-R', '--recursive' ], description='[xpulumi] Recursively destroy dependencies first', is_persistent = True)
def custom_tweak(self) -> None:
self._recursive = not not self.get_parsed().pop_option_optional_bool('--recursive')
def do_pre_raw_pulumi(self, cmd: List[str], env: Dict[str, str]) -> Optional[int]:
yes_flag = self.get_parsed().get_option_bool('--yes')
stack = self.require_stack()
if not stack.is_deployed():
print(
f"{self.ecolor(Fore.GREEN)}NOTE: xpulumi stack '{stack.full_stack_name}' has already been "
f"destroyed or has never been deployed.{self.ecolor(Style.RESET_ALL)}", file=sys.stderr
)
return 0
if not stack.is_deployable():
raise XPulumiError(f"Stack {stack.full_stack_name} is not destroyable")
dependencies = stack.get_stack_destroy_order(include_self=False)
remaining: List[XPulumiStack] = []
for dep in dependencies:
if dep.is_deployed():
remaining.append(dep)
elif self._recursive:
print(
f"{self.ecolor(Fore.GREEN)}NOTE: dependent xpulumi stack '{dep.full_stack_name}' has already been "
f"destroyed or has never been deployed.{self.ecolor(Style.RESET_ALL)}", file=sys.stderr
)
if len(remaining) > 0:
if not self._recursive:
raise XPulumiError(
f"Cannot destroy stack {stack.full_stack_name} "
f"until dependencies are destroyed: {', '.join(x.full_stack_name for x in remaining)}"
)
for dep in remaining:
dep_stack_name = dep.stack_name
dep_project = dep.project
print(f"\n{self.ecolor(Fore.GREEN)}===============================================================================", file=sys.stderr)
print(f" Destroying dependent xpulumi project {dep_project.name}, stack {dep_stack_name}", file=sys.stderr)
print(f"==============================================================================={self.ecolor(Style.RESET_ALL)}\n", file=sys.stderr)
cmd = ['destroy']
if yes_flag:
cmd.append('--yes')
rc = dep_project.call_project_pulumi(cmd, stack_name=dep_stack_name)
if rc != 0:
return rc
print(f"\n{self.ecolor(Fore.GREEN)}===============================================================================", file=sys.stderr)
print(f" All dependent stacks destroyed; destroying xpulumi project {stack.project.name}, stack {stack.stack_name}", file=sys.stderr)
print(f"==============================================================================={self.ecolor(Style.RESET_ALL)}\n", file=sys.stderr)
return None
| StarcoderdataPython |
1800844 | <reponame>llinmeng/PythonStudy
# -*- coding: utf-8 -*-
# Process the file one line at a time.  FIX: use `with` so the handle is
# deterministically closed even if process() raises -- the original relied
# on the garbage collector to close the file it opened inline.
with open(filename) as source:
    for line in source:
        process(line)
1916843 | <reponame>Yadira-Ferrer/tytus
import tabla_simbolos as TS
class Instruccion():
    """Generic AST wrapper pairing an instruction kind with its payload."""

    def __init__(self, tipo, instruccion):
        # The concrete parsed statement and the tag describing its kind.
        self.instruccion = instruccion
        self.tipo = tipo
class Select():
    """SELECT statement AST node.

    Holds the DISTINCT flag, selected columns, FROM sources, JOIN clauses,
    ORDER BY specification and WHERE conditions.
    """

    def __init__(self, dist, selcol, fromcol, joins, order, conditions):
        self.dist = dist
        self.selcol = selcol
        self.fromcol = fromcol
        self.joins = joins
        self.order = order
        self.conditions = conditions

    def execute(self):
        # FIX: the original declared `def execute():` without `self`, so
        # calling it on an instance raised TypeError.
        # Call the method that will actually perform the select (placeholder).
        print('ejecutando select')
class AlterTable():
    """ALTER TABLE AST node: table id, columns, constraint, foreign keys
    and their references."""

    def __init__(self, id, cols, constrain, fkey, ref):
        # Stash every clause of the statement verbatim.
        self.ref = ref
        self.fkey = fkey
        self.constrain = constrain
        self.cols = cols
        self.id = id

    def execute(self):
        """Trace the parsed clauses (placeholder for the real DDL work)."""
        print('ejecutando alter table')
        print('id : ' + str(self.id))
        print('cols : ' + str(self.cols))
        print('constrain : ' + str(self.constrain))
        print('foreing keys :' + str(self.fkey))
        print('references : ' + str(self.ref))
class CreateDB():
    """CREATE DATABASE AST node: database name, owner and storage mode."""

    def __init__(self, id, owner, mode):
        self.mode = mode
        self.owner = owner
        self.id = id

    def execute(self):
        """Trace the parsed statement (placeholder for the real DDL work)."""
        print('Ejecutando Create DB')
        print('db id : ' + str(self.id))
        print('owner : ' + str(self.owner))
        print('mode : ' + str(self.mode))
class ShowDB():
    """SHOW DATABASES AST node (carries no data)."""

    def __init__(self):
        # Mirrors the original: announces construction on stdout.
        print('show')

    def execute(self):
        """Trace execution (placeholder for the real listing)."""
        print('Ejecutando ShowDB')
class Drop():
    """DROP statement AST node: just the name of the object to drop."""

    def __init__(self, id):
        self.id = id

    def execute(self):
        """Trace execution (placeholder for the real drop)."""
        print('Ejecutando Drop')
        print('id : ' + self.id)
class CreateTable():
    """CREATE TABLE AST node: table name, owning database, column list and
    optional inherited table."""

    def __init__(self, id, base, cols, inh):
        # FIX: the original read ``self.id = id,`` -- the stray trailing
        # comma wrapped the table name in a 1-tuple, unlike every sibling
        # node, so str(self.id) printed "('name',)".
        self.id = id
        self.base = base
        self.cols = cols
        self.inh = inh

    def execute(self, ts):
        """Trace the parsed statement; `ts` is the symbol table (unused here)."""
        print('Ejecutando Creare Table')
        print('id : ' + str(self.id))
        for col in self.cols:
            print('col id : ' + str(col.id))
            print('col type : ' + str(col.tipo))
        if self.inh is not None:
            print('Inherit : ' + self.inh)
class Insert():
    """INSERT statement AST node: target table `id` and the list of values."""

    def __init__(self, id, vals):
        # FIX: removed the stray ``print('init')`` debug statement the
        # original left in the constructor -- building a node must not
        # write to stdout.
        self.id = id
        self.vals = vals

    def execute(self):
        """Trace the parsed statement (placeholder for the real insert)."""
        print('Ejecutando Insert')
        print('id : ' + str(self.id))
        for val in self.vals:
            print('value : ' + str(val))
class UseDB():
    """USE <database> AST node: just the database name to switch to."""

    def __init__(self, id):
        self.id = id

    def execute(self):
        """Trace execution (placeholder for the real database switch)."""
        print('Ejecutando Use DB')
        print('id : ' + self.id)
class Delete():
    """DELETE statement AST node: target table and WHERE condition."""

    def __init__(self, id, cond):
        self.cond = cond
        self.id = id

    def execute(self):
        """Trace execution (placeholder; the condition is not printed)."""
        print('Ejecutando Delete')
        print('id : ' + str(self.id))
class Update():
    """UPDATE statement AST node: target table and the values to set."""

    def __init__(self, id, vals):
        self.id = id
        self.vals = vals

    def execute(self):
        """Trace the parsed statement (placeholder for the real update)."""
        print('Ejecutando Update')
        # FIX: the original printed ``str(id)`` -- the *builtin* ``id``
        # function -- instead of the instance's table name.
        print('id : ' + str(self.id))
'''
import tabla_simbolos as TS
import Errores as E
#Creación de la tabla de simbolos
ts_global = TS.tabla_simbolos()
#Creación de lista de errores
ls_error = []
def create_table(db, nombre, columnas, ts):
nueva_tabla = TS.Simbolo(nombre, TS.tipo_simbolo.TABLE, None, db, None, None, None, None)
x = columnas.split(",")
for i in x:
c = i.split(":")
print('El nombre es -> ' + c[0] + ' y el tipo es -> ' + c[1])
#create_column(db, nombre, c[0], c[1], ts)
ts.agregar_simbolo(nueva_tabla)
return ts
def create_column(db, tabla, columna, tipo, ts):
nueva_columna = TS.Simbolo(columna,TS.tipo_simbolo.INTEGER,None,db,0,True,False,None)
agregar = ts.agregar_columna(tabla, db, nueva_columna)
class CreateDB():
def __init__ (self, replace, ifnot, iden, owner, mode): # boolean, boolean, string, string, integer
self.replace = replace # si existe, la reemplaza/modifica
self.ifnot = ifnot # si existe, no la crea
self.iden = iden # nombre de la base de datos
self.owner = owner # nombre/id del creador
self.mode = mode # modo de almacenamiento
def ejecutar(self):
nueva_base = TS.Simbolo(self.iden, TS.tipo_simbolo.DATABASE, None, None, None, None, None, None)
existe = False
bases = ts_global.get_databases() # obtiene todas las bases de datos
for base in bases:
if base.id == self.iden: # verifico si existe
existe = True
break
if not self.ifnot: # si no viene "IF NOT EXISTS", se crea
if self.replace: # si viene "OR REPLACE"
if existe: # si existe la base de datos, se elimina
ts_global.drop_db(self.iden)
ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo
else: # si no viene "OR REPLACE"
if existe: # si existe, es un error
nuevo_error = E.Errores('Semántico.', 'Ya existe una base de datos con el nombre \'' + self.iden + '\'.')
#ls_error.append(nuevo_error)
print(nuevo_error.error)
else: # si no existe
ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo
else: # si viene "IF NOT EXISTS"
if self.replace: # si viene "OR REPLACE", es error
nuevo_error = E.Errores('Semántico.', 'No pueden venir conjuntamente las cláusulas \'OR REPLACE\' e \'IF NOT EXISTS\'.')
#ls_error.append(nuevo_error)
print(nuevo_error.error)
else:
if not existe: # si existe la base de datos, no se crea, si no
ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo
class UseDB():
def __init__(self, iden): # string
self.iden = iden # nombre de la base de datos
def ejecutar(self):
bases = ts_global.get_databases() # obtiene todas las bases de datos
for base in bases:
if base.id == self.iden: # verifico si existe
return self.iden
new_error = E.Errores('Semántico.', 'La base de datos \'' + self.iden + '\' no existe.')
#ls_error.append(new_error)
print(new_error.error)
return None
class ShowDB():
def ejecutar(self):
bases = ts_global.get_databases() # obtiene todas las bases de datos
respuesta = '\n'
for base in bases:
respuesta = respuesta + '\t' + base.id + '\n' # concatena los nombres y los retorna
return respuesta + '\n'
class InsertT():
def __init__(self, tabla, base, campos, valores): # string, string, [string], [string]
self.tabla = tabla
self.base = base
self.campos = campos
self.valores = valores
def ejecutar(self):
if self.base == None:
nuevo_error = E.Errores('Semántico.', 'No se ha seleccionado una base de datos.')
#ls_error.append(nuevo_Error)
return
tabla = ts_global.get_table(self.base, self.tabla)
if tabla == None:
nuevo_error = E.Errores('Semántico.', 'No se ha encontrado la tabla solicitada.')
ls_error.append(nuevo_Error)
return
c_campos = -1
if self.campos != None:
columna = None
errores = False
c_campos = 0
for campo in self.campos:
columna = ts_global.get_column(self.base, self.tabla, campo)
c_campos = c_campos + 1
if columna == None:
new_error = E.Errores('Semántico.', 'No existe el campo \'' + columna.id + '\' en la tabla \'' + self.tabla + '\'.')
#ls_error.append(nuevo_Error)
print(new_error.error)
errores = True
elif columna.error != None:
#ls_error.append(nuevo_Error)
print(columna.error)
errores = True
if errores:
return
c_valores = 0
for valor in self.valores:
c_valores = c_valores + 1
if c_campos == -1:
print('no trae campos, verifica cantidad de campos e inserta en todos')
elif c_campos == 0:
print('error al obtener los campos')
elif c_campos != c_valores:
new_error = E.Errores('Semántico.', 'La cantidad de campos a ingresar no coincide con la cantidad de valores.')
#ls_error.append(nuevo_Error)
print(new_error.error)
else:
print('inserción')
''' | StarcoderdataPython |
221390 | <reponame>seuzmj/pyBN<filename>pyBN/inference/map_exact/__init__.py
from pyBN.inference.map_exact.ilp_map import *
from pyBN.inference.map_exact.ve_map import * | StarcoderdataPython |
6440805 | <filename>corehq/apps/export/management/commands/process_skipped_pages.py<gh_stars>100-1000
import multiprocessing
import os
import re
import shutil
import tempfile
import zipfile
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
import sh
from corehq.apps.export.dbaccessors import get_properly_wrapped_export_instance
from corehq.apps.export.multiprocess import (
UNPROCESSED_PAGES_DIR,
MultiprocessExporter,
RetryResult,
_add_compressed_page_to_zip,
)
from corehq.util.files import safe_filename
class Command(BaseCommand):
    # Fixed copy-pasted help text: this command re-processes the pages a
    # multiprocess export failed on; it does not remove columns.
    help = "Process the unprocessed pages of a multiprocess export"

    def add_arguments(self, parser):
        """Register the CLI arguments for this management command."""
        parser.add_argument('export_id')
        parser.add_argument(
            '--export_path',
            help='Path to export ZIP',
        )
        parser.add_argument(
            '--processes',
            type=int,
            dest='processes',
            default=multiprocessing.cpu_count() - 1,
            help='Number of parallel processes to run.'
        )
        parser.add_argument(
            '--force-upload',
            action='store_true',
            help='Upload the final archive even if there are still unprocessed pages'
        )

    def handle(self, **options):
        """Re-run the export on the pages a previous run failed to process.

        Workflow: extract the ``unprocessed/`` pages from the export archive
        (downloading the latest one if no valid path was given), feed them
        back through MultiprocessExporter, recompile the final archive, and
        upload it unless errors remain (or ``--force-upload`` was passed).
        """
        if __debug__:
            # The pipeline must run with assertions disabled (-O sets
            # __debug__ to False); refuse to run otherwise.
            raise CommandError("You should run this with 'python -O'")
        export_id = options.pop('export_id')
        export_archive_path = options.pop('export_path')
        processes = options.pop('processes')
        force_upload = options.pop('force_upload')
        export_instance = get_properly_wrapped_export_instance(export_id)
        if not export_archive_path or not os.path.exists(export_archive_path):
            confirm = input(
                """
                No export archive provided. Do you want to download the latest one? [y/N]
                """
            )
            if confirm != "y":
                raise CommandError("Export path missing: {}".format(export_archive_path))
            export_archive_path = self._download_export(export_instance)
        extract_to = tempfile.mkdtemp()
        total_docs, unprocessed_pages = self._get_unprocessed_pages(export_archive_path, extract_to)
        print('{} pages still to process'.format(len(unprocessed_pages)))
        exporter = MultiprocessExporter(export_instance, total_docs, processes)
        error_pages, successful_pages = self._process_pages(
            exporter, unprocessed_pages
        )
        final_path = self.compile_final_zip(
            error_pages, export_archive_path, export_instance, successful_pages
        )
        if force_upload or not error_pages:
            print('Uploading final archive', '(forced)' if force_upload and error_pages else '')
            # clean=True only when every page succeeded.
            exporter.upload(final_path, clean=not error_pages)
        else:
            print(self.style.ERROR(
                'Not all pages processed successfully.\n'
                'You can re-run the command on the final archive to try again: {}\n'
                'NOTE: final archive not uploaded. '
                'Use --force-upload to upload even with errors'.format(final_path))
            )
        shutil.rmtree(extract_to)
        self.stdout.write(self.style.SUCCESS('Rebuild Complete and payload uploaded'))

    def _download_export(self, export_instance):
        """Stream the export's latest payload into a timestamped ZIP in CWD
        and return its path."""
        export_archive_path = '{}_{}.zip'.format(
            safe_filename(export_instance.name or 'Export'),
            datetime.utcnow().isoformat()
        )
        payload = export_instance.get_payload(stream=True)
        with open(export_archive_path, 'wb') as download:
            shutil.copyfileobj(payload, download)
        return export_archive_path

    def compile_final_zip(self, error_pages, export_archive_path, export_instance, successful_pages):
        """Assemble the final archive: newly processed pages, the original
        export pages, and any still-unprocessed raw pages.

        Returns the path of the new archive (a temp file on full success, an
        ``INCOMPLETE_*`` ZIP next to the original otherwise).
        """
        final_dir, orig_name = os.path.split(export_archive_path)
        if not error_pages:
            fd, final_path = tempfile.mkstemp()
            # Bug fix: mkstemp returns an *open* file descriptor; close it so
            # it is not leaked (ZipFile below reopens the file by path).
            os.close(fd)
        else:
            final_name = 'INCOMPLETE_{}_{}.zip'.format(orig_name, datetime.utcnow().isoformat())
            final_path = os.path.join(final_dir, final_name)
        print('Recompiling export')
        export_name = safe_filename(export_instance.name or 'Export')
        with zipfile.ZipFile(final_path, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as final_zip:
            for result in successful_pages:
                print(' Adding page {} to final file'.format(result.page))
                _add_compressed_page_to_zip(final_zip, result.page, result.path)
            print(' Adding original export pages and unprocessed pages final file')

            def _include_member(member):
                # add original export pages and any raw data that we weren't able to process
                add = member.startswith(export_name) or member in error_pages
                if add:
                    print(' {}'.format(member))
                return add

            _copy_files_from_zip_to_zip(final_zip, export_archive_path, _include_member)
        return final_path

    def _process_pages(self, exporter, unprocessed_pages):
        """Feed each leftover page back through *exporter* and split the
        outcome into (error page archive names, successful results)."""
        exporter.start()
        for page_path, page_number, doc_count in unprocessed_pages:
            exporter.process_page(RetryResult(page_number, page_path, doc_count, 0))
        export_results = exporter.get_results(retries_per_page=0)
        successful_pages = [res for res in export_results if res.success]
        error_pages = {
            '{}/page_{}.json.gz'.format(UNPROCESSED_PAGES_DIR, res.page)
            for res in export_results if not res.success
        }
        return error_pages, successful_pages

    def _get_unprocessed_pages(self, export_archive_path, extract_to_path):
        """Extract the unprocessed pages from the archive.

        Returns ``(total_docs, pages)`` where each page entry is a
        ``(path, page_number, doc_count)`` tuple.
        Raises CommandError when the archive has no unprocessed pages or an
        unexpected page filename is found.
        """
        print('Extracting unprocessed pages')
        with zipfile.ZipFile(export_archive_path, 'r') as zipref:
            for member in zipref.namelist():
                if member.startswith(UNPROCESSED_PAGES_DIR):
                    zipref.extract(member, extract_to_path)
        unprocessed_path = os.path.join(extract_to_path, UNPROCESSED_PAGES_DIR)
        if not os.path.exists(unprocessed_path):
            shutil.rmtree(extract_to_path)
            raise CommandError('Export has no unprocessed pages.')
        unprocessed_pages = []
        total_docs = 0
        for page_filename in os.listdir(unprocessed_path):
            page_path = os.path.join(unprocessed_path, page_filename)
            page_search = re.search(r'page_(\d+).json.gz', page_filename)
            if page_search:
                page_number = int(page_search.group(1))
            else:
                raise CommandError('Unexpected page filename: {}'.format(page_filename))
            # NOTE(review): `wc -l` on a .json.gz counts newlines in the
            # *compressed* bytes — confirm this matches how the pages were
            # written before trusting the doc counts.
            doc_count = int(sh.wc('-l', page_path).split(' ')[0])
            total_docs += doc_count
            unprocessed_pages.append((page_path, page_number, doc_count))
        if not unprocessed_pages:
            raise CommandError('No pages left to process')
        return total_docs, unprocessed_pages
def _copy_files_from_zip_to_zip(to_zip, from_zip_path, include_filter=None):
    """Copy members of the archive at *from_zip_path* into the open *to_zip*.

    When *include_filter* is given, only members for which it returns a
    truthy value are copied; otherwise every member is copied.
    """
    with zipfile.ZipFile(from_zip_path, 'r') as source:
        for name in source.namelist():
            if include_filter and not include_filter(name):
                continue
            to_zip.writestr(name, source.read(name))
| StarcoderdataPython |
11213295 | """
Some common utilities for interacting with discord.
"""
from __future__ import annotations
import asyncio
import re
import math
import discord
import discord.abc
import discord.ext.commands
import string
import logging
from typing import (Any, List, Sequence, Callable, Iterable, Optional, Union, Coroutine, AsyncContextManager, Generic,
TypeVar, Type, Protocol, cast)
import discord_client
import plugins
logger: logging.Logger = logging.getLogger(__name__)
class Quoted:
    """A plain wrapper around a chunk of text taken from a command argument."""

    __slots__ = "text"
    text: str

    def __init__(self, text: str):
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return f"Quoted({self.text!r})"

    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> Quoted:
        """discord.py converter hook: wrap the raw argument verbatim."""
        return cls(arg)
def undo_get_quoted_word(view: discord.ext.commands.view.StringView, arg: str) -> int:
    """Best-effort: recover the buffer index where *arg* started.

    StringView.get_quoted_word has already consumed *arg* plus any quoting
    and escape characters, so we walk back from ``view.index``, subtracting
    the closing-quote/escape overhead and one extra character per escaped
    quote inside *arg*.

    NOTE(review): relies on discord.py private internals
    (``view._all_quotes`` / ``view._quotes``); re-verify on discord.py
    upgrades.
    """
    escaped_quotes: Iterable[str] = discord.ext.commands.view._all_quotes
    offset = 0
    last = view.buffer[view.index - 1]
    if last == "\\":
        # Parsing stopped right after a backslash escape: one extra char.
        offset = 1
    elif not arg.endswith(last):
        # If the final consumed char is a closing quote (and not simply the
        # last char of *arg*), account for the surrounding quote pair and
        # restrict escape counting to that pair.
        for open_quote, close_quote in discord.ext.commands.view._quotes.items():
            if close_quote == last:
                escaped_quotes = (open_quote, close_quote)
                offset = 2
                break
    return view.index - offset - len(arg) - sum(ch in escaped_quotes for ch in arg)
class CodeBlock(Quoted):
    """A fenced ``` code block with an optional language tag."""

    __slots__ = "language"
    language: Optional[str]

    def __init__(self, text: str, language: Optional[str] = None):
        self.text = text
        self.language = language

    def __str__(self) -> str:
        # A zero-width joiner keeps an embedded `` from closing the fence.
        escaped = self.text.replace("``", "`\u200D`")
        return f"```{self.language or ''}\n{escaped}```"

    def __repr__(self) -> str:
        if self.language is None:
            return f"CodeBlock({self.text!r})"
        return f"CodeBlock({self.text!r}, language={self.language!r})"

    codeblock_re: re.Pattern[str] = re.compile(
        r"```(?:(?P<language>\S*)\n(?!```))?(?P<block>(?:(?!```).)+)```", re.S)

    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> CodeBlock:
        """discord.py converter hook: re-scan the raw input for a full block."""
        found = cls.codeblock_re.match(ctx.view.buffer, pos=undo_get_quoted_word(ctx.view, arg))
        if found is None:
            raise discord.ext.commands.ArgumentParsingError("Please provide a codeblock")
        ctx.view.index = found.end()
        return cls(found["block"], found["language"] or None)
class Inline(Quoted):
    """Inline code (single or double backticks), escaping embedded backticks."""

    # Fix: the original redeclared ``__slots__ = "text"`` (and its annotation),
    # duplicating the slot already defined on Quoted — per the data-model docs
    # a duplicate slot wastes memory and shadows the parent's descriptor.
    __slots__ = ()

    def __init__(self, text: str):
        self.text = text

    def __str__(self) -> str:
        text = self.text
        if "`" in text:
            if "``" in text:
                # Zero-width joiner prevents `` sequences from closing early.
                text = text.replace("`", "`\u200D")
            # Pad so a leading/trailing backtick cannot merge with the fence.
            if text.startswith("`"):
                text = " " + text
            if text.endswith("`"):
                text = text + " "
            return "``" + text + "``"
        return "`" + text + "`"

    def __repr__(self) -> str:
        return "Inline({!r})".format(self.text)

    inline_re: re.Pattern[str] = re.compile(r"``((?:(?!``).)+)``|`([^`]+)`", re.S)

    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> Inline:
        """discord.py converter hook: re-scan the raw input for inline code."""
        if (match := cls.inline_re.match(ctx.view.buffer, pos=undo_get_quoted_word(ctx.view, arg))) is not None:
            ctx.view.index = match.end()
            return cls(match[1] or match[2])
        raise discord.ext.commands.ArgumentParsingError("Please provide an inline")
class Formatter(string.Formatter):
    """
    A formatter class designed for discord messages. The following conversions
    are understood:

    {!i} -- turn into inline code
    {!b} -- turn into a code block
    {!b:lang} -- turn into a code block in the specified language
    {!m} -- turn into mention
    {!M} -- turn into role mention
    {!c} -- turn into channel link
    """

    __slots__ = ()

    def convert_field(self, value: Any, conversion: str) -> Any:
        """Handle the discord-specific conversions; defer anything else."""
        if conversion == "i":
            return str(Inline(str(value)))
        if conversion == "b":
            return CodeBlock(str(value))
        if conversion == "m":
            if isinstance(value, discord.Role):
                return "<@&{}>".format(value.id)
            if isinstance(value, discord.abc.User):
                return "<@{}>".format(value.id)
            if isinstance(value, int):
                return "<@{}>".format(value)
        elif conversion == "M":
            if isinstance(value, discord.Role):
                return "<@&{}>".format(value.id)
            if isinstance(value, int):
                return "<@&{}>".format(value)
        elif conversion == "c":
            if isinstance(value, (discord.TextChannel, discord.CategoryChannel)):
                return "<#{}>".format(value.id)
            if isinstance(value, int):
                return "<#{}>".format(value)
        # Unhandled conversion or value type: the base class decides (and
        # raises for unknown conversions).
        return super().convert_field(value, conversion)

    def format_field(self, value: Any, fmt: str) -> Any:
        """A format spec applied to a CodeBlock selects its language."""
        if isinstance(value, CodeBlock):
            if fmt:
                value.language = fmt
            return str(value)
        return super().format_field(value, fmt)

formatter: string.Formatter = Formatter()
format = formatter.format
class UserError(discord.ext.commands.CommandError):
    """General exceptions in commands."""
    __slots__ = ()  # no state beyond CommandError
class InvocationError(discord.ext.commands.UserInputError):
    """Exceptions in commands that are to do with the user input. Triggers displaying the command's usage."""
    __slots__ = ()  # no state beyond UserInputError
class NamedType(Protocol):
    # Structural type: anything with an int id and a name (users, roles, channels).
    id: int
    name: str

class NicknamedType(Protocol):
    # Structural type: a NamedType that may additionally carry a guild nickname.
    id: int
    name: str
    nick: str

# Element type for the name/nick search helpers below.
M = TypeVar("M", bound=Union[NamedType, NicknamedType])
def smart_find(name_or_id: str, iterable: Iterable[M]) -> Optional[M]:
    """
    Find an object by its name or id. We try an exact id match, then the
    shortest prefix match, if unique among prefix matches of that length, then
    an infix match, if unique. Returns None when nothing matches uniquely.
    """
    int_id: Optional[int]
    try:
        int_id = int(name_or_id)
    except ValueError:
        int_id = None
    prefix_match: Optional[M] = None
    prefix_matches: List[str] = []  # matched names/nicks at the current shortest length
    infix_matches: List[M] = []
    for x in iterable:
        if x.id == int_id:
            return x
        nick = getattr(x, "nick", None)
        # Prefer the name over the nickname for the prefix test.
        if x.name.startswith(name_or_id):
            matched = x.name
        elif nick is not None and nick.startswith(name_or_id):
            matched = nick
        else:
            # No prefix match; remember infix hits as a last resort.
            if name_or_id in x.name:
                infix_matches.append(x)
            elif nick is not None and name_or_id in nick:
                infix_matches.append(x)
            continue
        # Bug fix: only matches of the current shortest length may count
        # towards ambiguity. The original appended *longer* matches too, so a
        # unique shortest prefix match could be reported as ambiguous,
        # contradicting the documented behavior.
        if not prefix_matches or len(matched) < len(prefix_matches[0]):
            prefix_matches = [matched]
            prefix_match = x
        elif len(matched) == len(prefix_matches[0]):
            prefix_matches.append(matched)
    if len(prefix_matches) == 1:
        return prefix_match
    if len(infix_matches) == 1:
        return infix_matches[0]
    return None
T = typing.TypeVar("T")

def priority_find(predicate: Callable[[T], Union[float, int, None]], iterable: Iterable[T]) -> List[T]:
    """
    Finds those results in the input for which the predicate returns the highest rank, ignoring those for which the rank
    is None, and if any item has rank math.inf, the first such item is returned.
    """
    results: List[T] = []
    cur_rank = None
    for x in iterable:
        rank = predicate(x)
        if rank is None:
            continue
        # Bug fix: compare with == rather than ``is`` — identity only holds
        # for the math.inf constant object itself, not for a computed
        # infinity such as float("inf"), so the documented short-circuit
        # never fired for computed values.
        if rank == math.inf:
            return [x]
        if cur_rank is None or rank > cur_rank:
            cur_rank = rank
            results = [x]
        elif rank == cur_rank:
            results.append(x)
    return results
class TempMessage(AsyncContextManager[discord.Message]):
    """Async context manager: send a message on entry, best-effort delete it
    on exit."""

    __slots__ = "sendable", "args", "kwargs", "message"

    sendable: discord.abc.Messageable
    args: Any
    kwargs: Any
    message: Optional[discord.Message]

    def __init__(self, sendable: discord.abc.Messageable,
            *args: Any, **kwargs: Any):
        self.sendable = sendable
        self.args = args
        self.kwargs = kwargs

    async def __aenter__(self) -> discord.Message:
        self.message = await self.sendable.send(*self.args, **self.kwargs)
        return self.message

    async def __aexit__(self, exc_type, exc_val, tb) -> None:  # type: ignore
        if self.message is None:
            return
        try:
            await self.message.delete()
        except (discord.Forbidden, discord.NotFound):
            # Already deleted, or we lack permission — either way, nothing to do.
            pass
class ChannelById(discord.abc.Messageable):
    """A minimal Messageable that sends to a channel known only by ID.

    Avoids fetching the channel object: ``_get_channel`` returns self and
    the shared connection state is lifted off the client.
    """
    __slots__ = "id", "_state"
    id: int
    _state: discord.state.ConnectionState

    def __init__(self, client: discord.Client, id: int):
        self.id = id
        # Private discord.py attribute; there is no public accessor for the
        # connection state.
        self._state = client._connection # type: ignore

    async def _get_channel(self) -> discord.abc.Messageable:
        return self
def nicknamed_priority(u: Union[NamedType, NicknamedType], s: str) -> Optional[int]:
    """Rank how well *s* matches u's name or nickname.

    3: exact, 2: case-insensitive equal, 1: case-insensitive prefix,
    0: case-insensitive infix, None: no match.
    """
    candidates = [u.name]
    nick = getattr(u, "nick", None)
    if nick is not None:
        candidates.append(nick)
    s_folded = s.lower()
    if any(s == c for c in candidates):
        return 3
    if any(s_folded == c.lower() for c in candidates):
        return 2
    if any(c.lower().startswith(s_folded) for c in candidates):
        return 1
    if any(s_folded in c.lower() for c in candidates):
        return 0
    return None
def named_priority(x: NamedType, s: str) -> Optional[int]:
    """Rank how well *s* matches x.name (same scale as nicknamed_priority)."""
    name = x.name
    s_folded, name_folded = s.lower(), name.lower()
    if s == name:
        return 3
    if s_folded == name_folded:
        return 2
    if name_folded.startswith(s_folded):
        return 1
    if s_folded in name_folded:
        return 0
    return None
# We inherit XConverter from X so that a declaration ``x: XConverter`` can be
# used under the assumption that at runtime ``x`` really is an X.
class PartialUserConverter(discord.abc.Snowflake):
    """Convert a command argument to a user-ish snowflake without fetching.

    Resolution order: raw mention or bare numeric ID (returned as a plain
    ``discord.Object``, unvalidated), then an exact name#discriminator match,
    then prefix/infix name matching via ``nicknamed_priority``. The search
    pool is the guild's members, or just the bot and the author in DMs.
    """
    mention_re: re.Pattern[str] = re.compile(r"<@!?(\d+)>")
    id_re: re.Pattern[str] = re.compile(r"\d{15,}")
    discrim_re: re.Pattern[str] = re.compile(r"(.*)#(\d{4})")

    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.abc.Snowflake:
        if match := cls.mention_re.fullmatch(arg):
            return discord.Object(int(match[1]))
        elif match := cls.id_re.fullmatch(arg):
            return discord.Object(int(match[0]))
        user_list: Sequence[Union[discord.User, discord.Member]]
        if ctx.guild is not None:
            user_list = ctx.guild.members
        else:
            # In DMs only the bot itself and the message author are visible.
            user_list = [cast(discord.User, ctx.bot.user), ctx.author]
        if match := cls.discrim_re.fullmatch(arg):
            name, discrim = match[1], match[2]
            matches = list(filter(lambda u: u.name == name and u.discriminator == discrim, user_list))
            if len(matches) > 1:
                raise discord.ext.commands.BadArgument(format("Multiple results for {}#{}", name, discrim))
            elif len(matches) == 1:
                return matches[0]
        matches = priority_find(lambda u: nicknamed_priority(u, arg), user_list)
        if len(matches) > 1:
            raise discord.ext.commands.BadArgument(format("Multiple results for {}", arg))
        elif len(matches) == 1:
            return matches[0]
        else:
            raise discord.ext.commands.BadArgument(format("No results for {}", arg))
class MemberConverter(discord.User):
    """Converter: fully resolve an argument to a guild Member.

    Raises NoPrivateMessage outside a guild, BadArgument when no member
    can be resolved.
    """
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> Optional[discord.Member]:
        if ctx.guild is None:
            raise discord.ext.commands.NoPrivateMessage(format("Cannot obtain member outside guild"))
        obj = await PartialUserConverter.convert(ctx, arg)
        if isinstance(obj, discord.Member):
            return obj
        elif isinstance(obj, discord.User):
            # A plain User (resolved in DMs) cannot be a member of this guild.
            raise discord.ext.commands.BadArgument(format("No member found by ID {}", obj.id))
        # obj is a bare discord.Object: try the cache first, then the API.
        member = ctx.guild.get_member(obj.id)
        if member is not None: return member
        try:
            return await ctx.guild.fetch_member(obj.id)
        except discord.NotFound:
            raise discord.ext.commands.BadArgument(format("No member found by ID {}", obj.id))
class UserConverter(discord.User):
    """Converter: fully resolve an argument to a discord.User
    (cache first, then an API fetch)."""
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> Optional[discord.User]:
        obj = await PartialUserConverter.convert(ctx, arg)
        if isinstance(obj, discord.User):
            return obj
        # obj is a bare discord.Object: try the cache first, then the API.
        user = ctx.bot.get_user(obj.id)
        if user is not None: return user
        try:
            return await ctx.bot.fetch_user(obj.id)
        except discord.NotFound:
            raise discord.ext.commands.BadArgument(format("No user found by ID {}", obj.id))
class PartialRoleConverter(discord.abc.Snowflake):
    """Convert an argument to a role-ish snowflake without fetching.

    Mentions and bare IDs become plain ``discord.Object``; otherwise the
    guild's roles are searched by name via ``named_priority``.
    """
    mention_re: re.Pattern[str] = re.compile(r"<@&(\d+)>")
    id_re: re.Pattern[str] = re.compile(r"\d{15,}")

    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.abc.Snowflake:
        if match := cls.mention_re.fullmatch(arg):
            return discord.Object(int(match[1]))
        elif match := cls.id_re.fullmatch(arg):
            return discord.Object(int(match[0]))
        if ctx.guild is None:
            raise discord.ext.commands.NoPrivateMessage(format("Outside a guild a role can only be specified by ID"))
        matches = priority_find(lambda r: named_priority(r, arg), ctx.guild.roles)
        if len(matches) > 1:
            raise discord.ext.commands.BadArgument(format("Multiple results for {}", arg))
        elif len(matches) == 1:
            return matches[0]
        else:
            raise discord.ext.commands.BadArgument(format("No results for {}", arg))
class RoleConverter(discord.Role):
    """Converter: fully resolve an argument to a discord.Role."""
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.Role:
        obj = await PartialRoleConverter.convert(ctx, arg)
        if isinstance(obj, discord.Role):
            return obj
        if ctx.guild is not None:
            role = ctx.guild.get_role(obj.id)
            if role is not None:
                return role
        # Fall back to scanning every guild the bot can see; the for/else
        # raises when nothing is found (the loop body never breaks).
        for guild in ctx.bot.guilds:
            role = guild.get_role(obj.id)
            if role is not None:
                return role
        else:
            raise discord.ext.commands.BadArgument(format("No role found by ID {}", obj.id))
C = TypeVar("C", bound=discord.abc.GuildChannel)

class PCConv(Generic[C]):
    """Shared conversion machinery for the guild-channel converters below."""

    mention_re: re.Pattern[str] = re.compile(r"<#(\d+)>")
    id_re: re.Pattern[str] = re.compile(r"\d{15,}")

    @classmethod
    async def partial_convert(cls, ctx: discord.ext.commands.Context, arg: str, ty: Type[C]) -> discord.abc.Snowflake:
        """Resolve *arg* to a channel-ish snowflake without fetching.

        A mention or bare ID becomes a plain ``discord.Object`` (no
        validation); otherwise the guild's channels of type *ty* are
        searched by name via ``named_priority``.
        """
        if match := cls.mention_re.fullmatch(arg):
            return discord.Object(int(match[1]))
        elif match := cls.id_re.fullmatch(arg):
            return discord.Object(int(match[0]))
        if ctx.guild is None:
            raise discord.ext.commands.NoPrivateMessage(format("Outside a guild a channel can only be specified by ID"))
        chan_list: Sequence[discord.abc.GuildChannel] = ctx.guild.channels
        if ty == discord.TextChannel:
            chan_list = ctx.guild.text_channels
        elif ty == discord.VoiceChannel:
            chan_list = ctx.guild.voice_channels
        elif ty == discord.CategoryChannel:
            chan_list = ctx.guild.categories
        elif ty == discord.StageChannel:
            chan_list = ctx.guild.stage_channels
        matches = priority_find(lambda c: named_priority(c, arg), chan_list)
        if len(matches) > 1:
            raise discord.ext.commands.BadArgument(format("Multiple results for {}", arg))
        elif len(matches) == 1:
            return matches[0]
        else:
            # Wording made consistent with the other converters ("No results for").
            raise discord.ext.commands.BadArgument(format("No results for {}", arg))

    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str, ty: Type[C]) -> C:
        """Fully resolve *arg* to a channel of type *ty*, or raise BadArgument."""
        obj = await cls.partial_convert(ctx, arg, ty)
        if isinstance(obj, ty):
            return obj
        if ctx.guild is not None:
            chan = ctx.guild.get_channel(obj.id)
            if chan is not None:
                if not isinstance(chan, ty):
                    raise discord.ext.commands.BadArgument(format("{!c} is not a {}", chan.id, ty))
                return chan
        # Fall back to every guild the bot can see; the for/else raises when
        # nothing is found (the loop body never breaks).
        for guild in ctx.bot.guilds:
            chan = guild.get_channel(obj.id)
            if chan is not None:
                if not isinstance(chan, ty):
                    raise discord.ext.commands.BadArgument(format("{!c} is not a {}", chan.id, ty))
                return chan
        else:
            # Bug fix: the message has two placeholders but was only given
            # obj.id, so it raised IndexError instead of BadArgument.
            raise discord.ext.commands.BadArgument(format("No {} found by ID {}", ty.__name__, obj.id))
class PartialChannelConverter(discord.abc.GuildChannel):
    """Converter: any guild channel; may return an unvalidated discord.Object."""
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.abc.Snowflake:
        return await PCConv.partial_convert(ctx, arg, discord.abc.GuildChannel)

class PartialTextChannelConverter(discord.abc.GuildChannel):
    """Converter: text channel; may return an unvalidated discord.Object."""
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.abc.Snowflake:
        return await PCConv.partial_convert(ctx, arg, discord.TextChannel)

class PartialCategoryChannelConverter(discord.abc.GuildChannel):
    """Converter: category channel; may return an unvalidated discord.Object."""
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.abc.Snowflake:
        return await PCConv.partial_convert(ctx, arg, discord.CategoryChannel)

class ChannelConverter(discord.abc.GuildChannel):
    """Converter: fully resolve an argument to a guild channel."""
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.abc.GuildChannel:
        return await PCConv.convert(ctx, arg, discord.abc.GuildChannel)
def partial_message(channel: discord.abc.Snowflake, id: int) -> discord.PartialMessage:
    """Build a PartialMessage without fetching; *channel* only needs an id."""
    return discord.PartialMessage(channel=channel, id=id) # type: ignore
def partial_from_reply(pmsg: Optional[discord.PartialMessage], ctx: discord.ext.commands.Context
    ) -> discord.PartialMessage:
    """Return *pmsg* if given, otherwise the message the command replied to.

    Raises InvocationError when there is no reply or it cannot be resolved.
    """
    if pmsg is not None:
        return pmsg
    if (ref := ctx.message.reference) is not None:
        # Use the resolved message if the gateway already attached it.
        if isinstance(msg := ref.resolved, discord.Message):
            return partial_message(msg.channel, msg.id)
        if (channel := discord_client.client.get_channel(ref.channel_id)) is None:
            raise InvocationError(format("Could not find channel by ID {}", ref.channel_id))
        if ref.message_id is None:
            raise InvocationError("Referenced message has no ID")
        return partial_message(channel, ref.message_id)
    raise InvocationError("Expected either a message link, channel-message ID, or a reply to a message")
class ReplyConverter(discord.PartialMessage):
    """
    Parse a PartialMessage either from either the replied-to message, or from the command (using an URL or a
    ChannelID-MessageID). If the command ends before this argument is parsed, the converter won't even be called, so if
    this is the last non-optional parameter, wrap it in Optional, and pass the result via partial_from_reply.
    """
    @classmethod
    async def convert(cls, ctx: discord.ext.commands.Context, arg: str) -> discord.PartialMessage:
        # Remember where this token started in case we need to rewind.
        pos = undo_get_quoted_word(ctx.view, arg)
        if (ref := ctx.message.reference) is not None:
            # A reply wins over the textual argument: rewind the parser so the
            # consumed token is re-parsed by the next parameter.
            ctx.view.index = pos
            return partial_from_reply(None, ctx)
        return await discord.ext.commands.PartialMessageConverter().convert(ctx, arg)
| StarcoderdataPython |
1844518 | <filename>systemofequations.py
#!/usr/bin/env python

# TOGGLE THESE VALUES
equation1 = '10 - x'  # y = 10 - x
equation2 = 'x / (5 / 8)'  # y = x / (5 / 8)
check_until = 100  # check every fraction from 1/1 up to check_until/check_until
depth = 3  # number of decimal places answers are rounded to before comparing


def find_intersections(eq1, eq2, limit, places):
    """Scan the fractions i/j (1 <= i, j <= limit) for x values where both
    equations agree after rounding to *places* decimal places.

    Prints each hit and returns a list of (i, j, y) tuples.

    Fixes vs. the original script:
    * the loops now include *limit* itself — ``range(1, limit)`` stopped at
      limit - 1, contradicting the comment documenting check_until;
    * each equation is compiled once, instead of being rebuilt by string
      substitution and re-parsed by eval() on every iteration.

    The equations are trusted module-level constants, so eval() of the
    pre-compiled expressions is acceptable here.
    """
    code1 = compile(eq1, '<equation1>', 'eval')
    code2 = compile(eq2, '<equation2>', 'eval')
    found = []
    for i in range(1, limit + 1):
        for j in range(1, limit + 1):
            x = i / j
            y1 = round(eval(code1, {'x': x}), places)
            y2 = round(eval(code2, {'x': x}), places)
            if y1 == y2:
                print(f'Intersection found at x: {i}/{j} y: ~{y1}!')
                found.append((i, j, y1))
    return found


if __name__ == '__main__':
    find_intersections(equation1, equation2, check_until, depth)
6583312 | import falcon
from wsgiref import simple_server
import leaderboard
import root
ALLOWED_ORIGINS = ['http://0.0.0.0:5000']  # Or load this from a config file


class CorsMiddleware(object):
    """Falcon middleware: echo back the Origin header when it is whitelisted."""

    def process_request(self, request, response):
        requesting_origin = request.get_header('Origin')
        if requesting_origin not in ALLOWED_ORIGINS:
            return
        response.set_header('Access-Control-Allow-Origin', requesting_origin)
app = falcon.API(middleware=[CorsMiddleware()])

# Bind the resource instances to distinct names: the original rebound
# ``root`` and thereby shadowed the imported ``root`` module.
root_resource = root.RootItem()
board = leaderboard.LeaderboardItem()
app.add_route('/', root_resource)
app.add_route('/v1.0/{org}/{repo}/leaderboard/', board)

msgtmpl = (u'Serving on host %(bind)s:%(port)s')
# Bug fix: interpolate the %-template. The original passed the mapping as a
# second argument to print(), which printed the raw template followed by the
# dict instead of substituting the values.
print(msgtmpl % {'bind': "0.0.0.0", 'port': "8888"})

httpd = simple_server.make_server("0.0.0.0",
                                  8888,
                                  app)
httpd.serve_forever()
| StarcoderdataPython |
5109863 | <reponame>thegreenwebfoundation/greenwebfoundation-admin<filename>apps/accounts/admin_site.py
import re
import logging
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.forms import AuthenticationForm
from django.core.exceptions import ValidationError
from django.urls import path
from django.urls import reverse
from django.shortcuts import render
from django.views.generic.edit import FormView
from django import forms
import ipwhois
from apps.greencheck.views import GreenUrlsView
from ..greencheck import domain_check
from ..greencheck import models as gc_models
checker = domain_check.GreenDomainChecker()
logger = logging.getLogger(__name__)
class CheckUrlForm(forms.Form):
    """
    A form for checking a url against the database and surfacing what other
    information we can see from third-party services.
    """
    url = forms.URLField()
    # Results populated as a side effect of clean_url(); read by the view.
    green_status = False
    whois_info = None
    check_result = None
    sitecheck = None

    def clean_url(self):
        """
        Check the submitted url against the TGWF green domain database and
        stash the lookup results on the form.

        NOTE(review): this performs network side effects (a DNS lookup and
        an RDAP/whois query) inside form validation.
        """
        # TODO: decide if we should split this into a
        # separate method. clean_field typically doesn't make
        # other requests
        url = self.cleaned_data["url"]
        domain_to_check = checker.validate_domain(url)
        ip_address = checker.convert_domain_to_ip(domain_to_check)
        whois_lookup = ipwhois.IPWhois(ip_address)
        # returns a green domain object, not our sitecheck, which
        # contains the kind of match we used
        # TODO rewrite this. the sitecheck / greendomain thing is
        # clumsy to use
        res = checker.perform_full_lookup(domain_to_check)
        sitecheck = checker.check_domain(domain_to_check)
        rdap = whois_lookup.lookup_rdap(depth=1)
        self.whois_info = rdap
        self.domain = domain_to_check
        self.green_status = res.green
        self.check_result = res
        self.sitecheck = sitecheck
        # Bug fix: a clean_<field> method must return the cleaned value;
        # falling off the end made Django set cleaned_data["url"] to None.
        return url
class CheckUrlView(FormView):
    """Admin view around CheckUrlForm: renders the green status, whois info
    and any matching green IP/ASN records."""
    template_name = "try_out.html"
    form_class = CheckUrlForm
    success_url = "/not/used"  # never redirected to; form_valid renders directly

    def lookup_asn(self, domain: str) -> dict:
        """Look up the corresponding ASN for this domain"""
        # NOTE(review): unimplemented stub — returns None despite the dict hint.
        pass

    def lookup_whois(self, domain: str) -> dict:
        """Lookup the structured"""
        # NOTE(review): unimplemented stub; the docstring looks truncated.
        pass

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["form_url"] = reverse("admin:check_url")
        return ctx

    def form_valid(self, form):
        """Render the results page directly instead of redirecting."""
        green_status = form.green_status
        ctx = self.get_context_data()
        if form.whois_info:
            ctx["domain"] = form.domain
            ctx["ip_lookup"] = form.whois_info["query"]
            ctx["whois_info"] = form.whois_info
        if form.sitecheck.green and form.sitecheck.match_type == "as":
            # this is an AS match. Point to the ASN match
            as_match = gc_models.GreencheckASN.objects.filter(
                id=form.sitecheck.match_ip_range
            )
            if as_match:
                ctx["matching_green_as"] = as_match[0]
        if form.sitecheck.green and form.sitecheck.match_type == "ip":
            # IP-range match: surface the matching GreencheckIp record.
            ip_match = gc_models.GreencheckIp.objects.filter(
                id=form.sitecheck.match_ip_range
            )
            if ip_match:
                ctx["matching_green_ip"] = ip_match[0]
        ctx["green_status"] = "green" if green_status else "gray"
        return render(self.request, self.template_name, ctx)
class GreenWebAdmin(AdminSite):
    """Custom admin site that admits any *active* user, not only staff."""
    # This is a standard authentication form that allows non-staff users
    login_form = AuthenticationForm
    index_template = "admin_index.html"
    site_header = "The Green Web Foundation Administration Site"
    index_title = "The Green Web Foundation Administration Site"
    login_template = "login.html"
    logout_template = "logout.html"

    def has_permission(self, request):
        """
        Just check that the user is active, we want
        non-staff users to be able to access admin too.
        """
        return request.user.is_active

    def get_urls(self):
        """Prepend the two custom admin pages to the default admin URLs."""
        urls = super().get_urls()
        patterns = [
            path("try_out/", CheckUrlView.as_view(), name="check_url"),
            path("green-urls", GreenUrlsView.as_view(), name="green_urls"),
        ]
        return patterns + urls

    def get_app_list(self, request):
        """Append pseudo-apps so the custom pages appear on the admin index."""
        app_list = super().get_app_list(request)
        app_list += [
            {
                "name": "Try out greencheck",
                "app_label": "greencheck",
                "app_url": reverse("admin:check_url"),
                "models": [
                    {
                        "name": "Try out a url",
                        "object_name": "greencheck_url",
                        "admin_url": reverse("admin:check_url"),
                        "view_only": True,
                    }
                ],
            },
            {
                "name": "Download data dump",
                "app_label": "greencheck",
                # NOTE(review): app_url points at check_url while the model's
                # admin_url points at green_urls — confirm which is intended.
                "app_url": reverse("admin:check_url"),
                "models": [
                    {
                        "name": "Download data dump",
                        "object_name": "greencheck_url",
                        "admin_url": reverse("admin:green_urls"),
                        "view_only": True,
                    }
                ],
            },
        ]
        return app_list

greenweb_admin = GreenWebAdmin(name="greenweb_admin")
| StarcoderdataPython |
1746613 | <gh_stars>0
#!/usr/bin/env python3
INT_POWER = 32


def complement_bits(n, width=INT_POWER):
    """Return the bitwise complement of non-negative ``n`` within a ``width``-bit word.

    Equivalent to the original array-based loop: every bit set in ``n``
    becomes 0 and every clear bit of the ``width``-bit word becomes 1.
    """
    return n ^ ((1 << width) - 1)


if __name__ == "__main__":
    # Input format: one test count T, then T integers, one per line.
    # Print the 32-bit complement of each.
    for _ in range(int(input())):
        print(complement_bits(int(input())))
| StarcoderdataPython |
5153927 | import abc
import collections
import typing
import warnings
from typing import AbstractSet, Deque, Dict, Optional, Set, Tuple, cast
from ..utils.meta import roundrepr
if typing.TYPE_CHECKING:
from ..entity import Entity
from ..term import Term, TermSet, TermData
from ..relationship import Relationship, RelationshipData, RelationshipSet
from ..ontology import Ontology, _DataGraph
_E = typing.TypeVar("_E", bound="Entity")
# --- Storage ----------------------------------------------------------------
@roundrepr
class Lineage(object):
    """An internal type to store the superclasses and subclasses of a term.

    Used in `Ontology` to cache subclassing relationships between terms since
    only the superclassing relationships are explicitly declared in source
    documents.
    """

    __slots__ = ("sub", "sup")

    def __init__(
        self,
        sub: Optional[AbstractSet[str]] = None,
        sup: Optional[AbstractSet[str]] = None,
    ):
        # Copy the inputs into fresh, independently mutable sets.
        self.sub: Set[str] = set() if sub is None else set(sub)  # type: ignore
        self.sup: Set[str] = set() if sup is None else set(sup)  # type: ignore

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Lineage):
            return False
        return (self.sub, self.sup) == (other.sub, other.sup)

    # `Lineage` is mutable so this is the explicit way to tell it's unhashable
    # (see https://docs.python.org/3/reference/datamodel.html#object.__hash__)
    __hash__ = None  # type: ignore
# --- Abstract handlers ------------------------------------------------------
class LineageHandler(typing.Generic[_E], typing.Iterable[_E]):
    """Base class exposing the sub-/super-entity links of a single entity.

    Concrete subclasses pick the traversal direction (sub vs. super) and the
    entity kind (`Term` vs. `Relationship`).
    """
    def __init__(
        self, entity: _E, distance: Optional[int] = None, with_self: bool = False
    ):
        self.entity = entity        # entity whose lineage is exposed
        self.distance = distance    # maximum traversal depth (None = unlimited)
        self.with_self = with_self  # whether iteration yields the entity itself
        # TODO: API compatibilty with previous iterator (remove for v3.0.0)
        self._it: typing.Optional[typing.Iterator[_E]] = None
    def __next__(self) -> _E:
        # Backwards-compat shim: handlers used to *be* iterators, so calling
        # next() on the handler still works, but warns and lazily creates one
        # shared iterator.
        if self._it is None:
            ty = type(self.entity).__name__
            warnings.warn(
                f"`{ty}.subclasses()` and `{ty}.superclasses()` will not "
                "return iterators in next major version, but iterables. "
                "Update your code to use `iter(...)` if needed.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            self._it = iter(self)
        return next(cast(typing.Iterator[_E], self._it))
    def _add(self, subclass: _E, superclass: _E):
        """Record a subclass/superclass link in the shared lineage cache."""
        if superclass._ontology() is not subclass._ontology():
            ty = type(subclass).__name__
            raise ValueError(f"cannot use `{ty}` instances from different ontologies")
        # Keep both directions of the cache in sync.
        lineage = self._get_data().lineage
        lineage[subclass.id].sup.add(superclass.id)
        lineage[superclass.id].sub.add(subclass.id)
    def _remove(self, subclass: _E, superclass: _E):
        """Remove a subclass/superclass link from the shared lineage cache."""
        if superclass._ontology() is not subclass._ontology():
            ty = type(subclass).__name__
            raise ValueError(f"cannot use `{ty}` instances from different ontologies")
        lineage = self._get_data().lineage
        lineage[subclass.id].sup.remove(superclass.id)
        lineage[superclass.id].sub.remove(subclass.id)
    @abc.abstractmethod
    def __iter__(self) -> "LineageIterator[_E]":
        return NotImplemented
    @abc.abstractmethod
    def _get_data(self) -> "_DataGraph":
        return NotImplemented
    @abc.abstractmethod
    def add(self, other: _E) -> None:
        return NotImplemented
    @abc.abstractmethod
    def remove(self, other: _E) -> None:
        return NotImplemented
    @abc.abstractmethod
    def clear(self) -> None:
        return NotImplemented
class TermHandler(LineageHandler["Term"]):
    """Lineage handler specialised for `Term` entities."""
    @abc.abstractmethod
    def __iter__(self) -> "TermIterator":
        return NotImplemented
    def _get_data(self) -> "_DataGraph[TermData]":
        # Terms live in the ontology's `_terms` data graph.
        return self.entity._ontology()._terms
    def to_set(self) -> "TermSet":
        """Collect the traversed terms into a `~pronto.TermSet`."""
        return self.__iter__().to_set()
class RelationshipHandler(LineageHandler["Relationship"]):
    """Lineage handler specialised for `Relationship` entities."""
    @abc.abstractmethod
    def __iter__(self) -> "RelationshipIterator":
        return NotImplemented
    def _get_data(self) -> "_DataGraph[RelationshipData]":
        # Relationships live in the ontology's `_relationships` data graph.
        return self.entity._ontology()._relationships
    def to_set(self) -> "RelationshipSet":
        """Collect the traversed relationships into a `~pronto.RelationshipSet`."""
        return self.__iter__().to_set()
class SuperentitiesHandler(LineageHandler):
    """Handler over the direct superentities (parents) of an entity."""

    @abc.abstractmethod
    def __iter__(self) -> "SuperentitiesIterator":
        return NotImplemented

    def add(self, superclass: _E):
        """Record `superclass` as a direct parent of the wrapped entity."""
        self._add(subclass=self.entity, superclass=superclass)

    def remove(self, superclass: _E):
        """Forget `superclass` as a direct parent of the wrapped entity."""
        self._remove(subclass=self.entity, superclass=superclass)

    def clear(self):
        """Drop every direct superentity link of the wrapped entity."""
        lineage = self._get_data().lineage
        entity_id = self.entity.id
        # Remove the reverse (sub) edge from each parent before clearing.
        for parent_id in lineage[entity_id].sup:
            lineage[parent_id].sub.remove(entity_id)
        lineage[entity_id].sup.clear()
class SubentitiesHandler(LineageHandler):
    """Handler over the direct subentities (children) of an entity."""

    @abc.abstractmethod
    def __iter__(self) -> "SubentitiesIterator":
        return NotImplemented

    def add(self, subclass: _E):
        """Record `subclass` as a direct child of the wrapped entity."""
        self._add(subclass=subclass, superclass=self.entity)

    def remove(self, subclass: _E):
        """Forget `subclass` as a direct child of the wrapped entity."""
        self._remove(subclass=subclass, superclass=self.entity)

    def clear(self):
        """Drop every direct subentity link of the wrapped entity."""
        lineage = self._get_data().lineage
        entity_id = self.entity.id
        # Remove the reverse (sup) edge from each child before clearing.
        for child_id in lineage[entity_id].sub:
            lineage[child_id].sup.remove(entity_id)
        lineage[entity_id].sub.clear()
# --- Concrete handlers ------------------------------------------------------
class SubclassesHandler(SubentitiesHandler, TermHandler):
    """Handler over the subclasses of a `Term`."""
    def __iter__(self) -> "SubclassesIterator":
        return SubclassesIterator(
            self.entity, distance=self.distance, with_self=self.with_self
        )
class SubpropertiesHandler(SubentitiesHandler, RelationshipHandler):
    """Handler over the subproperties of a `Relationship`."""
    def __iter__(self) -> "SubpropertiesIterator":
        return SubpropertiesIterator(
            self.entity, distance=self.distance, with_self=self.with_self
        )
class SuperclassesHandler(SuperentitiesHandler, TermHandler):
    """Handler over the superclasses of a `Term`."""
    def __iter__(self) -> "SuperclassesIterator":
        return SuperclassesIterator(
            self.entity, distance=self.distance, with_self=self.with_self
        )
class SuperpropertiesHandler(SuperentitiesHandler, RelationshipHandler):
    """Handler over the superproperties of a `Relationship`."""
    def __iter__(self) -> "SuperpropertiesIterator":
        return SuperpropertiesIterator(
            self.entity, distance=self.distance, with_self=self.with_self
        )
# --- Abstract iterators -----------------------------------------------------
class LineageIterator(typing.Generic[_E], typing.Iterator[_E]):
    """Breadth-first iterator over the lineage of one or more entities.

    State:
      ``_frontier`` — (node id, distance) pairs still to be expanded;
      ``_queue``    — node ids ready to be yielded;
      ``_linked``   — every id ever queued (prevents duplicate yields);
      ``_done``     — every id already expanded (prevents re-expansion).
    """
    _distmax: float
    _ontology: "Ontology"
    _linked: Set[str]
    _done: Set[str]
    _frontier: Deque[Tuple[str, int]]
    _queue: Deque[str]
    # --- template methods implemented by concrete subclasses ---
    @abc.abstractmethod
    def _get_data(self) -> "_DataGraph":
        return NotImplemented  # type: ignore
    @abc.abstractmethod
    def _get_neighbors(self, node: str) -> Set[str]:
        return NotImplemented  # type: ignore
    @abc.abstractmethod
    def _get_entity(self, node: str) -> _E:
        return NotImplemented  # type: ignore
    @abc.abstractmethod
    def _maxlen(self) -> int:
        return NotImplemented  # type: ignore
    # ---
    def __init__(
        self, *entities: _E, distance: Optional[int] = None, with_self: bool = True
    ) -> None:
        self._distmax = float("inf") if distance is None else distance
        # if not term is given, `__next__` will raise `StopIterator` on
        # the first call without ever accessing `self._ontology`, so it's
        # safe not to initialise it here in that case.
        if entities:
            # NOTE(review): the local `ont` is assigned but never used.
            self._ontology = ont = entities[0]._ontology()
        self._linked: Set[str] = set()
        self._done: Set[str] = set()
        self._frontier: Deque[Tuple[str, int]] = collections.deque()
        self._queue: Deque[str] = collections.deque()
        for entity in entities:
            self._frontier.append((entity.id, 0))
            self._linked.add(entity.id)
            if with_self:
                self._queue.append(entity.id)
    def __iter__(self) -> "LineageIterator[_E]":
        return self
    def __length_hint__(self) -> int:
        """Return an estimate of the number of remaining entities to yield."""
        if self._queue or self._frontier:
            # Everything not yet linked could still appear, plus what is queued.
            return self._maxlen() - len(self._linked) + len(self._queue)
        else:
            return 0
    def __next__(self) -> "_E":
        while self._frontier or self._queue:
            # Return any element currently queued
            if self._queue:
                return self._get_entity(self._queue.popleft())
            # Get the next node in the frontier
            node, distance = self._frontier.popleft()
            self._done.add(node)
            # Process its neighbors if they are not too far
            neighbors: Set[str] = set(self._get_neighbors(node))
            if neighbors and distance < self._distmax:
                # Expand unexplored neighbors one level deeper ...
                for node in sorted(neighbors.difference(self._done)):
                    self._frontier.append((node, distance + 1))
                # ... and queue the ones never seen before for yielding.
                for neighbor in sorted(neighbors.difference(self._linked)):
                    self._linked.add(neighbor)
                    self._queue.append(neighbor)
        # Stop iteration if no more elements to process
        raise StopIteration
class TermIterator(LineageIterator["Term"]):
    """Lineage iterator specialised for `Term` entities."""
    def _maxlen(self):
        # Upper bound on yieldable entities: every term in the ontology.
        return len(self._ontology.terms())
    def _get_entity(self, id):
        return self._ontology.get_term(id)
    def _get_data(self):
        return self._ontology._terms
    def to_set(self) -> "TermSet":
        """Collect all classes into a `~pronto.TermSet`.
        Hint:
            This method is useful to query an ontology using a method chaining
            syntax, for instance::
                >>> cio = pronto.Ontology("cio.obo")
                >>> sorted(cio['CIO:0000034'].subclasses().to_set().ids)
                ['CIO:0000034', 'CIO:0000035', 'CIO:0000036']
        """
        # Local import avoids a circular dependency with ..term.
        from ..term import TermSet
        return TermSet(self)
class RelationshipIterator(LineageIterator["Relationship"]):
    """Lineage iterator specialised for `Relationship` entities."""
    def _maxlen(self):
        # Upper bound on yieldable entities: every relationship in the ontology.
        return len(self._ontology.relationships())
    def _get_entity(self, id):
        return self._ontology.get_relationship(id)
    def _get_data(self):
        return self._ontology._relationships
    def to_set(self) -> "RelationshipSet":
        """Collect all relationships into a `~pronto.RelationshipSet`.
        Hint:
            This method is useful to query an ontology using a method chaining
            syntax.
        """
        # Local import avoids a circular dependency with ..relationship.
        from ..relationship import RelationshipSet
        return RelationshipSet(self)
class SubentitiesIterator(LineageIterator):
    """Iterator walking *down* the hierarchy (children, via `sub` edges)."""
    def _get_neighbors(self, node: str) -> Set[str]:
        # Unknown nodes fall back to an empty Lineage, i.e. no neighbors.
        return self._get_data().lineage.get(node, Lineage()).sub
class SuperentitiesIterator(LineageIterator):
    """Iterator walking *up* the hierarchy (parents, via `sup` edges)."""
    def _get_neighbors(self, node: str) -> Set[str]:
        # Unknown nodes fall back to an empty Lineage, i.e. no neighbors.
        return self._get_data().lineage.get(node, Lineage()).sup
# --- Concrete iterators -----------------------------------------------------
class SubclassesIterator(SubentitiesIterator, TermIterator):
    """An iterator over the subclasses of one or several `~pronto.Term`.

    Combines downward traversal with term lookup.
    """
class SuperclassesIterator(SuperentitiesIterator, TermIterator):
    """An iterator over the superclasses of one or several `~pronto.Term`.

    Combines upward traversal with term lookup.
    """
class SubpropertiesIterator(SubentitiesIterator, RelationshipIterator):
    """An iterator over the subproperties of one or several `~pronto.Relationship`.

    Combines downward traversal with relationship lookup.
    """
class SuperpropertiesIterator(SuperentitiesIterator, RelationshipIterator):
    """An iterator over the superproperties of one or several `~pronto.Relationship`.

    Combines upward traversal with relationship lookup.
    """
| StarcoderdataPython |
3286538 | <reponame>xujunhuii/huobi_Python<filename>example/generic/get_market_status.py
# Minimal example: query the exchange-wide market status via the generic API
# and print the raw response.
from huobi.client.generic import GenericClient
generic_client = GenericClient()
market_status = generic_client.get_market_status()
print(market_status)
| StarcoderdataPython |
1718316 | <filename>random_reward.py
import json
import argparse
from functions import *

# parse arguments
# usage of id arguments not recommended; they are here for completeness
parser = argparse.ArgumentParser()
parser.add_argument('--profile', default='default')
parser.add_argument('--token')
parser.add_argument('--budget_name')
parser.add_argument('--budget_id')
parser.add_argument('--category_from_name')
parser.add_argument('--category_from_id')
parser.add_argument('--category_to_name')
parser.add_argument('--category_to_id')
parser.add_argument('--amount', type=float)
parser.add_argument('--random_threshold', type=float)
args = parser.parse_args()
profile_name = args.profile
# load profile parameters (with-block ensures the handle is closed promptly)
with open('parameters.json') as f:
    profile_list = json.load(f)
profile = profile_list[profile_name]
default = profile_list['default']
# assign params; find ids if none exist
# check for token
token = select_parameter('token', args, profile, default, profile_name)[0]
if token is None:
    raise ValueError('No security token provided! Go to https://app.youneedabudget.com/settings/developer and create one, then add it to parameters.json.')
headers = {'Authorization': 'Bearer ' + token}
# find budget_id; assume name is correct if there's a conflict
budget_name, bdgt_name_source = select_parameter('budget_name', args, profile, default, profile_name)
budget_id, bdgt_id_source = select_parameter('budget_id', args, profile, default, profile_name)
if bdgt_name_source != bdgt_id_source:
    budget_id = None
budget_id = validate_budget(budget_id, headers, budget_name)
url = f'https://api.youneedabudget.com/v1/budgets/{budget_id}'
# find category_id for both categories
month = get_current_month()
cat_from_name, cat_from_name_source = select_parameter('category_from_name', args, profile, default, profile_name)
cat_from_id, cat_from_id_source = select_parameter('category_from_id', args, profile, default, profile_name)
if cat_from_name_source != cat_from_id_source:
    cat_from_id = None
cat_from_id = validate_category(url, headers, cat_from_id, month, cat_from_name, 'category_from')
cat_to_name, cat_to_name_source = select_parameter('category_to_name', args, profile, default, profile_name)
cat_to_id, cat_to_id_source = select_parameter('category_to_id', args, profile, default, profile_name)
if cat_to_name_source != cat_to_id_source:
    cat_to_id = None
cat_to_id = validate_category(url, headers, cat_to_id, month, cat_to_name, 'category_to')
# save updated budget + category ids back to the profile file
new_profile_list = profile_list.copy()
if bdgt_name_source != 'args':
    new_profile_list[bdgt_name_source]['budget_id'] = budget_id
if cat_from_name_source != 'args':
    new_profile_list[cat_from_name_source]['category_from_id'] = cat_from_id
if cat_to_name_source != 'args':
    new_profile_list[cat_to_name_source]['category_to_id'] = cat_to_id
# write with a context manager (the original opened the file and then used
# `with f as outfile`, which works but obscures ownership of the handle)
with open('parameters.json', 'w') as outfile:
    json.dump(new_profile_list, outfile, indent=4)
t_hold = select_parameter('random_threshold', args, profile, default, profile_name)[0]  # random threshold
money = select_parameter('amount', args, profile, default, profile_name)[0]  # amount of money to transfer, in dollars
# convert params to more usable forms (YNAB uses milliunits: dollars * 1000)
money_converted = int(money * 1000)
if __name__ == '__main__':
    money_moved = random_reward(url, headers,cat_from_id, cat_to_id, money_converted, t_hold)
    if money_moved:
        print("Random test passed! Money transfered.")
    else:
        print("Random test failed. No money transfered.")
| StarcoderdataPython |
5077894 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
#lint:disable
from .config import get_db
from .core import getTable, getSession
#lint:enable
| StarcoderdataPython |
8167083 | <reponame>MegaMosquito/rebooter
#
# This is the code for my autmoatic rebooter.
#
# Written by <EMAIL>, 2021-8-21
#
import os
import sys
import schedule
import time
from datetime import datetime
def get_from_env(v, d):
    """Return the value of environment variable `v`, or default `d` when it is unset or empty."""
    # Single lookup via .get instead of the original membership test + index.
    value = os.environ.get(v, '')
    return value if value != '' else d
# Default reboot time is 3AM. For a different time, set WHEN in the environment
# (24-hour "HH:MM" format, as accepted by schedule.every().day.at()).
WHEN = get_from_env('WHEN', '03:00')
# Interface to watch and flip down/up if/when it goes offline (default "wlan0")
INTERFACE = get_from_env('INTERFACE', 'wlan0')
# Local router gateway address (the address used to check connectivity)
GATEWAY = get_from_env('GATEWAY', '10.10.10.10')
def reboot():
    """Log a timestamped message, then immediately reboot the host via systemd."""
    print('(' + datetime.now().strftime('%Y-%b-%d %H:%M:%S') + ') Rebooting...')
    sys.stdout.flush()  # make sure the log line is emitted before we go down
    # Reboot immediately without checking whether operations are in progress
    os.system('/bin/systemctl reboot -i')
def reset(interface):
    """Bounce the given network interface: ifdown, pause, then forced ifup."""
    print('(' + datetime.now().strftime('%Y-%b-%d %H:%M:%S') + ') Network down! Resetting "' + interface + '"...')
    os.system('/usr/sbin/ifdown ' + interface)
    time.sleep(5)  # give the interface time to settle before bringing it back
    os.system('/usr/sbin/ifup --force ' + interface)
    time.sleep(5)  # settle again before the caller re-tests connectivity
# Schedule the nightly reboot at the configured time.
schedule.every().day.at(WHEN).do(reboot)
# Main loop: once a minute, check connectivity (4 pings to the gateway),
# reset the interface if the network is down, and run any pending jobs.
while True:
    result = os.system('/usr/bin/ping -c 4 ' + GATEWAY + ' > /dev/null')
    if 0 != result:  # os.system returns nonzero when ping fails
        reset(INTERFACE)
    schedule.run_pending()
    time.sleep(60)
| StarcoderdataPython |
3516274 | <reponame>nkhanal0/ampcrowd
from django.conf.urls import patterns, url
from basecrowd import views
# Crowd-task endpoints; the leading (\w+) capture group is the crowd platform name.
urlpatterns = patterns(
    '',
    url(r'^(\w+)/assignments/$', views.get_assignment, name='get_assignment'),
    url(r'^(\w+)/responses/$', views.post_response, name='post_response'),
    url(r'^(\w+)/tasks/$', views.create_task_group, name='create_tasks'),
    url(r'^(\w+)/purge_tasks/$', views.purge_tasks, name='purge_tasks'),
)
| StarcoderdataPython |
1689851 | <filename>5 kyu/The maximum and minimum difference Challenge version.py
def max_and_min(arr1, arr2):
    """Return (largest, smallest) absolute difference between any pair
    drawn one from each array."""
    # Deduplicate and sort once; duplicates never change either extreme.
    a = sorted(set(arr1))
    b = sorted(set(arr2))
    # Largest gap is always between one array's maximum and the other's minimum.
    largest = max(abs(a[-1] - b[0]), abs(b[-1] - a[0]))
    # Two-pointer sweep from the top of both sorted arrays for the smallest gap.
    smallest = abs(a[-1] - b[-1])
    i, j = len(a) - 1, len(b) - 1
    while i >= 0 and j >= 0 and smallest > 0:
        smallest = min(smallest, abs(a[i] - b[j]))
        # Step down whichever side currently holds the larger value.
        if a[i] > b[j]:
            i -= 1
        else:
            j -= 1
    return (largest, smallest)
9798011 | #!/usr/bin/env python
"""
Definition and functions for a simple image (stamp) data structure:
a square image with Npix*Npix pixels, where input Npix is odd
N.B. the center of the central pixel of the stamp has coordinates (0,0)
Functions:
- getOneDpixels(self, Npix)
- set2Dpixels(self, Xpixels, Ypixels, Bkgd)
- addGaussianSource(self, muX, muY, alpha)
- addDoubleGaussianSource(self, muX, muY, alpha)
- addNoise(self, sigmaNoise, addsourcenoise=0, gain=1.0)
"""
import numpy as np
import math
from scipy import optimize
from scipy import interpolate
class stamp(object):
    """A square Npix x Npix image stamp whose central pixel is centered at (0, 0)."""

    def __init__(self, Npix, Bkgd):
        """Create a stamp (Npix forced to odd) filled with background level Bkgd."""
        # make 1D pixel array
        self.getOneDpixels(Npix)
        # make 2D image and set to background Bkgd
        self.set2Dpixels(self.oneDpixels[:, np.newaxis], self.oneDpixels, Bkgd)
        # for self-awareness of added sources (debugging, etc)
        self.sourceAdded = 0
        self.addGaussian = 0
        self.addDoubleGaussian = 0

    def getOneDpixels(self, Npix):
        """Build the 1D pixel coordinate grid; an even Npix is incremented to odd."""
        # if Npix is even, increment by 1.
        # Fix: the original tested `Npix/2*2 == Npix`, which under Python 3's
        # true division is true for EVERY integer, so every stamp was resized.
        if Npix % 2 == 0:
            Npix += 1
        # Fix: np.int(...) was removed in NumPy 1.24; integer division suffices.
        NpixHalf = Npix // 2
        self.oneDpixels = np.linspace(-NpixHalf, NpixHalf, Npix)
        self.Npix = Npix

    def set2Dpixels(self, Xpixels, Ypixels, Bkgd):
        """Make the 2D image grid and fill it with the background value."""
        r = np.sqrt(Xpixels**2 + Ypixels**2)
        self.image = np.empty(r.shape)
        self.image.fill(Bkgd)
        self.Xpixels = Xpixels
        self.Ypixels = Ypixels

    def addGaussianSource(self, muX, muY, alpha, A):
        """Add a circular 2D Gaussian at pixel coordinates (muX, muY).

        alpha is the width in pixels and A the total (integrated) count.
        """
        r = np.sqrt((self.Xpixels - muX)**2 + (self.Ypixels - muY)**2)
        sourceImage = A * np.exp(-r**2 / 2 / alpha**2) / (2 * math.pi * alpha**2)
        self.sourceImage = sourceImage
        self.sourceAdded = 1
        self.image += sourceImage
        self.addGaussian += 1

    def addDoubleGaussianSource(self, muX, muY, alpha, A):
        """Add a double Gaussian at (muX, muY): width ratio 1:2, amplitude ratio ~10:1."""
        r = np.sqrt((self.Xpixels - muX)**2 + (self.Ypixels - muY)**2)
        sourceImage = 0.909 * A * np.exp(-r**2 / 2 / alpha**2) / (2 * math.pi * alpha**2)
        alpha2 = alpha * 2
        sourceImage += 0.091 * A * np.exp(-r**2 / 2 / alpha2**2) / (2 * math.pi * alpha2**2)
        self.sourceImage = sourceImage
        self.sourceAdded = 1
        self.image += sourceImage
        self.addDoubleGaussian += 1

    def addNoise(self, sigmaNoise, addsourcenoise=0, gain=1.0):
        """Add Gaussian background noise (and optional source shot noise).

        Keeps a noiseless copy in `imageNoNoise` and the per-pixel variance
        map in `variance`. NOTE: addsourcenoise=1 requires that a source was
        added first (it reads self.sourceImage).
        """
        # keep a copy of the noiseless input image, then add background noise
        self.imageNoNoise = np.copy(self.image)
        self.image += np.random.normal(0, sigmaNoise, self.image.shape)
        variance = 0 * self.imageNoNoise + sigmaNoise**2
        if addsourcenoise:
            sourceVariance = np.copy(self.sourceImage) / gain
            self.image += np.random.normal(0, np.sqrt(sourceVariance), self.image.shape)
            variance += sourceVariance
        self.variance = variance
6661619 | <filename>tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py
"""Test for AggregateStatsReply message."""
import unittest
from pyof.v0x01.controller2switch.common import AggregateStatsReply
class TestAggregateStatsReply(unittest.TestCase):
    """Test for AggregateStatsReply message."""
    def setUp(self):
        """Basic test setup: a reply message with fixed counter values."""
        self.message = AggregateStatsReply()
        self.message.packet_count = 5
        self.message.byte_count = 1
        self.message.flow_count = 8
    def test_get_size(self):
        """[Controller2Switch/AggregateStatsReply] - size 24."""
        # The message body has a fixed wire size of 24 bytes.
        self.assertEqual(self.message.get_size(), 24)
    @unittest.skip('Not yet implemented')
    def test_pack(self):
        """[Controller2Switch/AggregateStatsReply] - packing."""
        # TODO
        pass
    @unittest.skip('Not yet implemented')
    def test_unpack(self):
        """[Controller2Switch/AggregateStatsReply] - unpacking."""
        # TODO
        pass
| StarcoderdataPython |
225145 | <filename>Providers/Scripts/3.x/Scripts/Tests/dummy_nxOMSAutomationWorker_files/main.py
#!/usr/bin/env python2
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
import sys
import os
import configuration
import ConfigParser
import time
def exit_on_error(message, exit_code=1):
    """Print the error, best-effort write it to a crash log in $HOME, then exit.

    Python 2 module (note the print statement).
    """
    print str(message)
    try:
        os.chdir(os.path.expanduser("~"))
        # CPython closes the unnamed handle via refcounting; good enough here.
        open("automation_worker_crash.log", "w").write(message)
    except:
        # best effort only: failing while reporting a crash must not mask it
        pass
    sys.exit(exit_code)
def generate_state_file():
    """Recreate state.conf with the worker pid, version and optional ids.

    The file lives in the working directory when the state directory is the
    default, otherwise in the configured state directory.
    """
    state_file_name = "state.conf"
    if configuration.get_state_directory_path() == configuration.DEFAULT_STATE_DIRECTORY_PATH:
        state_file_path = os.path.join(configuration.get_working_directory_path(), state_file_name)
    else:
        state_file_path = os.path.join(configuration.get_state_directory_path(), state_file_name)
    # Always start from a fresh file.
    if os.path.isfile(state_file_path):
        os.remove(state_file_path)
    section = "state"
    conf_file = open(state_file_path, 'wb')
    config = ConfigParser.ConfigParser()
    config.add_section(section)
    config.set(section, configuration.STATE_PID, str(os.getpid()))
    config.set(section, configuration.WORKER_VERSION, str(configuration.get_worker_version()))
    # for OMS scenarios, optional for DIY
    if len(sys.argv) >= 3:
        config.set(section, configuration.STATE_WORKSPACE_ID, str(sys.argv[2]))
    if len(sys.argv) >= 4:
        config.set(section, configuration.STATE_RESOURCE_VERSION, str(sys.argv[3]))
    config.write(conf_file)
    conf_file.close()
def main():
    """Entry point: validate argv, load configuration, write state file, idle.

    argv[1] must be the configuration file path; argv[2]/argv[3] optionally
    carry the workspace id and resource version (OMS scenarios).
    """
    if len(sys.argv) < 2:
        exit_on_error("Invalid configuration file path (absolute path is required).")
    configuration_path = sys.argv[1]
    if not os.path.isfile(configuration_path):
        exit_on_error("Invalid configuration file path (absolute path is required).")
    # configuration has to be read first thing
    try:
        # remove the test_mode env_var value (mainly for Windows)
        # this value is set in test
        del os.environ["test_mode"]
    except KeyError:
        pass
    worker_dir = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(worker_dir, configuration_path)
    configuration.read_and_set_configuration(config_path)
    configuration.set_config({configuration.COMPONENT: "worker"})
    # do not trace anything before this point
    generate_state_file()
    # start a non terminating job: sleep forever to keep the process alive
    while (True):
        time.sleep(60)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5067992 | <gh_stars>1-10
"""
[2017-08-01] Challenge #325 [Easy] Color maze
https://www.reddit.com/r/dailyprogrammer/comments/6qutez/20170801_challenge_325_easy_color_maze/
#Description
Today we are going to do something colorfull and amazing. Yes it is a color maze :D (you can downvote me now, it was
totally worth it).
You traverse a color by following a sequence of colors. For example [this maze](http://imgur.com/AnK1kwa) can be solved
by the sequence 'orange -> green'.
Then you would have something like [this](http://imgur.com/zgciqa5) (paint skills)
For the mazes you always pick a spot on the bottom, in the starting color and try to get to the first row. Once you
reach the first row, you are out of the maze. The sequence does not have to be complete.
You can move horizontally and vertically, but not diagonally. It is also allowed to move on the same node more then
once.
#Formal Inputs & Outputs
##Input description
You will recieve a line with the sequence to follow and all the lines after that are the maze.
O G
B O R O Y
O R B G R
B O G O Y
Y G B Y G
R O R B R
##Output description
You can choose what you want to output:
you could output the path:
(1,4)
(1,3)
(1,2)
(2,2)
(3,2)
(3,1)
(3,0)
or you could plot out the sequence
/ / / O /
/ / / G /
/ O G O /
/ G / / /
/ O / / /
or you could create an image result or go even fancier if you want to.
##Challnge Input
R O Y P O
R R B R R R B P Y G P B B B G P B P P R
B G Y P R P Y Y O R Y P P Y Y R R R P P
B P G R O P Y G R Y Y G P O R Y P B O O
R B B O R P Y O O Y R P B R G R B G P G
R P Y G G G P Y P Y O G B O R Y P B Y O
O R B G B Y B P G R P Y R O G Y G Y R P
B G O O O G B B R O Y Y Y Y P B Y Y G G
P P G B O P Y G B R O G B G R O Y R B R
Y Y P P R B Y B P O O G P Y R P P Y R Y
P O O B B B G O Y G O P B G Y R R Y R B
P P Y R B O O R O R Y B G B G O O P B Y
B B R G Y G P Y G P R R P Y G O O Y R R
O G R Y B P Y O P B R Y B G P G O O B P
R Y G P G G O R Y O O G R G P P Y P B G
P Y P R O O R O Y R P O R Y P Y B B Y R
O Y P G R P R G P O B B R B O B Y Y B P
B Y Y P O Y O Y O R B R G G Y G R G Y G
Y B Y Y G B R R O B O P P O B O R R R P
P O O O P Y G G Y P O G P O B G P R P B
R B B R R R R B B B Y O B G P G G O O Y
#Notes/Hints
Since the sequence can have the same color more then once, it is possible that you have to visit the same node more
then once.
#Bonus
Read the data not from text input but from the [image](http://imgur.com/uoItN6T)
All squares are 100 by 100 pixels with 1 pixel border
The RGB values are
Red: (255, 0, 0)
Green: (0,128,0)
Blue: (0, 0, 255)
Orange: (255, 165, 0)
Yellow: (255, 255, 0)
Pink: (255, 192, 203)
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
EDIT: Added clarifications after some questions of /u/the_droide
"""
def main():
    # Challenge left unimplemented: parse the color sequence and maze, then
    # search for a path from the bottom row to the top row.
    pass
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6621455 | <reponame>Healthedata1/pubsub-endpoint<filename>app.py
# A very simple Flask app to get started with using
# FHIR Subscriptions
# This is a reciever for the FHIR R4 Server URL (https://subscriptions.argo.run/)
# with an ednpoint = "http://healthedatainc2.pythonanywhere.com/webhook"
# It just saves the subscription notification data to a flat csv file "data.csv"
# to initialize the data.csv:
#
# data = dict(
# timestamp = [], #Bundle['timestamp']
# foo = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode']
# status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode']
# topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri']
# event_id = [], # Bundle['entry'][0]['fullUri']
# )
# df = pd.DataFrame(data=data)
# df
#
# file_name = 'data.csv'
# df.to_csv(file_name)
# print(f'saving {file_name} as csv ...')
# my_csv = pd.read_csv(file_name, index_col = 0)
# my_csv#
#
# and display subscription notifications data
# the csv file "data.csv" is consantly appended and not created each time
from flask import Flask, request, Response, render_template, session
import os
import logging
from datetime import datetime
from json import dumps, loads
import pandas as pd
logging.basicConfig(
    filename='demo.log',
    level=logging.DEBUG,
    # NOTE(review): the stray '}' after %(lineno)d looks unintended — confirm.
    format='[%(asctime)s] %(levelname)s in %(module)s %(lineno)d}: %(message)s')
app = Flask(__name__)
app.config["DEBUG"] = True
# NOTE(review): hardcoded secret key — fine for a demo, not for production.
app.secret_key = 'my_secret_key'
# CSV file that accumulates one row per received subscription notification.
file_name = 'data.csv'
# Column layout of the notification log; comments map each column to its
# location in the incoming FHIR notification Bundle.
empty_table = dict(
    timestamp = [], #Bundle['timestamp']
    type = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode']
    status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode']
    topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri']
    event_id = [], # Bundle['entry'][0]['fullUri']
)
#see add_url_rule to conditionally open rest hook.= e.g after subscribing"
@app.route('/webhook', methods=['POST'])
def respond():
    """Receive a FHIR Subscription notification bundle and append it to data.csv.

    Returns 200 on success, 400 when required bundle fields are missing.
    """
    app.logger.info(request.headers)
    app.logger.info(request.json)
    try:  # the notification bundle sometimes has no event entry
        bundle_event_id = request.json['entry'][1]['fullUrl']
    except IndexError:  # no event entry (e.g. a heartbeat) is OK
        bundle_event_id = None
    except KeyError:  # an entry without fullUrl is malformed
        return Response(status=400)
    try:  # these fields are required; reject the bundle if any is missing
        bundle_ts = request.json['timestamp']
        params = request.json['entry'][0]['resource']['parameter']
        bundle_type = [param['valueCode'] for param in params if param['name'] == 'type'][0]
        bundle_status = [param['valueCode'] for param in params if param['name'] == 'status'][0]
        bundle_topic = [param['valueUri'] for param in params if param['name'] == 'topic'][0]
    except Exception:
        return Response(status=400)
    else:
        df = pd.read_csv(file_name, index_col=0)
        # DataFrame.append() was removed in pandas 2.0; build a one-row frame
        # and concatenate instead (same result: row added with a fresh index).
        new_row = pd.DataFrame(
            [[bundle_ts, bundle_type, bundle_status, bundle_topic, bundle_event_id]],
            columns=df.columns,
        )
        df = pd.concat([df, new_row], ignore_index=True)
        df.to_csv(file_name)
        return Response(status=200)
@app.route('/',methods = ['POST', 'GET'])
def html_table():
    """Render the notification log as an HTML table; the clear button resets data.csv."""
    #app.logger.info(f"request.method = {request.method}")
    if "clear_button" in request.form:
        # Overwrite the CSV with the empty column layout.
        df = pd.DataFrame(data=empty_table)
        df.to_csv(file_name)
    # keep_default_na=False so empty event ids render as blanks, not NaN.
    df = pd.read_csv(file_name, index_col = 0, keep_default_na=False )
    #app.logger.info("update table")
    return render_template('index.html',
        tables=[df.to_html(classes='data')],
        titles = df.columns.values,)
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
6505812 | import threading
class Limiter:
'''A limit keeps track of the count of certain keys
If a key's count exceeds the limit, it is considered done.
'''
def __init__(self, limit=2):
self.d = dict()
self.limit = limit
self.lock = threading.Lock()
def get(self, url):
return self.d.get(url, 0)
def increment(self, url):
with self.lock:
if url in self.d:
self.d[url] += 1
else:
self.d[url] = 1
def done(self, url):
return self.d.get(url, 0) >= self.limit
| StarcoderdataPython |
396255 | <filename>giveaways/constants.py
import discord
# Template embed for an active giveaway; {placeholder} fields are filled in
# with str.format() at send time.
giveaway_embed: discord.Embed = discord.Embed(
    title="**`{prize}`**",
    description=("React with {emoji} to enter\n" "Host: {host}\n" "Ends {timestamp}\n"),
).set_footer(text="Winners: {winners} | ends : ")
# DM sent to each winner when a giveaway ends.
winnerdm_message: str = (
    "You have won the giveaway for `{prize}` in **__{server}__**.\n"
    "Click [here]({jump_url}) to jump to the giveaway."
)
# DM sent to the giveaway host when their giveaway ends.
hostdm_message: str = (
    "Your giveaway for {prize} has ended.\n"
    "The winner(s) are: {winners}\n\n"
    "Click [here]({jump_url}) to jump to the giveaway."
)
# Individual embed pieces; each can be overridden per guild in the config.
embed_title: str = "**`{prize}`**"
embed_description: str = (
    "React with {emoji} to enter\n" "Host: {host.mention}\n" "Ends {timestamp}\n"
)
embed_footer_text: str = "Winners: {winners} | ends : "
embed_footer_icon: str = "{host_avatar_url}"
embed_thumbnail: str = "{server_icon_url}"
# Default per-guild configuration written to storage on first use.
guild_default_config = {
    "msg": ":tada:Giveaway:tada:",
    "emoji": "🎉",
    "winnerdm": True,
    "winnerdm_message": winnerdm_message,
    "hostdm": True,
    "hostdm_message": hostdm_message,
    "reactdm": True,
    "unreactdm": True,
    "endmsg": "Congratulations :tada:{winner}:tada:. You have won the giveaway for ***{prize}***.\n{link}",
    "tmsg": "Prize: {prize}\nDonor: {donor.mention}\n\nThank the donor in general chat",
    "manager": [],
    "pingrole": None,
    "autodelete": False,
    "blacklist": [],
    "bypass": [],
    "top_managers": {},
    "show_defaults": True,
    "embed_title": embed_title,
    "embed_description": embed_description,
    "embed_footer_text": embed_footer_text,
    "embed_footer_icon": embed_footer_icon,
    "embed_thumbnail": embed_thumbnail,
    "color": None,
}
# Command names whose invocation messages get cleaned up automatically.
commands_to_delete = ["giveaway start", "giveaway flash", "giveaway end", "giveaway reroll"]
| StarcoderdataPython |
12839849 | from copy import deepcopy
from nglp.lib.seamless import Construct, SeamlessException
class OpenAPISupport(object):
    """Translate Seamless structs into OpenAPI/JSONSchema fragments."""
    DEFAULT_OPENAPI_TRANS = {
        # The default translation from our coerce to openapi is {"type": "string"}
        # if there is no matching entry in the trans dict here.
        "unicode": {"type": "string"},
        "utcdatetime": {"type": "string", "format": "date-time"},
        "integer": {"type": "integer"},
        "bool": {"type": "boolean"},
        # NOTE(review): "float" is not a standard OpenAPI type; the spec uses
        # {"type": "number", "format": "float"} -- confirm downstream consumers
        # expect this non-standard form before changing it.
        "float": {"type": "float"},
        "isolang": {"type": "string", "format": "isolang"},
        "url": {"type": "string", "format": "url"},
        "isolang_2letter": {"type": "string", "format": "isolang-alpha2"},
        "country_code": {"type": "string", "format": "country_code"},
        "currency_code": {"type": "string", "format": "currency_code"},
        "license": {"type": "string", "format": "license_type"},
        "persistent_identifier_scheme": {"type": "string", "format": "persistent_identifier_scheme"},
        "format": {"type": "string", "format": "format"},
        "deposit_policy": {"type": "string", "format": "deposit_policy"},
    }
    def __init__(self, openapi_trans=None):
        # Callers may supply their own coerce->openapi mapping; otherwise use a
        # deep copy of the defaults so the class attribute is never mutated.
        self._openapi_trans = openapi_trans if openapi_trans is not None else deepcopy(self.DEFAULT_OPENAPI_TRANS)
    # def struct_to_swag(self, struct, schema_title='', **kwargs):
    #     if not struct:
    #         if not self._struct:
    #             raise DataSchemaException("No struct to translate to Swagger.")
    #         struct = self._struct
    #
    #
    #     swag = {
    #         "properties": self.__struct_to_swag_properties(struct=struct, **kwargs)
    #     }
    #     required = deepcopy(struct.get('required', []))
    #     if len(required) > 0:
    #         swag["required"] = required
    #
    #     if schema_title:
    #         swag['title'] = schema_title
    #
    #     return swag
    def request_body_section(self, struct):
        """Wrap the struct's JSONSchema in an OpenAPI requestBody section."""
        return {
            "requestBody" : {
                "content" : {
                    "application/json" : {
                        "schema" : self.struct_to_jsonschema(struct)
                    }
                }
            }
        }
    def struct_to_jsonschema(self, struct, path=''):
        '''A recursive function to translate the Seamless Struct to JSONSchema'''
        if not (isinstance(struct, dict) or isinstance(struct, Construct)):
            raise SeamlessException("The struct whose properties we're translating to JSONSchema should always be a dict-like object.")
        swag_properties = {}
        # convert simple fields
        for simple_field, instructions in iter(struct.get('fields', {}).items()):
            # no point adding to the path here, it's not gonna recurse any further from this field
            swag_properties[simple_field] = self._openapi_trans.get(instructions['coerce'], {"type": "string"})
        # convert objects
        for obj in struct.get('objects', []):
            # ``path`` accumulates the dotted location; it is only used for titles.
            newpath = obj if not path else path + '.' + obj
            instructions = struct.get('structs', {}).get(obj, {})
            swag_properties[obj] = {}
            swag_properties[obj]['title'] = newpath
            swag_properties[obj]['type'] = 'object'
            swag_properties[obj]['properties'] = self.struct_to_jsonschema(struct=instructions, path=newpath)["properties"] # recursive call, process sub-struct(s)
            required = deepcopy(instructions.get('required', []))
            if len(required) > 0:
                swag_properties[obj]['required'] = required
        # convert lists
        for l, instructions in iter(struct.get('lists', {}).items()):
            newpath = l if not path else path + '.' + l
            swag_properties[l] = {}
            swag_properties[l]['type'] = 'array'
            swag_properties[l]['items'] = {}
            if instructions['contains'] == 'field':
                swag_properties[l]['items'] = self._openapi_trans.get(instructions['coerce'], {"type": "string"})
            elif instructions['contains'] == 'object':
                swag_properties[l]['items']['type'] = 'object'
                swag_properties[l]['items']['title'] = newpath
                swag_properties[l]['items']['properties'] = self.struct_to_jsonschema(struct=struct.get('structs', {}).get(l, {}), path=newpath)["properties"] # recursive call, process sub-struct(s)
                required = deepcopy(struct.get('structs', {}).get(l, {}).get('required', []))
                if len(required) > 0:
                    swag_properties[l]['items']['required'] = required
            else:
                raise SeamlessException("Instructions for list {x} unclear. Conversion to JSONSchema only supports lists containing \"field\" and \"object\" items. Found: {y}".format(x=newpath, y=instructions['contains']))
        return {"properties" : swag_properties}
| StarcoderdataPython |
12830162 | from .constants import VV, VH, IW, ASCENDING
from .filter import filter_sentinel1_data
from tqdm import tqdm
import ee
from .compose import compose_sentinel1_data
from .dictify import dictify_pixel_values
__all__ = ["fetch_sentinel1_data"]
def fetch_sentinel1_data(
    start_date,
    end_date,
    geometry,
    scale,
    crs,
    pass_direction=ASCENDING,
):
    """Query the Sentinel-1 ImageCollection and return ``(header, rows)``.

    The VV and VH bands are fetched through two separate ``getRegion`` calls
    and merged column-wise: every row carries the VV record (minus its
    leading id column) with the matching VH value appended.

    Parameters
    ----------
    start_date, end_date : str
        ``'yyyy-mm-dd'`` bounds of the time interval.
    geometry : ee.Geometry
        Area of interest passed to ``getRegion``.
    scale : int
        Spatial scale of the sampled points (metres); should be >= 10.
    crs : str
        Coordinate reference system for the sampling grid.
    pass_direction : str, optional
        Orbit pass direction used when filtering the collection.

    Returns
    -------
    tuple
        ``(header, rows)`` where ``header`` is a list of column names and
        ``rows`` is a list of per-pixel records matching that header.
    """
    collection = filter_sentinel1_data(
        start_date=start_date,
        end_date=end_date,
        geometry=geometry,
        pass_direction=pass_direction,
    )

    def region_table(band):
        # Raw getRegion table for one band: first row is the header.
        return (collection
                .select(band)
                .getRegion(geometry, scale=scale, crs=crs)
                .getInfo())

    vv_table = region_table(VV)
    vh_table = region_table(VH)

    # Drop the leading "id" column from the VV header and append the VH band.
    header = vv_table[0][1:] + [VH]
    vh_column = vh_table[0].index(VH)
    rows = [
        vv_row[1:] + [vh_table[row_index][vh_column]]
        for row_index, vv_row in enumerate(vv_table)
        if row_index > 0
    ]
    return (header, rows)
def fetch_composite_pixels(
    list_of_coordinates,
    start_date,
    end_date,
    scale,
    crs,
    pass_direction,
    statistic,
):
    """Aggregate Sentinel-1 composites for each polygon into one dictionary.

    Every entry of *list_of_coordinates* is turned into an ``ee.Geometry``
    polygon, composited with ``compose_sentinel1_data`` and accumulated.
    Failures on individual polygons are reported and skipped so one bad
    geometry does not abort the whole batch.

    Parameters
    ----------
    list_of_coordinates : iterable
        Polygon coordinate rings, one per subregion.
    start_date, end_date : str
        ``'yyyy-mm-dd'`` bounds of the time interval.
    scale : int
        Spatial scale of the sampled points (metres); should be >= 10.
    crs : str
        Coordinate reference system for both the polygons and the sampling.
    pass_direction : str
        Orbit pass direction used when filtering the collection.
    statistic : str
        Descriptive statistic name as per Google Earth Engine's reducers.

    Returns
    -------
    dict
        Pixel values keyed as produced by ``dictify_pixel_values``.
    """
    header = []
    all_values = []
    for polygon_coords in tqdm(list_of_coordinates):
        try:
            region = ee.Geometry.Polygon(
                coords=[polygon_coords],
                proj=crs,
                geodesic=False,
            )
            region_header, region_values = compose_sentinel1_data(
                start_date=start_date,
                end_date=end_date,
                geometry=region,
                scale=scale,
                crs=crs,
                pass_direction=pass_direction,
                statistic=statistic,
            )
            all_values.extend(region_values)
            # The header is identical for every subregion; keep the first one.
            if not header:
                header.extend(region_header)
        except Exception as error:
            # Best-effort batch: report the failure and continue.
            print("Some exception occured:", error)
    return dictify_pixel_values(
        header=header,
        pixel_values_list=all_values,
    )
| StarcoderdataPython |
3286960 | <filename>tests/legacy/core_tests.py
from nose.tools import raises
from graffiti.legacy import core
from graffiti.util import fninfo
from graffiti.legacy.keys import simplify
def test_build_nodes():
    """Callable values are wrapped in fninfo; everything else passes through."""
    fn = lambda a, b=1: 1
    info = fninfo(fn)
    descriptor = {"a": fn, "b": fn, "c": "foo"}
    expected = {"a": info, "b": info, "c": "foo"}
    assert core.build_nodes(descriptor) == expected
def test_deps_for_flat():
    """A lone node depends only on its own parameters."""
    nodes = core.build_nodes({"a": lambda n: 1})
    assert core.deps_for(nodes, "a") == {"n"}
def test_deps_for_transitive():
    """Dependencies propagate through intermediate nodes."""
    nodes = core.build_nodes({
        "a": lambda n: 1,
        "b": lambda a: 2,
    })
    assert core.deps_for(nodes, "b") == {"n", "a"}
def test_deps_for_nested():
    """Nested descriptors are flattened by simplify before dependency lookup."""
    descriptor = {
        "a": lambda n: 1,
        "b": {"c": lambda a: 2},
        "d": lambda b__c: 3,
    }
    nodes = core.build_nodes(simplify(descriptor))
    assert core.deps_for(nodes, "d") == {"n", "a", "b__c"}
@raises(core.GraphError)
def test_deps_for_cycle():
    """A dependency cycle must raise GraphError."""
    descriptor = {
        "a": lambda b: 1,
        "b": lambda c: 1,
        "c": lambda a: 1,
    }
    core.deps_for(core.build_nodes(simplify(descriptor)), "b")
def test_build_dependency_tree():
    """The tree lists the full transitive dependency set for every node."""
    descriptor = {
        "a": lambda n: 1,
        "b": {"c": lambda a: 2},
        "d": lambda b__c: 3,
    }
    expected = {
        "a": {"n"},
        "b__c": {"n", "a"},
        "d": {"n", "a", "b__c"},
    }
    nodes = core.build_nodes(simplify(descriptor))
    assert core.build_dependency_tree(nodes) == expected
def test_graph_parameters():
    """Parameters split into (required inputs, optional inputs, outputs)."""
    nodes = core.build_nodes({
        "a": lambda n: 1,
        "b": lambda a, c=10: 3,
    })
    assert core.graph_parameters(nodes) == ({"n"}, {"c"}, {"a", "b"})
def test_graph_nodes():
    """graph_nodes includes both computed outputs and external inputs."""
    nodes = core.build_nodes({
        "a": lambda n: 1,
        "b": lambda a, c=10: 3,
    })
    deps = core.build_dependency_tree(nodes)
    assert core.graph_nodes(deps) == {"a", "b", "n"}
def test_compile_graph():
    """compile_graph bundles every intermediate structure into one dict."""
    descriptor = {
        "a": lambda n: 1,
        "b": lambda a, c=10: 3,
    }
    nodes = core.build_nodes(descriptor)
    deps = core.build_dependency_tree(nodes)
    required, optional, outputs = core.graph_parameters(nodes)
    expected = {
        "descriptor": descriptor,
        "nodes": nodes,
        "dependencies": deps,
        "required_inputs": required,
        "optional_inputs": optional,
        "outputs": outputs,
        "node_names": core.graph_nodes(deps),
    }
    assert core.compile_graph(descriptor) == expected
def test_call_graph():
    """call_graph evaluates one node, honouring any supplied overrides."""
    descriptor = {
        "a": lambda n: n,
        "b": lambda a, c=10: a + c,
    }
    compiled = core.compile_graph(descriptor)
    assert core.call_graph(compiled, "a", {"n": 1}) == 1
    assert core.call_graph(compiled, "a", {"n": 1, "c": 1}) == 1
    assert core.call_graph(compiled, "b", {"n": 1, "a": 1}) == 11
    assert core.call_graph(compiled, "b", {"n": 1, "a": 1, "c": 1}) == 2
# graffiti.core.run_graph tested in tests/graph_tests.py
| StarcoderdataPython |
11222834 | import typing
def travese_better(x: int) -> int:
    """Reverse the decimal digits of *x*, preserving its sign.

    Returns 0 when the reversed value falls outside the signed 32-bit
    range [-2**31, 2**31 - 1], matching the usual "reverse integer"
    contract.  The original recursive version compared against 2**31,
    which would have accepted the unrepresentable value 2**31 itself;
    this iterative rewrite uses the exact bounds.
    """
    sign = -1 if x < 0 else 1
    magnitude = abs(x)
    reversed_value = 0
    while magnitude:
        reversed_value = reversed_value * 10 + magnitude % 10
        magnitude //= 10
    result = sign * reversed_value
    # Clamp to the signed 32-bit integer range.
    if result < -2**31 or result > 2**31 - 1:
        return 0
    return result
# 2 << 31 shifts the bit pattern left 31 places (== 2**32), whereas 2 ** 31
# is exponentiation; the printed pair demonstrates the two operators differ.
n1 = 2 << 31
n2 = 2 ** 31
print(n1, n2)
| StarcoderdataPython |
198264 | from virtool_workflow.analysis.sample import sample
from virtool_workflow.analysis.subtractions import subtractions
from virtool_workflow.analysis.hmms import hmms
from virtool_workflow.analysis.analysis import analysis
from virtool_workflow.analysis.indexes import indexes
# Public fixture surface re-exported by this package.
__all__ = [
    "sample",
    "subtractions",
    "hmms",
    "analysis",
    "indexes",
]
| StarcoderdataPython |
1766815 | <reponame>minhman727/miao-nutrition-assistant
from mongoengine import *
from src.models.abstract_models.food_model import Food
class SampleFood(Food):
    """Sample (reference) food document stored in the miao database."""
    meta = {
        "db_alias": "miao",
        "collection": "sample_foods"
    }
6575645 | from .native import *
from .native_extends import *
from .bool import *
from .vector import * | StarcoderdataPython |
391764 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Deprecated.
This module is deprecated since version 1.13 and may be removed in version 2.0.
Use fixtures.Mock* classes instead.
"""
from debtcollector import removals
import fixtures
# Emit a deprecation warning whenever this module is imported.
removals.removed_module("oslotest.mockpatch", replacement="fixtures",
                        version="1.13", removal_version="2.0",
                        message="Use fixtures.Mock* classes instead.")
# Backwards-compatible aliases pointing at the fixtures replacements.
PatchObject = fixtures.MockPatchObject
Patch = fixtures.MockPatch
Multiple = fixtures.MockPatchMultiple
313477 | # Bader et al. (2003) MCODE clustering algorithm from
# "An automated method for finding molecular complexes in large protein interaction networks"
# Python 2 Author: <NAME> <<EMAIL>>
# Python 3 protclus version: Paul Scherer
import sys
from collections import defaultdict
from .cluster_alg import ClusterAlg
class MCODE(ClusterAlg):
    """Class for running and administrating Bader et al.'s MCODE algorithm"""
    def __init__(self, filename, weight_threshold=0.2):
        """Prepare a clustering run over the edgelist in *filename*.

        NOTE(review): the stored threshold is the complement (1 - value) of
        the argument; ``cluster`` multiplies seed weights by it, so a larger
        *weight_threshold* argument makes expansion more permissive -- confirm
        this inversion is intended.
        """
        super(MCODE, self).__init__(filename)
        self.weight_threshold = 1 - weight_threshold
    def cluster(self):
        """Run MCODE: weight vertices by k-core density, then grow complexes
        from the highest-weight seeds and store them in ``self.clusters``."""
        edges = defaultdict(set)  # node id => neighboring node ids
        # Read edgelist (whitespace-separated node pairs, one edge per line)
        with open(self.filename, 'r') as f:
            for line in f:
                a, b = line.split()[:2]
                edges[a].add(b)
                edges[b].add(a)
        print ('## Input graph loaded; %i nodes' % (len(edges),))
        # Clusters list
        clusters = []
        # Stage 1: Vertex Weighting
        print ('## Weighting vertices...')
        weights = dict((v, 1.) for v in edges)
        for i, v in enumerate(edges):
            neighborhood = set((v,)) | edges[v]
            # if node has only one neighbor, we know everything we need to know
            if len(neighborhood) <= 2:
                continue
            # see if larger k-cores exist
            k = 1  # highest valid k-core
            while neighborhood:
                k_core = neighborhood.copy()
                invalid_nodes = True
                # Repeatedly strip nodes with degree <= k inside the candidate core.
                while invalid_nodes and neighborhood:
                    invalid_nodes = set(n for n in neighborhood if len(
                        edges[n] & neighborhood) <= k)
                    neighborhood -= invalid_nodes
                k += 1  # on exit, k will be one greater than we want
            # vertex weight = k-core number * density of k-core
            weights[v] = (k - 1) * (sum(len(edges[n] & k_core)
                for n in k_core) / (2. * len(k_core)**2))
        # Stage 2: Molecular Complex Prediction
        print('## Molecular complex prediction...')
        unvisited = set(edges)
        num_clusters = 0
        # Seed complexes from the highest-weighted unvisited vertices first.
        for seed in sorted(weights, key=weights.get, reverse=True):
            if seed not in unvisited:
                continue
            cluster, frontier = set((seed,)), set((seed,))
            # Expansion threshold relative to the seed's own weight.
            w = weights[seed] * self.weight_threshold
            while frontier:
                cluster.update(frontier)
                unvisited -= frontier
                frontier = set(n for n in set.union(
                    *(edges[n] for n in frontier)) & unvisited if weights[n] > w)
            # Haircut: only keep 2-core complexes
            invalid_nodes = True
            while invalid_nodes and cluster:
                invalid_nodes = set(
                    n for n in cluster if len(edges[n] & cluster) < 2)
                cluster -= invalid_nodes
            if cluster:
                # fluff never really seems to improve anything...
                # cluster.update(
                #     n for n in set.union(*(edges[c] for c in cluster)) & unvisited
                #     if densities[n] > FLUFF_THRESHOLD)
                print (' '.join(cluster))
                num_clusters += 1
                print (num_clusters, len(cluster), seed)
                clusters.append(cluster)
        self.clusters = clusters
# if __name__ == '__main__':
# filename = "../data/unweighted_example_network.txt"
# c = MCODE(filename)
# c.cluster()
# c.save_clusters("/tmp/mcode_test.txt")
| StarcoderdataPython |
3310253 | <reponame>hansehe/DockerBuildSystem
import unittest
import os
from tests import TestTools
from DockerBuildSystem import VersionTools
class TestVersionTools(unittest.TestCase):
    """Tests for changelog-based version extraction and environment export."""
    def test_GetVersionFromChangelog(self):
        # Both sample changelog layouts should parse to the same version.
        version = VersionTools.GetVersionFromChangelog(os.path.join(TestTools.TEST_SAMPLE_FOLDER, 'CHANGELOG.md'))
        self.assertEqual(version, '1.0.0')
        version = VersionTools.GetVersionFromChangelog(os.path.join(TestTools.TEST_SAMPLE_FOLDER, 'CHANGELOG.v2.md'))
        self.assertEqual(version, '1.0.0')
    def test_ExportVersionFromChangelogToEnvironment(self):
        # Clear the environment so the assertion only sees what the export set.
        os.environ.clear()
        VersionTools.ExportVersionFromChangelogToEnvironment(os.path.join(TestTools.TEST_SAMPLE_FOLDER, 'CHANGELOG.md'), 'VERSION')
        self.assertEqual(os.environ['VERSION'], '1.0.0')
        os.environ.clear()
        VersionTools.ExportVersionFromChangelogToEnvironment(os.path.join(TestTools.TEST_SAMPLE_FOLDER, 'CHANGELOG.v2.md'), 'VERSION')
        self.assertEqual(os.environ['VERSION'], '1.0.0')
    def test_ExportMajorMinorVersionFromChangelogToEnvironment(self):
        # Major/minor components should be exported as separate variables.
        os.environ.clear()
        VersionTools.ExportVersionFromChangelogToEnvironment(os.path.join(TestTools.TEST_SAMPLE_FOLDER, 'CHANGELOG.md'), 'VERSION', 'VERSIONMAJOR', 'VERSIONMINOR')
        self.assertEqual(os.environ['VERSION'], '1.0.0')
        self.assertEqual(os.environ['VERSIONMAJOR'], '1')
        self.assertEqual(os.environ['VERSIONMINOR'], '0')
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
6539459 | from flask import Flask, jsonify, make_response
from connector import connector
from sqlapi import sqlapi
from utils import api_response
app = Flask(__name__)
# Mount the connector and SQL API endpoint groups on the application.
app.register_blueprint(connector)
app.register_blueprint(sqlapi)
@app.route("/")
@api_response
def default():
return 'Nothing to see here'
@app.errorhandler(404)
def resource_not_found(e):
    """Serve 404s as a JSON body instead of Flask's default HTML page."""
    payload = jsonify(error='Not found!')
    return make_response(payload, 404)
| StarcoderdataPython |
5010436 | """ This module tests the Fourier implementation.
Author: <NAME>, <EMAIL>
"""
import numpy as np
from pypret.fourier import FourierTransform, Gaussian
from pypret import lib
def test_gaussian_transformation():
    """ This test compares the numerical approximation of the Fourier transform
    to the analytic solution for a Gaussian function. It uses non-centered
    grids and a non-centered Gaussian on purpose.
    """
    # define the grid parameters
    # choose some arbitrary values to break symmetries
    dt = 0.32
    N = 205
    dw = np.pi / (0.5 * N * dt)
    t0 = -(N//2 + 2.1323) * dt
    w0 = -(N//2 - 1.23) * dw
    # and actually create it
    ft = FourierTransform(N, dt, t0=t0, w0=w0)
    # create and calculate a non-centered Gaussian distribution
    gaussian = Gaussian(10 * dt, 0.1 * t0, 0.12 * w0)
    temporal0 = gaussian.temporal(ft.t)
    spectral0 = gaussian.spectral(ft.w)
    # calculate the numerical approximations
    spectral1 = ft.forward(temporal0)
    temporal1 = ft.backward(spectral0)
    temporal_error = lib.nrms(temporal1, temporal0)
    spectral_error = lib.nrms(spectral1, spectral0)
    # calculate the error (actual error depends on the FFT implementation)
    # 1e-14 is near double-precision round-off; assumes an accurate FFT backend.
    assert temporal_error < 1e-14
    assert spectral_error < 1e-14
if __name__ == "__main__":
test_gaussian_transformation()
| StarcoderdataPython |
5156949 | <gh_stars>1-10
import io
import json
import pathlib
import fastavro
import pytest
from freezegun import freeze_time
import datahub.metadata.schema_classes as models
from datahub.cli.json_file import check_mce_file
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.file import iterate_mce_file
from datahub.metadata.schema_classes import MetadataChangeEventClass
from datahub.metadata.schemas import getMetadataChangeEventSchema
from tests.test_helpers import mce_helpers
from tests.test_helpers.click_helpers import run_datahub_cmd
from tests.test_helpers.type_helpers import PytestConfig
FROZEN_TIME = "2021-07-22 18:54:06"
@freeze_time(FROZEN_TIME)
@pytest.mark.parametrize(
    "json_filename",
    [
        # Normal test.
        "tests/unit/serde/test_serde_large.json",
        # Ensure correct representation of chart info's input list.
        "tests/unit/serde/test_serde_chart_snapshot.json",
        # Check usage stats as well.
        "tests/unit/serde/test_serde_usage.json",
        # Profiles with the MetadataChangeProposal format.
        "tests/unit/serde/test_serde_profile.json",
    ],
)
def test_serde_to_json(
    pytestconfig: PytestConfig, tmp_path: pathlib.Path, json_filename: str
) -> None:
    """Round-trip a golden MCE file through the file source/sink and compare
    the emitted output against the golden copy."""
    golden_file = pytestconfig.rootpath / json_filename
    output_filename = "output.json"
    output_file = tmp_path / output_filename
    pipeline = Pipeline.create(
        {
            "source": {"type": "file", "config": {"filename": str(golden_file)}},
            "sink": {"type": "file", "config": {"filename": str(output_file)}},
            "run_id": "serde_test",
        }
    )
    pipeline.run()
    pipeline.raise_from_status()
    mce_helpers.check_golden_file(
        pytestconfig,
        output_path=f"{tmp_path}/{output_filename}",
        golden_path=golden_file,
    )
@pytest.mark.parametrize(
    "json_filename",
    [
        "tests/unit/serde/test_serde_large.json",
        "tests/unit/serde/test_serde_chart_snapshot.json",
    ],
)
@freeze_time(FROZEN_TIME)
def test_serde_to_avro(pytestconfig: PytestConfig, json_filename: str) -> None:
    """The JSON -> MCE -> Avro -> MCE round trip must be lossless."""
    # In this test, we want to read in from JSON -> MCE object.
    # Next we serialize from MCE to Avro and then deserialize back to MCE.
    # Finally, we want to compare the two MCE objects.
    json_path = pytestconfig.rootpath / json_filename
    mces = list(iterate_mce_file(str(json_path)))
    # Serialize to Avro.
    parsed_schema = fastavro.parse_schema(json.loads(getMetadataChangeEventSchema()))
    fo = io.BytesIO()
    out_records = [mce.to_obj(tuples=True) for mce in mces]
    fastavro.writer(fo, parsed_schema, out_records)
    # Deserialized from Avro.
    fo.seek(0)
    in_records = list(fastavro.reader(fo, return_record_name=True))
    in_mces = [
        MetadataChangeEventClass.from_obj(record, tuples=True) for record in in_records
    ]
    # Check diff
    assert len(mces) == len(in_mces)
    for i in range(len(mces)):
        assert mces[i] == in_mces[i]
@pytest.mark.parametrize(
    "json_filename",
    [
        # Normal test.
        "tests/unit/serde/test_serde_large.json",
        # Check for backwards compatability with specifying all union types.
        "tests/unit/serde/test_serde_backwards_compat.json",
        # Usage stats.
        "tests/unit/serde/test_serde_usage.json",
        # Profiles with the MetadataChangeProposal format.
        "tests/unit/serde/test_serde_profile.json",
        # Ensure sample MCE files are valid.
        "examples/mce_files/single_mce.json",
        "examples/mce_files/mce_list.json",
        "examples/mce_files/bootstrap_mce.json",
    ],
)
@freeze_time(FROZEN_TIME)
def test_check_mce_schema(pytestconfig: PytestConfig, json_filename: str) -> None:
    """Every known-good MCE file must pass `datahub check mce-file`."""
    json_file_path = pytestconfig.rootpath / json_filename
    run_datahub_cmd(["check", "mce-file", f"{json_file_path}"])
@pytest.mark.parametrize(
    "json_filename",
    [
        # Extra field.
        "tests/unit/serde/test_serde_extra_field.json",
        # Missing fields.
        "tests/unit/serde/test_serde_missing_field.json",
    ],
)
def test_check_mce_schema_failure(
    pytestconfig: PytestConfig, json_filename: str
) -> None:
    """Malformed MCE files must be rejected by the schema check."""
    json_file_path = pytestconfig.rootpath / json_filename
    with pytest.raises((ValueError, AssertionError)):
        check_mce_file(str(json_file_path))
def test_field_discriminator() -> None:
    """A union field with an explicit discriminator should validate."""
    cost_object = models.CostClass(
        costType=models.CostTypeClass.ORG_COST_TYPE,
        cost=models.CostCostClass(
            fieldDiscriminator=models.CostCostDiscriminatorClass.costCode,
            costCode="sampleCostCode",
        ),
    )
    assert cost_object.validate()
| StarcoderdataPython |
1647664 | """The min_max component."""
from homeassistant.const import Platform
DOMAIN = "min_max"
PLATFORMS = [Platform.SENSOR]
| StarcoderdataPython |
8033801 | <gh_stars>1-10
"""Support Trac's XML-RPC interface."""
from dateutil.parser import parse as dateparse
from . import Trac
from .._xmlrpc import Xmlrpc, MulticallIterator, _Unmarshaller
from ...utc import utc
class _Unmarshaller_UTC(_Unmarshaller):
    """Unmarshaller that assumes datetimes are in UTC."""
    # NOTE(review): this rebinds the base class's dispatch table rather than
    # copying it, so the registration below mutates _Unmarshaller.dispatch
    # globally -- confirm that side effect is intended.
    dispatch = _Unmarshaller.dispatch
    def end_dateTime(self, data):
        # Parse the ISO-8601 payload and normalise it to UTC.
        value = dateparse(data).astimezone(utc)
        self.append(value)
    # Route the XML-RPC dateTime.iso8601 tag to the UTC-aware handler.
    dispatch["dateTime.iso8601"] = end_dateTime
class TracMulticallIterator(MulticallIterator):
    """Iterate over the results of a multicall.
    Extract error message from Trac XML-RPC specific field.
    """
    def handle_error(self, item):
        # Trac places a human-readable message under the '_message' key;
        # fall back to the generic handler when it is absent.
        if '_message' in item:
            raise self.service._service_error_cls(msg=item['_message'])
        else:
            super().handle_error(item)
class TracXmlrpc(Trac, Xmlrpc):
    """Trac service accessed through its XML-RPC interface."""
    _service = 'trac-xmlrpc'
    _multicall_iter = TracMulticallIterator
    def _getparser(self):
        # Parse responses with the UTC-aware unmarshaller defined above.
        u = _Unmarshaller_UTC()
        return super()._getparser(unmarshaller=u)
| StarcoderdataPython |
8135884 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import numpy as np
from pyroofit.models import Gauss, Chebychev
from pyroofit.composites import AddPdf, ProdPdf, Convolution
def get_test_df(size=100):
    """Build a toy DataFrame with two columns of uniform random samples."""
    columns = {name: np.random.random_sample(size) for name in ('mbc1', 'mbc')}
    return pd.DataFrame(columns)
def test_AddPdf():
    """Summing two pdfs should yield an AddPdf wrapping a RooAbsPdf."""
    import ROOT

    frame = get_test_df()
    assert isinstance(frame, pd.DataFrame)

    background = Chebychev(('mbc', 0, 1))
    signal = Gauss(('mbc', 0, 1))
    combined = signal + background

    assert isinstance(combined, AddPdf)
    assert isinstance(combined.roo_pdf, ROOT.RooAbsPdf)
def test_AddPdf_fit():
    """Fitting a summed pdf must not break its AddPdf/RooAbsPdf nature."""
    import ROOT

    frame = get_test_df()
    assert isinstance(frame, pd.DataFrame)

    background = Chebychev(('mbc', 0, 1))
    signal = Gauss(('mbc', 0, 1))
    combined = signal + background
    combined.fit(frame)

    assert isinstance(combined, AddPdf)
    assert isinstance(combined.roo_pdf, ROOT.RooAbsPdf)
def test_ProdPdf():
    """Multiplying two pdfs should yield a ProdPdf wrapping a RooAbsPdf."""
    import ROOT

    frame = get_test_df()
    assert isinstance(frame, pd.DataFrame)

    background = Chebychev(('mbc', 0, 1))
    signal = Gauss(('mbc', 0, 1))
    combined = signal * background

    assert isinstance(combined, ProdPdf)
    assert isinstance(combined.roo_pdf, ROOT.RooAbsPdf)
def test_Convolution():
    """Convolving two pdfs should yield a Convolution wrapping a RooAbsPdf."""
    import ROOT

    frame = get_test_df()
    assert isinstance(frame, pd.DataFrame)

    background = Chebychev(('mbc', 0, 1))
    signal = Gauss(('mbc', 0, 1))
    convolved = Convolution(background, signal)

    assert isinstance(convolved, Convolution)
    assert isinstance(convolved.roo_pdf, ROOT.RooAbsPdf)
| StarcoderdataPython |
1679803 | import threading
import time
import json
def writeDataToLog():
    """Append the latest reading from last.json onto the history in log.json.

    Both files are expected in the current working directory.  The original
    implementation opened the files without ever closing them; ``with``
    blocks (and ``json.load`` instead of ``loads(read())``) fix the handle
    leak without changing behaviour.
    """
    with open("last.json") as last_file:
        data = json.load(last_file)
    with open("log.json") as log_file:
        log = json.load(log_file)
    log['log'].append(data)
    with open('log.json', 'w') as outfile:
        json.dump(log, outfile)
class checkWeight():
    """Background poller that appends a log entry whenever the weight stored
    in last.json changes.

    NOTE(review): ``run`` loops forever and the thread is not a daemon, so it
    keeps the process alive -- confirm that is intended.
    """
    def __init__(self, interval=1):
        # Seconds to sleep between polls of last.json.
        self.interval = interval
        thread = threading.Thread(target=self.run, args=())
        thread.start()
    def run(self):
        """Poll last.json and record every change to the Weight field."""
        curentData = {"Weight": 0}
        while True:
            # Read the most recent sample, closing the handle promptly
            # (the original leaked a file descriptor on every poll).
            with open("last.json") as last_file:
                data = json.load(last_file)
            if data['Weight'] != curentData['Weight']:
                writeDataToLog()
                curentData = data
                # Parenthesised print works on both Python 2 and 3; the
                # original bare print statement is a SyntaxError under py3.
                print("changed")
            time.sleep(self.interval)
| StarcoderdataPython |
9638531 | <filename>answers/Drish-xD/Day-11/Question-1.py
# Program to Check the string is anagram, which is in turn a palindrome :)
def palindrome(string):
    """Return "YES :)" if *string* can be rearranged into a palindrome,
    otherwise "NO :(".

    A string is an anagram of a palindrome exactly when at most one
    character occurs an odd number of times.  The original tracked odd
    characters with list append/remove (O(n**2)); a Counter does the same
    check in O(n) with identical results, including the empty string.
    """
    from collections import Counter
    odd_counts = sum(1 for count in Counter(string).values() if count % 2)
    return "YES :)" if odd_counts <= 1 else "NO :("
# Interactive entry point: read a string and report the palindrome check.
st = input("Enter the string :")
print(palindrome(st))
| StarcoderdataPython |
5037928 | import re
import sys
from git import Repo
from utils import Mbox, get_issue, generate_branch_name
from constants import ISSUE_URL, WORKING_DIR, BASE_BRANCH_NAME
from startIssue import start_issue
from finishIssue import finish_issue
from cleanupIssue import cleanup_issue
def router(issue_url: str, working_dir: str = WORKING_DIR):
    """Dispatch a GitHub issue URL to the start/finish/cleanup workflow.

    Closed issues are cleaned up; open issues either get finished (when the
    issue branch is already checked out) or started.  Validation failures
    raise RuntimeError, which the ``__main__`` handler surfaces in a
    message box.  The original used ``assert`` for these checks, which is
    stripped under ``python -O`` and crashed with AttributeError when the
    URL regex failed to match.
    """
    issue = get_issue(issue_url)
    branch_name = generate_branch_name(issue)
    # make sure the working_dir is the repo that have this issue
    repo_name = re.match(r'https://github.com/(\S+)/(\S+)/issues/\d+',
                         issue_url)
    if repo_name is None:
        raise RuntimeError('Not a valid GitHub issue URL: %s' % issue_url)
    if repo_name.group(2) not in working_dir:
        raise RuntimeError('working_dir does not match the issue repository')
    # is a valid repo
    repo = Repo(working_dir)
    if repo.bare:
        raise RuntimeError('Repository at %s is bare' % working_dir)
    # switch logic
    if issue['state'] == 'closed':
        cleanup_issue(issue, repo, BASE_BRANCH_NAME)
    else:
        if issue['state'] != 'open':
            raise RuntimeError('Unexpected issue state: %s' % issue['state'])
        if repo.active_branch.name == branch_name:
            finish_issue(issue, repo, repo_name)
        else:
            start_issue(issue, repo, BASE_BRANCH_NAME)
if __name__ == '__main__':
    # Surface failures in a message box instead of a traceback, since this
    # script is typically launched without a console.
    try:
        router(ISSUE_URL, WORKING_DIR)
    except (RuntimeError, AssertionError) as e:
        Mbox(type(e), str(e))
    except Exception as e:
        Mbox('Unexpected Error', str(e))
| StarcoderdataPython |
76052 | from datetime import *
'''Interactive script: compute a person's approximate age from their date of birth.'''
try:
    # Gather the date-of-birth components from the user.
    birth_day = int(input("enter day as number "))
    birth_month = int(input("enter month as number (Eg: 1-for Jan 2-for feb) "))
    birth_year = int(input("enter year Eg-2004,2018 "))
    birth_date = date(birth_year, birth_month, birth_day)
    print(birth_date)
    today = datetime.now().date()
    # 365.24 days approximates one calendar year, leap years included.
    age = (today - birth_date) / timedelta(days=365.24)
    print(round(age, 2))
except ValueError as e:
    print("dob issue ", e)
11391883 | <filename>spotty/providers/aws/cfn_templates/instance_profile/template.py
import os
import chevron
def prepare_instance_profile_template(managed_policy_arns: list):
    """Render the instance-profile CloudFormation template with the given
    managed policy ARNs substituted in."""
    template_path = os.path.join(os.path.dirname(__file__), 'data', 'template.yaml')
    with open(template_path) as template_file:
        template_source = template_file.read()

    render_params = {
        'HAS_MANAGED_POLICIES': len(managed_policy_arns),
        'MANAGED_POLICY_ARNS': [{'MANAGED_POLICY_ARN': arn} for arn in managed_policy_arns],
    }

    return chevron.render(template_source, render_params)
| StarcoderdataPython |
3598278 | """
CloudFormation Lint Rules for Serverless applications
"""
| StarcoderdataPython |
3568251 | <filename>test/programytest/parser/base.py
import unittest
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.context import ClientContext
from programytest.client import TestClient
from programytest.custom import CustomAssertions
class TestBot(Bot):
    """Bot stub whose ask_question always returns a preset response."""
    def __init__(self, bot_config, client):
        Bot.__init__(self, bot_config, client)
        # Canned answer returned by ask_question regardless of the question.
        self._response = "Unknown"
    @property
    def response(self):
        return self._response
    @response.setter
    def response(self, text):
        self._response = text
    def ask_question(self, clientid, text, srai=False):
        # A real bot would use clientid/text/srai; the stub ignores them all.
        del clientid
        return self._response
class ParserTestsBaseClass(unittest.TestCase, CustomAssertions):
    """Shared setUp wiring a TestClient/TestBot into a client context."""
    def setUp(self):
        client = TestClient()
        client.add_default_stores()
        self._client_context = ClientContext(client, "testid")
        # Replace the real bot with the stub and expose its brain directly.
        self._client_context.bot = TestBot(BotConfiguration(), client)
        self._client_context.brain = self._client_context.bot.brain
| StarcoderdataPython |
4999954 | """Kata: List Filering
#1 Best practice solution
def filter_list(l):
'return a new list with the strings filtered out'
return [i for i in l if not isinstance(i, str)]
"""
def filter_list(given_list):
    """Return a new list keeping only the integer elements.

    Uses the idiomatic ``isinstance`` check instead of ``type(i) is int``;
    ``bool`` is excluded explicitly (it subclasses ``int``), preserving the
    original behaviour while also accepting other int subclasses.
    """
    return [item for item in given_list
            if isinstance(item, int) and not isinstance(item, bool)]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.