index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
13,200 | 9645174931e3b0f375243fe065b3f8dc8a688650 | import os
from pepys_import.core.formats.rep_line import REPLine
from pepys_import.core.formats import unit_registry
from pepys_import.core.validators import constants
from pepys_import.file.importer import Importer
class ReplayImporter(Importer):
    """Importer for Replay (.rep / .dsf) track files.

    Each non-comment line of the file is parsed as a REPLine and stored
    as platform / sensor / state records via the supplied data store.
    """

    def __init__(
        self,
        name="Replay File Format Importer",
        validation_level=constants.ENHANCED_LEVEL,
        short_name="REP Importer",
        separator=" ",
    ):
        super().__init__(name, validation_level, short_name)
        self.separator = separator  # field separator handed to REPLine
        self.text_label = None
        self.depth = 0.0
        self.errors = list()  # accumulates per-line parse errors

    def can_load_this_type(self, suffix):
        # Accepts .rep and .dsf extensions, case-insensitively.
        return suffix.upper() == ".REP" or suffix.upper() == ".DSF"

    def can_load_this_filename(self, filename):
        # Any filename is acceptable; only the extension is filtered.
        return True

    def can_load_this_header(self, first_line):
        return True

    def can_load_this_file(self, file_contents):
        return True

    def _load_this_file(self, data_store, path, file_object, datafile, change_id):
        """Parse every line of *file_object* and persist the resulting states.

        NOTE(review): uses self.error_type and self.prev_location, which are
        not set in __init__ here — presumably initialised by the Importer
        base class; confirm.
        """
        for line_number, line in enumerate(file_object.lines(), 1):
            if line.text.startswith(";"):
                # Lines starting with ';' are REP comments — skip them.
                continue
            else:
                # create state, to store the data
                rep_line = REPLine(line_number, line, self.separator)
                # Store parsing errors in self.errors list
                if not rep_line.parse(self.errors, self.error_type):
                    continue

                # and finally store it
                vessel_name = rep_line.get_platform()
                # Platform defaults (nationality/type/privacy) are hard-coded.
                platform = data_store.get_platform(
                    platform_name=vessel_name,
                    nationality="UK",
                    platform_type="Fisher",
                    privacy="Public",
                    change_id=change_id,
                )
                sensor_type = data_store.add_to_sensor_types(
                    "_GPS", change_id=change_id
                )
                privacy = data_store.missing_data_resolver.resolve_privacy(
                    data_store, change_id
                )
                sensor = platform.get_sensor(
                    data_store=data_store,
                    sensor_name=platform.name,
                    sensor_type=sensor_type,
                    privacy=privacy.name,
                    change_id=change_id,
                )
                state = datafile.create_state(
                    data_store, platform, sensor, rep_line.timestamp, self.short_name
                )
                # REP depth is positive-down; elevation is positive-up metres.
                state.elevation = (-1 * rep_line.depth) * unit_registry.metre
                state.heading = rep_line.heading
                state.speed = rep_line.speed
                state.privacy = privacy.privacy_id
                # Link consecutive fixes of the same vessel for track building.
                if vessel_name in self.prev_location:
                    state.prev_location = self.prev_location[vessel_name]
                state.location = rep_line.get_location()
                self.prev_location[vessel_name] = state.location

    @staticmethod
    def degrees_for(degs, mins, secs, hemi: str):
        """Convert degrees/minutes/seconds + hemisphere to signed decimal degrees.

        South and West hemispheres produce negative values.
        """
        if hemi.upper() == "S" or hemi.upper() == "W":
            factor = -1
        else:
            factor = 1
        return factor * (float(degs) + float(mins) / 60 + float(secs) / 60 / 60)
|
13,201 | 097f5ec9fc7179a40c6196832142a16e06f452a2 | import web
import application.models.model_datos as model_datos
render = web.template.render('application/views/', base='master2')


class Insert:
    """web.py handler for the product-insert form."""

    def __init__(self):
        pass

    def GET(self):
        # Show the empty insert form.
        return render.insert()

    def POST(self):
        # Read the submitted fields, persist them, then redirect to admin.
        form = web.input()
        fields = [form[name] for name in ('id', 'nombre', 'precio', 'existencia')]
        model_datos.insert_datos(*fields)
        raise web.seeother('/admin')
|
13,202 | 1c125c576daa4508ec8a97bb2a3f12e4ff96bbe5 | #from functions_ import addNumbers,subtractNumbers
#addNumbers(12,45)
#from function3 import add3numbers
#add3numbers(12,56,99)
#function that reverses all the words in a sentence which contains a particular letter.
def reverse(string1, b):
    """Reverse every word of *string1* that contains the letter *b*.

    Words are split on single spaces; words without *b* pass through
    unchanged, and a sentence with no matches is returned as-is.

    BUG FIX: the original called str.replace on the whole sentence inside
    the loop, always starting from the original string — so only the last
    matching word ended up reversed, and it raised UnboundLocalError when
    no word matched at all.
    """
    words = string1.split(' ')
    return ' '.join(w[::-1] if b in w else w for w in words)


print(reverse("word searches are super fun", "s"))
def reverse_begin_with(string3, x):
    """Reverse every word of *string3* that starts with the letter *x*.

    BUG FIX: the original rebound the loop variable `words` to a reversed
    word string inside the loop, destroying the word list; the final join
    then spread the characters of the last processed word. Empty words are
    also handled now (startswith instead of indexing [0]).
    """
    result = []
    for word in string3.split(' '):
        result.append(word[::-1] if word.startswith(x) else word)
    return " ".join(result)


print(reverse_begin_with("word searches are super fun", "s"))
"""def double_letters(word):
for i in range(len(word)-1):
if word[i] == word[i+1]:
return True
else:
return False
print(double_letters("loop"))"""
def index_of_caps(word):
    """Return the indices of all uppercase characters in *word*.

    BUG FIX: the original used word.index(ch), which always returns the
    FIRST occurrence, so repeated capitals (e.g. "ABA") produced wrong,
    duplicated indices. enumerate yields each true position.
    """
    return [i for i, ch in enumerate(word) if ch.isupper()]


print(index_of_caps("eDabiT"))
|
13,203 | f099cec6fc072abb093d1e9d4bcec4b5af685868 | from django.conf.urls import url, include
from product.views.v1 import product as views
# URL routes for v1 of the product API:
#   products/        -> ProductListAPIView  (name: product-list)
#   products/<pk>/   -> ProductRetrieveAPIView, numeric pk (name: product-detail)
product_urls = [
    url(
        r'^products/', include(
            [
                url(
                    r'^$',
                    views.ProductListAPIView.as_view(),
                    name='product-list'
                ),
                url(
                    r'^(?P<pk>\d+)/$',
                    views.ProductRetrieveAPIView.as_view(),
                    name='product-detail'
                )
            ]
        )
    )
]
|
13,204 | cf9c07fed57236230f2357c94715e121083b6510 | from flask import render_template, make_response
from flask_restful import Resource
from flask_login import login_required
class Json(Resource):
    """Serve the rendered ``json.html`` page to logged-in users."""

    @login_required
    def get(self):
        # Force an HTML content type; flask-restful defaults to JSON bodies.
        page = render_template('json.html')
        return make_response(page, 200, {'Content-Type': 'text/html'})
|
13,205 | 3b673dfafbee9c6a8fa1c176228d4e4cffd04330 | ''' Initial brute force solution:
Runtime = 40ms, faster than 95.69%
Memory = 12.8MB, less than 50.81%
'''
class Solution(object):
    def isPalindrome(self, x):
        """Return True when str(x) reads the same forwards and backwards."""
        text = str(x)
        return text == text[::-1]
class Solution:
    def findDuplicate(self, nums: "List[int]") -> int:
        """Return the first value whose second occurrence appears while
        scanning *nums* left to right (None if there is no duplicate).

        FIXES: the unquoted ``List`` annotation referenced an undefined
        name (missing ``from typing import List``), so the class failed to
        import; the annotation is now lazy. A set also replaces the dict
        that was used purely for membership.
        """
        seen = set()
        for value in nums:
            if value in seen:
                return value
            seen.add(value)
|
13,207 | 188fd3d9658c2791edd606f269a310011c19cee0 | """Tools so trivial that tracebacks should not descend into them
We define the ``__unittest`` symbol in their module namespace so unittest will
skip them when printing tracebacks, just as it does for their corresponding
methods in ``unittest`` proper.
"""
import re
import unittest
__all__ = ['ok_', 'eq_']
# Use the same flag as unittest itself to prevent descent into these functions:
__unittest = 1
def ok_(expr, msg=None):
    """Shorthand for assert. Saves 3 whole characters!
    """
    if expr:
        return
    raise AssertionError(msg)
def eq_(a, b, msg=None):
    """Shorthand for 'assert a == b, "%r != %r" % (a, b)
    """
    if a == b:
        return
    raise AssertionError(msg or "%r != %r" % (a, b))
#
# Expose assert* from unittest.TestCase
# - give them pep8 style names
#
# Matches each capital letter so camelCase names can be rewritten.
caps = re.compile('([A-Z])')


def pep8(name):
    """Convert a camelCase *name* to pep8_style (assertEqual -> assert_equal)."""
    return caps.sub(lambda match: '_' + match.group(1).lower(), name)
class Dummy(unittest.TestCase):
    """Throwaway TestCase subclass used only to borrow its assert* methods."""
    def nop():
        # Never actually run; exists only so Dummy('nop') can be constructed.
        pass

# Bind every TestCase.assert* method (bound to the dummy instance) into this
# module under a pep8-style name, and export each via __all__.
_t = Dummy('nop')

for at in [ at for at in dir(_t)
            if at.startswith('assert') and not '_' in at ]:
    pepd = pep8(at)
    vars()[pepd] = getattr(_t, at)
    __all__.append(pepd)

# Clean the bootstrap helpers out of the module namespace.
del Dummy
del _t
del pep8
|
13,208 | ea05f13b8f0ddd09371200765ab76ae6c7f7455a | from __future__ import absolute_import
import functools
from web3.iban import Iban
from web3.utils.string import (
force_text,
coerce_args_to_text,
coerce_return_to_text,
)
from web3.utils.address import (
is_address,
is_strict_address,
)
from web3.utils.types import (
is_array,
is_string,
)
from web3.utils.formatting import (
is_0x_prefixed,
)
from web3.utils.encoding import (
to_hex,
encode_hex,
decode_hex,
from_decimal,
to_decimal,
)
from web3.utils.functional import (
identity,
compose,
)
import web3.utils.config as config
def isPredefinedBlockNumber(blockNumber):
    """Return True when *blockNumber* is one of the named block aliases."""
    if is_string(blockNumber):
        return force_text(blockNumber) in {"latest", "pending", "earliest"}
    return False
def inputDefaultBlockNumberFormatter(blockNumber=None):
    """Format *blockNumber*, falling back to the configured default block."""
    if blockNumber:
        return inputBlockNumberFormatter(blockNumber)
    return config.defaultBlock
def inputBlockNumberFormatter(blockNumber):
    """Normalise a block number: falsy -> None, named alias -> as-is, else hex."""
    if not blockNumber:
        return None
    return blockNumber if isPredefinedBlockNumber(blockNumber) else to_hex(blockNumber)
@coerce_args_to_text
@coerce_return_to_text
def input_call_formatter(eth, txn):
    """Apply per-key input formatters to an eth_call transaction dict.

    Missing keys fall back to ``defaults`` (currently only ``from``, taken
    from eth.defaultAccount); keys with no formatter pass through identity.
    NOTE(review): body is identical to input_transaction_formatter —
    candidate for de-duplication.
    """
    defaults = {
        'from': eth.defaultAccount,
    }
    formatters = {
        'from': input_address_formatter,
        'to': input_address_formatter,
    }
    return {
        key: formatters.get(key, identity)(txn.get(key, defaults.get(key)))
        for key in set(tuple(txn.keys()) + tuple(defaults.keys()))
    }
@coerce_args_to_text
@coerce_return_to_text
def input_transaction_formatter(eth, txn):
    """Apply per-key input formatters to an outgoing transaction dict.

    ``from`` defaults to eth.defaultAccount; ``from``/``to`` are run
    through the address formatter, every other key passes through identity.
    """
    defaults = {
        'from': eth.defaultAccount,
    }
    formatters = {
        'from': input_address_formatter,
        'to': input_address_formatter,
    }
    return {
        key: formatters.get(key, identity)(txn.get(key, defaults.get(key)))
        for key in set(tuple(txn.keys()) + tuple(defaults.keys()))
    }
@coerce_args_to_text
@coerce_return_to_text
def output_transaction_formatter(txn):
    """Convert the numeric fields of an RPC transaction from hex to int.

    blockNumber / transactionIndex may be None for pending transactions
    and are preserved as None; unknown keys pass through unchanged.
    """
    formatters = {
        'blockNumber': lambda v: None if v is None else to_decimal(v),
        'transactionIndex': lambda v: None if v is None else to_decimal(v),
        'nonce': to_decimal,
        'gas': to_decimal,
        'gasPrice': to_decimal,
        'value': to_decimal,
    }
    return {
        key: formatters.get(key, identity)(value)
        for key, value in txn.items()
    }
@coerce_args_to_text
@coerce_return_to_text
def output_transaction_receipt_formatter(receipt):
    """
    Formats the output of a transaction receipt to its proper values
    """
    if receipt is None:
        return None

    # Formats each log entry and materialises the map into a list.
    logs_formatter = compose(functools.partial(map, outputLogFormatter), list)

    formatters = {
        'blockNumber': to_decimal,
        'transactionIndex': to_decimal,
        'cumulativeGasUsed': to_decimal,
        'gasUsed': to_decimal,
        # Only format logs when the value is actually a list.
        'logs': lambda l: logs_formatter(l) if is_array(l) else l,
    }
    return {
        key: formatters.get(key, identity)(value)
        for key, value in receipt.items()
    }
@coerce_return_to_text
def outputBlockFormatter(block):
    """
    Formats the output of a block to its proper values.

    Converts the numeric fields from hex to integers in place and formats
    any full (non-hash-string) transaction objects in block["transactions"].
    """
    # Transform to number
    block["gasLimit"] = to_decimal(block["gasLimit"])
    block["gasUsed"] = to_decimal(block["gasUsed"])
    block["size"] = to_decimal(block["size"])
    block["timestamp"] = to_decimal(block["timestamp"])

    if block.get("number"):
        block["number"] = to_decimal(block["number"])

    block["difficulty"] = to_decimal(block["difficulty"])
    block["totalDifficulty"] = to_decimal(block["totalDifficulty"])

    if is_array(block.get("transactions")):
        # BUG FIX: the original rebound the loop variable, so the formatted
        # transaction was discarded; write the result back into the list.
        for index, item in enumerate(block["transactions"]):
            if not is_string(item):
                block["transactions"][index] = output_transaction_formatter(item)

    return block
@coerce_return_to_text
def outputLogFormatter(log):
    """
    Formats the output of a log
    """
    # NOTE(review): truthiness checks skip 0 as well as None/missing, so a
    # legitimate index of 0 would be left unconverted — confirm intended.
    if log.get("blockNumber"):
        log["blockNumber"] = to_decimal(log["blockNumber"])
    if log.get("transactionIndex"):
        log["transactionIndex"] = to_decimal(log["transactionIndex"])
    if log.get("logIndex"):
        log["logIndex"] = to_decimal(log["logIndex"])

    return log
@coerce_return_to_text
def inputPostFormatter(post):
    """
    Formats the input of a whisper post and converts all values to HEX
    """
    post["ttl"] = from_decimal(post["ttl"])
    post["workToProve"] = from_decimal(post.get("workToProve", 0))
    post["priority"] = from_decimal(post["priority"])

    # Normalise topics to a list, then hex-encode any that are not already
    # 0x-prefixed.
    if not is_array(post.get("topics")):
        post["topics"] = [post["topics"]] if post.get("topics") else []

    post["topics"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)
                      for topic in post["topics"]]

    return post
@coerce_return_to_text
def outputPostFormatter(post):
    """
    Formats the output of a received post message
    """
    post["expiry"] = to_decimal(post["expiry"])
    post["sent"] = to_decimal(post["sent"])
    post["ttl"] = to_decimal(post["ttl"])
    post["workProved"] = to_decimal(post["workProved"])

    # Topics arrive hex-encoded; decode each one (defaulting to an empty list).
    if not post.get("topics"):
        post["topics"] = []

    post["topics"] = [decode_hex(topic) for topic in post["topics"]]

    return post
def input_address_formatter(addr):
    """Normalise *addr* (direct IBAN or hex address) to 0x-prefixed form.

    Raises ValueError when the value is not a recognisable address.
    """
    iban = Iban(addr)
    if iban.isValid() and iban.isDirect():
        return "0x" + iban.address()

    # Strict addresses are already prefixed; plain ones get the prefix added.
    for predicate, prefix in ((is_strict_address, ""), (is_address, "0x")):
        if predicate(addr):
            return prefix + addr

    raise ValueError("invalid address")
def outputSyncingFormatter(result):
    """Convert the syncing-status block counters from hex to integers."""
    for key in ("startingBlock", "currentBlock", "highestBlock"):
        result[key] = to_decimal(result[key])
    return result
def transaction_pool_formatter(value, txn_formatter):
    """Format both txpool sections (pending/queued).

    For every sender, converts the nonce keys to integers and applies
    *txn_formatter* to each transaction in the nonce's list.
    """
    def format_pool(pool):
        # {sender: {nonce: [txn, ...]}} with decoded nonces/txns.
        return {
            sender: {
                to_decimal(nonce): [txn_formatter(txn) for txn in txns]
                for nonce, txns in by_nonce.items()
            }
            for sender, by_nonce in pool.items()
        }

    return {
        'pending': format_pool(value.get('pending', {})),
        'queued': format_pool(value.get('queued', {})),
    }
def transaction_pool_content_formatter(value):
    """Fully format txpool contents using the transaction output formatter."""
    formatter = output_transaction_formatter
    return transaction_pool_formatter(value, formatter)
def transaction_pool_inspect_formatter(value):
    """Format txpool inspect output, leaving the txn summaries untouched."""
    passthrough = identity
    return transaction_pool_formatter(value, passthrough)
|
13,209 | 4b4950825db71cc82863c85d6f7593734f58208a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2020 - 2021 Louis Richard
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so.
"""disp_surf_calc.py
@author: Louis Richard
"""
import itertools
import numpy as np
def _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i):
    """Cold-plasma dielectric tensor components in Swanson's S/D/P notation.

    Frequencies are normalised to the electron gyrofrequency (so a bare 1
    stands in where wc_e would appear); kc_ is |k|c/w_c.
    """
    # The elements of the dielectric tensor, using Swansons notation
    diel_s = 1 - wp_e ** 2 / (w_final ** 2 - 1) - wp_i ** 2 / (
            w_final ** 2 - wc_i ** 2)
    diel_d = -wp_e ** 2 / (w_final * (w_final ** 2 - 1))
    diel_d += wc_i * wp_i ** 2 / (w_final * (w_final ** 2 - wc_i ** 2))
    diel_p = 1 - (wp_e ** 2 + wp_i ** 2) / w_final ** 2

    # Refractive index squared, then the rotated tensor elements.
    n2_ = kc_ ** 2 / w_final ** 2

    diel_xx = diel_s - n2_ * np.cos(theta_) ** 2
    diel_xy = -1j * diel_d
    diel_xz = n2_ * np.cos(theta_) * np.sin(theta_)
    diel_yy = diel_s - n2_
    diel_zz = diel_p - n2_ * np.sin(theta_) ** 2

    return diel_xx, diel_xy, diel_xz, diel_yy, diel_zz
def _calc_e(diel_tensor):
    """Wave electric-field components (normalised so E_z = 1), plus the
    perpendicular/total amplitudes and the polarisation measure e_pol."""
    _, diel_xy, diel_xz, diel_yy, diel_zz = diel_tensor

    e_x = -diel_zz / diel_xz
    e_y = diel_xy / diel_yy * e_x
    e_z = np.ones(e_y.shape)

    e_per = np.sqrt(e_x * np.conj(e_x) + e_y * np.conj(e_y))
    e_tot = np.sqrt(e_x * np.conj(e_x) + e_y * np.conj(e_y) + e_z ** 2)
    # Ellipticity of the transverse field (sign gives rotation sense).
    e_pol = -2 * np.imag(e_x * np.conj(e_y)) / e_per ** 2

    return e_x, e_y, e_z, e_per, e_tot, e_pol
def _calc_b(kc_x_mat, kc_z_mat, w_final, e_x, e_y, e_z):
    """Wave magnetic field B = (k x E) / w in normalised units, plus
    parallel/perpendicular/total amplitudes and the polarisation b_pol."""
    b_x = -kc_z_mat * e_y / w_final
    b_y = (kc_z_mat * e_x - kc_x_mat * e_z) / w_final
    b_z = kc_x_mat * e_y / w_final

    b_par = np.sqrt(b_z * np.conj(b_z))
    b_per = np.sqrt(b_x * np.conj(b_x) + b_y * np.conj(b_y))
    # Ellipticity of the transverse magnetic field.
    b_pol = -2 * np.imag(b_x * np.conj(b_y)) / b_per ** 2
    b_tot = np.sqrt(
        b_x * np.conj(b_x) + b_y * np.conj(b_y) + b_z * np.conj(b_z))

    return b_x, b_y, b_z, b_par, b_per, b_pol, b_tot
def _calc_s(e_x, e_y, e_z, b_x, b_y, b_z):
    """Poynting flux S = E x conj(B): parallel component and magnitude."""
    # Poynting flux
    s_x = e_y * np.conj(b_z) - e_z * np.conj(b_y)
    s_y = e_z * np.conj(b_x) - e_x * np.conj(b_z)
    s_z = e_x * np.conj(b_y) - e_y * np.conj(b_x)

    s_par = np.abs(s_z)
    s_tot = np.sqrt(s_x * np.conj(s_x) + s_y * np.conj(s_y)
                    + s_z * np.conj(s_z))

    return s_par, s_tot
def _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot):
n_e = wp_e ** 2
en_e_n = en_e * n_e
en_i_n = en_i * n_e
en_efield = 0.5 * e_tot ** 2
en_bfield = 0.5 * b_tot ** 2
ratio_part_field = (en_e_n + en_i_n) / (en_efield + en_bfield)
return ratio_part_field
def _calc_continuity(kc_x_mat, kc_z_mat, w_final, v_ex, v_ez, v_ix, v_iz):
dn_e_n = (kc_x_mat * v_ex + kc_z_mat * v_ez) / w_final
dn_e_n = np.sqrt(dn_e_n * np.conj(dn_e_n))
dn_i_n = (kc_x_mat * v_ix + kc_z_mat * v_iz) / w_final
dn_i_n = np.sqrt(dn_i_n * np.conj(dn_i_n))
dne_dni = dn_e_n / dn_i_n
return dn_e_n, dn_i_n, dne_dni
def _calc_vei(m_i, wc_i, w_final, e_x, e_y, e_z):
    """Electron and ion fluid velocities from the linearised momentum
    equation, in normalised units (q_e = -1, q_i = 1, m_e = 1, wc_e = 1)."""
    q_e, q_i, m_e, wc_e = [-1, 1, 1, 1]

    v_ex = 1j * q_e * (w_final * e_x - 1j * wc_e * e_y)
    v_ex /= m_e * (w_final ** 2 - wc_e ** 2)
    v_ey = 1j * q_e * (1j * wc_e * e_x + w_final * e_y)
    v_ey /= m_e * (w_final ** 2 - wc_e ** 2)
    v_ez = 1j * q_e * e_z / (m_e * w_final)

    # Ions rotate the opposite way, hence the opposite signs on wc_i terms.
    v_ix = 1j * q_i * (w_final * e_x + 1j * wc_i * e_y)
    v_ix /= m_i * (w_final ** 2 - wc_i ** 2)
    v_iy = 1j * q_i * (-1j * wc_i * e_x + w_final * e_y)
    v_iy /= m_i * (w_final ** 2 - wc_i ** 2)
    v_iz = 1j * q_i * e_z / (m_i * w_final)

    return v_ex, v_ey, v_ez, v_ix, v_iy, v_iz
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):
    r"""Calculate the cold plasma dispersion surfaces according to equation
    2.64 in Plasma Waves by Swanson (2nd ed.)

    Parameters
    ----------
    kc_x_max : float
        Max value of k_perpendicular*c/w_c.
    kc_z_max : float
        Max value of k_parallel*c/w_c.
    m_i : float
        Ion mass in terms of electron masses.
    wp_e : float
        Electron plasma frequency in terms of electron gyro frequency.

    Returns
    -------
    kx_ : ndarray
        kperpandicular*c/w_c meshgrid
    kz_ : ndarray
        kparallel*c/w_c meshgrid
    wf_ : ndarray
        Dispersion surfaces.
    extra_param : dict
        Extra parameters to plot.
    """
    # Make vectors of the wave numbers
    kc_z = np.linspace(1e-6, kc_z_max, 35)
    kc_x = np.linspace(1e-6, kc_x_max, 35)

    # Turn those vectors into matrices
    kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)

    # Find some of the numbers that appear later in the calculations
    kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2)  # Absolute value of k
    theta_ = np.arctan2(kc_x_mat, kc_z_mat)  # The angle between k and B
    wc_i = 1 / m_i  # The ion gyro frequency
    wp_i = wp_e / np.sqrt(m_i)  # The ion plasma frequency
    wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2)  # The total plasma frequency

    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # For every k_perp and k_par, turn the dispersion relation into a
    # polynomial equation and solve it.
    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # The polynomial coefficients are calculated
    pol_koeff_8 = -2 * kc_ ** 2
    pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)
    pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)
    pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2
    pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)
    pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2
    pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (
            1 + np.cos(theta_) ** 2)
    pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2
    pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(
        theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))
    pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (
            1 + np.cos(theta_) ** 2)
    pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2

    # Ten roots (five surfaces and their negatives) per grid point.
    w_final = np.zeros((10, len(kc_z), len(kc_x)))

    # For each k, solve the equation
    for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):
        disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,
                           pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],
                           0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]
        # theoretically should be real (A. Tjulin)
        w_temp = np.real(np.roots(disp_polynomial))
        # We need to sort the answers to get nice surfaces.
        w_final[:, k_z, k_x] = np.sort(w_temp)

    # Phase velocity (in units of c) and its ratio to the Alfven speed.
    n2_ = kc_ ** 2 / w_final ** 2
    v_ph_c = np.sqrt(1. / n2_)
    va_c = 1 / (wp_e * np.sqrt(m_i))
    v_ph_va = v_ph_c / va_c

    # Wave field components from the dielectric tensor.
    diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)

    e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)
    e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_

    b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,
                                                        w_final, e_x, e_y, e_z)

    # Group velocity via finite differences of the surfaces.
    # NOTE(review): kc_x_mat[1] / kc_z_mat[1] are full grid ROWS, not scalar
    # spacings (kc_x[1]-kc_x[0]) — confirm this is the intended divisor.
    dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]
    dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]
    dw_x[:, :, 1:] = np.diff(w_final, axis=2)
    dw_z[:, 1:, :] = np.diff(w_final, axis=1)
    v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]

    s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)

    # Compute ion and electron velocities
    v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,
                                                   e_x, e_y, e_z)

    # Ratio of parallel and perpendicular to B speed
    vepar_perp = v_ez * np.conj(v_ez)
    vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))
    vipar_perp = v_iz * np.conj(v_iz)
    vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))

    # Total particle speeds
    v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)
    v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)

    # Ion and electron energies
    m_e = -1
    en_e = 0.5 * m_e * v_e2
    en_i = 0.5 * m_i * v_i2

    # Ratio of particle and field energy densities
    ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)

    # Continuity equation
    dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,
                                               v_ex, v_ez, v_ix, v_iz)

    dn_e_n_db_b = dn_e_n / b_tot
    dn_i_n_db_b = dn_i_n / b_tot

    dn_e_n_dbpar_b = dn_e_n / b_par
    dn_i_n_dbpar_b = dn_i_n / b_par

    dn_e = dn_e_n * wp_e ** 2
    k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat
    k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))

    # Build output dict
    # NOTE(review): the keys "(dn_e/n)/ (dB/B)" (with a space) and
    # "(dn_e/n)/(dB/B)" are distinct entries holding different quantities;
    # the latter is dn_e / |k.E| — confirm the labels are as intended.
    extra_param = {"Degree of electromagnetism": np.log10(b_tot / e_tot),
                   "Degree of longitudinality": np.abs(e_par) / e_tot,
                   "Degree of parallelity E": e_z / e_tot,
                   "Degree of parallelity B": np.sqrt(
                       b_z * np.conj(b_z)) / b_tot,
                   "Ellipticity E": e_pol, "Ellipticity B": b_pol,
                   "E_part/E_field": np.log10(ratio_part_field),
                   "v_g": np.sqrt(v_x ** 2 + v_z ** 2),
                   "v_ph/v_a": np.log10(v_ph_va),
                   "E_e/E_i": np.log10(en_e / en_i),
                   "v_e/v_i": np.log10(np.sqrt(v_e2 / v_i2)),
                   "v_epara/v_eperp": np.log10(vepar_perp),
                   "v_ipara/v_iperp": np.log10(vipar_perp),
                   "dn_e/dn_i": np.log10(dne_dni),
                   "(dn_e/n)/ (dB/B)": np.log10(dn_e_n_db_b),
                   "(dn_i/n)/(dB/B)": np.log10(dn_i_n_db_b),
                   "(dn_i/n)/(dBpar/B)": np.log10(dn_i_n_dbpar_b),
                   "(dn_e/n)/(dB/B)": np.log10(dn_e / k_dot_e),
                   "(dn_e/n)/(dBpar /B)": np.log10(dn_e_n_dbpar_b),
                   " Spar/Stot": s_par / s_tot}

    # Transpose everything so the perpendicular axis comes first.
    for k, v in zip(extra_param.keys(), extra_param.values()):
        extra_param[k] = np.transpose(np.real(v), [0, 2, 1])

    kx_ = np.transpose(kc_x_mat)
    kz_ = np.transpose(kc_z_mat)
    wf_ = np.transpose(w_final, [0, 2, 1])

    return kx_, kz_, wf_, extra_param
|
13,210 | 136949a39d222885c706c82916daad924d5d6826 | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from matplotlib import pyplot as plt
import numpy as np
# 1. Prepare the training data (translated from Korean).
x_train = np.array([1,2,3,4,5,6,7,8,9,10])
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_pred = np.array([11,12,13])

# 2. Build the model: 1 -> 3 (relu) -> 5 -> 1 dense stack.
model = Sequential()
model.add(Dense(3, input_dim=1, activation='relu'))
model.add(Dense(5))
model.add(Dense(1))

# 3. Compile and train the model.
model.compile(loss='mse', optimizer='adam'
              , metrics=['mae','acc']
              )
hist = model.fit(x_train, y_train, epochs=500, batch_size=1 )
#loss, acc = model.evaluate(x, y)

# 4. Evaluate on the training data and predict for unseen inputs.
loss = model.evaluate(x_train, y_train, batch_size=1)
#print('acc:',acc)
print('loss: ',loss)

y_predict = model.predict(x_pred)
print(y_predict)

#######
# for y in y_predict:
#     print(round(float(y)))
# NOTE (translated): the most important output of evaluate() is the loss;
# `metrics` adds auxiliary metrics (['metric1', 'metric2']) that are
# appended to evaluate()'s return value.
13,211 | 887d31f184b912035d15d576e744bb9c0d884785 | # Generated by Django 2.2.3 on 2019-07-15 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds identity/nome/pjtId CharFields to
    Conteudo and hubId/identity/nome CharFields to Projeto (all defaulting
    to '' with max_length 300)."""

    dependencies = [
        ('bim360', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='conteudo',
            name='identity',
            field=models.CharField(default='', max_length=300),
        ),
        migrations.AddField(
            model_name='conteudo',
            name='nome',
            field=models.CharField(default='', max_length=300),
        ),
        migrations.AddField(
            model_name='conteudo',
            name='pjtId',
            field=models.CharField(default='', max_length=300),
        ),
        migrations.AddField(
            model_name='projeto',
            name='hubId',
            field=models.CharField(default='', max_length=300),
        ),
        migrations.AddField(
            model_name='projeto',
            name='identity',
            field=models.CharField(default='', max_length=300),
        ),
        migrations.AddField(
            model_name='projeto',
            name='nome',
            field=models.CharField(default='', max_length=300),
        ),
    ]
|
13,212 | 0a8b80a5ddb9c6c06a3dda5e775854f4a4ae2c1c | import sys
def Palindrome(Number):
    """Return True when *Number* equals its own decimal-digit reverse.

    Negative numbers never match (the reversal loop only runs while the
    value is positive), and 0 is a palindrome.
    """
    original = Number
    reversed_value = 0
    while Number > 0:
        reversed_value = reversed_value * 10 + Number % 10
        Number //= 10
    return original == reversed_value
def Factorial(Number):
    """Return Number! computed iteratively (0! == 1! == 1)."""
    result = 1
    # Starting at 2 makes the 0/1 special case fall out of the empty range.
    for factor in range(2, Number + 1):
        result *= factor
    return result
# Interactive driver: repeatedly read a number and an operation until the
# user enters 'q' as the menu choice.
choice = '1'
while choice != 'q':
    # NOTE(review): the number is requested before the menu, so the user
    # must enter a number even when they intend to quit.
    number = int(input("Enter the number"))
    choice = input("\nEnter your choice : \n1 - Palindrom\n2- Factorial")
    if choice == '1':
        IsPalindrome = Palindrome(number)
        if IsPalindrome == True:
            print("{} is palindrome".format(number))
        else:
            print("{} is not palindrome".format(number))
    elif choice == '2':
        Fact = Factorial(number)
        print("Factorial of {} is {}".format(number, Fact))
    elif choice == 'q':
        sys.exit(0)
    else:
        print("Wrong Choice")
|
13,213 | 1114557034301cacf162e69682d31f01969047a1 | __author__ = 'adekola'
def approve_lyrics():
    """this method is called by an admin user to approve the lyrics of a song"""
    # TODO: not implemented yet.
    pass
def song_has_lyrics():
    """This method is called to check if a song already has lyrics so as to avoid duplicity of lyrics"""
    # TODO: not implemented yet.
    pass
def lyrics_note_is_same_as_original():
    """This is called to compare a lyrics note to the original to ensure they are not the same..if they are , such a
    lyrics note is rejected"""
    # TODO: not implemented yet.
    pass
def is_lyrics_approved():
    """Checks if the lyrics has been approved or not"""
    # TODO: not implemented; unlike its siblings this stub has no `pass`
    # and implicitly returns None.
def is_api_caller_authorized():
    """Check whether the API caller is authorised. Not implemented yet."""
    pass
13,214 | b6e9802b001f148b582dbdafb7a56e33709b7046 | import os
import sys
from PIL import Image
from shutil import copyfile
import imagehash
class utility:
    """Constants and small static helpers shared by the image-label store."""

    # Emotion class names; index 0 is the "no label" sentinel.
    labels = ['None',
              'amusement',
              'awe',
              'contentment',
              'anger',
              'disgust',
              'excitement',
              'fear',
              'sadness']

    # (column name, SQL type) schemas for the sqlite tables.
    # NOTE(review): "PRIMATY KEY" below looks like a typo for "PRIMARY KEY";
    # confirm how the target database treats it before changing the string.
    image_table_columns = [("id", "TEXT PRIMATY KEY UNIQUE"), ("path", "TEXT"), ("label", "INTEGER"),
                           ("confidence", "INTEGER"), ("source", "TEXT"), ("comment", "TEXT")]
    model_table_columns = [("name", "TEXT"), ("path", "TEXT"), ("accuracy", "REAL")]
    score_table_columns = [("id", "INTEGER PRIMARY KEY"), ("model", "TEXT"), ("image_id", "TEXT"), ("label", "INTEGER"),
                           ("confidence", "REAL")]
    labeltype_table_columns = [("id", "INTEGER PRIMARY KEY"), ("name", "TEXT")]

    @staticmethod
    def checkStrong(topCount, totalCount):
        """True when the top label is well-supported: at least 5 votes in
        total and the top label holding more than 60% of them."""
        totalCountThreshold = 5
        ratioThreshold = 0.6
        return (totalCount >= totalCountThreshold and topCount / totalCount > ratioThreshold)

    @staticmethod
    def checkFolder(path):
        """Create *path* (including parents) if it does not already exist."""
        if not os.path.isdir(path):
            os.makedirs(path)

    @staticmethod
    def isSelect(command):
        """True when the SQL *command* is read-only (SELECT or PRAGMA)."""
        first = (command.strip().split(' ')[0]).lower()
        if first != 'select' and first != 'pragma':
            return False
        return True

    @staticmethod
    def hashImage(image):
        """
        using p hash

        Accepts either a PIL image or a filesystem path to one; returns the
        perceptual hash as a string.
        """
        # if iamge is path, read it
        if type(image) is str:
            image = Image.open(image)
        return str(imagehash.phash(image))
class tempFileHandler:
    """
    this class handles temporary files

    On construction, moves *file* into <folderPath>/temp; the caller can
    later remove() the temp copy or copyBack() to restore the original.
    """

    def __init__(self, folderPath, file):
        if os.path.isfile(file):
            self.originalPath = file
            self.tempPath = folderPath + "/temp/" + file.split("/")[-1]
            utility.checkFolder(folderPath + "/temp")
            # Move = copy into temp, then delete the original.
            copyfile(self.originalPath, self.tempPath)
            os.remove(self.originalPath)
        else:
            # Nothing to manage; remove()/copyBack() become no-ops.
            self.tempPath = None

    def remove(self):
        # Delete the temp copy (the original was already removed in __init__).
        if self.tempPath != None:
            os.remove(self.tempPath)

    def copyBack(self):
        # Restore the original file from the temp copy.
        if self.tempPath != None:
            copyfile(self.tempPath, self.originalPath)
class fileManager:
    """
    this class manages file system locations

    Lays out <filePath>/models and <filePath>/images/<source>/ folders and
    resolves/enumerates paths inside them.
    """

    def __init__(self, filePath):
        self.filePath = filePath
        utility.checkFolder(self.filePath + "/models")

    def getImagePath(self, name, source='source'):
        # Ensure the per-source image folder exists before handing out a path.
        utility.checkFolder(self.filePath + "/images/" + str(source))
        return "%s/images/%s/%s" % (self.filePath, str(source), name)

    def getModelPath(self, name):
        return "%s/models/%s" % (self.filePath, name)

    def getAllImageList(self):
        """Return the set of all image file paths, skipping .DS_Store."""
        pathSet = set()
        for root, dirs, files in os.walk(self.filePath + "/images"):
            path = root.split(os.sep)  # NOTE(review): unused local
            for file in files:
                if file != ".DS_Store":
                    pathSet.add(root + os.sep + file)
        return pathSet

    def getAllModelList(self):
        """Return the set of all model file paths, skipping .DS_Store."""
        pathSet = set()
        for root, dirs, files in os.walk(self.filePath + "/models"):
            path = root.split(os.sep)  # NOTE(review): unused local
            for file in files:
                if file != ".DS_Store":
                    pathSet.add(root + os.sep + file)
        return pathSet
13,215 | 184b3f182aee061a11e0e1be9d4a24776c79ffee | # 19723756=ไบ้ณไน้ฃๅๆฆ, 3779629=ไบ้ณไนๆฐๆญๆฆ, 2884035=็ฝๆๅๅๆญๆฒๆฆ, 3778678=ไบ้ณไน็ญๆญๆฆ
# ่ฏ่ฎบ
# https://music.163.com/weapi/v1/resource/comments/A_PL_0_19723756?csrf_token=
import re
from selenium import webdriver
import selenium.webdriver.support.ui as ui
# from lxml import etree
# driver = webdriver.PhantomJS(executable_path=r'C:\liux\py_tools\phantomjs-2.1.1-windows\bin\phantomjs.exe')
# NOTE(review): PhantomJS support was removed from recent Selenium releases;
# headless Chrome/Firefox is the modern replacement.
driver = webdriver.PhantomJS(executable_path='phantomjs.exe')
driver.get("https://music.163.com/discover/toplist")
# The chart content is rendered inside the g_iframe frame.
driver.switch_to.frame('g_iframe')
wait = ui.WebDriverWait(driver, 15)
body = driver.page_source

# Collect every toplist id linked from the landing page.
# NOTE(review): pattern strings should ideally be raw strings (r"...").
toplistidpa = 'href="/discover/toplist\?id=(\d+)"'
toplistids = re.compile(toplistidpa, re.S).findall(body)
# for

# Scrape one specific chart (id 19723756) and extract the song table rows
# as (song id, row html) tuples.
driver.get("https://music.163.com/discover/toplist?id=19723756")
driver.switch_to.frame('g_iframe')
wait = ui.WebDriverWait(driver, 15)
body = driver.page_source
# html = etree.HTML(body)
# mids = html.xpath('//tbody/tr/@id')
toplistpa = '<tr id="(\d+)".*?>(.*?)</tr>'
tolists = re.compile(toplistpa, re.S).findall(body)
print(tolists)
13,216 | f2011d2b12a66c9d3074060516c801fbebdb4d23 | from django.conf.urls import url
from . import views
# Person routes: the root shows the list view; everything before the final
# slash is captured as the slug for the detail view.
urlpatterns = [
    url('^$', views.PersonList.as_view(), name='person_list'),
    url('^(?P<slug>.*)/$', views.PersonDetail.as_view(), name='person_detail'),
]
|
13,217 | 49ca9e08320df3109f9c981f0f40df1686256baa | """
Given a linked list, determine if it has a cycle in it.
To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to. If pos is -1, then there is no cycle in the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: true
Explanation: There is a cycle in the linked list, where tail connects to the second node.
"""
"""
Approach3: Two pointers
"""
# Time Complexity - O(N + k)
# Space Complexity - O(1)
"""
List has a cycle:
We break down the movement of the slow pointer into two steps, the non-cyclic part and the cyclic part:
The slow pointer takes "non-cyclic length" steps to enter the cycle. At this point, the fast pointer has already reached the cycle. \text{Number of iterations} = \text{non-cyclic length} = NNumber of iterations=non-cyclic length=N
Both pointers are now in the cycle. Consider two runners running in a cycle - the fast runner moves 2 steps while the slow runner moves 1 steps at a time. Since the speed difference is 1, it takes \dfrac{\text{distance between the 2 runners}}{\text{difference of speed}}
difference of speed
distance between the 2 runners
โ
loops for the fast runner to catch up with the slow runner. As the distance is at most "\text{cyclic length K}cyclic length K" and the speed difference is 1, we conclude that
\text{Number of iterations} = \text{almost}Number of iterations=almost "\text{cyclic length K}cyclic length K".
Therefore, the worst case time complexity is O(N+K)O(N+K), which is O(n)O(n).
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def hasCycle(self, head: "ListNode") -> bool:
        """Floyd's tortoise-and-hare cycle detection.

        Returns True when the list reachable from *head* contains a cycle.
        O(n) time, O(1) extra space.

        FIXES: the annotation is quoted because ``ListNode`` is only
        defined by the judge environment — the unquoted name made the
        class fail to import; ``is``/``is None`` replace ``==``
        comparisons (node identity is what matters here).
        """
        # An empty or single-node list cannot form a cycle.
        if head is None or head.next is None:
            return False

        slow = fast = head
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            # The fast pointer can only land on slow if there is a cycle.
            if slow is fast:
                return True
        return False
13,218 | 07a3d8a605571c28c031ff30ecaed3b981d84cfe | import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# from selenium.webdriver.common.keys import Keys
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
from win10toast import ToastNotifier
import emailAlert as EA
from datetime import datetime
# Poller that watches a Maccabi doctor's page for an earlier appointment
# slot and raises a Windows toast notification + email when one appears.
ChromePath = "C:\\Program Files (x86)\\chromeDriver.exe" # Need to download chromeDriver from selenium's website
chrome_options = Options()
# chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
# This url is for Dr Eytan in Maccabi
url = "https://serguide.maccabi4u.co.il/heb/doctors/doctorssearchresults/doctorsinfopage/?ItemKeyIndex" \
      "=07A84FD6B9055C8460995CA9E9A5B4A6C44AEEBF93BCBA6C560940F038DE0184&RequestId=25e9a718-c267-8b05-584e" \
      "-daa37be8c0de&Source=SearchPageResults "
# TODO: Put the email you would like to notify here
email_address = "YourEmail@gmail.com"
tryNum = 1
# currDate holds the best (earliest) known appointment date as d/m/Y text;
# currDate_date is its parsed datetime counterpart.
currDate = ""
currDate_date = None
ans = input("Do you have an appointment? (y/n)\n")
if ans == "y" or ans == "Y":
    currDate = input("Enter your appointment date (d/m/Y)\n")
    currDate_date = datetime.strptime(currDate, '%d/%m/%Y')
# Poll forever: each pass scrapes the page and compares the advertised
# date with the best one seen so far.
while True:
    print("Try number %d\n" % tryNum)
    tryNum += 1
    driver = webdriver.Chrome(ChromePath, options=chrome_options)
    driver.get(url)
    details = driver.find_elements_by_class_name("contactDetailsAns")
    # for el in details:
    #     print(el.text)
    # NOTE(review): assumes the 4th "contactDetailsAns" element is the next
    # available date in d/m/Y format — verify against the live page markup.
    availableDate = details[3].text
    availableDate_date = datetime.strptime(availableDate, '%d/%m/%Y')
    if currDate == "":
        # No baseline yet: adopt the first scraped date.
        currDate = availableDate
        currDate_date = datetime.strptime(currDate, '%d/%m/%Y')
        print(currDate)
    driver.quit()
    if availableDate_date < currDate_date:
        # Earlier slot found: record it and notify via toast + email.
        currDate = availableDate
        currDate_date = availableDate_date
        now_date = datetime.now()
        toast = ToastNotifier()
        toast.show_toast("New Available for DR Eytan", currDate, duration=30)
        try:
            EA.email_alert("New Available for DR Eytan", f"Dr Eytan will be available at {currDate_date.strftime('%A')} {currDate}\n"
                                                         f"Its {(currDate_date - now_date).days+1} days from now.\n"
                                                         f"Try to make an appointment with this link:\n"
                                                         f"https://visit.maccabi4u.co.il/AppointmentsOrder/DanaInfo=.avjulxEshklkmuF8Os5R15+OrderCheckPatientEligibleForService.aspx?DoctorID=9B27E7672BF1A469F1CBDEE1BF656B98&FacilityId=77279&JobId=1&linkType=3&Service=1"
                           , email_address)
            print("email sent")
        except Exception as e:
            print("Didn't send mail\n", e)
    # driver.quit()
    # Wait 5 minutes between polls.
    time.sleep(300)
|
13,219 | 4301debbadc126fb74411f7650c5553e3d234791 | import os
import sys
import pickle
import shutil
# Second half of a two-step move helper: reads the location pickled by a
# companion script, copies the file into the destination folder (recreating
# its relative folder structure) and then removes the original tree.
progPath = os.path.dirname(sys.argv[0])
progPath = os.path.dirname(os.path.dirname(progPath))
# loc.pickle lives two directories above this script (Windows-style path).
pDir = progPath + "\\loc.pickle"
# Destination root is taken from the last command-line argument.
dst = sys.argv[-1]
if os.path.exists(pDir):
    p = open(pDir, "rb")
    # temppath layout: [0] source file path, [1] its containing folder,
    # [2] the root folder that was moved.
    # NOTE(review): inferred from the indexing below — confirm against the
    # script that writes loc.pickle.
    temppath = pickle.load(p)
    p.close()
    # The pickle is one-shot: consume it and delete it.
    os.remove(pDir)
    folder = temppath[1]
    # Keep only the part of the folder path relative to the moved root.
    folder = folder[len(temppath[2]):]
    dst = dst + folder
    if not os.path.exists(dst):
        os.makedirs(dst)
    cp = shutil.copy(temppath[0] ,dst)
    # Best-effort removal of the original tree (ignore_errors=True).
    shutil.rmtree(temppath[2], True)
|
13,220 | 9fba24402cfc98e6b67cf257885243eeecff242b | # https://www.acmicpc.net/problem/1296
# Use readline for fast input.
from sys import stdin

# First line: Oh Min-sik's English name (strip the trailing newline).
# (The original Korean comments had been wrapped mid-line by the dump,
# leaving bare text fragments that were syntax errors; the comments are
# restored here in English.)
ohminsik = stdin.readline().rstrip()
# Second line: the number N of women he likes (a natural number <= 50).
N = int(stdin.readline())
# Names of the women with the highest success probability seen so far.
selected_woman = []
# Highest success probability seen so far.
probability = 0
for woman_idx in range(N):
    # The woman's name: uppercase letters only, trailing newline stripped.
    woman_name = stdin.readline().rstrip()
    # Concatenate both names and count the letters of "LOVE" in the result.
    name_sum = ohminsik + woman_name
    L = name_sum.count('L')
    O = name_sum.count('O')
    V = name_sum.count('V')
    E = name_sum.count('E')
    # Success probability as defined by the problem, taken modulo 100.
    woman_prob = ((L + O) * (L + V) * (L + E) * (O + V) * (O + E) * (V + E)) % 100
    if woman_prob == probability:
        # Tie with the current best: remember this name as well.
        selected_woman.append(woman_name)
    elif woman_prob > probability:
        # Strictly better: restart the candidate list with this name only.
        selected_woman = [woman_name]
        probability = woman_prob
# Print the alphabetically first name among the best candidates.
print(sorted(selected_woman)[0])
def libras_para_kg(x):
    """Convert a weight in pounds (*x*) to kilograms, rounded to 7 decimals.

    Fixes a NameError: the body referenced an undefined name `libra`
    instead of the parameter `x`.
    """
    kilo = float(x) / 2.204600
    return round(kilo, 7)
|
13,222 | 8ab33f46a750aa5a9f51fa07bbc9ac19767ca8eb | from rex.core import StrVal, FloatVal, Error
from rex.instrument.interface import Assessment
from rex.db import get_db
__all__ = ('DemoAssessment',)
class DemoAssessment(Assessment):
    """Demo Assessment implementation backed by an HTSQL `/assessment` table."""
    @classmethod
    def _from_record(cls, record):
        """Build a DemoAssessment from one `/assessment` database record."""
        return cls(
            record.uid,
            DemoSubject.get_by_uid(record.subject),
            DemoInstrumentVersion.get_by_uid(record.instrumentversion),
            record.data,
            evaluation_date=record.evaluation_date,
            status=record.status,
        )
    @classmethod
    def get_by_uid(cls, uid, user=None):
        """Fetch one assessment by uid; return None when it does not exist."""
        database = get_db()
        with database:
            rows = database.produce('/assessment?id()=$uid', uid=uid)
        return cls._from_record(rows[0]) if rows else None
    @classmethod
    def find(cls, offset=0, limit=None, user=None, **search_criteria):
        """Return all assessments ordered by uid (criteria ignored in the demo)."""
        database = get_db()
        with database:
            rows = database.produce('/assessment.sort(uid)')
        return [cls._from_record(row) for row in rows]
    @classmethod
    def bulk_retrieve(cls, uids):
        """Return BulkAssessment tuples for the completed assessments in `uids`."""
        database = get_db()
        with database:
            rows = database.produce(
                "/assessment{uid, instrumentversion.uid :as iv, data}.filter(uid=$uids).filter(status='completed').sort(uid)",
                uids=uids,
            )
        return [
            cls.BulkAssessment(
                uid=str(row.uid),
                data=AnyVal().parse(row.data),
                instrument_version_uid=str(row.iv),
            )
            for row in rows
        ]
    @classmethod
    def create(cls, subject, instrument_version, data=None, evaluation_date=None, implementation_context=None):
        """Demo create: returns an in-memory assessment with a fixed fake uid."""
        return cls(
            'fake_assessment_1',
            subject,
            instrument_version,
            data,
            evaluation_date=evaluation_date,
        )
    def save(self, implementation_context=None):
        """Demo save: no persistence, just log the uid."""
        print('### SAVED ASSESSMENT ' + self.uid)
    @classmethod
    def bulk_create(cls, assessments, validate=True):
        """Demo bulk create: reject negative `study1` context values, then log."""
        for assessment in assessments:
            if assessment.context['study1'] < 0:
                raise Error('Bulk create failed with unexpected study1.')
        print('### CREATED %s ASSESSMENTS' % len(assessments))
    @classmethod
    def get_implementation_context(cls, action):
        """Context spec: optional `study` (str) and required `study1` (float) on create."""
        if action != cls.CONTEXT_ACTION_CREATE:
            return Assessment.get_implementation_context(action)
        return {
            'study': {
                'required': False,
                'validator': StrVal(),
            },
            'study1': {
                'required': True,
                'validator': FloatVal(),
            }
        }
|
# Echo every other input line: lines at even 0-based positions are printed,
# lines at odd positions are read and discarded.
for line_no in range(int(input())):
    text = input()
    if line_no % 2 == 0:
        print(text)
|
13,224 | 229f2d251512ddee56f78fe853411719a05dd514 | # Pesquisa Operacional - 2018/1
# Engenharia de Sistemas UFMG
# Artur Mello - 2013030392
# Hernane Braga Pereira - 2014112627
import gurobipy as grb
import pandas as pd
from plot_results import *
# Drinks catalogue: one row per beverage (name, volume, price, ABV).
filename = "bebidas.csv"
data_csv = pd.read_csv(filename)
data = pd.DataFrame(data_csv)
# General formula: C = A/(R*P)
# A = grams of alcohol [g]
# R = conversion factor: 0.68 [male] and 0.55 [female]
# P = the person's weight [kg]
# C = grams of alcohol ingested per kilo
# Blood alcohol level [TAS] = C/1.056
# TAS is expressed in [g/L], i.e. grams of alcohol per litre of blood
# It is divided by 1.056 because that is the density of blood
def calculo_TAS(volume_ml, abv, peso, sexo):
    """Return the blood alcohol level (TAS, in g/L) produced by one drink.

    volume_ml: drink volume in millilitres; abv: alcohol percentage by
    volume; peso: body weight in kg; sexo: 1 = male, 2 = female.
    """
    # Grams of pure alcohol: 0.79 converts ml of ethanol to grams.
    grams_alcohol = abv * volume_ml * 0.79 / 100
    # Widmark distribution factor: 0.55 for women, 0.68 otherwise.
    factor = 0.55 if sexo == 2 else 0.68
    # Grams of alcohol per kg of body weight, then divided by the density
    # of blood (1.056) to obtain grams per litre of blood.
    grams_per_kg = grams_alcohol / (factor * peso)
    return grams_per_kg / 1.056
''' User and model input parameters
peso - weight in kg
sexo - 1 - male / 2 - female
saldo - Amount of money available to spend, in [R$]
TAS_min - Blood alcohol level at which drunkenness starts
TAS_max - Maximum safe blood alcohol level
'''
# User
peso = 60
sexo = 2
saldo = 300
# Model
TAS_min = 0.3 # Blood alcohol level at which drunkenness starts
TAS_max = 1.2 # Maximum safe blood alcohol level
# Model parameters
# Information processing
TAS_itens = []
nome_itens = []
preco_itens = []
data_list = data.values.tolist()
N = len(data_list)
# TAS computed for each catalogue item
for item in data_list:
    # item[i] refers to the dataset column: 0=name, 1=volume, 2=price, 3=ABV
    TAS_itens.append(calculo_TAS(item[1], item[3], peso, sexo))
    nome_itens.append(item[0])
    preco_itens.append(item[2])
# Create a new model
m = grb.Model('alcoolizador')
# One integer decision variable per beverage: how many to buy.
x = [m.addVar(vtype=grb.GRB.INTEGER, name=i) for i in nome_itens]
m.update()
# Objective: total blood alcohol; custo: total money spent.
obj = grb.quicksum([i*j for i,j in zip(x, TAS_itens)])
custo = grb.quicksum([i*j for i,j in zip(x, preco_itens)])
m.setObjective(obj, grb.GRB.MAXIMIZE)
# Problem constraints: stay between drunk and safe, within budget.
m.addConstr(obj <= TAS_max)
m.addConstr(obj >= TAS_min)
m.addConstr(custo <= saldo)
m.optimize()
status = m.status
# Gurobi status 4 == INF_OR_UNBD (infeasible or unbounded).
if status == 4:
    print('Modelo infactível: saldo monetário insuficiente.')
# If the problem is not infeasible
else:
    conta_do_bar = 0
    print('\n--- Resultado Final ----\n')
    print('Sexo: {0}\nPeso: {1} kg\nSaldo disponível: R${2:.2f}'.format(("masculino" if sexo == 1 else "feminino"), peso, float(saldo)))
    print('\n## QUANTIDADE DE ITENS COMPRADOS ##')
    items_index = []
    for index, v in enumerate(m.getVars()):
        if v.x > 0:
            # Report each purchased item and accumulate the bar bill.
            print('{}: {}'.format(v.varName, round(v.x)))
            items_index.append({"id":index, "value": v.x})
            conta_do_bar += v.x * preco_itens[index]
    print('-------\nConta do bar: R${0:.2f} '.format(conta_do_bar))
    print('\nTAS final: %g g/L' % m.objVal)
    perc = 100*(m.objVal/TAS_max)
    print('{}'.format(("Você atingiu o limite máximo de álcool no sangue para " if perc >= 99
                       else "Você está {0:.2f}% perto do limite máximo de não desmaiar ".format(perc))))
    '''
    # Plot graphs
    # 1 - items by TAS (g/L)
    # 2 - items by money (R$)
    '''
    plot_tas_per_drink(data, TAS_itens, TAS_min, TAS_max, items_index, N, 4)
    plot_price_per_drink(data, preco_itens, 0, saldo, items_index, N, 4)
|
13,225 | d34f14cf5ffa70b9d4bacf51ef4a41e8c41d533c | responses = {}
# Set a flag to indicate that polling is active.
polling_active = True
while polling_active:
    # Prompt for the person's name and response.
    name = input("\nWhat is your name? ")
    response = input("Which are your favorite tacos? ")
    # Store the response in the dictionary.
    # NOTE(review): fixed keys mean each iteration overwrites the previous
    # person's answers — only the last respondent is kept; confirm intent.
    responses['name'] = name
    responses['flavor'] = response
    # Find out if anyone else is going to take the poll.
    repeat = input("Would you like to tell me something else about you?(yes/no)")
    if repeat.lower() == 'no':
        polling_active = False
        comment = ""
        print(comment)
        print("Goodbye!")
    else:
        comment = input("Write your comment please: ")
        # NOTE(review): both branches set polling_active = False, so the
        # loop always runs exactly once — confirm whether repeated polling
        # was intended.
        polling_active = False
        print(comment)
        print("Goodbye!")
    responses['comment'] = comment
    print(responses.items())
# Polling is complete. Show the results.
print("\n--- Poll Results ---")
for value in responses.values():
    print(value)
print(f'Poll Dictionary: {responses}')
print("\n--- Poll Results ---")
for item in responses.items():
    print(item)
13,226 | e681385c6f55c8e91b8d319c7ce0e04528a13c45 | # task_1
# Print the name 100 times, keeping the repetition count in `a`.
a = 0
for _ in range(100):
    print("dennis omoding")
    a += 1
# After the loop `a` holds the number of repetitions (100).
print(a)
13,227 | f4ba47a74da7b06e9d9c9c0a1b5e1267a733eca2 | from app import app
import csv
import os.path
def get_hackers():
    """Load attendee records from attendees.csv located next to this module.

    Returns a list of dicts, one per attendee, with ticket/order/shirt/
    school/check-in fields. The CSV header row is skipped.
    """
    hackers = []
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    file_path = os.path.join(BASE_DIR, "attendees.csv")
    with open(file_path) as csvfile:
        attendees = csv.reader(csvfile, delimiter=',')
        # Skip the header row (replaces the old manual first-row counter).
        next(attendees, None)
        for row in attendees:
            hackers.append({
                'ticket_type': row[0],
                'order_ref': row[1],
                'shirt_size': row[2],
                'school': row[3],
                'checkin_first': row[4],
                'checkin_last': row[5],
            })
    return hackers
def get_shirts():
    """Tally requested shirt sizes across all hackers.

    Returns a dict with one count per size, male/female subtotals, a grand
    total, and a 'None' bucket for unrecognised sizes.
    """
    hackers = get_hackers()
    # Check order matters: it mirrors the original elif chain and the first
    # matching label wins.
    labels = ['Male XS', 'Male S', 'Male M', 'Male L', 'Male XL',
              'Female XS', 'Female S', 'Female M', 'Female L', 'Female XL']
    tally = {label: 0 for label in labels}
    other = 0
    for entry in hackers:
        for label in labels:
            if label in entry['shirt_size']:
                tally[label] += 1
                break
        else:
            other += 1
    male_total = sum(tally[label] for label in labels[:5])
    female_total = sum(tally[label] for label in labels[5:])
    shirt_data = {
        'Total': male_total + female_total + other,
        'Male': male_total,
    }
    # Per-size keys drop the space, e.g. 'Male XS' -> 'MaleXS'.
    for label in labels[:5]:
        shirt_data[label.replace(' ', '')] = tally[label]
    shirt_data['Female'] = female_total
    for label in labels[5:]:
        shirt_data[label.replace(' ', '')] = tally[label]
    shirt_data['None'] = other
    return shirt_data
|
# Maintain the array's total while processing value-replacement queries:
# each query (b, c) replaces every occurrence of b with c.
n = int(input())
li_a = list(map(int, input().split()))
# dic_a: frequency of each value currently in the array.
dic_a = {}
for value in li_a:
    dic_a[value] = dic_a.get(value, 0) + 1
q = int(input())
li_bc = [tuple(map(int, input().split())) for _ in range(q)]
answer = sum(li_a)
for b, c in li_bc:
    if b in dic_a:
        count_b = dic_a[b]
        # Move every occurrence of b onto c and adjust the running sum.
        dic_a[c] = dic_a.get(c, 0) + count_b
        dic_a[b] = 0
        answer += (c - b) * count_b
print(answer)
|
13,229 | 77b74c1717ba54372adf062f4e5cb99ec71827d2 | from model.Circle import Circle
from model.Is_Member import Is_Member
from DAO.TagDao import TagDao
from DAO.UserProfileDao import UserProfileDao
class CircleDao:
    """Data-access object for Circle vertices and their IS_MEMBER edges in an
    OrientDB graph, using a pyorient-style connection wrapper."""
    def __init__(self, baseConnection):
        # baseConnection wraps the database client; getDB() returns the
        # handle used for query()/command() calls below.
        kb = baseConnection
        self.connection = kb.getDB()
    def has_tag(self, user_id, tag_id):
        """Return True if the circle identified by `user_id` carries `tag_id`.

        NOTE(review): the first argument is matched against the Circle @rid,
        so it appears to identify a circle rather than a user — confirm.
        """
        query = "select * from Circle where @rid = {0} and tags contains (@rid = {1})".format(user_id, tag_id)
        # print(query)
        result = self.connection.query(query)
        response = False
        if (result):
            response = True
        return response
    def set_member(self, is_member):
        """Create an IS_MEMBER edge (user -> circle) from the given model and
        return the created edge(s) as Is_Member objects."""
        user_rid = is_member.outV
        circle_rid = is_member.inV
        query = "Create Edge IS_MEMBER FROM {0} TO {1} CONTENT {2}".format(user_rid, circle_rid, is_member.toDict())
        # print(query)
        result = self.connection.command(query)
        response = list()
        for is_member_record in result:
            response.append(self.to_Is_Member(is_member_record))
        return response
    def update_is_member(self, ismember):
        """Merge the model's fields into the stored Is_Member edge by @rid."""
        cmd = "UPDATE Is_Member MERGE {1} WHERE @rid= {0} ".format(ismember.rid, ismember.toDict())
        # print(cmd)
        result = self.connection.command(cmd)
        return result
    def get_is_member(self, circle_rid, user_rid):
        """Fetch the IS_MEMBER edge(s) linking `user_rid` to `circle_rid`."""
        query = "select * from Is_Member WHERE in = {0} AND out = {1}".format(circle_rid, user_rid)
        # print(query)
        result = self.connection.query(query)
        response = list()
        for is_member_record in result:
            response.append(CircleDao.to_Is_Member(is_member_record))
        return response
    def get_members(self, circle_rid, limit=-1):
        """Return the member users of a circle as UserProfile objects
        (limit = -1 means no explicit limit)."""
        query = "select expand(memberId) from (SELECT In('IS_MEMBER').@rid as memberId FROM {0} UNWIND memberId) limit {1}".format(
            circle_rid, limit)
        # print(query)
        result = self.connection.query(query)
        response = list()
        for user_record in result:
            response.append(UserProfileDao.to_UserProfile(user_record))
        return response
    def getAll(self, limit=-1):
        """Return up to `limit` circles (-1 = no explicit limit)."""
        query = "SELECT * FROM Circle limit " + str(limit)
        # print(query)
        result = self.connection.query(query)
        response = list()
        for circle_record in result:
            response.append(self.to_Circle(circle_record))
        return response
    def exist(self, name):
        """Return the @rid of the circle named `name`, or a falsy value
        (the empty result list) if no such circle exists."""
        result = self.getByName(name)
        if (result):
            return result[0].rid
        return result
    def getByName(self, name):
        """Fetch circles by exact name. Double quotes are stripped from the
        name to keep the inlined query syntactically valid."""
        query = "SELECT * FROM Circle WHERE name = \"{0}\"".format(name.replace('"', ''))
        # print(query)
        result = self.connection.query(query)
        response = list()
        for circle_record in result:
            response.append(self.to_Circle(circle_record))
        return response
    def getById(self, id):
        """Fetch a circle by its @rid."""
        query = "SELECT * FROM Circle WHERE @rid = {0}".format(id)
        # print(query)
        result = self.connection.query(query)
        response = list()
        for circle_record in result:
            response.append(self.to_Circle(circle_record))
        return response
    def update(self, circle):
        """Merge the model's fields into the stored Circle by @rid."""
        cmd = "UPDATE Circle MERGE {1} WHERE @rid= {0} ".format(circle.rid, circle.toDict())
        # print(cmd)
        result = self.connection.command(cmd)
        return result
    def add(self, circle):
        """Insert a new Circle and return the created record(s) as models."""
        cmd = "INSERT INTO Circle CONTENT {0}".format(circle.toDict())
        # print(cmd)
        result = self.connection.command(cmd)
        response = list()
        for circle_record in result:
            response.append(self.to_Circle(circle_record))
        return response
    @staticmethod
    def to_Is_Member(ismember):
        """Convert a raw edge record into an Is_Member model."""
        inV = ismember.__getattr__('in')  # mandatory, string
        # inV = ismember._in # mandatory, string
        outV = ismember.__getattr__('out')  # mandatory, string
        # outV = ismember._out # mandatory, string
        status = ismember.__getattr__('status')  # mandatory, string
        rank = ismember.__getattr__('rank')  # mandatory, string
        timestamp = ismember.__getattr__('timestamp')  # mandatory, string
        new_isMember = Is_Member(inV, outV)
        new_isMember.rid = ismember._rid
        new_isMember.rank = rank
        new_isMember.status = status
        new_isMember.timestamp = timestamp
        return new_isMember
    @staticmethod
    def to_Circle(circle):
        """Convert a raw vertex record into a Circle model; tenancy and tags
        are optional on the record and default to empty values."""
        name = circle.__getattr__('name')  # mandatory, string
        try:
            tenancy0 = circle.__getattr__('tenancy')
            tenancy = "#" + tenancy0.get()  # Mandatory, Tenant
        except AttributeError:
            tenancy = ""
        new_circle = Circle(name, tenancy)
        new_circle.rid = circle._rid  # string
        status = circle.__getattr__('status')  # string
        try:
            tags = circle.__getattr__('tags')  # list of Tags(rids)
        except AttributeError:
            tags = list("")
        new_circle.status = status
        new_circle.tags = tags
        return new_circle
if __name__ == "__main__":
myDao = CircleDao()
tagDao = TagDao()
circle = Circle("CircleNew", "#113:0") # tenancy #25:0
result0 = myDao.getAll()
if (result0):
rid0 = result0[0].rid
result = myDao.getById(rid0)
print("READ: {0} - {1}".format(result[0].rid, result[0].tenancy))
print("+: {0} - {1} - {2} - {3}".format(result[0].rid, result[0].name, str(result[0].tags), result[0].tenancy))
print("Circle: " + str(result[0]))
for tag in result[0].tags:
print("Circle tags: " + str(tag))
circle.add_tags(result[0].tags)
else:
tags = tagDao.getAll(2)
tags2 = list()
for tag in tags:
tags2.append(tag.rid)
print("tags: " + str(tags2))
circle.add_tags(tags2)
result2 = myDao.add(circle)
print("CREATE: {0} - {1}".format(result2[0].rid, result2[0].name))
circle.name = "newCircle23"
rid = result2[0].rid
circle.rid = rid
result3 = myDao.update(circle)
print("UPDATE: Count:", result3[0])
|
13,230 | f0c418e17980da37cdc754492de31c28b035d042 | import graphene
from .validator import Validator
from .utils.formatting import format_extra_arguments, format_graphene_arguments
from .utils.security import enforce_custom_auth_decorator
from django.forms.models import model_to_dict
class MutationBase(graphene.Mutation):
    """Base class for generated graphene mutations: wires up argument
    classes, auth, and before/after hooks around a user-supplied
    mutate_method."""
    # Default field to return from the mutations
    completed = graphene.Boolean()
    messages = graphene.List(graphene.String)
    @classmethod
    @enforce_custom_auth_decorator
    def mutate(cls, root, info, **kwargs):
        """Graphene entry point: split off the configured extra arguments,
        then run before_mutate -> mutate_method -> after_mutate."""
        # Extract extra arguments and converting to one element
        extra_arguments = {}
        for i in cls.extra_argument_names:
            if i in kwargs:
                extra_arguments[i] = kwargs.pop(i)
        kwargs["extra_arguments"] = extra_arguments
        cls.before_mutate(cls, root, info, kwargs)
        rt = cls.mutate_method(cls, root, info, **kwargs)
        cls.after_mutate(cls, root, info, kwargs)
        return rt
    def get_model(self):
        """Return the Django model behind the configured graphene type."""
        return self.graphene_type._meta.model
    def set_graphene_arguments(self, options, update_rel=False):
        """Build the mutation's Arguments class from the graphene type's
        fields. When update_rel is True, list-valued fields get paired
        add_/rmv_ arguments instead of a single argument."""
        is_required = []
        if required := options.get("is_required"):
            is_required = required
        exclude_list = []
        if exclude := options.get("exclude"):
            exclude_list = exclude
        self.set_graphene_type(self, options)
        graphene_type_argument = format_graphene_arguments(
            self.graphene_type, is_required, exclude_list)
        if not hasattr(self, "Arguments"):
            setattr(self, "Arguments", type("Arguments", (), {}))
        # Maps a relationship argument's display name to its Django model,
        # used later to resolve ids into instances.
        relationship_models = {}
        for argument in graphene_type_argument:
            if argument.of_type:
                if argument.is_relationship:
                    relationship_models[argument.display_name] = argument.model
                if update_rel and isinstance(argument.of_type, graphene.List):
                    setattr(
                        self.Arguments, f"add_{argument.display_name}", argument.of_type)
                    setattr(
                        self.Arguments, f"rmv_{argument.display_name}", argument.of_type)
                else:
                    setattr(self.Arguments, argument.display_name,
                            argument.of_type)
            else:
                setattr(self.Arguments, argument.display_name,
                        argument.of_type)
        setattr(self, "relationship_models", relationship_models)
    def set_extra_arguments(self, options):
        """Register arguments that are not fields of the graphene type;
        their names are remembered so mutate() can split them off."""
        is_required = []
        if required := options.get("is_required"):
            is_required = required
        # Getting and Setting Extra Arguments
        extra_arguments = format_extra_arguments(
            options.get("extra_arguments"), is_required)
        if not hasattr(self, "Arguments"):
            setattr(self, "Arguments", type("Arguments", (), {}))
        setattr(self, "extra_argument_names", [])
        for argument in extra_arguments:
            setattr(self.Arguments, argument.display_name, argument.of_type)
            self.extra_argument_names.append(argument.display_name)
    def set_custom_auth(self, options):
        """Install the auth callable; falls back to an allow-all default
        when validation of the provided callable fails."""
        custom_auth = options.get("custom_auth")
        # If auth validation fails, creating a default_auth
        if not Validator.validate_custom_auth(custom_auth):
            def default_auth(*args, **kwargs):
                return (True, [])
            setattr(self, "custom_auth", default_auth)
        else:
            setattr(self, "custom_auth", custom_auth)
    def set_extra_info(self, options):
        """Attach validated free-form extra info to the mutation class."""
        extra_info = options.get("extra_info")
        if Validator.validate_extra_info(extra_info):
            self.extra_info = extra_info
    def set_graphene_type(self, options, return_obj=True):
        """Store the graphene type and, optionally, expose it as a return
        field named after the type."""
        graphene_type = options.get("graphene_type")
        if Validator.validate_graphene_type(graphene_type):
            self.graphene_type = graphene_type
            if return_obj:
                setattr(self, graphene_type.__name__,
                        graphene.Field(graphene_type))
    def set_before_mutate(self, options):
        """Install the before_mutate hook; defaults to a no-op."""
        before_mutate = options.get("before_mutate")
        if not Validator.validate_mutation_functions(before_mutate):
            def default_mutate(*args, **kwargs):
                pass
            before_mutate = default_mutate
        self.before_mutate = before_mutate
    def set_mutate_method(self, mutate_method):
        """Install the required mutate_method; raises if it is invalid."""
        if not Validator.validate_mutation_functions(mutate_method):
            raise Exception(
                "Mutation instance needs a mutation_method property")
        self.mutate_method = mutate_method
    def set_after_mutate(self, options):
        """Install the after_mutate hook; defaults to a no-op."""
        after_mutate = options.get("after_mutate")
        if not Validator.validate_mutation_functions(after_mutate):
            def default_mutate(*args, **kwargs):
                pass
            after_mutate = default_mutate
        self.after_mutate = after_mutate
    def pop_formatted_relationship_queries(cls, fields) -> dict:
        """Remove relationship ids from `fields` and return them resolved:
        foreign keys become model instances; many-to-many ids are kept as
        lists split into 'add' and 'rmv' buckets by argument prefix."""
        relationship_queries = {
            "foreign_key": {},
            "many_to_many": {
                "add": {},
                "rmv": {}
            }
        }
        for name, id in list(fields.items()):
            query_name = name
            # Match either the plain name or one prefixed with add_/rmv_
            # (the [4:] slice strips the 4-character prefix).
            if name in cls.relationship_models or (query_name := name[4:]) in cls.relationship_models:
                model = cls.relationship_models.get(query_name)
                if isinstance(id, list):
                    for i in id:
                        # Querying it to check if it exist
                        id_query = model.objects.get(pk=i)
                    if name.startswith("rmv"):
                        relationship_queries["many_to_many"]["rmv"][query_name] = id
                    else:
                        relationship_queries["many_to_many"]["add"][query_name] = id
                else:
                    id_query = model.objects.get(pk=id)
                    relationship_queries["foreign_key"][query_name] = id_query
                fields.pop(name)
        return relationship_queries
    def pop_manual_resolve_arguments(cls, model, fields: dict) -> dict:
        """Remove from `fields` every argument that is not a field of
        `model` (directly or with a 4-char prefix stripped) and collect
        them for manual resolution."""
        manual_resolve_arg = {}
        model_as_dict = model_to_dict(model)
        for name, j in list(fields.items()):
            if name not in model_as_dict:
                if name[4:] not in model_as_dict:
                    value = fields.pop(name)
                    manual_resolve_arg[name] = value
        # NOTE(review): no return statement follows — the function appears
        # truncated and likely should `return manual_resolve_arg`; confirm
        # against the upstream source.
|
13,231 | 9e26862991f7a6a677f19547dfb23b62e8b9c6b7 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval IsyNet."""
import os
from mindspore import context
from mindspore.common import set_seed
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.CrossEntropySmooth import CrossEntropySmooth
from src.model_utils.config import config
from src.dataset import create_dataset2 as create_dataset
from ISyNet.model import ISyNet
set_seed(1)
def eval_net():
    """Evaluate an ISyNet checkpoint on the configured dataset and print
    top-1 / top-5 accuracy."""
    target = config.device_target
    # Graph-mode execution on the configured backend.
    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
    if target == "Ascend":
        context.set_context(device_id=int(os.getenv('DEVICE_ID', default='0')))
    # Evaluation dataset (no training-time augmentation).
    dataset = create_dataset(dataset_path=config.data_path, do_train=False,
                             batch_size=config.batch_size, target=target)
    # With deep mutual learning (dml > 1) a teacher architecture is loaded
    # alongside the backbone; otherwise a single architecture file is used.
    backbone = ([config.jsonFile, config.jsonFileTeacher] if config.dml > 1
                else config.jsonFile)
    net = ISyNet(num_classes=config.class_num,
                 json_arch_file_backbone=backbone,
                 dropout=config.dropout,
                 weight_standardization=config.weight_standardization,
                 last_bn=config.lastbn,
                 dml=config.dml,
                 evaluate=True)
    # Restore trained weights and freeze the network for evaluation.
    load_param_into_net(net, load_checkpoint(config.checkpoint_file_path))
    net.set_train(False)
    # Loss selection mirrors training: label smoothing only for imagenet2012.
    if config.dataset == "imagenet2012":
        if not config.use_label_smooth:
            config.label_smooth_factor = 0.0
        loss = CrossEntropySmooth(sparse=True, reduction='mean',
                                  smooth_factor=config.label_smooth_factor,
                                  num_classes=config.class_num)
    else:
        loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    # Wrap in a Model and run the evaluation pass.
    model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})
    res = model.eval(dataset)
    print("result:", res, "ckpt=", config.checkpoint_file_path)
# Script entry point: run the evaluation when executed directly.
if __name__ == '__main__':
    eval_net()
|
13,232 | 50f671f35863921229c4eabf6f2186f3bb0109fd | import numpy as np
import pandas as pd
import datetime
from pathlib import Path
import csv
class Loader:
    """Incremental reader for daily CSV log files named
    '<file_prefix> <date>.csv' in `log_drive`.

    Each file starts with a header and has 'date' and 'time' columns that
    are combined into a DatetimeIndex; the remaining columns are returned
    unchanged. `refresh_data` caches previously read rows so repeated calls
    only parse new lines of the current day's file.
    """
    def __init__(self, log_drive, file_prefix, date_format='%Y-%m-%d', time_format='%H:%M:%S', quiet=True):
        self.log_drive = log_drive
        self.file_prefix = file_prefix
        self.date_format = date_format
        self.time_format = time_format
        self.datetime_format = f'{self.date_format} {self.time_format}'
        self.quiet = quiet
        # Cache of previously loaded rows plus bookkeeping for refresh_data.
        self.data = None
        self.loaded_start_date = None
        self.loaded_stop_date = None
        self.lines_loaded = 0

    def _file_path(self, date):
        """Return the path of the daily log file for `date`."""
        date_str = date.strftime(self.date_format)
        return Path(self.log_drive, f'{self.file_prefix} {date_str}.csv')

    def _read_log(self, file_path, skip_data_rows=0):
        """Read one daily CSV, combining 'date' + 'time' into a DatetimeIndex.

        The first `skip_data_rows` data rows (not counting the header) are
        skipped. This replaces the dict form of read_csv's `parse_dates`,
        which is deprecated in pandas 2.x and removed in 3.0.
        """
        frame = pd.read_csv(file_path, header=0, skiprows=range(1, skip_data_rows + 1))
        stamps = frame['date'] + ' ' + frame['time']
        frame = frame.drop(columns=['date', 'time'])
        frame.index = pd.to_datetime(stamps, format=self.datetime_format)
        frame.index.name = 'datetime'
        return frame

    def grab_dates(self, start_date, stop_date):
        """Return a DataFrame with all data for the days start_date through
        stop_date (inclusive).

        Accepts date or datetime arguments; missing daily files are
        reported and skipped.
        """
        if isinstance(start_date, datetime.datetime):
            start_date = start_date.date()
        if isinstance(stop_date, datetime.datetime):
            stop_date = stop_date.date()
        if not self.quiet:
            print(f'Grabbing data for dates '
                  f'{start_date.strftime(self.date_format)} through {stop_date.strftime(self.date_format)}')
        t0 = datetime.datetime.now()
        data = pd.DataFrame()
        date_range = [dt.date() for dt in pd.date_range(start_date, stop_date).to_pydatetime()]
        for date in date_range:
            file_path = self._file_path(date)
            try:
                new_data = self._read_log(file_path)
                # DataFrame.append was removed in pandas 2.0 — use concat.
                data = pd.concat([data, new_data])
            except FileNotFoundError:
                print(f'File not found: {file_path}')
        if not self.quiet:
            tf = datetime.datetime.now()
            dt = (tf - t0).total_seconds()
            print(f'Grabbed data for dates '
                  f'{start_date.strftime(self.date_format)} through '
                  f'{stop_date.strftime(self.date_format)}')
            print(f'Grabbing took {dt:.3f} s')
        return data

    def refresh_data(self, start_datetime):
        """Return a DataFrame covering start_datetime.date() through now,
        reusing the cached rows in self.data where possible.

        The loader tracks the range already loaded (loaded_start_date /
        loaded_stop_date) and how many data rows of the current day's file
        have been consumed (lines_loaded), so only new lines are parsed.
        A request reaching earlier than the cached range forces a hard
        reset of the cache.
        """
        start_date = start_datetime.date()
        stop_datetime = datetime.datetime.now()
        stop_date = stop_datetime.date()
        if not self.quiet:
            print(f'Refreshing data from '
                  f'{start_datetime.strftime(self.datetime_format)} through '
                  f'{stop_datetime.strftime(self.datetime_format)}')
        t0 = datetime.datetime.now()
        # A hard reset (clear cache and flags) is needed when nothing is
        # loaded yet or when the request starts before the cached range.
        hard_reset = False
        if self.loaded_start_date is None:
            hard_reset = True
        elif start_date < self.loaded_start_date:
            hard_reset = True
        elif self.loaded_start_date < start_date:
            # Drop cached rows older than the requested range to save memory.
            time_mask = np.logical_and(np.array(start_datetime <= self.data.index),
                                       np.array(self.data.index <= stop_datetime))
            self.data = self.data.loc[time_mask]
            self.loaded_start_date = start_datetime.date()
        if hard_reset:
            self.data = pd.DataFrame()
            self.loaded_start_date = None
            self.loaded_stop_date = None
            self.lines_loaded = 0
        date_range = [dt.date() for dt in pd.date_range(start_date, stop_date).to_pydatetime()]
        for date in date_range:
            if self.loaded_start_date is not None and self.loaded_stop_date is not None:
                if date < self.loaded_stop_date:
                    # Already fully cached: skip to the next date.
                    continue
                elif date > self.loaded_stop_date:
                    # Moved on to a new file: start reading from the top.
                    self.lines_loaded = 0
                # date == self.loaded_stop_date: lines_loaded skips the
                # rows of that day's file that were already consumed.
            file_path = self._file_path(date)
            try:
                new_data = self._read_log(file_path, self.lines_loaded)
                new_row_count = new_data.shape[0]
                if new_row_count > 0:
                    # DataFrame.append was removed in pandas 2.0 — use concat.
                    self.data = pd.concat([self.data, new_data])
                    if date == stop_date:
                        self.lines_loaded += new_row_count
            except FileNotFoundError:
                print(f'File not found: {file_path}')
            if self.loaded_start_date is None:
                self.loaded_start_date = date
            self.loaded_stop_date = date
        if not self.quiet:
            tf = datetime.datetime.now()
            dt = (tf - t0).total_seconds()
            print(f'Data refreshed to include dates '
                  f'{start_date.strftime(self.date_format)} through '
                  f'{stop_date.strftime(self.date_format)}')
            print(f'Refreshing took {dt:.3f} s')
        return self.data

    def get_header(self):
        """Return the header row of the log files.

        NOTE: assumes every CSV in the directory shares the same header;
        the first matching file (glob order) is used.
        """
        file_path = list(Path(self.log_drive).glob('*.csv'))[0]  # extract header from first matching file
        with file_path.open('r', newline='') as file:
            reader = csv.reader(file)
            header = next(reader)
        return header
|
13,233 | 7e56a29cc790668a8d3509dbbca6637280236f63 | #!/usr/bin/python3
"""This module performs math on matrices"""
def matrix_divided(matrix, div):
    """Divide every element of a matrix by ``div``, rounded to 2 decimals.

    Args:
        matrix (list of lists of int/float): the matrix to divide
        div (int/float): the divisor

    Returns:
        A new matrix with each element divided by ``div``.

    Raises:
        TypeError: if matrix is not a non-empty list of equal-length lists
            of numbers, or if div is not a number
        ZeroDivisionError: if div is 0
    """
    err_1 = "matrix must be a matrix (list of lists) of integers/floats"
    err_2 = "Each row of the matrix must have the same size"
    err_3 = "div must be a number"
    err_4 = "division by zero"
    # An empty matrix used to crash with IndexError on matrix[0]; treat it
    # as malformed input and raise the documented TypeError instead.
    if not isinstance(matrix, list) or not matrix:
        raise TypeError(err_1)
    # Reference row length. Only meaningful when the first element is a
    # list; otherwise the loop below raises err_1 on that element anyway.
    rowlen = len(matrix[0]) if isinstance(matrix[0], list) else None
    for row in matrix:
        # Every row must itself be a list...
        if not isinstance(row, list):
            raise TypeError(err_1)
        # ... of the same length as the first row ...
        if len(row) != rowlen:
            raise TypeError(err_2)
        # ... containing only numbers.
        for element in row:
            if not isinstance(element, (int, float)):
                raise TypeError(err_1)
    if not isinstance(div, (int, float)):
        raise TypeError(err_3)
    if div == 0:
        raise ZeroDivisionError(err_4)
    # Build a brand-new matrix; the input is never mutated.
    return [[round(element / div, 2) for element in row] for row in matrix]
|
13,234 | c6cdc2880c0a5aace4b3726e125e8c0872c34267 | #Username: chefteerth ## https://www.codechef.com/users/chefteerth
#Question URL: https://www.codechef.com/PCO12020/problems/LOTR1
# Problem Name: Lord of the Rings
# Problem Code: LOTR1
# Programming Lang: Python3
# 1<= x <= M
# 1<= y <= N
# For each test case, count pairs (x, y) with 1 <= x <= M, 1 <= y <= N
# where x*y + (x + y) equals the decimal concatenation of x and y.
test = int(input())
ans_lst = []
for _case in range(test):
    M, N = map(int, input().split())
    no_of_pais = 0
    x_lst = []
    for x in range(1, M + 1):
        for y in range(1, N + 1):
            # concatenation "xy" as an integer, e.g. x=1, y=9 -> 19
            if x * y + (x + y) == int(str(x) + str(y)):
                no_of_pais += 1
                x_lst.append(x)
    ans_lst.append(str(no_of_pais) + ' ' + str(len(x_lst)))
for answer in ans_lst:
    print(answer)
|
13,235 | 75851bc9421c527665023142a062be2dc998f73a | # 8 10 6 2 4
# 8 6 10 2 4
# 8 6 2 10 4
# 8 6 2 4 10
# 6 8 2 4 10
# 6 2 8 4 10
# 6 2 4 8 10
# 2 6 4 8 10
# 2 4 6 8 10
def bubble_sort(values):
    """Bubble-sort ``values`` in place and return it.

    Repeatedly sweeps adjacent pairs, swapping out-of-order elements,
    until a full sweep performs no swap.
    """
    swapped = True
    while swapped:
        swapped = False
        for i in range(len(values) - 1):
            if values[i] > values[i + 1]:
                # Out of order -> swap and remember we must sweep again.
                values[i], values[i + 1] = values[i + 1], values[i]
                swapped = True
    return values


# Renamed from `list`, which shadowed the builtin of the same name.
numbers = [8, 10, 6, 2, 4]
print('BEFORE: ', numbers)
bubble_sort(numbers)
print('AFTER: ', numbers)
|
13,236 | aa91a942dd93444cf55fb35e62ee59dfd0c54d06 | from django.http import HttpResponse
from django.urls import path, include, re_path
from django.conf import settings
# Prebuilt SPA entry point, read once at import time.
index_file = (settings.BASE_DIR / "front" / "index.html").read_bytes()


def serve_spa(request):
    # Single-page-app fallback: every non-API path gets the built index.html.
    return HttpResponse(index_file)


urlpatterns = [
    path('api/', include('api.urls')),
    re_path(r'^.*', serve_spa),
]
handler404 = 'api.views.page_not_found'
|
13,237 | d4db423614576901ccdfcbdfd7613debb2822612 | import string
import random
from datetime import datetime
def random_string(length: int) -> str:
    """Return ``length`` characters drawn one at a time from string.printable."""
    pool = string.printable
    return ''.join(random.choice(pool) for _ in range(length))
def random_date(start: datetime = datetime.min, end: datetime = datetime.max) -> datetime:
    """Return a uniformly random datetime between ``start`` and ``end`` (inclusive,
    at one-second granularity).

    Fix: the original did ``start + datetime.timedelta(...)``, but ``datetime``
    here is the *class* (``from datetime import datetime``), which has no
    ``timedelta`` attribute — every call raised AttributeError.
    """
    from datetime import timedelta  # local import: module only imported the class
    # Random number of whole seconds within the span.
    diff = random.randint(0, int((end - start).total_seconds()))
    return start + timedelta(seconds=diff)
|
13,238 | f42fc434474956d386dcede3fb73e5ca6dded10a | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration (auto-generated): introduces the FruitType model
    and converts FruitLocation.fruit_type from a CharField into a ForeignKey.
    Statement order matters — edit with care."""

    def forwards(self, orm):
        # Adding model 'FruitType'
        db.create_table(u'fruit_fruittype', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal(u'fruit', ['FruitType'])

        # Renaming column for 'FruitLocation.fruit_type' to match new field type.
        db.rename_column(u'fruit_fruitlocation', 'fruit_type', 'fruit_type_id')
        # Changing field 'FruitLocation.fruit_type'
        db.alter_column(u'fruit_fruitlocation', 'fruit_type_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fruit.FruitType']))
        # Adding index on 'FruitLocation', fields ['fruit_type']
        db.create_index(u'fruit_fruitlocation', ['fruit_type_id'])

    def backwards(self, orm):
        # Exact inverse of forwards(), applied in reverse order.
        # Removing index on 'FruitLocation', fields ['fruit_type']
        db.delete_index(u'fruit_fruitlocation', ['fruit_type_id'])
        # Deleting model 'FruitType'
        db.delete_table(u'fruit_fruittype')
        # Renaming column for 'FruitLocation.fruit_type' to match new field type.
        db.rename_column(u'fruit_fruitlocation', 'fruit_type_id', 'fruit_type')
        # Changing field 'FruitLocation.fruit_type'
        db.alter_column(u'fruit_fruitlocation', 'fruit_type', self.gf('django.db.models.fields.CharField')(max_length=20))

    # Frozen ORM snapshot used by South to build the 'orm' object above.
    models = {
        u'fruit.fruitlocation': {
            'Meta': {'ordering': "['-modified']", 'object_name': 'FruitLocation'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'fruit_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fruit.FruitType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
        },
        u'fruit.fruittype': {
            'Meta': {'ordering': "['-modified']", 'object_name': 'FruitType'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['fruit']
def common_prefix(a, b):
    """Return the longest common prefix of ``a`` and ``b``.

    Fixes an off-by-one: the original returned ``a[:i]`` with ``i`` stuck at
    the index of the *last compared* pair when the loop finished without a
    mismatch, so equal strings (or one string being a prefix of the other)
    lost their final character — e.g. common_prefix("abc", "abc") gave "ab".
    """
    for i, (x, y) in enumerate(zip(a, b)):
        if x != y:
            return a[:i]
    # No mismatch: the shorter string is itself the common prefix.
    return a[:min(len(a), len(b))]
# Read two lines and print their longest common prefix.
text_one = input()
text_two = input()
prefix = common_prefix(text_one, text_two)
print(prefix)
|
13,240 | 08e6715db563b44c49eaff9a4256d3c179cff16a | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# This script is inspired by the debian package python-chardet
import os
import glob
from distutils.core import setup
data_files = []
def get_debian_version():
    """Return the package version parsed from the first line of debian/changelog.

    That line looks like ``pkg (1.2-3) unstable; urgency=low``; the version is
    the second whitespace-separated token with the parentheses stripped.

    Uses a context manager so the file is closed even if reading fails
    (the original leaked the handle on error).
    """
    with open('debian/changelog', 'r') as f:
        line = f.readline()
    return line.split()[1].replace('(', '').replace(')', '')
def get_images(ipath):
    """Return the plain files (directories excluded) directly under ``ipath``."""
    return [entry for entry in glob.glob('%s/*' % (ipath)) if os.path.isfile(entry)]
# Everything installed outside site-packages: images, UI definition,
# desktop entry, X session hook and logout scripts.
data_files.extend([
    ('share/max-domain/images', get_images("images")),
    ('share/max-domain', ['max-domain-main.ui']),
    ('share/applications', ['max-domain.desktop']),
    ('/etc/X11/Xsession.d', ['80_configure_domain_session']),
    ('share/gnome/shutdown', ['99domain_logout.sh']),
    ('share/max-domain', ['script_logout.sh']),
])

setup(
    name='MAX-DOMAIN',
    description='Join MAX to AD domain',
    version=get_debian_version(),
    author='Mario Izquierdo',
    author_email='mariodebian@gmail.com',
    url='http://max.educa.madrid.org',
    license='GPLv2',
    platforms=['linux'],
    keywords=['ldap', 'domain', 'samba'],
    scripts=['max-domain', 'max-control'],
    data_files=data_files,
)
13,241 | 9b67b747bd495b2807f478ab1fcdb933158f91fb | import hashlib
is_valid = False
number = 0
while is_valid == False:
m = hashlib.md5()
m.update('iwrupvqb')
m.update(str(number))
if m.hexdigest()[0:6] == '000000':
print m.hexdigest()
print number
is_valid = True
number += 1 |
13,242 | 8387f9900993c6c7b90012109072038d2f91945e | # !/usr/bin/env python
# -*- encoding: utf-8 -*-
# @author: Lodgeinwh
# @file: Ghost Age.py
# @time: 2019/04/25 23:26:13
# @contact: lodgeinwh@gmail.com
# @version: 1.0
def checkio(opacity):
    """Return a ghost's age given its opacity.

    A ghost starts at opacity 10000. Each birthday it loses ``age`` opacity
    when the age is a Fibonacci number, otherwise it gains 1. Age up until
    the running opacity matches the observed one.
    """
    fib = [0, 1]
    while fib[-1] < 10000:
        fib.append(fib[-1] + fib[-2])
    fib_numbers = set(fib)

    age = 0
    current = 10000
    while current != opacity:
        age += 1
        current = current - age if age in fib_numbers else current + 1
    return age
# These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
    # Self-checks only; not required for the auto-tester.
    cases = [
        (10000, 0, "Newborn"),
        (9999, 1, "1 year"),
        (9997, 2, "2 years"),
        (9994, 3, "3 years"),
        (9995, 4, "4 years"),
        (9990, 5, "5 years"),
    ]
    for opacity, expected, label in cases:
        assert checkio(opacity) == expected, label
|
13,243 | 9f3ac7d1330f481a191b6a6abffea84dce593209 | import serial
import serial.tools.list_ports
import platform
import logging
import py3lib.QuLogger
import numpy as np
ft232_name_in = "0403:6001"
arduino_name_in = "2341:0043"
#ft232_name_in_mac = "0403:6001"
#ft232_name_in_win = "VID_0403+PID_6001"
#ft232_name_in_win = "USB VID:PID=2341:0043" #Arduino Uno
manual_comport = 'COM5'
class UART:
    """Serial link helper built on pyserial.

    Auto-detects known USB-serial adapters (FT232 / Arduino Uno — see the
    module-level VID:PID constants) or opens an explicitly chosen port, and
    wraps reads/writes with logging and an "ERROR" sentinel on failure.

    Changes vs. the original: the five copy-pasted readNBinary bodies now
    share _readFixed(); checkCom/checkPortCom share _selectPort(); bare
    ``except:`` clauses are narrowed to ``except Exception``; an unused
    ``platform.system()`` call was dropped. Behavior is otherwise preserved.
    """

    def __init__(self, loggername=' '):
        self.cp = 0                      # COM port name; 0 means "not found yet"
        self.port = serial.Serial()      # unopened placeholder until connect*()
        self.find_com = False            # True once a usable port was located
        self.logger = logging.getLogger(loggername)

    def connect(self, baudrate=115200, timeout=1):
        """Auto-detect a known adapter and open it; return True on success."""
        self.baudrate = baudrate
        self.timeout = timeout
        self.find_com = self.checkCom()  # decide self.cp (COM port)
        self.port = self.comDetect()
        if self.find_com:
            self.port.flush()
        return self.find_com

    def connect_comboBox(self, baudrate=115200, timeout=1, port_name=''):
        """Open the port the user picked in a GUI combo box.

        NOTE(review): returns the constant 1 rather than ``self.find_com`` —
        preserved as-is for existing callers.
        """
        self.baudrate = baudrate
        self.timeout = timeout
        print('comboBox cp=', port_name)
        self.cp = port_name
        self.find_com = True
        self.port = self.comDetect()
        return 1

    def comDetect(self):
        """Build the serial.Serial object for self.cp (opened only if found)."""
        ser = serial.Serial()
        if self.find_com:
            print('self.cp: ', self.cp)
            ser = serial.Serial(self.cp)
            print('port: ', ser.port)
        ser.baudrate = self.baudrate
        ser.timeout = self.timeout
        return ser

    def portConnect(self, portid, baudrate=115200, timeout=1):
        """Connect to the adapter whose hardware id contains ``portid``."""
        self.baudrate = baudrate
        self.timeout = timeout
        self.find_com = self.checkPortCom(portid)
        self.port = self.comDetect()
        if self.find_com:
            self.port.flush()
        return self.find_com

    def _selectPort(self, matcher, announce=False):
        # Shared port scan: matcher(hwid) decides whether an entry matches.
        # The last match wins, as in the original loop.
        for entry in serial.tools.list_ports.comports():
            if matcher(entry[2]):
                self.cp = entry[0]
                if announce:
                    print("cp = " + str(self.cp))
        if self.cp != 0:
            return True
        self.logger.error("Can't Find the COM Port")
        return False

    def checkPortCom(self, portid):
        """Scan ports for one whose hardware id contains ``portid``; set self.cp."""
        return self._selectPort(lambda hwid: portid in hwid)

    def checkCom(self):
        """Scan ports for a known FT232 / Arduino adapter; set self.cp."""
        return self._selectPort(
            lambda hwid: ft232_name_in in hwid or arduino_name_in in hwid,
            announce=True)

    def selectCom(self):
        """Collect the currently present ports into self.comPort / self.portNum."""
        portlist = serial.tools.list_ports.comports()
        self.portNum = len(portlist)
        self.comPort = np.empty(0)
        for i in range(self.portNum):
            self.comPort = np.append(self.comPort, portlist[i])

    def writeBinary(self, data):
        """Write a single byte value to the port."""
        data_list = list([data])
        self.port.write(data_list)
        self.logger.debug("write hex data=" + str(data_list))

    def writeList(self, datalist):
        """Write an iterable of byte values verbatim."""
        self.port.write(datalist)

    def readBinary(self):
        """Read one byte; return its int value, b'' on timeout, "ERROR" on failure."""
        try:
            temp = self.port.read()
        except Exception:                # narrowed from a bare except
            self.logger.error("readBinary failed")
            return "ERROR"
        if len(temp) > 0:
            data = ord(temp)
            self.logger.debug("read hex data=" + str(data))
        else:
            data = temp
            self.logger.debug("read hex data failed")
        return data

    def _readFixed(self, count):
        # Shared body of the readNBinary helpers: raw read of `count` bytes,
        # "ERROR" sentinel on failure (the original had five copies of this).
        try:
            return self.port.read(count)
        except Exception:
            self.logger.error("readBinary failed")
            return "ERROR"

    def read1Binary(self):
        """Read 1 byte (raw bytes, not an int)."""
        return self._readFixed(1)

    def read2Binary(self):
        """Read 2 bytes."""
        return self._readFixed(2)

    def read3Binary(self):
        """Read 3 bytes."""
        return self._readFixed(3)

    def read4Binary(self):
        """Read 4 bytes."""
        return self._readFixed(4)

    def read5Binary(self):
        """Read 5 bytes."""
        return self._readFixed(5)

    def readBinaryMust(self, timeoutloop=10):
        """Poll for one byte; give up after ``timeoutloop`` - 1 empty reads.

        Returns the byte's int value, "ERROR" on a read failure, or None on
        timeout (the original returned None implicitly; made explicit here).
        """
        loop = 1
        while True:
            try:
                temp = self.port.read()
            except Exception:
                self.logger.error("readBinaryMust failed")
                return "ERROR"
            if len(temp) > 0:
                data = ord(temp)
                self.logger.debug("read hex data =" + str(data))
                return data
            loop = loop + 1
            if loop == timeoutloop:
                self.logger.debug("read data timeout in readBinaryMust")
                return None

    def writeLine(self, data, addR=False):
        """Write ``data`` terminated by '\\n' (or '\\r\\n' when addR is True)."""
        terminator = '\r\n' if addR else '\n'
        try:
            self.port.write((data + terminator).encode())
        except Exception:
            self.logger.error("writeLine failed")

    def readLine(self):
        """Read one decoded text line, or "ERROR" on failure."""
        try:
            return self.port.readline().decode()
        except Exception:
            self.logger.error("readLine failed")
            return "ERROR"

    def readLineF(self):
        """Flush pending input, then read one line (see readLine)."""
        self.port.flushInput()
        return self.readLine()
class FT232(UART):
    """FT232 adapter interface.

    This class used to be a byte-for-byte copy of UART (every method
    duplicated, minus read5Binary); it now simply inherits from UART so
    there is a single implementation to maintain. The original interface
    is preserved: ``loggername`` remains a required argument here.
    Inheriting additionally exposes read5Binary — a backward-compatible
    extension.
    """

    def __init__(self, loggername):
        # Required (no default) to match the original FT232 signature.
        super().__init__(loggername)
|
13,244 | 764911d7b12bcbde0d0a498fb712e5dca069d800 | #!/usr/bin/python3
"""100-my-int"""
class MyInt(int):
    """An int whose == and != operators are deliberately swapped
    (the 100-my-int exercise): equality answers int's inequality
    and vice versa. All other int behavior is inherited unchanged.
    """

    def __new__(cls, value):
        """Build the underlying immutable int as usual."""
        return super().__new__(cls, value)

    def __eq__(self, other):
        """Intentionally inverted: behaves like int's ``!=``."""
        return super().__ne__(other)

    def __ne__(self, other):
        """Intentionally inverted: behaves like int's ``==``."""
        return super().__eq__(other)
13,245 | 09d11245fc3614f2ab43f3b5759f5b5bc9308124 | import time
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data.dataloader import DataLoader
from torch.autograd import Variable
from data.parser import parse_mnist
from data.dataset import MNISTDataset
from data.transforms import MNISTTransform
from model import ModelMLP, ModelCNN
def train(model, optimizer, loader):
    """Run one training epoch over ``loader``.

    Returns (mean loss per batch, mean number of correct predictions per
    batch). Requires a CUDA device — data and model are moved with .cuda().

    Fix: ``loss.data[0]`` indexed a 0-dim tensor, which raises IndexError on
    PyTorch >= 0.5; replaced with ``.item()`` (also keeps the accuracy
    accumulator a plain int instead of a tensor).
    """
    model.train()
    loss_sum = 0
    acc_sum = 0
    for idx, (data, target) in enumerate(loader):
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)  # no-op on modern torch
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss_sum += loss.item()
        loss.backward()
        optimizer.step()
        # Predicted class = argmax over the logits.
        predict = output.data.max(1)[1]
        acc_sum += predict.eq(target.data).cpu().sum().item()
    return loss_sum / len(loader), acc_sum / len(loader)
def evaluate(model, loader):
    """Evaluate ``model`` over ``loader`` without gradient tracking.

    Returns (mean loss per batch, mean number of correct predictions per
    batch). Requires a CUDA device.

    Fixes: ``Variable(..., volatile=True)`` was removed from PyTorch — use
    ``torch.no_grad()`` instead; ``loss.data[0]`` raises on PyTorch >= 0.5 —
    use ``.item()``.
    """
    model.eval()
    loss_sum = 0
    acc_sum = 0
    with torch.no_grad():
        for idx, (data, target) in enumerate(loader):
            data, target = data.cuda(), target.cuda()
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss_sum += loss.item()
            predict = output.data.max(1)[1]
            acc_sum += predict.eq(target.data).cpu().sum().item()
    return loss_sum / len(loader), acc_sum / len(loader)
def main(cfg):
    """Train an MLP or CNN on MNIST with early stopping and step-wise LR decay.

    cfg must provide: mode ('mlp'|'cnn'), mnist_path, num_valid, parse_seed,
    torch_seed, batch_size, eval_batch_size, num_workers, initial_lr,
    max_epoch, max_patience, max_change, save_path. Requires CUDA.
    """
    torch.manual_seed(cfg.torch_seed)
    """Prepare data"""
    if cfg.mode == 'mlp':
        # First argument 2 vs 4 selects the parsed array layout per model type
        # (presumably flat vectors for the MLP, image tensors for the CNN —
        # confirm against parse_mnist).
        train_data, train_label, valid_data, valid_label, test_data, test_label = parse_mnist(2, cfg.mnist_path, cfg.num_valid, cfg.parse_seed)
    elif cfg.mode == 'cnn':
        train_data, train_label, valid_data, valid_label, test_data, test_label = parse_mnist(4, cfg.mnist_path, cfg.num_valid, cfg.parse_seed)
    else:
        raise ValueError('Not supported mode')
    transform = MNISTTransform()
    train_dataset = MNISTDataset(train_data, train_label, transform=transform)
    valid_dataset = MNISTDataset(valid_data, valid_label, transform=transform)
    test_dataset = MNISTDataset(test_data, test_label, transform=transform)
    train_iter = DataLoader(train_dataset, cfg.batch_size, shuffle=True, num_workers=cfg.num_workers)
    valid_iter = DataLoader(valid_dataset, cfg.eval_batch_size, shuffle=False, num_workers=cfg.num_workers)
    test_iter = DataLoader(test_dataset, cfg.eval_batch_size, shuffle=False)
    """Set model"""
    if cfg.mode == 'mlp':
        model = ModelMLP()
    elif cfg.mode == 'cnn':
        model = ModelCNN()
    else:
        raise ValueError('Not supported mode')
    model.cuda(device_id=0)
    optimizer = optim.Adam(model.parameters(), cfg.initial_lr)
    """Train"""
    # Early-stopping state machine. `status` values:
    #   keep_train  — plain epoch
    #   save_param  — validation improved; checkpoint before training on
    #   change_lr   — reload best checkpoint, decay LR by 10x per `change`
    #   end_train   — checkpoint and stop
    best_valid_loss = 1000000
    patience = 0
    change = 0
    status = 'keep_train'
    for epoch in range(cfg.max_epoch):
        print('... Epoch', epoch, status)
        start_time = time.time()
        if status == 'end_train':
            time.sleep(1)
            torch.save(model.state_dict(), cfg.save_path)
            break
        elif status == 'change_lr':
            time.sleep(1)
            # Roll back to the best checkpoint, then decay the LR.
            model.load_state_dict(torch.load(cfg.save_path))
            lr = cfg.initial_lr * np.power(0.1, change)
            for pg in optimizer.param_groups:
                pg['lr'] = lr
        elif status == 'save_param':
            torch.save(model.state_dict(), cfg.save_path)
        else:
            pass
        train_loss, train_acc = train(model, optimizer, train_iter)
        valid_loss, valid_acc = evaluate(model, valid_iter)
        print('...... Train loss, accuracy', train_loss, train_acc / cfg.batch_size)
        print('...... Valid loss, best loss, accuracy', valid_loss, best_valid_loss, valid_acc / cfg.eval_batch_size)
        if valid_loss > best_valid_loss:
            # No improvement: accumulate patience; exhausting it triggers an
            # LR change, and max_change LR changes end training.
            patience += 1
            print('......... Current patience', patience)
            if patience >= cfg.max_patience:
                change += 1
                patience = 0
                print('......... Current lr change', change)
                if change >= cfg.max_change:
                    status = 'end_train' # (load param, stop training)
                else:
                    status = 'change_lr' # (load param, change learning rate)
            else:
                status = 'keep_train' # (keep training)
        else:
            # Improvement: remember the new best and checkpoint next epoch.
            best_valid_loss = valid_loss
            patience = 0
            print('......... Current patience', patience)
            status = 'save_param' # (save param, keep training)
        end_time = time.time()
        print('...... Time:', end_time - start_time)
    # Final evaluation on the held-out test split.
    test_loss, test_acc = evaluate(model, test_iter)
    print('...... Test loss, accuracy', test_loss, test_acc / cfg.eval_batch_size)
if __name__ == '__main__':
    from config import Config

    # Build the run configuration and launch training.
    main(Config())
|
13,246 | f43cdd3b142969874df363a4fc436266680cb958 | import sqlalchemy as db
from flask import Flask, abort
import json
import decimal
import datetime
# Azure SQL connection settings.
# NOTE(review): hard-coded credentials (and an empty password) checked into
# source — move these to environment variables or a secrets store.
username = "kristensohm"
password = ""
dbname = "MIS5400"
servername = "gidferd.database.windows.net"
driver = "{ODBC Driver 17 for SQL Server}"
# echo=True logs every SQL statement — useful in development, noisy in production.
engine = db.create_engine(f'mssql+pyodbc://{username}:{password}@{servername}:1433/{dbname}?Driver={driver}', echo=True)
app = Flask(__name__)
app.config.from_object(__name__)
# Establish a database connection before requests
# Establish (and verify) a database connection before each request.
@app.before_request
def before_request():
    """Abort with 503 when the database is unreachable.

    Fixes vs. the original:
    - flask.abort takes the status code FIRST; the old call
      ``abort(message, 503)`` had the arguments reversed.
    - the probe connection is now closed instead of leaked.
    - NOTE(review): SQLAlchemy raises its own exception types here (e.g.
      exc.OperationalError), not the builtin ConnectionError the old code
      caught — so failures used to escape as 500s. A broad catch is used at
      this boundary; narrow it to sqlalchemy.exc.SQLAlchemyError if preferred.
    """
    try:
        conn = engine.connect()
        conn.close()
    except Exception:
        abort(503, "No database connection could be established.")
# Try really hard to dispose of the database connection cleanly
# Dispose of the engine's connection pool after each request.
@app.teardown_request
def teardown_request(exception):
    """Release pooled database connections once the request is done.

    The original disposed twice — once in the try body and again in a
    ``finally`` that ran even on success; a single guarded call is enough.
    """
    try:
        engine.dispose()
    except AttributeError:
        # Engine already torn down / never created — nothing to release.
        pass
# Default route with no info
# Default route with no info
@app.route("/", methods=['GET'])
def hello():
    """Landing page: a static snippet confirming the API is up."""
    return "<h3>There be data here</h3>"
# Gets the data from the income table
# Income endpoint: every PersonalIncome row as a JSON array of objects.
@app.route("/api/v1/income", methods=['GET'])
def get_income():
    """Return the full PersonalIncome table as JSON."""
    rows = engine.execute("SELECT * FROM PersonalIncome")
    return json.dumps([dict(row) for row in rows], default=alchemyencoder)
# Gets the data from the consumption (expenses) table
# Expenses endpoint: every ConsumptionByProduct row as a JSON array of objects.
@app.route("/api/v1/expenses", methods=['GET'])
def get_expenses():
    """Return the full ConsumptionByProduct table as JSON."""
    rows = engine.execute("SELECT * FROM ConsumptionByProduct")
    return json.dumps([dict(row) for row in rows], default=alchemyencoder)
# Used to format decimals and dates properly on conversion from sqlalchemy object to json
def alchemyencoder(obj):
    """``json.dumps`` ``default=`` hook for SQLAlchemy row values.

    Dates/datetimes become ISO-8601 strings; Decimals become floats.
    Fix: the original fell through and returned None for any other type,
    which json silently serialized as ``null`` — now an explicit TypeError
    is raised so unsupported values fail loudly.
    """
    if isinstance(obj, datetime.date):
        return obj.isoformat()
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')
if __name__ == "__main__":
    # 0.0.0.0 binds every interface (intended for container/VM use); note the
    # Flask development server is not meant for production traffic.
    app.run(host='0.0.0.0', port=5000)
|
13,247 | 8829323b6eddaa4d6be3fab6852d9083cfb54343 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Alex Wang
# Import config file setting
import src.alex_cfg.cfg_basic as cfg_basic
# Set logger
from src.alex_misc.alex_logging import create_logger
logger_data = create_logger(logger_name=__name__, fmt='%(message)s')
def data_df(capture, filter_str, value_list, field_list, csv_enable, data_csv_path):
"""
???
:param capture: Wireshark capture file
:param filter_str: wireshark display filter string
:param value_list:
:param field_list:
:param csv_enable:
:param data_csv_path:
:return:
"""
# Set pandas display and float number options
import sys
import pandas
pandas.set_option("display.max_rows", cfg_basic.pd_display_max_row())
pandas.set_option("display.max_columns", cfg_basic.pd_display_max_col())
pandas.set_option('precision', cfg_basic.pd_precision())
count = 0 # init counter to 0
all_data_list = [] # init data list
# Set Wireshark display filter
logger_data.info('Filter based on: \n\t{0}\n'.format(filter_str))
# Pyshark file capture with display filter
import pyshark
cap = pyshark.FileCapture(capture, only_summaries=False, display_filter=filter_str)
# loop each packet in capture file
for i_cap in cap:
# counter + get packet values for all fields
data_list = [count] + value_list(i_cap)
# append each packet field values to all data list
all_data_list.append(data_list)
# increase counter
count += 1
# convert all data list to DataFrame
df = pandas.DataFrame(all_data_list)
if df.empty:
logger_data.info('DataFrame is empty! Maybe display filter below not match any packet.\n\t{0}'.format(filter_str))
return df
# add counter column
col_list = ['count']
# counter col + all fields name as DataFrame columns
col_list.extend(field_list[0])
df.columns = col_list
# assign count col as index
df.index = df['count']
# give index col a name
df.index.name = 'Index'
# write into csv or not
if str(csv_enable) == '1':
logger_data.info('Write get values into DataFrame.')
df.to_csv(data_csv_path)
elif str(csv_enable) == '0':
logger_data.info('Skip write get values into DataFrame.')
else:
logger_data.info('Something wrong with "csv_enable" setting, pls check. Exiting...')
sys.exit()
# Return DataFrame
return df
def check_df_warp_0(str_option, ref_value, input_value):
str_option = str(str_option)
pass_str = 'Pass'
fail_str = 'Fail'
skip_str = 'Skip'
if str_option == 'p':
return '{0}: {1} = {2}'.format(pass_str, input_value, ref_value)
elif str_option == 'f':
return '{0}: {1} != {2}'.format(fail_str, input_value, ref_value)
else:
return '{0}'.format(skip_str)
def check_df_warp_1(enable, df, row, row_index, col, ref_data):
pass_list = []
fail_list = []
skip_list = []
if str(enable) == '1':
col = str(col)
index = row_index
get_value = row[col]
ref_value = ref_data
logger_data.debug('\tGet value: {0}\n\tRef value: {1}'.format(get_value, ref_value))
if get_value == ref_value:
pass_list.append([index, col, check_df_warp_0('p', ref_value, get_value)])
df.loc[index, col] = check_df_warp_0('p', ref_value, get_value)
else:
fail_list.append([index, col, check_df_warp_0('f', ref_value, get_value)])
df.loc[index, col] = check_df_warp_0('f', ref_value, get_value)
else:
col = str(col)
index = row_index
get_value = row[col]
ref_value = ref_data
skip_list.append([index, col, check_df_warp_0('s', ref_value, get_value)])
df.loc[index, col] = check_df_warp_0('s', ref_value, get_value)
return df, pass_list, fail_list, skip_list
def check_df_warp_2(pass_list_all, fail_list_all, skip_list_all, new_list):
pass_list_all.extend(new_list[1])
fail_list_all.extend(new_list[2])
skip_list_all.extend(new_list[3])
return pass_list_all, fail_list_all, skip_list_all
def check_df_warp_3(df, cfg_file, field_list, row, row_index, pass_list_all, fail_list_all, skip_list_all):
# fields_all = fields_frame() + fields_radiotap() + fields_wlan_radio() + fields_wlan()
fields_to_check = field_list[1]
for i_field in fields_to_check:
logger_data.info('\nCheck data for row[{0}], col[{1}]:'.format(row_index, i_field))
col = i_field
enable = getattr(cfg_file, i_field)()[0]
ref_data = getattr(cfg_file, i_field)()[1]
df_updated = check_df_warp_1(enable, df, row, row_index, col, ref_data)
check_df_warp_2(pass_list_all, fail_list_all, skip_list_all, df_updated)
return df, pass_list_all, fail_list_all, skip_list_all
def check_df(capture, cfg_file, filter_str, value_list, field_list, csv_enable, data_csv_path, check_csv_path):
df = data_df(capture, filter_str, value_list, field_list, csv_enable, data_csv_path)
pass_list_all = []
fail_list_all = []
skip_list_all = []
# Loop for DF rows: index: index number, row: row data content
for row_index, row in df.iterrows():
check_df_warp_3(df, cfg_file, field_list, row, row_index,
pass_list_all, fail_list_all, skip_list_all)
df.to_csv(check_csv_path)
return df, pass_list_all, fail_list_all, skip_list_all
|
13,248 | fd41940b1530a8e4dd616777f42594ee473a03d4 |
import os
import time
import requests
import sys
def retrieve_html():
    """Download monthly climate pages for 2013–2018 from tutiempo.net and
    save each as Data/Html_Data/<year>/<month>.html (relative to the CWD).

    Changes: the two duplicated URL branches are merged with a zero-padded
    ``{:02d}`` format spec, and the encoding name is spelled 'utf-8'
    (the old 'utf=8' only worked because Python normalizes codec names).
    """
    for year in range(2013, 2019):          # go for each and every year, month
        for month in range(1, 13):
            url = 'http://en.tutiempo.net/climate/{:02d}-{}/ws-421820.html'.format(month, year)
            texts = requests.get(url)       # retrieve the page
            text_utf = texts.text.encode('utf-8')
            # Make sure the per-year output directory exists.
            if not os.path.exists("Data/Html_Data/{}".format(year)):
                os.makedirs("Data/Html_Data/{}".format(year))
            with open("Data/Html_Data/{}/{}.html".format(year, month), "wb") as output:
                output.write(text_utf)
            sys.stdout.flush()
if __name__ == "__main__":
    # Time the full scrape end to end.
    begin = time.time()
    retrieve_html()
    elapsed = time.time() - begin
    print("Time taken {}".format(elapsed))
|
13,249 | 53e242bcbe47088285ab9d7eed0034767eb64b77 | from django.contrib import admin
from django.contrib.auth.models import *
from experiments.models import *
# --- Plain registrations (default ModelAdmin) ---
admin.site.register(Experimenter)
admin.site.register(All_Company)
admin.site.register(All_Laboratory)
'''
class User_LaboratoryAdmin(admin.ModelAdmin):
    list_display = ('lab', 'user', 'validated')
    raw_id_fields = ('lab', 'user', 'validated')
admin.site.register(User_Laboratory, User_LaboratoryAdmin)
'''


class All_ExperimenterAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist view.
    list_display = ('name', 'lab', 'validated')
    #raw_id_fields = ('name', 'lab', 'validated')
admin.site.register(All_Experimenter, All_ExperimenterAdmin)


class Experimenter_infoAdmin(admin.ModelAdmin):
    list_display = ('company', 'lab', 'experimenter')
admin.site.register(Experimenter_info, Experimenter_infoAdmin)

# Refrigerator
admin.site.register(Refrigerator_No)
admin.site.register(Refrigerator_Temperature)
admin.site.register(Refrigerator_Layer)
# Nitrogen
admin.site.register(Nitrogen_Container)
admin.site.register(Nitrogen_Basket)
admin.site.register(Nitrogen_Layer)
# Others
admin.site.register(Others_Temperature)
# Container
admin.site.register(Container)


class Container_NoAdmin(admin.ModelAdmin):
    list_display = ('name', 'Container', 'validated')
    #raw_id_fields = ('name', 'Container', 'validated')
admin.site.register(Container_No, Container_NoAdmin)


class Container_BasketAdmin(admin.ModelAdmin):
    list_display = ('name', 'Container', 'validated')
    #raw_id_fields = ('name', 'Container', 'validated')
admin.site.register(Container_Basket, Container_BasketAdmin)


class Container_LayerAdmin(admin.ModelAdmin):
    list_display = ('name', 'Container', 'validated')
    #raw_id_fields = ('name', 'Container', 'validated')
admin.site.register(Container_Layer, Container_LayerAdmin)

admin.site.register(All_AgeUnit)

# Source
admin.site.register(Source_TissueTaxonAorM)


class Source_TissueSystemAdmin(admin.ModelAdmin):
    list_display = ('name', 'pid', 'validated')
    #raw_id_fields = ('name', 'pid', 'validated')
admin.site.register(Source_TissueSystem, Source_TissueSystemAdmin)


class Source_TissueOrganAdmin(admin.ModelAdmin):
    list_display = ('name', 'pid', 'validated')
    #raw_id_fields = ('name', 'pid', 'validated')
admin.site.register(Source_TissueOrgan, Source_TissueOrganAdmin)


class Source_TissueStructureAdmin(admin.ModelAdmin):
    list_display = ('name', 'pid', 'validated')
    #raw_id_fields = ('name', 'pid', 'validated')
admin.site.register(Source_TissueStructure, Source_TissueStructureAdmin)


class Source_TissueTaxonIDAdmin(admin.ModelAdmin):
    list_display = ('name', 'pid', 'validated')
    #raw_id_fields = ('name', 'pid', 'validated')
admin.site.register(Source_TissueTaxonID, Source_TissueTaxonIDAdmin)


class Source_TissueTaxonStrainAdmin(admin.ModelAdmin):
    list_display = ('name', 'pid', 'validated')
    #raw_id_fields = ('name', 'pid', 'validated')
admin.site.register(Source_TissueTaxonStrain, Source_TissueTaxonStrainAdmin)


class Source_TissueTaxonNameAdmin(admin.ModelAdmin):
    list_display = ('name', 'abbrev', 'pid', 'validated')
    #raw_id_fields = ('name', 'abbrev', 'pid', 'validated')
admin.site.register(Source_TissueTaxonName, Source_TissueTaxonNameAdmin)

admin.site.register(Source_TissueType)
admin.site.register(source_CellType)


class Cell_NameAdmin(admin.ModelAdmin):
    list_display = ('name', 'abbrev', 'pid', 'validated')
    #raw_id_fields = ('name', 'abbrev', 'pid', 'validated')
admin.site.register(Cell_Name, Cell_NameAdmin)
admin.site.register(Fluid_name)
admin.site.register(Experiment_group)
admin.site.register(Galaxy_session)
class copartnerAdmin(admin.ModelAdmin):
list_display = ('from_experimenter', 'to_experimenter')
#raw_id_fields = ('from_experimenter', 'to_experimenter')
admin.site.register(copartner, copartnerAdmin)
admin.site.register(Project)
#Reagent Model
admin.site.register(Conjugate)
admin.site.register(Purification)
admin.site.register(React_species)
admin.site.register(Reagent_manufacturer)
admin.site.register(Affinity)
admin.site.register(Application)
#Reagent Type Model
admin.site.register(Antigen_species)
admin.site.register(Antigen_clonal_type)
admin.site.register(Antigen_modification)
class AntigenAdmin(admin.ModelAdmin):
list_display = ('gene_id', 'host_species', 'clonal_type', 'modification')
#raw_id_fields = ('gene_id', 'host_species', 'clonal_type', 'modification')
admin.site.register(Antigen, AntigenAdmin)
admin.site.register(Dna_info)
admin.site.register(Domain_info)
admin.site.register(Chemical_info)
admin.site.register(Remarks_info)
class ReagentAdmin(admin.ModelAdmin):
list_display = ('experimenter', 'date', 'type', 'name', 'manufacturer', 'catalog_no', 'conjugate', 'antigen', 'dna_info', 'domain_info', 'chemical_info', 'remarks_info')
#raw_id_fields = ('experimenter', 'date', 'type', 'name', 'manufacturer', 'catalog_no', 'applications', 'react_species_sources', 'react_species_targets', 'conjugate', 'antigen', 'dna_info', 'domain_info', 'chemical_info', 'remarks_info')
admin.site.register(Reagent, ReagentAdmin)
|
13,250 | ece0ff2954da3465b14a63b2bcb6b76a2b89a874 | from manimlib.imports import *
"""
TODO:
[ ] fix arrow head size
auto scale according to size?
have a default size, but, if the arrow size is too short, then shrink the head
[ ] slide the point according to the gradient
"""
class ParaboloidPlot(SpecialThreeDScene):
    """3D scene: a paraboloid surface with a marker dot and a force arrow.

    Renders axes, a polar paraboloid, a small sphere sitting on the
    surface, and an arrow toward the paraboloid's center, while slowly
    rotating the camera about phi.
    """
    CONFIG = {
        "three_d_axes_config": {
            "num_axis_pieces": 1,
            "number_line_config": {
                "unit_size": 2,
                "tick_frequency": 1,
                "numbers_with_elongated_ticks": [0, 1, 2],
                "stroke_width": 2,
            },
            "axis_config": {
                "unit_size": 1,
                "tick_frequency": 1,
                "numbers_with_elongated_ticks": [],
                "stroke_width": 2,
            },
            "x_min": 0,
            "x_max": 7,
            "y_min": 0,
            "y_max": 7,
            "z_min": 0,
            "z_max": 7,
        },
        "init_camera_orientation": {
            "phi": 80 * DEGREES,
            # "theta": -135 * DEGREES,
            "theta": 290 * DEGREES,
        },
        "paraboloid_config": {
            "r_max": 1,
            "center_point": 2*X_AXIS + 2*Y_AXIS,
        },
        # Everything is shifted by this so the figure sits centered on screen.
        "axes_center_point": -2.5*X_AXIS - 2.5*Y_AXIS - 0.75*Z_AXIS,
    }
    def construct(self):
        self.init_camera()
        self.init_axes()
        self.init_paraboloid()
        ## add dot on the surface at (x, y, f(x, y))
        x, y = 2.1, 2.9
        # x, y = 2.1, 2.1
        # x, y = 3, 2
        z = self.paraboloid.get_value_at_point([x,y])
        point = np.array([x,y,z])
        sphere = Sphere(radius=0.05, fill_color=WHITE, checkerboard_colors=False)
        sphere.shift(point)
        sphere.shift(self.axes_center_point)
        self.add(sphere)
        self.rotate_phi()
        ## add force arrow
        gradient = self.paraboloid.get_gradient(point)
        step = np.array([
            gradient[0],
            gradient[1],
            gradient[0]**2 + gradient[1]**2
        ])
        end = point - step
        # NOTE(review): the gradient-based `end` above is immediately
        # overridden — the arrow always points to the paraboloid center.
        # Kept as-is to preserve the rendered output; remove one of the two
        # once the intended target is confirmed (see TODO header in file).
        end = self.paraboloid_config["center_point"]
        force = Arrow3d(start=point, end=end)
        force.shift(self.axes_center_point)
        self.add(force)
        self.wait()
        self.rotate_phi()
        self.wait()
    def init_camera(self):
        """Apply the initial phi/theta orientation from CONFIG."""
        self.set_camera_orientation(**self.init_camera_orientation)
    def init_axes(self):
        """Create, color, position and add the 3D axes."""
        self.axes = self.get_axes()
        self.axes.x_axis.set_color(BLUE)
        self.axes.y_axis.set_color(GREEN)
        self.axes.z_axis.set_color(RED)
        # self.set_axes_labels()
        self.axes.shift(self.axes_center_point)
        self.add(self.axes)
    def init_paraboloid(self):
        """Create and add the paraboloid surface."""
        paraboloid = self.paraboloid = ParaboloidPolar(**self.paraboloid_config)
        paraboloid.shift(self.axes_center_point)
        self.add(paraboloid)
    def rotate_phi(self, duration=2, degrees=+20):
        """Rotate the camera about phi by *degrees* over *duration* seconds.

        At ~60 fps this converts the total sweep into a per-frame rate;
        the result is approximate, but close enough for an ambient move.
        """
        rate = - degrees / (60*duration)
        self.begin_ambient_camera_rotation(rate=rate, about="phi")
        # Bug fix: previously waited a hard-coded 2 s, so any other
        # `duration` argument rotated by the wrong total angle.
        self.wait(duration)
        self.stop_ambient_camera_rotation(about="phi")
|
13,251 | 8790328c1a8f86c14af27df8ad9df9121d61762a | #!/usr/bin/env python
import urllib, urllib2
import cookielib
import os
from data_parser import WSDCDataParser, DataParseError
import json
import sys
import time
import sqlite3 as lite
import re
import httplib
DEBUG=True
class Counter:
    """A minimal mutable integer counter.

    The current value is exposed as the public ``counter`` attribute
    (callers read it directly for progress reporting).
    """

    def __init__(self, initval=0):
        # Start from initval (defaults to zero).
        self.counter = initval

    def inc(self):
        """Increment the counter and return the new value."""
        self.counter = self.counter + 1
        return self.counter

    def dec(self):
        """Decrement the counter and return the new value."""
        self.counter = self.counter - 1
        return self.counter

    def val(self):
        """Return the current value without modifying it."""
        return self.counter
def getpage(url, data=None, headers={}):
    """Fetch *url* (Python 2 urllib2) and return the JSON-decoded response.

    POSTs *data* when given, otherwise GETs.  Retries forever on any
    failure, sleeping 1 s between attempts.
    NOTE(review): the bare ``except`` swallows everything (including
    KeyboardInterrupt), and ``headers={}`` is a shared mutable default —
    both are worth fixing.
    """
    if data: data = urllib.urlencode(data)
    #auth(config.login)
    #req = urllib2.urlopen(url)
    response = False
    req = urllib2.Request(url, data, headers)
    # Loop until a response object is obtained.
    while not response:
        try:
            response = urllib2.urlopen(req)
        except:
            print "sleep"
            time.sleep(1)
    return json.loads(response.read())
def act(action, param):
    """Call one named WSDC API endpoint and return its decoded JSON.

    *action* selects an endpoint template ("get_id" or "get_history");
    *param* is URL-quoted into it.  A browser User-Agent is sent because
    the service rejects the default urllib2 agent.
    """
    headers = { "User-Agent" :"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36" }
    base_URL="http://swingdancecouncil.herokuapp.com/pages/"
    actions = {
        "get_id": "dancer_search_by_fragment.json?term=%s",
        "get_history":"dancer_point_history.json?wscid=%s",
    }
    return getpage(urllib.basejoin(base_URL, actions[action] % urllib.quote(str(param))), headers=headers)
def p_sort(a,b):
    """cmp-style comparator ordering placements newest-first by start_date."""
    if a.start_date == b.start_date:
        return 0
    return -1 if a.start_date > b.start_date else 1
def prepare_table(cursor, table, temporary=False):
    """Drop and recreate *table* with the WSDC placement schema.

    When *temporary* is true the table is created as a SQLite TEMPORARY
    table, so it disappears when the connection closes.
    """
    kind = "temporary" if temporary else ""
    # Drop first so repeated syncs always start from an empty table.
    cursor.execute("drop table if exists %s;" % table)
    cursor.execute("""
    create %s table %s (
        uid integer,
        first_name varchar,
        last_name varchar,
        role varchar,
        ename varchar,
        placement varchar,
        points integer,
        start_date datetime,
        end_date datetime,
        location varchar,
        division varchar,
        tier varchar
    );
    """ % (kind, table))
def add_entries(cursor, table, uid):
    """Fetch dancer *uid*'s point history and insert all placements into *table*.

    Returns None both on parse failure and on success.
    NOTE(review): because there is no explicit success return value, the
    caller's ``if not add_entries(...)`` check cannot distinguish the two
    cases — confirm whether a truthy return was intended.
    """
    try:
        data = WSDCDataParser(act("get_history", uid))
    except DataParseError:
        return None
    # Flatten every division's placements into one list, newest first.
    p_list = []
    for d in data.divisions:
        for p in d.placements:
            p_list.append(p)
    # Python 2 only: list.sort(cmp=...) was removed in Python 3.
    p_list.sort(cmp=p_sort)
    for p in p_list:
        query = "INSERT INTO %s VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);" % table
        cursor.execute(query,
                (
                    data.wscid,
                    data.first_name,
                    data.last_name,
                    p.role,
                    p.name,
                    p.placement,
                    p.points,
                    p.start_date,
                    p.end_date,
                    p.location,
                    p.division,
                    p.tier,
                ))
def sync_db():
    """Rebuild the full dancer database from the remote WSDC service.

    Writes into a temporary sqlite file and atomically renames it over the
    live one only after the whole sync committed.
    """
    con = lite.connect('wsdc.tmp.sqlite')
    cur = con.cursor()
    prepare_table(cur, "dancers")
    # Empty search term returns every known dancer id.
    all_ids = act("get_id","")
    total = len(all_ids)
    if DEBUG:
        print total
    c = Counter()
    for e in all_ids:
        uid = e['value']
        if DEBUG:
            # Progress line every 10 dancers.
            if c.inc()%10 == 0:
                print "% 7d/%d" % (c.counter,total)
        if not add_entries(cur, 'dancers', uid): continue
    con.commit()
    con.close()
    # Swap the finished database into place.
    os.rename("wsdc.tmp.sqlite", "wsdc.sqlite");
def main():
    """Decide whether the local WSDC database is stale and resync if so.

    Strategy: if the remote id list differs from the local one, sync.
    Otherwise re-fetch only the `dcore_lim` most recently active dancers
    (the likeliest to have new points) and sync only if any of them shows
    an event newer than the newest one stored locally.
    """
    # Run relative to the script's own directory so the sqlite files resolve.
    os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
    dcore_lim = 100;
    con = lite.connect("wsdc.sqlite")
    cur = con.cursor()
    try:
        last_event = cur.execute("select ename,location,start_date from dancers order by start_date desc limit 1").fetchall()[0]
        inlocal = set([ e[0] for e in cur.execute("select distinct uid from dancers").fetchall()])
        inremote = set( int(e['value']) for e in act("get_id","") )
        # if there new ids in database, it definitely was updated
        difflen = len(inlocal.difference(inremote))
    except (lite.OperationalError, IndexError):
        # Missing or empty database: create the schema and force a sync.
        prepare_table(cur, "dancers", temporary=False)
        difflen = 1
    # assuming that usual event visitors in common all the same people,
    # there is a HUGE probability they are getting points
    # so taking 200 most-recent-points-gainers we can predict they will gain more
    # points in consequent events. And this why we shouldn't check all the database, only
    # some dancers
    if difflen == 0:
        dcore = cur.execute("select distinct uid from dancers order by start_date desc limit %s;" % dcore_lim).fetchall()
        prepare_table(cur, "dancers_tmp", temporary=False)
        c = Counter()
        for uid in dcore:
            #print "% 7d/%d" % (c.inc(),dcore_lim)
            if not add_entries(cur, 'dancers_tmp', uid[0]): continue
        con.commit()
        # Any event newer than the newest local one means the remote changed.
        new_events = cur.execute("select count(*) from dancers_tmp where start_date > '%s'" % last_event[2]).fetchall()[0][0]
        if new_events > 0:
            print "New events, syncing db"
            con.close()
            sync_db()
            print "Sync done!"
        else:
            print "No new events"
    else:
        print "Non-zero difflen, syncing db"
        con.close()
        sync_db()
        print "Sync done!"
    #sync_db()
if __name__ == '__main__':
main()
|
13,252 | 5835407981dfffda82ff228341b3d831955a4529 | # art
"""
art.py -- contains ASCII art for Blind Auction project
"""
logo = '''
___________
\ /
)_______(
|"""""""|_.-._,.---------.,_.-._
| | | | | | ''-.
| |_| |_ _| |_..-'
|_______| '-' `'---------'` '-'
)"""""""(
/_________\\
.-------------.
/_______________\\
''' |
13,253 | 8ef72d90316b4c6fbeedb052a1b4bc6c7ca67f41 | from user.ui import display_message, get_string
def menu_selection_exists(menu, selection):
    """Return whatever ``menu.is_valid`` reports for *selection* (truthy = exists)."""
    return menu.is_valid(selection)
def is_valid_bool(true,false,selection):
    """Return True when *selection* matches either accepted answer.

    *true* and *false* are the two strings the prompt accepts (e.g. "y"
    and "n"); anything else is rejected.
    """
    return selection == true or selection == false
def is_int(test_int):
    """Return True if *test_int* can be converted to an integer.

    Accepts anything ``int()`` accepts (e.g. "42", " 7 ", 3.0).
    Bug fixes: the unused local ``good_int`` is gone, and non-string,
    non-numeric input such as ``None`` (which previously raised an
    unhandled TypeError) now simply reports False.
    """
    try:
        int(test_int)
    except (TypeError, ValueError):
        return False
    return True
|
13,254 | b24d06b7fd7a43bb5f4fd4b022ee274ef2924550 | from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.sql.types import *
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Structured-streaming demo: tail CSV files dropped into a folder and
    # echo the (renamed) rows to the console until interrupted.
    sc = SparkContext("local","dfexercise")
    session = SparkSession(sc)
    # Explicit schema: all four columns read as nullable strings.
    schema = StructType([StructField("month",StringType(),True),StructField("reportedby",StringType(),True),StructField("crime",StringType(),True),StructField("category",StringType(),True)])
    fileStreamDF = session.readStream\
    .option("header","true")\
    .schema(schema)\
    .csv("C:\\Python_Scripts\\streamingdataset")
    print(" ")
    print("is the stream ready ?")
    print(fileStreamDF.isStreaming)
    # Select all columns, renaming "category" to "newcat".
    trimmedDF = fileStreamDF.select(fileStreamDF.month,fileStreamDF.reportedby,fileStreamDF.crime,fileStreamDF.category).withColumnRenamed("category","newcat")
    # Append-mode console sink; awaitTermination() blocks forever.
    query = trimmedDF.writeStream.outputMode("append")\
    .format("console")\
    .option("truncate","false")\
    .option("numRows",30)\
    .start()\
    .awaitTermination()
13,255 | b67a1e4f6cca7e91a63a6cfdd3fd50384a581b0a | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
import datetime, uuid
# Create your models here.
#Custom User
class User(AbstractUser):
    """Custom user model: UUID primary key, authenticated by e-mail address."""
    # UUID primary key instead of Django's default auto-increment integer.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    username = models.CharField(max_length=25, unique=True, blank=False)
    email = models.EmailField(_('email address'),unique=True, blank=False)
    firstname = models.CharField(max_length=100, blank=False)
    lastname = models.CharField(max_length=100,blank=False)
    # Set once when the row is created.
    signup_date = models.DateField(auto_now_add=True,)
    # Log in with e-mail; username/names are still required at creation.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username','firstname','lastname',]
    def __str__(self):
        return self.username
class UserProfile(models.Model):
    """One-to-one extension of User holding profile/finance fields."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL,on_delete=models.CASCADE, related_name='profile')
    # Running cash balance for the reimbursement system.
    cash_onhand = models.FloatField(blank=False, default=0)
    #birthday = models.DateField(default=datetime.date.today)
    #school = models.CharField(max_length=128)
    #0 hs, 1 col, 2 ya
    #level = models.PositiveSmallIntegerField()
    #address = models.CharField(max_length=255)
    #city = models.CharField(max_length=50)
# Ask.YouthLIVE
class Aylive(models.Model):
    """A question submitted through the Ask.YouthLIVE feature."""
    question = models.TextField(max_length=255,blank=False)
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Bug fix: previously returned self.questions (no such attribute),
        # raising AttributeError whenever the object was displayed.
        return self.question
# Events System
class Events(models.Model):
    """A scheduled event with a many-to-many attendee roster."""
    eventname = models.CharField(max_length=60, blank=False)
    datetime = models.DateTimeField()
    attendees = models.ManyToManyField('User',blank=True)
    def __str__(self):
        return self.eventname
# Reimbursment System
class Transaction(models.Model):
    """A reimbursable expense filed by one user, possibly shared by attendees."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # NOTE(review): OneToOneField limits each user to a single transaction
    # ever — a ForeignKey was probably intended; confirm before changing.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,on_delete=models.DO_NOTHING,related_name='cash_user')
    trans_date = models.DateField(blank=False)
    purpose = models.CharField(max_length=254,blank=False)
    #yg, fl, fellowship
    attendees = models.ManyToManyField(settings.AUTH_USER_MODEL,blank=True)
    def __str__(self):
        return self.purpose
class Reciept(models.Model):
    """A receipt attached to a Transaction (class name keeps its legacy typo —
    renaming would require a migration and touches all call sites)."""
    transaction = models.ForeignKey(Transaction, on_delete=models.PROTECT)
    acc_title = models.CharField(max_length=254, blank=False)
    # Hardcoded values
    reci_establishment = models.CharField(max_length=254, blank=False)
    reci_amt = models.FloatField(blank=False)
    reci_date = models.DateField(blank=False)
    # Official-receipt number printed on the receipt.
    reci_or = models.CharField(max_length=254)
    img = models.ImageField(blank=True)
# Camper
#class Campers(models.Model): |
13,256 | cbfefedc32653e9197b211ce4770df1df79c684b | from flask import render_template, redirect, url_for, request, abort
from models.Elector import Elector
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def login_post():
    """Validate a JSON login request and respond with the matching role id.

    Expects a JSON body with keys 'usuario' and the (mojibake-encoded)
    password key; responds with {'idRoldes': <role>} or None on failure.
    NOTE(review): `Usuario` is never imported here (only `Elector`) and
    `jsonify` is not imported from flask — both raise NameError at runtime.
    NOTE(review): username and password are matched by two *independent*
    queries, so any user's password can satisfy any username; the lookup
    should filter one row by both fields.  Passwords also appear to be
    stored/compared in plain text.
    """
    username = request.json['usuario']
    password = request.json['contraseรฑa']
    if username is None or password is None:
        print("user or pass is none")
        return "Falta llenar usuario o contraseรฑa"
    if Usuario.query.filter_by(usuario_usuario = username).first() is None or Usuario.query.filter_by(usuario_contraseรฑa = password).first() is None:
        print("user or pass is ready")
        rest = {
            'idRoldes': None
        }
        return jsonify(rest)
    user = Usuario.query.filter_by(usuario_usuario = username).first()
    passt = Usuario.query.filter_by(usuario_contraseรฑa = password).first()
    user_auth = user.idusuario
    pass_auth = passt.idusuario
    # Both rows must be the same record for the credentials to match.
    if user == passt:
        print(passt.roles_idRoles)
        freqs = {
            'idRoldes': passt.roles_idRoles
        }
        return jsonify(freqs)
    else:
        rest = {
            'idRoldes': None
        }
        return jsonify(rest)
13,257 | 731963011d9a9c76f3d911d6c7d225e0cb0b22b7 | #!/usr/bin/python3
from dancerPDF import DancerPDF
class Modern(DancerPDF):
    """A DancerPDF theme using Helvetica faces and "modern" spacing."""
    def __init__(self):
        DancerPDF.__init__(self)
        # Font families/sizes for each text role on the page.
        self.titleFontFamily = "Helvetica-Bold"
        self.titleFontSize = 32
        self.sectionFontFamily = "Helvetica-Bold"
        self.creditsFontFamily = "Helvetica"
        self.creditsFontSize = 10
        self.musicalDirectionFontFamily = "Helvetica"
        self.musicalDirectionFontSize = 40
        self.timeMarkFontFamily = "Times-Roman-Bold"
        # Vertical spacing (points) between the title block elements.
        self.creditsTopSkip = 14
        self.creditLineTopSkip = 2
        self.movesBlockTopSkip = 64
    #! mod to capitalize credit names?
    def titleCredits(self, performers, transcribedBy):
        """Draw the performers line and the transcriber credit, right-aligned
        under the title, advancing the running title-block height."""
        self.c.setFont(self.creditsFontFamily, self.creditsFontSize)
        #48
        self.c.drawRightString(self.rightStockAbs, self.rawToAbsY(self._titleHeightRaw + self.creditsTopSkip), performers)
        self._titleHeightRaw += self.creditsTopSkip + self.creditsFontSize
        #68
        self.c.drawRightString(self.rightStockAbs, self.rawToAbsY(self._titleHeightRaw + self.creditLineTopSkip), "Trns: " + transcribedBy.upper())
|
13,258 | f9505fbd1ab0060f9f61d3d034e0cd607d1fee57 | import requests
from twilio.rest import Client
# sudo virtualenv -p python3 sms
account_sid = 'AC94505d002df1d9467c6028dbb47c0117' # auth_sid
auth_token = '2ed09a15c3e81673508b015e3b312757' # auth_token
client = Client(account_sid, auth_token)
message_body = '11Hi there'
number_to_text = '+380970568565'
twilio_number = '+19724401750'
mediaUrl = 'http://weknowyourdreams.com/beach.html'
post_data = {
'From': twilio_number,
'To': number_to_text,
'Body': message_body,
'MediaUrl': ''
}
"""
Create / Send --- POST METHOD
"""
message = client.messages.create(
to=number_to_text,
from_=twilio_number,
body=message_body)
print(message.sid)
print(message.media_list.list())
message_data = client.messages.get(sid='MM84e7ab9fd6af47a6a7e4012703ba317c')
print(message_data)
print(dir(message_data))
image_list = [i.uri for i in message_data.media_list.list()]
print(image_list)
"""
Optional
status_callback = website
message = client.messages.create(
to = number_to_text,
from_ = twilio_number,
body = message_body,
media_url = media_url,
status_callback = ''
)
"""
url = 'https://api.twilio.com/2010-04-01/Accounts'
message_url = url + '/' + user + '/Messages.json'
def xml_pretty(xml_string):
    """Parse *xml_string* and print an indented, human-readable version."""
    from xml.dom import minidom
    print(minidom.parseString(xml_string).toprettyxml())
auth_cred = (user, password)
r = requests.get(url, auth=auth)
r2 = requests.post(message_url, date=post_data, auth=auth_cred)
print(r.status_code)
xml_pretty(r.text)
message_url_id = message_url + '/AC94505d002df1d9467c6028dbb47c0117'
get_r = requests.get(message_url_id, auth=auth_cred)
print(r.status_code)
xml_pretty(get_r.text)
get_r.text()
|
13,259 | 3e2649fb139ab0037fc9c7ab2f0d64690fb5ae8e | import keras
import tensorflow
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import SGD
def load_data_set():
    """Load MNIST, normalize it, build a small CNN, and print its summary.

    The function only constructs and compiles the model; it never trains
    or evaluates it.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Bug fix: both dimensions must come from the SAME sample's shape;
    # the old `x_train[1].shape[0]` only worked because MNIST is square.
    img_rows = x_train[0].shape[0]
    img_cols = x_train[0].shape[1]
    # Add the trailing channels axis expected by Conv2D.
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # Scale pixel values to [0, 1].
    x_train /= 255
    # Bug fix: the test set was never scaled — `x_train /= 255` appeared
    # twice, leaving x_test in [0, 255] while the model saw [0, 1] inputs.
    x_test /= 255
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    num_classes = y_test.shape[1]
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=SGD(0.01), metrics=['accuracy'])
    print(model.summary())
|
13,260 | 3c9b605ba29dfcf9343be7d5af9b5e4ba154d742 | import six
import time
from datetime import datetime, timedelta
def _local_date(string):
    """Parse an RFC-822-style timestamp and shift it into local time.

    Only the first 25 characters are parsed, so the trailing timezone name
    is ignored.  NOTE(review): the fixed +6 h suggests the server reports
    times at GMT-6 — confirm against the Toodledo API before relying on it.
    """
    dt = datetime.strptime(string[0:25], '%a, %d %b %Y %H:%M:%S')
    return dt + timedelta(hours=6) + timedelta(seconds=_local_time_offset())
def _local_time_offset():
"""Return offset of local zone from GMT"""
if time.localtime().tm_isdst and time.daylight:
return -time.altzone
else:
return -time.timezone
def _date(string):
    # NOTE(review): unfinished — `datetime.st` is not a valid attribute, so
    # calling this raises AttributeError.  It is not referenced by _typemap;
    # probably meant to be datetime.strptime(string, <format>).
    return datetime.st
def _boolstr(string):
return bool(int(string))
def flatten(x):
    """Return a flat list of the leaves of an arbitrarily nested iterable.

    Strings and bytes are treated as leaves, not iterated character by
    character.  Bug fix: the original tested ``isinstance(el, basestring)``,
    a Python 2-only name that raises NameError on Python 3.
    """
    result = []
    if not hasattr(x, "__iter__"):
        result.append(x)
    else:
        for el in x:
            if hasattr(el, "__iter__") and not isinstance(el, (str, bytes)):
                result.extend(flatten(el))
            else:
                result.append(el)
    return result
class ToodledoData(object):
    """Typed wrapper around one XML node returned by the Toodledo API.

    The node's tag selects a converter table in ``_typemap``; every child
    element and every attribute is run through its converter and stored as
    an instance attribute, giving dict-like and attribute access.
    """
    # tag -> {field name -> converter callable} for every node kind the
    # API can return.
    _typemap = {
        'server': {
            'unixtime': int,
            'date': _local_date,
            'tokenexpires': float
        },
        'folder': {
            'id': int,
            'name': str,
            'archived': _boolstr,
            'private': _boolstr,
            'order': int
        },
        'context': {
            'id': int,
            'name': str,
            'def': _boolstr,
        },
        'goal': {
            'id': int,
            'name': str,
            'note': str,
            'level': int,
            'contributes': int,
            'archived': _boolstr
        },
        'location': {
            'id': int,
            'name': str,
            'description': str,
            'lat': float,
            'lon': float
        },
        'account': {
            'userid': str,
            'alias': str,
            'pro': _boolstr,
            'dateformat': int,
            'timezone': int,
            'hidemonths': int,
            'hotlistpriority': int,
            'hotlistduedate': int,
            'hotliststar': _boolstr,
            'hotliststatus': _boolstr,
            'showtabnums': _boolstr,
            'lastedit_folder': str,
            'lastedit_context': str,
            'lastedit_goal': str,
            'lastedit_location': str,
            'lastedit_task': str,
            'lastdelete_task': str,
            'lastedit_notebook': str,
            'lastdelete_notebook': str,
            'lastaddedit': str,
            'lastdelete': str,
            'lastfolderedit': str,
            'lastcontextedit': str,
            'lastgoaledit': str,
            'lastnotebookedit': str,
        },
        'task': {
            'added': str,
            'children': int,
            'completed': int,
            'context': str,
            'duedate': int,
            'duedatemod': str,
            'duetime': int,
            'folder': int,
            'goal': str,
            'id': int,
            'length': int,
            'location': int,
            'meta': str,
            'modified': int,
            'note': six.u,
            'order': str,
            'parent': int,
            'priority': int,
            'remind': str,
            'reminder': int,
            'rep_advanced': str,
            'repeat': str,
            'repeatfrom': int,
            'stamp': str,
            'star': _boolstr,
            'startdate': str,
            'starttime': str,
            'status': int,
            'tag': str,
            'timer': int,
            'timeron': str,
            'title': six.u,
        },
        'notebook': {
            'id': int,
            'folder': int,
            'added': str,
            'modified': str,
            'title': six.u,
            'text': six.u,
            'private': _boolstr,
            'stamp': str,
        },
    }
    def __init__(self, node=None):
        """Populate attributes from *node*'s children, attributes and text."""
        typemap = ToodledoData._typemap[node.tag]
        # Bug fix: Element.getchildren() was removed in Python 3.9;
        # iterating the element directly yields the same children.
        for elem in node:
            self.__dict__[elem.tag] = typemap[elem.tag](elem.text)
        for a in node.attrib:
            self.__dict__[a] = typemap[a](node.attrib[a])
        # Non-whitespace node text doubles as the title.
        if node.text and not node.text.isspace():
            self.title = node.text
    def __str__(self):
        """Pretty multi-line dump, hiding empty fields and rendering
        plausible timestamps as datetimes."""
        results = []
        for k, v in six.iteritems(self.__dict__):
            if v is None or v == 0 or v == "None" or v == "0":
                continue
            if k != "id":
                try:
                    v = datetime.fromtimestamp(int(v))
                except (TypeError, ValueError):
                    pass
            results.append("%s: %s" % (k, v))
        return '\n'.join(results)
    def __repr__(self):
        return str(self.__dict__)
    def keys(self):
        return self.__dict__.keys()
    def values(self):
        return self.__dict__.values()
    def __getitem__(self, key):
        return self.__dict__[key]
    def __contains__(self, key):
        # Bug fix: a missing key used to raise KeyError from the direct
        # subscript; `in` should simply report False.  (Truthiness of the
        # stored value is preserved: empty/zero fields still test False.)
        return bool(self.__dict__.get(key))
    def __iter__(self):
        return six.iteritems(self.__dict__)
|
13,261 | d1fbcf5b153c4b7d85160a3950f350bec5479571 | import torch
class MSE(torch.nn.Module):
    """Wraps *model* and adds a mean-pairwise-MSE "domain alignment" loss.

    ``forward`` simply delegates to the wrapped model; ``forward_da`` runs
    three inputs through the model's layers and averages the three
    pairwise mean-squared differences of their outputs.
    """

    def __init__(self, model):
        super(MSE, self).__init__()
        self.model = model
        # Re-expose the model's layers as one Sequential so the three
        # domain-adaptation inputs share the same weights.
        self.hidden_features = torch.nn.Sequential(*model.children())

    def forward(self, x):
        return self.model(x)

    def forward_da(self, a, b, c):
        feat_a = self.hidden_features(a)
        feat_b = self.hidden_features(b)
        feat_c = self.hidden_features(c)
        pairwise_sum = (
            self.mse(feat_a, feat_b)
            + self.mse(feat_b, feat_c)
            + self.mse(feat_c, feat_a)
        )
        return pairwise_sum / 3

    @staticmethod
    def mse(x, y):
        """Mean squared error between two tensors."""
        return (x - y).pow(2).mean()
|
13,262 | b17a2c7ea3faf256274f0f96603642801c1f7931 | import glob
def doTheThing(lang):
    """Concatenate all localization .txt files for *lang* into one addon file.

    Reads every .txt under pf_localizations/<lang> (except test_output.txt
    and template.txt) and writes them, wrapped in the Valve KeyValues
    "lang"/"Tokens" structure, to resource/addon_<lang>.txt.
    """
    base = 'D:\\SteamLibrary\\steamapps\\common\\dota 2 beta\\game\\dota_addons\\pathfinder\\'
    path = base + 'pf_localizations\\' + lang
    out_path = base + 'resource\\addon_' + lang.lower() + '.txt'
    # Helper files that must not be copied into the addon output.
    skipped = {path + '\\' + "test_output.txt", path + '\\' + "template.txt"}
    # Bug fix: the original ended with `output.close` (no parentheses), so
    # the file was never explicitly closed; `with` guarantees it now.
    with open(out_path, 'w+', encoding='utf-8') as output:
        write_opener(output)
        for name in glob.glob(path + "\\*.txt"):
            if name not in skipped:
                write_meat(name, output)
        write_closer(output)
def write_meat(input, output):
    """Append *input*'s lines to *output*, each prefixed with a tab.

    Lines beginning with '>' are turned into '//' comments so the game
    engine ignores them.
    """
    with open(input, 'r', encoding='utf-8') as src:
        for raw in src:
            text = '//' + raw if raw.startswith('>') else raw
            output.write('\t' + text)
def write_opener(file):
    """Write the opening KeyValues boilerplate for a Dota 2 addon lang file."""
    file.write('"lang"\n{\n\t"Language"\t"English"\n\t"Tokens"\n\t{\n')
def write_closer(file):
    """Write the closing braces of the KeyValues structure."""
    file.write('\n\t}\n}')
doTheThing('english')
doTheThing('schinese')
doTheThing('russian')
|
13,263 | ea17f96b9805e8044afe8125636c63d0a60a1230 | from dataclasses import dataclass
from enum import Enum
class TokenType(Enum):
    """Kinds of lexical tokens produced by the expression tokenizer."""

    NUMBER = 0
    PLUS = 1
    MINUS = 2
    MULTIPLY = 3
    DIVIDE = 4
    LPAREN = 5
    RPAREN = 6
    LETTER = 7
    POWER = 8
    SIN = 9
    COS = 10
    EXP = 11
    SQRT = 12
    LBRACKET = 13
    RBRACKET = 14


@dataclass
class Token:
    """A lexed token: a TokenType plus an optional literal value."""

    type: TokenType
    value: any = None

    def __repr__(self):
        # "NUMBER:3" when a value is attached, otherwise just "NUMBER".
        suffix = "" if self.value is None else f":{self.value}"
        return self.type.name + suffix
|
13,264 | 67562db745c4aef34c300f41f06e1ae187bbce96 | #!/usr/bin/env python
# encoding: utf-8
'''
@author: leafcool
@license: (C) Copyright 2013-2020, Node Supply Chain Manager Corporation Limited.
@contact: leafcool@live.com
@software: leafcool
@file: test.py
@time: 2021/6/10 16:42
@desc:
'''
# code is far away from bugs with the god animal protecting
"""
I love animals. They taste delicious.
โโ โโ
โโโปโโโโโปโ
โ โ โ
โ โณโ โโณ โ
โ โป โ
โโโ โโโ
โ โโโโโ
โ ็ฅๅ
ฝไฟไฝ โฃโ
โใๆฐธๆ BUG๏ผ โโ
โโโโโโณโโโ
โโซโซ โโซโซ
โโปโ โโปโ
"""
import random
"""ๅปบ็ซไธไธช้ๆบๅญๅ
ธ็จๆฅ้ช่ฏไปฃ็ """
dict1={} #ไฝฟ็จๅญๅ
ธK,V่กจ็คบๆ้ๅๅ
ถๆๅๆฐๅผ
n=random.randint(1,500) #่ฎพๅฎๆฐ็ปไธไธช้ๆบ้ฟๅบฆ
for i in range(n):
dict1[i]=random.randint(0,100) #ๆฏไธช้ฎๅผๅฏน่ต้ๆบๅผ
# dict1={0:3,1:2,2:1,3:0,4:6}
# n=len(dict1)
"""่ฎก็ฎ็งปๅจไธๆญฅๅ่ฎก็ฎไธไธๆญฅๆ้็งปๅจๆๅคง่ๅดๅบ้ดm"""
def function(a,b):
    """Return the farthest positive overshoot reachable from indices a..b.

    For each index i in [a, b], dict1[i] is that slot's jump length, so
    dict1[i] + i - b is how far past b the jump would land.  The list is
    seeded with 0, so a return value < 1 means no progress beyond b.
    """
    arr=[0]
    for i in range(a,b+1):
        # if dict1[i]+i-b>0:
        arr.append(dict1[i]+i-b)
    print(arr)
    return max(arr) # return max positive shift
"""้ๅฝๅคๆญๅฝๆฐ"""
def judgefunction(s1,s2):
    """Recursively test whether the last dict slot is reachable (jump game).

    [s1, s2] is the window of indices reachable so far; each step extends
    the window by the best jump found, stopping at the end or when stuck.
    """
    # Window already covers the final slot?
    if s2>n-2:
        print("successfully go to the end of array")
    else:
        m=function(s1,s2)
        # m < 1: no positive shift available from this window — stuck.
        if m<1:
            print("can't go to the array[n-1]")
        # m >= 1: extend the window and recurse.
        # NOTE(review): function() is re-evaluated after s1 = s2, so the
        # second call scans a different range than the one that produced
        # m — confirm this is intentional and not a duplicated call.
        else:
            s1=s2
            s2+=function(s1,s2)
            return judgefunction(s1,s2)
s1=0
s2=dict1[0]
judgefunction(s1,s2)
|
13,265 | d2ebeb15c53461b827ec559caf70f62d113a2254 | from .shareable_content_xblock import ShareContentXBlock
|
13,266 | 22ee2ceecf1abff035d95d58f2e528b223f49c8a | #!/usr/bin/env python
# -*- coding: utf-8 -*
import os
from codecs import open
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
desc = (
'A CLI tool to update your requirements.txt file package versions '
'to be locked/fixed to the latest versions available on PyPI.'
)
with open('README.md', 'r', 'utf-8') as f:
readme = f.read()
setup(
name='lock-requirements',
version='0.1.2.dev0',
packages=find_packages('src', exclude=('tests',)),
package_dir={'': 'src'},
description=desc,
include_package_data=True,
long_description=readme,
long_description_content_type='text/markdown',
zip_safe=False,
author='Richard O\'Dwyer',
author_email='richard@richard.do',
license='Apache 2.0',
install_requires=install_requires,
entry_points={
'console_scripts': [
'lock = lock.cli:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
|
13,267 | cd1fd3f312be93bf111bf1aacbdb8fded807f6f6 | import requests
import zipfile
import os
data_url = 'https://www.kaggle.com/c/dstl-satellite-imagery-feature-detection/download/'
data_path = "/Volumes/external/data/"
files = {
"sample_submission.csv.zip" : "sample_submission.csv.zip",
"grid_sizes.csv.zip" : "grid_sizes.csv.zip",
#sixteen_band is 7 GB
"sixteen_band.zip" : "sixteen_band.zip",
#three_band is 13 GB
"three_band.zip" : "three_band.zip",
"train_geojson_v3.zip" : "train_geojson_v3.zip",
"train_wkt_v4.csv.zip" : "train_wkt_v4.csv.zip"
}
delete_folders = {"__MACOSX"}
# Kaggle Username and Password
kaggle_info = {'UserName': "jakewalker", 'Password': "kaggle_Do_nO_haRm_please"}
if not os.path.exists(data_path):
os.makedirs(data_path)
# Download and unzip each Kaggle competition archive that is not already
# present locally (either as a .zip or as its extracted name).
for entry in files.keys():
    if (not os.path.exists((data_path + files[entry])[0:len(data_path + files[entry]) - 4])) and (not os.path.exists((data_path + files[entry]))):
        # First GET gets redirected to the login page if not authenticated.
        request_url = data_url + entry
        r = requests.get(request_url)
        print("downloading " + entry + "...")
        # Login to Kaggle and retrieve the data.
        r = requests.post(r.url, data = kaggle_info)
        # Bug fix: iter_content yields bytes, so the destination must be
        # opened in binary mode ('wb', not 'w'); `with` also guarantees the
        # handle is closed before the archive is unzipped below.
        with open(data_path + files[entry], 'wb') as f:
            for chunk in r.iter_content(chunk_size = 512 * 1024): # Reads 512KB at a time into memory
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
    else:
        print("already downloaded " + data_path + files[entry])
    if os.path.exists((data_path + files[entry])):
        # Extract alongside the zip; the zip itself is intentionally kept.
        print("unzipping...")
        with zipfile.ZipFile(data_path + files[entry]) as myzip:
            myzip.extractall(path=data_path)
        #print("deleting zip file...")
        #os.remove(data_path + files[entry])
    else:
        print("already unzipped")
# Clean up junk folders (e.g. __MACOSX) left behind by the archives.
for folder in delete_folders:
    print("removing " + folder + "...")
    try:
        os.rmdir(data_path + folder)
    except (OSError):
        print("failed to delete")
print("done")
|
13,268 | d60d8ce4dcea7c72bcc2bebe048e043cd4a0286d | def decorator(func):
def decorated(text):
new_args = '<b>' + text + '</b>'
return func(new_args)
return decorated
def hello(text):
    """Print *text* (already decorated by the wrapper) to stdout."""
    print(text)
c = decorator(hello)
print(c)
c('you')
|
13,269 | 4446cc0ec293d53c197e65f5551f20f461c8f05b | import random
import pygame
from spaceshooter import BLACK, WIDTH, HEIGHT, POWERUP_TIME
from spaceshooter.asset import player_img, explosion_anime, powerup_images, missile_img, bullet_img, meteor_images
from spaceshooter.sound import shooting_sound, missile_sound
class Explosion(pygame.sprite.Sprite):
    """Frame-by-frame explosion animation that removes itself when done."""
    def __init__(self, center, size):
        pygame.sprite.Sprite.__init__(self)
        # `size` is a key into the shared explosion_anime frame table.
        self.size = size
        self.image = explosion_anime[self.size][0]
        self.rect = self.image.get_rect()
        self.rect.center = center
        self.frame = 0
        self.last_update = pygame.time.get_ticks()
        # Milliseconds between animation frames.
        self.frame_rate = 75
    def update(self):
        """Advance one frame when due; kill the sprite after the last frame."""
        now = pygame.time.get_ticks()
        if now - self.last_update > self.frame_rate:
            self.last_update = now
            self.frame += 1
            if self.frame == len(explosion_anime[self.size]):
                self.kill()
            else:
                # Swap the image while keeping the explosion centered.
                center = self.rect.center
                self.image = explosion_anime[self.size][self.frame]
                self.rect = self.image.get_rect()
                self.rect.center = center
class Player(pygame.sprite.Sprite):
def __init__(self, game):
pygame.sprite.Sprite.__init__(self)
# scale the player img down
self.image = pygame.transform.scale(player_img, (50, 38))
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.radius = 20
self.rect.centerx = WIDTH / 2
self.rect.bottom = HEIGHT - 10
self.speedx = 0
self.shield = 100
self.shoot_delay = 250
self.last_shot = pygame.time.get_ticks()
self.lives = 3
self.hidden = False
self.hide_timer = pygame.time.get_ticks()
self.power = 1
self.power_time = pygame.time.get_ticks()
self.game = game
def update(self):
# time out for powerups
if self.power >= 2 and pygame.time.get_ticks() - self.power_time > POWERUP_TIME:
self.power -= 1
self.power_time = pygame.time.get_ticks()
# unhide
if self.hidden and pygame.time.get_ticks() - self.hide_timer > 1000:
self.hidden = False
self.rect.centerx = WIDTH / 2
self.rect.bottom = HEIGHT - 30
self.speedx = 0 # makes the player static in the screen by default.
# then we have to check whether there is an event hanlding being done for the arrow keys being
# pressed
# will give back a list of the keys which happen to be pressed down at that moment
keystate = pygame.key.get_pressed()
if keystate[pygame.K_LEFT]:
self.speedx = -5
elif keystate[pygame.K_RIGHT]:
self.speedx = 5
# Fire weapons by holding spacebar
if keystate[pygame.K_SPACE]:
self.shoot()
# check for the borders at the left and right
if self.rect.right > WIDTH:
self.rect.right = WIDTH
if self.rect.left < 0:
self.rect.left = 0
self.rect.x += self.speedx
def shoot(self):
# to tell the bullet where to spawn
now = pygame.time.get_ticks()
if now - self.last_shot > self.shoot_delay:
self.last_shot = now
if self.power == 1:
bullet = Bullet(self.rect.centerx, self.rect.top)
self.game.add_sprite(bullet)
self.game.add_bullet(bullet)
shooting_sound.play()
if self.power == 2:
bullet1 = Bullet(self.rect.left, self.rect.centery)
bullet2 = Bullet(self.rect.right, self.rect.centery)
self.game.add_sprite(bullet1)
self.game.add_sprite(bullet2)
self.game.add_bullet(bullet1)
self.game.add_bullet(bullet2)
shooting_sound.play()
""" MOAR POWAH """
if self.power >= 3:
bullet1 = Bullet(self.rect.left, self.rect.centery)
bullet2 = Bullet(self.rect.right, self.rect.centery)
missile1 = Missile(self.rect.centerx, self.rect.top) # Missile shoots from center of ship
self.game.add_sprite(bullet1)
self.game.add_sprite(bullet2)
self.game.add_sprite(missile1)
self.game.add_bullet(bullet1)
self.game.add_bullet(bullet2)
self.game.add_bullet(missile1)
shooting_sound.play()
missile_sound.play()
def powerup(self):
self.power += 1
self.power_time = pygame.time.get_ticks()
def hide(self):
self.hidden = True
self.hide_timer = pygame.time.get_ticks()
self.rect.center = (WIDTH / 2, HEIGHT + 200)
# defines the enemies
class Mob(pygame.sprite.Sprite):
    """A meteor that falls from above with random speed, drift and spin."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # keep a pristine copy of the image; each rotation starts from it to
        # avoid accumulating resampling artefacts
        self.image_orig = random.choice(meteor_images)
        self.image_orig.set_colorkey(BLACK)
        self.image = self.image_orig.copy()
        self.rect = self.image.get_rect()
        self.radius = int(self.rect.width * .90 / 2)  # circle-collision radius
        self.rect.x = random.randrange(0, WIDTH - self.rect.width)
        self.rect.y = random.randrange(-150, -100)  # start above the visible screen
        self.speedy = random.randrange(5, 20) # for randomizing the speed of the Mob
        # randomize the movements a little more
        self.speedx = random.randrange(-3, 3)
        # adding rotation to the mob element
        self.rotation = 0
        self.rotation_speed = random.randrange(-8, 8)  # degrees per rotation step
        self.last_update = pygame.time.get_ticks() # time when the rotation has to happen
    def rotate(self):
        # rotate at most every 50 ms, re-centering the new bounding rect
        time_now = pygame.time.get_ticks()
        if time_now - self.last_update > 50: # in milliseconds
            self.last_update = time_now
            self.rotation = (self.rotation + self.rotation_speed) % 360
            new_image = pygame.transform.rotate(self.image_orig, self.rotation)
            old_center = self.rect.center
            self.image = new_image
            self.rect = self.image.get_rect()
            self.rect.center = old_center
    def update(self):
        self.rotate()
        self.rect.x += self.speedx
        self.rect.y += self.speedy
        # now what if the mob element goes out of the screen: respawn at the top
        if (self.rect.top > HEIGHT + 10) or (self.rect.left < -25) or (self.rect.right > WIDTH + 20):
            self.rect.x = random.randrange(0, WIDTH - self.rect.width)
            self.rect.y = random.randrange(-100, -40)
            self.speedy = random.randrange(1, 8) # for randomizing the speed of the Mob
# defines the sprite for Powerups
class Pow(pygame.sprite.Sprite):
    """A power-up ('shield' or 'gun') that drops from a given position."""
    def __init__(self, center):
        pygame.sprite.Sprite.__init__(self)
        self.type = random.choice(['shield', 'gun'])
        self.image = powerup_images[self.type]
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        # spawn the power-up at the supplied position
        self.rect.center = center
        self.speedy = 2  # slow downward drift
    def update(self):
        """Drift down the screen."""
        self.rect.y += self.speedy
        # kill the sprite once it has fallen below the bottom border
        if self.rect.top > HEIGHT:
            self.kill()
# defines the sprite for bullets
class Bullet(pygame.sprite.Sprite):
    """A bullet fired upward, spawned with its bottom at (x, y)."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = bullet_img
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        # place the bullet according to the current position of the player
        self.rect.bottom = y
        self.rect.centerx = x
        self.speedy = -10  # negative y speed = moving up
    def update(self):
        """Move the bullet upward."""
        self.rect.y += self.speedy
        # kill the sprite after it moves over the top border
        if self.rect.bottom < 0:
            self.kill()
# now we need a way to shoot
# lets bind it to "spacebar".
# adding an event for it in Game loop
# FIRE ZE MISSILES
class Missile(pygame.sprite.Sprite):
    """A missile fired upward, spawned with its bottom at (x, y)."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = missile_img
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.bottom = y
        self.rect.centerx = x
        self.speedy = -10  # negative y speed = moving up
    def update(self):
        """Move the missile upward; remove it once fully above the top edge."""
        self.rect.y += self.speedy
        if self.rect.bottom < 0:
            self.kill()
|
13,270 | b23535b97fa10828841c44d1561a47c7b1660834 | import os, getpass
if getpass.getuser() == "samuel":
ironclust_path = "/home/samuel/Documents/SpikeInterface/ironclust/"
os.environ["IRONCLUST_PATH"] = ironclust_path
import unittest
import pytest
from spikeinterface.sorters import IronClustSorter
from spikeinterface.sorters.tests.common_tests import SorterCommonTestSuite
# This run several tests
@pytest.mark.skipif(not IronClustSorter.is_installed(), reason="ironclust not installed")
class IronclustCommonTestSuite(SorterCommonTestSuite, unittest.TestCase):
    """Runs the shared sorter test suite against IronClust (skipped if absent)."""
    SorterClass = IronClustSorter
if __name__ == "__main__":
test = IronclustCommonTestSuite()
test.setUp()
test.test_with_run()
|
13,271 | 99f97c93b528eb136f9c029e8d72e1e85baec09d | # Build a web crawler
import string
index = {}
graph = {}
def get_next_target(page):
    """Return (url, end_position) for the next '<a href=' link in 'page'.

    Returns (None, 0) when no further link exists; end_position is the
    index of the closing quote of the found url.
    """
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    quote_open = page.find('"', anchor)
    quote_close = page.find('"', quote_open + 1)
    return page[quote_open + 1:quote_close], quote_close
def get_all_links(page):
    """Collect every url found in 'page' into a list, in document order."""
    found = []
    url, end_pos = get_next_target(page)
    while url:
        found.append(url)
        # continue scanning after the link just consumed
        page = page[end_pos:]
        url, end_pos = get_next_target(page)
    return found
def get_page(page):
    """Return the content of 'page' as a string.

    Network I/O. Uses urllib2, which exists only in Python 2 (the rest of
    this module also uses print statements).
    """
    import urllib2
    source = urllib2.urlopen(page)
    return source.read()
def union(p, q):
    """Extend p in place with the elements of q it does not already contain."""
    p.extend(item for item in q if item not in p)
def add_to_index(index, keyword, url):
    """Record (in place) that 'keyword' appears at 'url' in the index."""
    urls = index.get(keyword)
    if urls is None:
        index[keyword] = [url]
    elif url not in urls:
        urls.append(url)
def lookup(index, keyword):
    """Return the list of urls indexed under 'keyword', or None if absent."""
    return index.get(keyword)
def split_string(source,splitlist):
    """Better split function to extract keywords from a page.

    Replaces every character of 'splitlist' with a space, then splits the
    result on whitespace. Uses string.maketrans/string.translate, which
    only exist in Python 2 (consistent with the rest of this module).
    """
    spaces = " " * len(splitlist)
    transtable = string.maketrans(splitlist, spaces)
    source = string.translate(source, transtable)
    return source.split()
def add_page_to_index(index,url,content):
    """Index 'url' under every keyword extracted from 'content'.

    Keywords are produced by split_string using common HTML punctuation
    as separators.
    """
    keywords = split_string(content,".,-!<>/=\"")
    for keyword in keywords:
        add_to_index(index,keyword,url)
def rank_pages(graph):
    """Compute damped PageRank scores for every page in 'graph'.

    'graph' maps each url to the list of urls it links to. Ranks start
    uniform and are refined for a fixed number of iterations: each page
    receives the base (1 - d) / N plus d times the rank flowing in from
    every page that links to it. Returns a dict url -> rank.

    BUG FIX: the original looped `for i in range(0)` (never iterated),
    referenced an undefined name `d`, and never added the inbound-link
    contribution, so every input returned the uniform initial ranks.
    """
    damping_factor = 0.8
    loops = 10
    npages = len(graph)
    if npages == 0:
        return {}
    ranks = {}
    for page in graph:
        ranks[page] = 1.0 / npages
    for _ in range(loops):
        newranks = {}
        for page in graph:
            newrank = (1 - damping_factor) / npages
            # rank contributed by every page linking to 'page'
            for node in graph:
                if page in graph[node]:
                    newrank += damping_factor * (ranks[node] / len(graph[node]))
            newranks[page] = newrank
        ranks = newranks
    return ranks
def web_crawler(seed, max_depth):
    """Breadth-first crawl starting at 'seed', at most 'max_depth' levels deep.

    Fetches each page over the network, populates the module-level 'index'
    (keyword -> urls) and 'graph' (url -> outlinks), and returns both.
    """
    to_crawl = [seed]
    crawled = []
    next_depth = []  # links discovered for the next BFS depth level
    current_depth = 0
    while to_crawl and current_depth <= max_depth:
        link = to_crawl.pop(0)
        if link not in crawled:
            content = get_page(link)
            add_page_to_index(index, link, content)
            outlinks = get_all_links(content)
            graph[link] = outlinks
            union(next_depth, outlinks)
            crawled.append(link)
        if not to_crawl:
            # finished this depth level; descend one level
            to_crawl, next_depth = next_depth, []
            current_depth += 1
    return index, graph
seed = "https://www.udacity.com/cs101x/index.html"
'''page = get_page("http://xkcd.com/353/")
page = get_page("https://www.udacity.com/cs101x/index.html")'''
print web_crawler(seed,3)[1]
print lookup(index,"a") |
13,272 | 52501f88894bf20e0f51f3a3ff0bc4c43dee6b4d | class Solution:
def buddyStrings(self, A: str, B: str) -> bool:
if len(A) != len(B):
return False
diff = {}
for i in range(len(A)):
if A[i] != B[i]:
diff[i] = A[i]
if len(diff) == 2:
i,j = diff.keys()
A = list(A)
A[i],A[j] = A[j],A[i]
return "".join(A) == B
elif len(diff) == 0 and A == B and len(A) > len(set(A)):
return True
else:
return False
|
13,273 | 8f506925cb913815b8112b01d66f51a898e74079 | import logging
import communication
logger = logging.getLogger(__name__)
class Client:
    """Holds a communication module and a (not yet started) worker process."""
    def __init__(self, communication_module):
        self._communication_module = communication_module
        self._process = None
    def create_commincation_moduel(self, node_decoder):
        # NOTE(review): method name is misspelled ('create_communication_module')
        # and server_adr, server_port and ssl_dict are undefined in this scope --
        # presumably meant to be parameters or module-level config; confirm
        # before use. Renaming/fixing would change the public interface.
        communication_module = communication.CommunicationModule(server_adr, server_port, node_decoder, ssl_dict)
        return Client(communication_module)
|
13,274 | fe68ca05a50d3f7daf96b837de78784080dfe5e8 | import numpy as np
print(np.linalg.det(np.array([[31, 22], [22, 31]])))
|
13,275 | dddf1c0f2113e3000cbf58caa4ce3756e4a03f92 | from django.db import models
from datetime import datetime
# Create your models here.
class topProjects(models.Model):
    """A featured project card: icon, title, short description and link.

    NOTE(review): Django convention is PascalCase (TopProjects); renaming
    would require a migration, so it is only flagged here.
    """
    icon = models.CharField(max_length=500, blank=True)
    title = models.CharField(max_length=50)
    description = models.CharField(max_length=200)
    site = models.URLField(max_length=300)
class featurette(models.Model):
    """A homepage featurette: heading, sub-text, body text, image and link.

    All fields are optional (blank=True), so an empty featurette is valid.
    """
    # featurette 1
    f1_title = models.CharField(max_length=200, blank=True)
    f1_stxt = models.CharField(max_length=300, blank=True)
    f1_desc = models.TextField(blank=True)
    f1_img = models.CharField(max_length=500, blank=True)
    site = models.URLField(max_length=300, blank=True)
    name = models.CharField(max_length=50, blank=True)
class contact(models.Model):
    """A message submitted through the site's contact form."""
    name = models.CharField(max_length=50)
    email = models.EmailField()
    # NOTE(review): datetime.now is naive local time; timezone.now is the
    # usual Django choice -- changing it would require a migration
    date = models.DateTimeField(default=datetime.now)
    message = models.CharField(
        max_length=2000,
        help_text='Write your message here...'
    )
    def __str__(self):
        return f'{self.name}'
    class Meta:
        # newest messages first
        ordering = ['-date',]
|
13,276 | af789b870a8058bb62960f82cde3f1bcaa96ff49 | # coding: utf-8
from __future__ import absolute_import
from swagger_server.models.evidence import Evidence
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestEvidenceController(BaseTestCase):
    """ EvidenceController integration test stubs """
    def test_get_evidence(self):
        """
        Test case for get_evidence

        GETs /api/evidence/{statementId} with keyword and paging query
        parameters and asserts an HTTP 200 response.
        """
        query_string = [('keywords', 'keywords_example'),
                        ('pageNumber', 56),
                        ('pageSize', 56)]
        response = self.client.open('/api/evidence/{statementId}'.format(statementId='statementId_example'),
                                    method='GET',
                                    query_string=query_string)
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
|
13,277 | b75b68b1ae80c0d8a4828c78373c6327f00f2104 | from __future__ import division
import six
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.convolutional import Conv2D,MaxPooling2D,AveragePooling2D
from keras.layers import Activation
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
from variant_res_module import resnet_convolution_block, resnet_identity_block, xresneXt_convolution_block, xresneXt_identity_block,\
dresneXt_convolution_block, dresneXt_identity_block
import os
ROOT_DIR = os.getcwd()
if ROOT_DIR.endswith('keras_model'):
ROOT_DIR = os.path.dirname(ROOT_DIR)
def _bn_relu(input):
    """Helper to build a BN -> relu block.

    Applies batch normalization followed by a ReLU activation.
    """
    norm = BatchNormalization()(input)
    return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu block.

    Required keyword args: filters, kernel_size. Optional (with defaults):
    strides, kernel_initializer, padding, kernel_regularizer. Returns a
    closure that applies the block to an input tensor.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
    def f(input):
        conv = Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(input)
        return _bn_relu(conv)
    return f
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv block.

    This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
    (pre-activation). Same keyword arguments as _conv_bn_relu; returns a
    closure that applies the block to an input tensor.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
    def f(input):
        activation = _bn_relu(input)
        return Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(activation)
    return f
def _residual_block(input, id_block, conv_block, mid_f, output_f, repetitions, stage, is_first_layer=False):
    """Builds a residual block with repeating bottleneck blocks.

    The first unit of the stage uses conv_block (projection shortcut); the
    remaining units use id_block (identity shortcut). For the very first
    stage the stride is forced to (1, 1) since the stem has already
    downsampled the input.
    """
    for i in range(repetitions):
        if i == 0 and is_first_layer is True:
            input = conv_block(mid_f, output_f, stage, i, input, stride=(1, 1))
        elif i == 0 and is_first_layer is False:
            input = conv_block(mid_f, output_f, stage, i, input)
        else:
            input = id_block(mid_f, output_f, stage, i, input)
    return input
def _get_block(identifier):
    """Resolve a block given as a name (string) to the function in globals().

    Non-string identifiers are assumed to already be callables and are
    returned unchanged. Raises ValueError for an unknown name.
    """
    if isinstance(identifier, six.string_types):
        res = globals().get(identifier)
        if not res:
            raise ValueError('Invalid {}'.format(identifier))
        return res
    return identifier
class ResnetBuilder(object):
    @staticmethod
    def build(input_shape, num_outputs, repetitions, mid_f = [64, 128, 256, 512], output_f=[256, 512, 1024, 2048], block_fn='resnet'):
        """Builds a custom ResNet like architecture.
        Args:
            input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
            num_outputs: The number of outputs at final softmax layer
            repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters are doubled and the input size is halved
            mid_f, output_f: per-stage bottleneck and output filter counts
            block_fn: 'xresnet', 'dresnet', or anything else for plain resnet
                blocks; selects the identity/convolution block implementations.
        Returns:
            The keras `Model`.
        """
        # NOTE(review): mid_f/output_f are mutable default arguments shared
        # across calls; harmless while only read, but fragile.
        if len(input_shape) != 3:
            raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
        # Permute dimension order if necessary
        #if K.image_dim_ordering() == 'tf':
        #    input_shape = (input_shape[1], input_shape[2], input_shape[0])
        # Load function from str if needed.
        if block_fn == 'xresnet':
            id_block = xresneXt_identity_block
            conv_block = xresneXt_convolution_block
        elif block_fn == 'dresnet':
            id_block = dresneXt_identity_block
            conv_block = dresneXt_convolution_block
        else:
            id_block = resnet_identity_block
            conv_block = resnet_convolution_block
        print('the input shape: {}'.format(input_shape))
        input = Input(shape=input_shape)
        # initial building block: 7x7 stride-2 stem convolution
        conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
        #pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
        block = conv1
        #filters = 64
        print('input before reisdual', block.shape)
        # stack one residual stage per entry in 'repetitions'
        for i, r in enumerate(repetitions):
            if i == 0:
                block = _residual_block(block, id_block=id_block, conv_block=conv_block, stage=i,
                                        mid_f=mid_f[i], output_f=output_f[i], repetitions=r, is_first_layer=True)
            else:
                block = _residual_block(block, id_block=id_block, conv_block=conv_block, stage=i,
                                        mid_f=mid_f[i], output_f=output_f[i], repetitions=r, is_first_layer=False)
        # Last activation
        block = _bn_relu(block)
        # Classifier block: global average pool -> dense softmax
        block_shape = K.int_shape(block)
        pool2 = AveragePooling2D(pool_size=(block_shape[1], block_shape[2]),
                                 strides=(1, 1))(block)
        flatten1 = Flatten()(pool2)
        dense = Dense(num_outputs)(flatten1)
        dense = Activation('softmax', name='Softmax')(dense)
        model = Model(inputs=input, outputs=dense)
        return model
    @staticmethod
    def build_resnet_18(input_shape, num_outputs, block_fun):
        # ResNet-18 layout: 2 units per stage
        return ResnetBuilder.build(input_shape, num_outputs, [2, 2, 2, 2], block_fn=block_fun)
    @staticmethod
    def build_resnet_34(input_shape, num_outputs, block_fun):
        return ResnetBuilder.build(input_shape, num_outputs, [3, 4, 6, 3], block_fn=block_fun)
    @staticmethod
    def build_resnet_50(input_shape, num_outputs, block_fun):
        # NOTE(review): 34 and 50 share [3, 4, 6, 3] here; in the reference
        # architectures they differ by block type -- confirm block_fun handles it
        return ResnetBuilder.build(input_shape, num_outputs, [3, 4, 6, 3], block_fn=block_fun)
    @staticmethod
    def build_resnet_101(input_shape, num_outputs, block_fun):
        return ResnetBuilder.build(input_shape, num_outputs, [3, 4, 23, 3], block_fn=block_fun)
@staticmethod
def build_resnet_152(input_shape, num_outputs, block_fun):
return ResnetBuilder.build(input_shape, num_outputs, [3, 8, 36, 3], block_fn=block_fun) |
13,278 | 051c5cfb5fe5c7eb813c007fa236c054ca07cb0c | ## It would be really helpful to know what type of music everyone likes before
## throwing this festival.
## Lucky for us we've got the data to find out!
## We should be able to tell what music people like by figuring out what music they're buying.
## Write a query that returns the BillingCity,total number of invoices
## associated with that particular genre, and the genre Name.
## Return the top 3 most popular music genres for the city
## with the highest invoice total (you found this in the previous quiz!)
# Top 3 music genres for Prague (the city with the highest invoice total),
# counted by the number of invoice lines per genre.
# BUG FIX: the GROUP BY used Invoice.BillingCountry while the SELECT and
# filter use BillingCity; grouping now matches the selected column.
QUERY = '''
SELECT Invoice.BillingCity, count(Genre.Name) , Genre.Name from Invoice
join InvoiceLine on Invoice.InvoiceId=InvoiceLine.InvoiceId
join Track on InvoiceLine.TrackId = Track.TrackId
join Genre on Track.GenreId = Genre.GenreId and Invoice.BillingCity = 'Prague'
group by Invoice.BillingCity, (Genre.Name)
order by count(Genre.Name) DESC
limit 3;
'''
'''
---Visual Guide---
Before Query...
############### ################# ############# #############
# Invoice # # InvoiceLine # # Track # # Genre #
############### ################# ############# #############
| InvoiceId | ---> | InvoiceId | | GenreId | ---> | GenreId |
+-------------+ +---------------+ +-----------+ +-----------+
| BillingCity| | TrackId | ---> | TrackId | | Name |
+-------------+ +---------------+ +-----------+ +-----------+
After Query..
#######################################
# InvoiceGenre #
#######################################
| BillingCity | COUNT(*) | Name |
+---------------+------------+--------+
'''
|
13,279 | 3851806b5d6c3eb94439d2a57b028cf9fe2b457a | weight = 0.5
input = 0.5
goal_prediction = 0.8
for iteration in range(20):
pred = input * weight
error = (pred - goal_prediction) ** 2
direction_and_amount = (pred - goal_prediction) * input
weight = weight - direction_and_amount
print("Error:" + str(error) + " prediction:" + str(pred))
|
13,280 | 2b652a7cd3ce8d1ebba96384a4787c1a503d9ac4 | # -*- coding: utf-8 -*-
'''
This module follows the MongoEngine tutorial:
https://mongoengine-odm.readthedocs.io/tutorial.html
'''
from mongoengine import Document, StringField, ReferenceField, ListField, EmbeddedDocument, EmbeddedDocumentField, \
CASCADE
class User(Document):
    """A user account; only the email address is required."""
    email = StringField(required=True)
    first_name = StringField(max_length=50)
    last_name = StringField(max_length=50)
'''
An embedded document should be treated no differently that a regular document;
it just doesn't have its own collection in the database.
'''
class Comment(EmbeddedDocument):
    """A comment embedded inside a Post (stored inline, no own collection)."""
    content = StringField()
    name = StringField(max_length=120)  # commenter's display name
'''
We are storing a reference to the author of the posts using a ReferenceField object.
These are similar to foreign key fields in traditional ORMs, and are automatically
translated into references when they are saved, and dereferenced when they are loaded.
MongoDB allows us to store lists of items natively, so rather than having a link table,
we can just store a list of tags in each post.
You can use to_json or to_mongo to inspect a document's contents, e.g.:
json.loads(post1.to_json())
{u'_cls': u'Post.TextPost',
u'_id': {u'$oid': u'57ac28a0541ccf99ac3eb44a'},
u'author': {u'$oid': u'57ac2878541ccf99ac3eb449'},
u'comments': [],
u'content': u'Took a look at MongoEngine today, looks pretty cool.',
u'tags': [u'mongodb', u'mongoengine'],
u'title': u'Fun with MongoEngine'}
'''
class Post(Document):
    """Base blog post; subclasses share one collection via allow_inheritance."""
    title = StringField(max_length=120, required=True)
    # reverse_delete_rule: To delete all the posts if a user is deleted set the rule:
    author = ReferenceField(User, reverse_delete_rule=CASCADE)
    tags = ListField(StringField(max_length=30))
    comments = ListField(EmbeddedDocumentField(Comment))
    meta = {"allow_inheritance": True}
# Concrete post flavours; allow_inheritance on Post keeps them in one collection
class TextPost(Post):
    content = StringField()
class ImagePost(Post):
    image_path = StringField()
class LinkPost(Post):
    link_url = StringField()
|
13,281 | b09f00152a3de0b0021e2777f374e25265a53380 | #!/usr/env python
# Many-to-Many Relationships
'''
Many to Many
Sometimes we need to model a relationship that is many-to-many
We need to add a 'connection' table with two foreign keys
There is usually no seperate primary value
To do this we build a 'Junction Table' to combine the two tables
This will give us a table with two (ore more) foriegn keys but no primary keys
Course Member(junction) User
id user_id id
title course_id name
role
Start With a Fresh Database
CREATE TABLE User(
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT,
email TEXT
);
CREATE TABLE Course (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
title TEXT
);
CREATE TABLE Member (
user_id INTEGER,
course_id INTEGER,
role INTEGER,
PRIMARY KEY(user_id, course_id)
)
Insert Users and Courses
INSERT INTO User (name, email) VALUES ('Jane', 'jane@tsugi.org');
INSERT INTO User (name, email) VALUES ('Ed', 'ed@tsugi.org');
INSERT INTO User (name, email) VALUES ('Sue', 'sue@tsugi.org');
INSERT INTO Course (title) VALUES ('Python');
INSERT INTO Course (title) VALUES ('SQL');
INSERT INTO Course (title) VALUES ('PHP')
Junction Table
INSERT INTO Member (user_id, course_id, role) VALUES (1,1,1);
INSERT INTO Member (user_id, course_id, role) VALUES (2,1,0);
INSERT INTO Member (user_id, course_id, role) VALUES (3,1,0);
INSERT INTO Member (user_id, course_id, role) VALUES (1,2,0);
INSERT INTO Member (user_id, course_id, role) VALUES (2,2,1);
INSERT INTO Member (user_id, course_id, role) VALUES (2,3,1);
INSERT INTO Member (user_id, course_id, role) VALUES (3,3,0)
Now we can combine the tables
SELECT User.name, Member.role, Course.title
FROM User JOIN Member JOIN Course
ON Member.user_id = User.id AND Member.course_id = Course.id
ORDER BY Course.title, Member.role DESC, User.name
''' |
13,282 | 3cf2df56fde67ac7738b263d4f56dbc3a541ffe2 | # //////////////// DEPTH FIRST SEARCH //////////////////////////
# //
# // Title: DEPTH FIRST SEARCH for graph
# // Course: CS 577 Spring 2021
# //
# // @author Zhuoyan Xu
# // Email: zxu444@wisc.edu
# //
class mygraph:
    """Adjacency-list graph supporting an iterative depth-first traversal."""
    def __init__(self, numNode):
        self.adjList = {}      # node -> list of child nodes (insertion order)
        self.todo = []         # explicit DFS stack
        self.traversal = []    # visit order accumulated by DFS()
        self.numNode = numNode
    def addNode(self, inputRow):
        """Parse 'node child1 child2 ...' and record the adjacency row."""
        tokens = inputRow.split()
        self.adjList[tokens[0]] = tokens[1:]
    def DFS(self):
        """Depth-first traversal over every connected component.

        Components are started from the first not-yet-visited node in
        insertion order; children are pushed so they pop in listed order.
        """
        ordering = list(self.adjList)
        unvisited = [v for v in ordering if v not in self.traversal]
        while unvisited:
            self.todo.append(unvisited[0])
            while self.todo:
                current = self.todo.pop()
                self.traversal.append(current)
                for child in reversed(self.adjList[current]):
                    if child not in self.traversal:
                        self.todo.append(child)
                # deduplicate the stack, keeping only the LAST occurrence of
                # each pending node while preserving relative order
                last_seen = []
                for v in reversed(self.todo):
                    if v not in last_seen:
                        last_seen.append(v)
                self.todo = list(reversed(last_seen))
            unvisited = [v for v in ordering if v not in self.traversal]
#print(len(NodesNotStepped))
def main():
    """Read graph instances from stdin, run DFS on each, print traversals.

    Input format: number of instances; then, per instance, the node count
    followed by one adjacency row per node ('node child1 child2 ...').
    """
    numInstance = int(input()) # the number of instance, first input
    insList = []
    for i in range(numInstance):
        numNode = int(input()) # the number of nodes in i-th instance
        g = mygraph(numNode)
        nodeIdx = 0 # count how many nodes have been added to i-th instance
        while True:
            g.addNode(input())
            #print(g.adjList)
            nodeIdx += 1
            if nodeIdx >= numNode:
                break
        insList.append(g)
        #print("\n")
    for instance in insList:
        instance.DFS()
        print(' '.join(instance.traversal))
insList = main() |
13,283 | 15c9c26992ae703d6165706e9fdb94a7fd44a8c6 | S=input();
K=input();
i = 0;
for i in range(len(S)) :
if int(S[i]) > 1 :
print(S[i]);
break;
elif i + 1 >= int(K) :
print(1);
break;
|
13,284 | 9518adf44d0bf170db2c84a2e4b70d0487fb389f | """
PCA is a popular machine learning algorithm useful for dimension reduction and
information compression. It works by projecting the original data into a lower
dimensional (k) space while preserving as much information as possible (largest
variances) in the projected dimensions. The projection can be realized using
matrix product C = f(X) = XD and decoding X' = g(C) = XDD_{T}. Because we want to
preserve the information, the decoded data X' should be as close as possilbe to
the original data matrix X. This can be measured using the Frobenius norm of
(X - X') as a loss, and the optimal D can be obtained by minimizing it.
Mathematically:
D = argmin_D {||X - X'||_F2}
= argmin_D {||X - XDD_{T}||^2}
= argmin_D {(X - XDD_{T})_T dot (X - XDD_{T})}
It can be proved that D is the eigen vectors corresponding to the k largest eigen
values of X_{T}X
Programmatically, PCA can be computed using the following procedure:
1. Normalize the data X to have zero mean
2. Compute covariance matrix of X, cov(X) = X_{T}.dot(X) / (n - 1)
3. Compute the eigen values and eigen vectors of cov(X)
4. Sort the eigen vectors reversed using eigen values and select top K vectors.
5. Project data using C = (X - mean).dot(D)
Main purpose of using PCA:
1. Dimension reduction of features to speed up training downstream ML model.
2. Reduce noise by only keeping most relevant information (principal components).
3. Make visualization possible for high dimensional data.
PCA should not be used for solving over-fitting issue.
"""
import unittest
import numpy as np
class PCA:
    """Principal Component Analysis via eigendecomposition of the covariance.

    fit(X) learns the mean and the top n_components eigenvectors of the
    sample covariance; transform(X) projects data onto those directions.
    """
    def __init__(self, n_components = 2):
        self.n_components = n_components
    def fit(self, X):
        """Learn the projection from X of shape (n_samples, n_features)."""
        n = X.shape[0]
        self.mu = X.mean(axis=0, keepdims=True)
        X_centered = X - self.mu
        cov = np.dot(X_centered.T, X_centered) / (n - 1)
        # BUG FIX: the covariance is symmetric, so use eigh instead of eig.
        # eig can return complex eigenvalues/vectors from rounding noise and
        # is slower; eigh guarantees real output.
        eigen_vals, eigen_vecs = np.linalg.eigh(cov)
        # keep the n_components directions with the largest variance
        sorted_indexes = eigen_vals.argsort()[::-1][:self.n_components]
        self.eigen_vecs = eigen_vecs[:, sorted_indexes]
        self.eigen_vals = eigen_vals[sorted_indexes]
        self.total_vars = eigen_vals.sum()
        self.explained_vars = self.eigen_vals
        self.explained_vars_ratio = self.eigen_vals / self.total_vars
        return self
    def transform(self, X):
        """Project X onto the learned principal directions; shape (n, k)."""
        return np.dot(X - self.mu, self.eigen_vecs)
class TestPCA(unittest.TestCase):
    """Cross-checks this PCA implementation against scikit-learn's."""
    def test(self):
        " validate result with sklearn.decomposition.PCA "
        from sklearn.decomposition import PCA as skPCA
        X = np.random.normal(3.2, 5.1, size=(20, 8))
        pca = PCA(3).fit(X)
        skpca = skPCA(3).fit(X)
        output = skpca.transform(X)
        # compare absolute values: the sign of each principal axis is arbitrary
        self.assertTrue(np.allclose(np.abs(pca.transform(X)), np.abs(output)), "Should be equal")
if __name__ == "__main__":
unittest.main() |
13,285 | f5a4c616eae1a4f0d466dbab4f3954403ee26e4d | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 21:38:35 2017
@author: wei
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
time_steps = [1, 2, 4, 8, 16, 32, 64]
solver_types = ["RK4", "RK5", "RK"]
# BUG FIX: loadtxt lives in numpy (imported as np above); the bare name
# 'loadtxt' was undefined and raised NameError at runtime.
data = np.loadtxt("RK4_64.dat")
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection = '3d')
# NOTE(review): label reads 'RK8_64' but the file loaded is RK4_64 -- the
# label string is kept as-is; confirm which one is intended.
ax.plot(data[:,1], data[:,2], data[:,3], label='RK8_64')
ax.legend()
plt.show()
|
13,286 | 7c50ae78298ffdb29ea2d89e630bd8572185c2e1 | import pandas as pd
import webbrowser
import pyautogui
import time
# Reading the file
import datetime
import calendar
def findDay(tdate):
    """Return the weekday name (e.g. 'Friday') for an ISO 'YYYY-MM-DD' string."""
    weekday_index = datetime.datetime.strptime(tdate, '%Y-%m-%d').weekday()
    return calendar.day_name[weekday_index]
tdate = datetime.date.today()
# printing todays date
print('Current date: ', tdate)
d = findDay(str(tdate))
print(d)
df = pd.read_csv(str(d) + '.csv')
# Starting the session
def sign_in(url):
    """Open 'url' in Chrome and click the join button once it appears on screen.

    Tries the 64-bit Chrome install path first and falls back to the
    32-bit path if registering/launching the browser fails.
    """
    # url = 'https://meetingsapac15.webex.com/meet/ddpuri'
    try:
        path = 'C://Program Files//Google//Chrome//Application//chrome.exe'
        webbrowser.register('chrome', None, webbrowser.BackgroundBrowser(path))
        webbrowser.get('chrome').open(url)
    except webbrowser.Error:
        # BUG FIX: 'path.DoesNotExist' was never a real exception -- 'path'
        # is a plain string. webbrowser raises webbrowser.Error when the
        # registered browser cannot be used.
        path = 'C://Program Files(x86)//Google//Chrome//Application//chrome.exe'
        webbrowser.register('chrome', None, webbrowser.BackgroundBrowser(path))
        webbrowser.get('chrome').open(url)
    time.sleep(20)
    print('we are here')
    # BUG FIX: pyautogui.locateCenterOnScreen returns None (not the string
    # 'NONE') until the image is found, so the old `== 'NONE'` comparison
    # made the loop exit after a single iteration regardless of success.
    meeting_id_btn = None
    while meeting_id_btn is None:
        meeting_id_btn = pyautogui.locateCenterOnScreen('1.png')
        print(meeting_id_btn)
    pyautogui.moveTo(meeting_id_btn)
    pyautogui.click()
def sign_out():
    """Leave the meeting by terminating known WebEx client processes (Windows/WMI)."""
    import wmi
    ti = 0  # number of processes terminated
    name = ['CiscoCollabHost.exe', 'webexmta.exe', 'atmgr.exe']
    f = wmi.WMI()
    for process in f.Win32_Process():
        if process.name in name:
            process.Terminate()
            ti += 1
            # stop after the first match -- terminating one client process
            # is treated as a successful sign-out
            break
    if ti == 0:
        print('Process not found!!!')
# Poll forever: join a meeting when the current HH:MM matches a scheduled
# start, and sign out when it matches a scheduled end.
while True:
    # checking if the current time exists in the csv file
    # BUG FIX: the module was imported as 'import datetime', so the class
    # must be referenced as datetime.datetime; the bare datetime.now()
    # raised AttributeError on the datetime module.
    now = datetime.datetime.now().strftime('%H:%M')
    if now in str(df['timings']):
        row = df.loc[df['timings'] == now]
        url = str(row.iloc[0, 1])
        sign_in(url)
        print('signed in')
    if now in str(df['end']):
        row = df.loc[df['end'] == now]
        sign_out()
        print('signed out')
|
13,287 | b2cb234e676f3e8d4ee71c47d710b1d5f47a9142 | # Generated by Django 3.0.5 on 2021-05-26 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters usermodel.age to an optional CharField(max_length=4)."""
    dependencies = [
        ('profileApp', '0011_auto_20210526_2305'),
    ]
    operations = [
        migrations.AlterField(
            model_name='usermodel',
            name='age',
            field=models.CharField(blank=True, max_length=4, null=True),
        ),
    ]
|
13,288 | 0932ddf7b8135eb4c033012eb3fc81bf8bbdf04e | #!python3.5
# For command aliases prior to 3.2 - https://bugs.python.org/issue25299
#
# https://pythonconquerstheuniverse.wordpress.com/2011/08/29/lambda_tutorial/
import sys
import re
import json
import subprocess
import requests
import errno
import os
from velstor.restapi import Session
from velstor.vsh.vsh_parser import vsh_parser
from velstor.vclc.handler import error_response
from velstor.restapi import grid
def main(args=None):
    """CLI entry point: parse args, open a session, and run the worker.

    Maps unreachable-vCNC request failures to a 504-style error response;
    SystemExit/KeyboardInterrupt propagate as exit codes; anything else is
    printed and re-raised.
    """
    if args is None:
        args = sys.argv[1:]
    with Session() as session:
        #
        try:
            return worker(session, vsh_parser().parse_args())
        except requests.exceptions.RequestException as e:
            #
            # Requests raised an exception. Probably couldn't reach the vCNC
            # server. There is no HTTP code for this error, so we adopt 504,
            # which is similar.
            #
            # Yes, it would have been cooler to have done this with a single
            # RE.
            #
            details = str(e)
            # NOTE(review): regex patterns should be raw strings (r"...")
            # to avoid invalid-escape warnings on \S and \d
            match_host = re.search("host='(\S+)'", details)
            match_port = re.search("port=(\d+)", details)
            match_error = re.search('NewConnectionError', details)
            suffix = '.'
            #
            # If the server happens to match the vCNC server's default value,
            # then add the additional suggestion to check configuration.
            #
            if match_host and match_port and match_error:
                host = match_host.group(1)
                port = match_port.group(1)
                if host == 'vcnc' and port == "6130":
                    suffix = ''.join([
                        '. Did you mean to set a command line switch'
                        , ' or environment variable?'])
                return error_response(
                    'Could not reach vCNC server at {}:{}{}'.format(
                        host, port, suffix)
                    , http_status=504
                    , error_sym='EHOSTDOWN')
            else:
                #
                # We don't really know what happened. Just dump the raw data
                # as the message.
                #
                return error_response(details)
            #
            #
        except SystemExit:
            raise
        except KeyboardInterrupt:
            sys.exit(errno.EINVAL)
        except BaseException as e:
            print(e)
            raise
            # return error_response('Unexpected exception in client: '+str(e))
def worker(session, args):
    """Mounts a VP and opens a terminal window into the job's files.

    :param session: REST session; logged in against ``args.vcnc`` here.
    :param args: parsed namespace with ``vcnc``, ``jobid``, ``vpm`` and
                 optional ``mount`` attributes.
    :return: a tuple (a, b) where 'a' is the exit code and 'b' a JSON
             string (or the raw vCNC response on lookup failure).
    """
    session.login(args.vcnc)
    #
    # Lookup the grid job on the vcnc
    # Exit if you don't find it.
    response = grid.get(session, args.jobid)
    if response['status_code'] != 200:
        return (1, response)
    #
    # Extract the PeerCache workspace name.
    workspace = json.loads(response['body'])['job_spec']['workspace_name']
    #
    # Check that it's still there and the same (TODO)
    #
    # Spawn a vp. Our wrapper script ensures we are in the installation
    # directory. Note that 'run(..)' is new in Python 3.5
    #
    if args.mount:
        mount_point = args.mount
    else:
        mount_point = '/tmp/velstor/' + args.jobid
    # exist_ok replaces the old try/except FileExistsError: pass dance.
    os.makedirs(mount_point, exist_ok=True)
    #
    # Mount the VP
    #
    vp_cmd = [
        './bin/vp'
        , '--mount={}'.format(mount_point)
        , '--mentor={}'.format(args.vpm)
        , '--workspace={}'.format(workspace)
    ]
    result = subprocess.run(vp_cmd)
    #
    # Start an xterm.
    #
    # We start an xterm whether or not the vp mount fails.  If everything
    # is working correctly, the vp mount fails because there already
    # is a vp open.  If so, then opening another terminal window
    # at the mount point is a feature.
    #
    # If things aren't working, then the user has to fix the underlying
    # problem and also manually close the terminal windows.  Hopefully
    # this will not be the common case.
    #
    subprocess.run(['xterm', '-e', 'cd {}; bash'.format(mount_point)])
    #
    # We want to automagically unmount the VP when the user is finished.
    # But how do we know when the user is finished?  There are better
    # (but expensive and complicated) and worse (but simpler) ways to
    # do this.
    #
    # What we do here is:  the user is done when (1) the original xterm
    # window has been closed and also (2) there are no processes whose
    # current directory is within the mount point.
    #
    # We achieve that by doing a lazy ( -z ) fuse unmount, but only if
    # we are the the original terminal (that is, the terminal that
    # originally successfully mounted the vp).
    #
    if result.returncode == 0:
        subprocess.run(['fusermount', '-uz', '{}'.format(mount_point)])
    #
    # Always report success
    #
    return 0, ''
if __name__ == "__main__":
    exit_code, response = main()
    if response:
        print(json.dumps(response, sort_keys=True, indent=2))
    # Shell exit statuses are 8-bit; clamp anything above 127.
    sys.exit(exit_code if exit_code <= 127 else 127)
|
13,289 | 09e32a639a39d4424f60677bed2daa1a4e33721d | from django.contrib.auth.models import User
from rest_framework import viewsets
from django.core import serializers
from studio_app.models import *
from studio_app.serializers import *
class UserViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for Django's built-in User model.

    ModelViewSet supplies list/retrieve/create/update/destroy actions
    backed by the queryset and serializer configured below.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer
|
13,290 | e4fb48012d3aeaa07b23577a1fe9e1a69498a87c | from django.shortcuts import render
# Create your views here.
import random
from django.http import HttpResponse
from .models import ScrumyGoals, GoalStatus
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from .forms import SignupForm, CreateGoalForm
def index(request):
    """Sign-up view: create a user and add them to the 'Developer' group.

    GET renders an empty signup form.  A valid POST saves the user, adds
    them to the Developer group and shows the success page; an invalid
    POST falls through and re-renders the form with its validation errors.
    """
    form = SignupForm()
    if request.method == 'POST':
        form = SignupForm(request.POST)
        # Validate before saving: calling save() on an unvalidated
        # ModelForm raises ValueError instead of reporting form errors.
        if form.is_valid():
            form.save()
            user = User.objects.get(username=request.POST['username'])
            my_group = Group.objects.get(name='Developer')
            my_group.user_set.add(user)
            if user.groups.filter(name='Developer').exists():
                return render(request, 'moshoodscrumy/success.html')
    return render(request, 'moshoodscrumy/index.html', {'form': form})
def move_goal(request, **kwargs):
    """Move the goal identified by kwargs['goal_id'] to a new status.

    POST only.  Permission rules are keyed off the mover's auth group:
      - Developer: may move their own goals to any status except
        'Done Goal'.
      - Quality Assurance: may move their own goals freely, and other
        users' goals only to 'Done Goal'.
      - Owner: may move only their own goals.
      - Admin: may move any goal to any status.
    Renders the exception template with a success message on a completed
    move, or an HttpResponse / bare exception template otherwise.

    NOTE(review): each successful move inserts a brand-new GoalStatus row
    rather than reusing an existing one — confirm this is intended.
    """
    # output = ScrumyGoals.objects.get(goal_id=kwargs['goal_id'])
    # return HttpResponse(output)
    ## LAB 14
    # dictionary = {'error': 'A record with that goal id does not exist'}
    # try:
    #     obj = ScrumyGoals.objects.get(goal_id=kwargs['goal_id'])
    # except Exception as e:
    #     return render(request, 'moshoodscrumy/exception.html', dictionary)
    # else:
    #     return HttpResponse(obj.goal_name)
    ## LAB 21
    dictionary = {'pass': 'Successfully'}
    if request.method == 'POST':
        # Developer: own goals only, never to 'Done Goal'.
        if request.user.groups.filter(name='Developer').exists():
            obj = ScrumyGoals.objects.get(goal_id=kwargs['goal_id'])
            if request.user == obj.user :
                if request.POST['goal_status'] == 'Done Goal':
                    return HttpResponse('You are a developer')
                else:
                    obj.goal_status = GoalStatus(status_name=request.POST['goal_status'])
                    obj.goal_status.save()
                    obj.save()
                    return render(request, 'moshoodscrumy/exception.html', dictionary)
            else:
                return HttpResponse('Not the owner')
        # QA: own goals freely; others' goals only to 'Done Goal'.
        elif request.user.groups.filter(name='Quality Assurance').exists():
            obj = ScrumyGoals.objects.get(goal_id=kwargs['goal_id'])
            if request.user == obj.user :
                obj.goal_status = GoalStatus(status_name=request.POST['goal_status'])
                obj.goal_status.save()
                obj.save()
                return render(request, 'moshoodscrumy/exception.html', dictionary)
            else:
                if request.POST['goal_status'] == 'Done Goal':
                    obj.goal_status = GoalStatus(status_name=request.POST['goal_status'])
                    obj.goal_status.save()
                    obj.save()
                    return render(request, 'moshoodscrumy/exception.html', dictionary)
                else:
                    return HttpResponse('Not the owner')
        # Owner: own goals only.
        elif request.user.groups.filter(name='Owner').exists():
            obj = ScrumyGoals.objects.get(goal_id=kwargs['goal_id'])
            if request.user == obj.user :
                obj.goal_status = GoalStatus(status_name=request.POST['goal_status'])
                obj.goal_status.save()
                obj.save()
                return render(request, 'moshoodscrumy/exception.html', dictionary)
            else:
                return HttpResponse('Not the owner')
        # Admin: unrestricted.
        elif request.user.groups.filter(name='Admin').exists():
            obj = ScrumyGoals.objects.get(goal_id=kwargs['goal_id'])
            obj.goal_status = GoalStatus(status_name=request.POST['goal_status'])
            obj.goal_status.save()
            obj.save()
            return render(request, 'moshoodscrumy/exception.html', dictionary)
        else:
            # Not in any recognised group.
            return render(request, 'moshoodscrumy/exception.html')
    else:
        # Non-POST requests are rejected with the bare exception template.
        return render(request, 'moshoodscrumy/exception.html')
def add_goal(request):
    """Create a ScrumyGoals record from the CreateGoalForm.

    GET renders a blank form.  A valid POST saves a new goal with a
    random even 4-digit goal_id and a freshly created 'Weekly Goal'
    status; an invalid POST re-renders the form with its errors.

    NOTE(review): created_by/moved_by/owner are hard-coded to 'Louis'
    and a new GoalStatus row is inserted on every save — confirm both
    are intended.
    """
    form = CreateGoalForm()
    if request.method == 'POST':
        form = CreateGoalForm(request.POST)
        # Validate first: save(commit=False) on an unvalidated form
        # raises ValueError instead of surfacing form errors.
        if form.is_valid():
            good_to_go = form.save(commit=False)
            good_to_go.goal_id = random.randrange(1000, 9999, 2)
            good_to_go.created_by = 'Louis'
            good_to_go.moved_by = 'Louis'
            good_to_go.owner = 'Louis'
            good_to_go.goal_status = GoalStatus(status_name='Weekly Goal')
            good_to_go.goal_status.save()
            good_to_go.save()
    return render(request, 'moshoodscrumy/goal.html', {'form': form})
def home(request):
    """Render the scrum board home page with every registered user."""
    context = {'users': User.objects.all()}
    return render(request, 'moshoodscrumy/home.html', context)
|
13,291 | b7c3d24948b21f3beb2ad32284e638c7df84c3fe | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class IdentityProvider(pulumi.CustomResource):
    """A Keycloak OIDC identity provider managed as a Pulumi resource.

    Generated by the Pulumi Terraform bridge (tfgen); each class attribute
    below is an output property of the underlying Terraform resource.
    Do not edit by hand unless you know what you are doing.
    """
    accepts_prompt_none_forward_from_client: pulumi.Output[bool]
    """
    This is just used together with Identity Provider Authenticator or when kc_idp_hint points to this identity provider. In
    case that client sends a request with prompt=none and user is not yet authenticated, the error will not be directly
    returned to client, but the request with prompt=none will be forwarded to this identity provider.
    """
    add_read_token_role_on_create: pulumi.Output[bool]
    """
    Enable/disable if new users can read any stored tokens. This assigns the broker.read-token role.
    """
    alias: pulumi.Output[str]
    """
    The alias uniquely identifies an identity provider and it is also used to build the redirect uri.
    """
    authenticate_by_default: pulumi.Output[bool]
    """
    Enable/disable authenticate users by default.
    """
    authorization_url: pulumi.Output[str]
    """
    OIDC authorization URL.
    """
    backchannel_supported: pulumi.Output[bool]
    """
    Does the external IDP support backchannel logout?
    """
    client_id: pulumi.Output[str]
    """
    Client ID.
    """
    client_secret: pulumi.Output[str]
    """
    Client Secret.
    """
    default_scopes: pulumi.Output[str]
    """
    The scopes to be sent when asking for authorization. It can be a space-separated list of scopes. Defaults to 'openid'.
    """
    display_name: pulumi.Output[str]
    """
    Friendly name for Identity Providers.
    """
    enabled: pulumi.Output[bool]
    """
    Enable/disable this identity provider.
    """
    extra_config: pulumi.Output[dict]
    first_broker_login_flow_alias: pulumi.Output[str]
    """
    Alias of authentication flow, which is triggered after first login with this identity provider. Term 'First Login' means
    that there is not yet existing Keycloak account linked with the authenticated identity provider account.
    """
    hide_on_login_page: pulumi.Output[bool]
    """
    Hide On Login Page.
    """
    internal_id: pulumi.Output[str]
    """
    Internal Identity Provider Id
    """
    jwks_url: pulumi.Output[str]
    """
    JSON Web Key Set URL
    """
    link_only: pulumi.Output[bool]
    """
    If true, users cannot log in through this provider. They can only link to this provider. This is useful if you don't
    want to allow login from the provider, but want to integrate with a provider
    """
    login_hint: pulumi.Output[str]
    """
    Login Hint.
    """
    logout_url: pulumi.Output[str]
    """
    Logout URL
    """
    post_broker_login_flow_alias: pulumi.Output[str]
    """
    Alias of authentication flow, which is triggered after each login with this identity provider. Useful if you want
    additional verification of each user authenticated with this identity provider (for example OTP). Leave this empty if
    you don't want any additional authenticators to be triggered after login with this identity provider. Also note, that
    authenticator implementations must assume that user is already set in ClientSession as identity provider already set it.
    """
    provider_id: pulumi.Output[str]
    """
    provider id, is always oidc, unless you have a custom implementation
    """
    realm: pulumi.Output[str]
    """
    Realm Name
    """
    store_token: pulumi.Output[bool]
    """
    Enable/disable if tokens must be stored after authenticating users.
    """
    token_url: pulumi.Output[str]
    """
    Token URL.
    """
    trust_email: pulumi.Output[bool]
    """
    If enabled then email provided by this provider is not verified even if verification is enabled for the realm.
    """
    ui_locales: pulumi.Output[bool]
    """
    Pass current locale to identity provider
    """
    user_info_url: pulumi.Output[str]
    """
    User Info URL
    """
    validate_signature: pulumi.Output[bool]
    """
    Enable/disable signature validation of external IDP signatures.
    """
    def __init__(__self__, resource_name, opts=None, accepts_prompt_none_forward_from_client=None, add_read_token_role_on_create=None, alias=None, authenticate_by_default=None, authorization_url=None, backchannel_supported=None, client_id=None, client_secret=None, default_scopes=None, display_name=None, enabled=None, extra_config=None, first_broker_login_flow_alias=None, hide_on_login_page=None, jwks_url=None, link_only=None, login_hint=None, logout_url=None, post_broker_login_flow_alias=None, provider_id=None, realm=None, store_token=None, token_url=None, trust_email=None, ui_locales=None, user_info_url=None, validate_signature=None, __props__=None, __name__=None, __opts__=None):
        """
        Create a IdentityProvider resource with the given unique name, props, and options.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] accepts_prompt_none_forward_from_client: This is just used together with Identity Provider Authenticator or when kc_idp_hint points to this identity provider. In
               case that client sends a request with prompt=none and user is not yet authenticated, the error will not be directly
               returned to client, but the request with prompt=none will be forwarded to this identity provider.
        :param pulumi.Input[bool] add_read_token_role_on_create: Enable/disable if new users can read any stored tokens. This assigns the broker.read-token role.
        :param pulumi.Input[str] alias: The alias uniquely identifies an identity provider and it is also used to build the redirect uri.
        :param pulumi.Input[bool] authenticate_by_default: Enable/disable authenticate users by default.
        :param pulumi.Input[str] authorization_url: OIDC authorization URL.
        :param pulumi.Input[bool] backchannel_supported: Does the external IDP support backchannel logout?
        :param pulumi.Input[str] client_id: Client ID.
        :param pulumi.Input[str] client_secret: Client Secret.
        :param pulumi.Input[str] default_scopes: The scopes to be sent when asking for authorization. It can be a space-separated list of scopes. Defaults to 'openid'.
        :param pulumi.Input[str] display_name: Friendly name for Identity Providers.
        :param pulumi.Input[bool] enabled: Enable/disable this identity provider.
        :param pulumi.Input[str] first_broker_login_flow_alias: Alias of authentication flow, which is triggered after first login with this identity provider. Term 'First Login' means
               that there is not yet existing Keycloak account linked with the authenticated identity provider account.
        :param pulumi.Input[bool] hide_on_login_page: Hide On Login Page.
        :param pulumi.Input[str] jwks_url: JSON Web Key Set URL
        :param pulumi.Input[bool] link_only: If true, users cannot log in through this provider. They can only link to this provider. This is useful if you don't
               want to allow login from the provider, but want to integrate with a provider
        :param pulumi.Input[str] login_hint: Login Hint.
        :param pulumi.Input[str] logout_url: Logout URL
        :param pulumi.Input[str] post_broker_login_flow_alias: Alias of authentication flow, which is triggered after each login with this identity provider. Useful if you want
               additional verification of each user authenticated with this identity provider (for example OTP). Leave this empty if
               you don't want any additional authenticators to be triggered after login with this identity provider. Also note, that
               authenticator implementations must assume that user is already set in ClientSession as identity provider already set it.
        :param pulumi.Input[str] provider_id: provider id, is always oidc, unless you have a custom implementation
        :param pulumi.Input[str] realm: Realm Name
        :param pulumi.Input[bool] store_token: Enable/disable if tokens must be stored after authenticating users.
        :param pulumi.Input[str] token_url: Token URL.
        :param pulumi.Input[bool] trust_email: If enabled then email provided by this provider is not verified even if verification is enabled for the realm.
        :param pulumi.Input[bool] ui_locales: Pass current locale to identity provider
        :param pulumi.Input[str] user_info_url: User Info URL
        :param pulumi.Input[bool] validate_signature: Enable/disable signature validation of external IDP signatures.
        """
        # Backwards-compatibility shims for the deprecated __name__/__opts__
        # positional arguments.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id means we are creating a new resource: collect and
        # validate the required input properties.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['accepts_prompt_none_forward_from_client'] = accepts_prompt_none_forward_from_client
            __props__['add_read_token_role_on_create'] = add_read_token_role_on_create
            if alias is None:
                raise TypeError("Missing required property 'alias'")
            __props__['alias'] = alias
            __props__['authenticate_by_default'] = authenticate_by_default
            if authorization_url is None:
                raise TypeError("Missing required property 'authorization_url'")
            __props__['authorization_url'] = authorization_url
            __props__['backchannel_supported'] = backchannel_supported
            if client_id is None:
                raise TypeError("Missing required property 'client_id'")
            __props__['client_id'] = client_id
            if client_secret is None:
                raise TypeError("Missing required property 'client_secret'")
            __props__['client_secret'] = client_secret
            __props__['default_scopes'] = default_scopes
            __props__['display_name'] = display_name
            __props__['enabled'] = enabled
            __props__['extra_config'] = extra_config
            __props__['first_broker_login_flow_alias'] = first_broker_login_flow_alias
            __props__['hide_on_login_page'] = hide_on_login_page
            __props__['jwks_url'] = jwks_url
            __props__['link_only'] = link_only
            __props__['login_hint'] = login_hint
            __props__['logout_url'] = logout_url
            __props__['post_broker_login_flow_alias'] = post_broker_login_flow_alias
            __props__['provider_id'] = provider_id
            if realm is None:
                raise TypeError("Missing required property 'realm'")
            __props__['realm'] = realm
            __props__['store_token'] = store_token
            if token_url is None:
                raise TypeError("Missing required property 'token_url'")
            __props__['token_url'] = token_url
            __props__['trust_email'] = trust_email
            __props__['ui_locales'] = ui_locales
            __props__['user_info_url'] = user_info_url
            __props__['validate_signature'] = validate_signature
            # Output-only property; populated by the provider after creation.
            __props__['internal_id'] = None
        super(IdentityProvider, __self__).__init__(
            'keycloak:oidc/identityProvider:IdentityProvider',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, accepts_prompt_none_forward_from_client=None, add_read_token_role_on_create=None, alias=None, authenticate_by_default=None, authorization_url=None, backchannel_supported=None, client_id=None, client_secret=None, default_scopes=None, display_name=None, enabled=None, extra_config=None, first_broker_login_flow_alias=None, hide_on_login_page=None, internal_id=None, jwks_url=None, link_only=None, login_hint=None, logout_url=None, post_broker_login_flow_alias=None, provider_id=None, realm=None, store_token=None, token_url=None, trust_email=None, ui_locales=None, user_info_url=None, validate_signature=None):
        """
        Get an existing IdentityProvider resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] accepts_prompt_none_forward_from_client: This is just used together with Identity Provider Authenticator or when kc_idp_hint points to this identity provider. In
               case that client sends a request with prompt=none and user is not yet authenticated, the error will not be directly
               returned to client, but the request with prompt=none will be forwarded to this identity provider.
        :param pulumi.Input[bool] add_read_token_role_on_create: Enable/disable if new users can read any stored tokens. This assigns the broker.read-token role.
        :param pulumi.Input[str] alias: The alias uniquely identifies an identity provider and it is also used to build the redirect uri.
        :param pulumi.Input[bool] authenticate_by_default: Enable/disable authenticate users by default.
        :param pulumi.Input[str] authorization_url: OIDC authorization URL.
        :param pulumi.Input[bool] backchannel_supported: Does the external IDP support backchannel logout?
        :param pulumi.Input[str] client_id: Client ID.
        :param pulumi.Input[str] client_secret: Client Secret.
        :param pulumi.Input[str] default_scopes: The scopes to be sent when asking for authorization. It can be a space-separated list of scopes. Defaults to 'openid'.
        :param pulumi.Input[str] display_name: Friendly name for Identity Providers.
        :param pulumi.Input[bool] enabled: Enable/disable this identity provider.
        :param pulumi.Input[str] first_broker_login_flow_alias: Alias of authentication flow, which is triggered after first login with this identity provider. Term 'First Login' means
               that there is not yet existing Keycloak account linked with the authenticated identity provider account.
        :param pulumi.Input[bool] hide_on_login_page: Hide On Login Page.
        :param pulumi.Input[str] internal_id: Internal Identity Provider Id
        :param pulumi.Input[str] jwks_url: JSON Web Key Set URL
        :param pulumi.Input[bool] link_only: If true, users cannot log in through this provider. They can only link to this provider. This is useful if you don't
               want to allow login from the provider, but want to integrate with a provider
        :param pulumi.Input[str] login_hint: Login Hint.
        :param pulumi.Input[str] logout_url: Logout URL
        :param pulumi.Input[str] post_broker_login_flow_alias: Alias of authentication flow, which is triggered after each login with this identity provider. Useful if you want
               additional verification of each user authenticated with this identity provider (for example OTP). Leave this empty if
               you don't want any additional authenticators to be triggered after login with this identity provider. Also note, that
               authenticator implementations must assume that user is already set in ClientSession as identity provider already set it.
        :param pulumi.Input[str] provider_id: provider id, is always oidc, unless you have a custom implementation
        :param pulumi.Input[str] realm: Realm Name
        :param pulumi.Input[bool] store_token: Enable/disable if tokens must be stored after authenticating users.
        :param pulumi.Input[str] token_url: Token URL.
        :param pulumi.Input[bool] trust_email: If enabled then email provided by this provider is not verified even if verification is enabled for the realm.
        :param pulumi.Input[bool] ui_locales: Pass current locale to identity provider
        :param pulumi.Input[str] user_info_url: User Info URL
        :param pulumi.Input[bool] validate_signature: Enable/disable signature validation of external IDP signatures.
        """
        # Attaching the provider-assigned id makes the engine read existing
        # state instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["accepts_prompt_none_forward_from_client"] = accepts_prompt_none_forward_from_client
        __props__["add_read_token_role_on_create"] = add_read_token_role_on_create
        __props__["alias"] = alias
        __props__["authenticate_by_default"] = authenticate_by_default
        __props__["authorization_url"] = authorization_url
        __props__["backchannel_supported"] = backchannel_supported
        __props__["client_id"] = client_id
        __props__["client_secret"] = client_secret
        __props__["default_scopes"] = default_scopes
        __props__["display_name"] = display_name
        __props__["enabled"] = enabled
        __props__["extra_config"] = extra_config
        __props__["first_broker_login_flow_alias"] = first_broker_login_flow_alias
        __props__["hide_on_login_page"] = hide_on_login_page
        __props__["internal_id"] = internal_id
        __props__["jwks_url"] = jwks_url
        __props__["link_only"] = link_only
        __props__["login_hint"] = login_hint
        __props__["logout_url"] = logout_url
        __props__["post_broker_login_flow_alias"] = post_broker_login_flow_alias
        __props__["provider_id"] = provider_id
        __props__["realm"] = realm
        __props__["store_token"] = store_token
        __props__["token_url"] = token_url
        __props__["trust_email"] = trust_email
        __props__["ui_locales"] = ui_locales
        __props__["user_info_url"] = user_info_url
        __props__["validate_signature"] = validate_signature
        return IdentityProvider(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Map provider camelCase names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
13,292 | 5615ab3ca410f92c4013695268f38a5e5eb7abe1 | import struct
import cffi
import dataType
class CDataMaker:
    """Converts raw big-endian byte sequences into cffi C data objects."""

    __ffi = cffi.FFI()

    def __checkIsSigned(self, dataTypeName):
        # 'unsigned ...' types and plain 'char' are treated as unsigned.
        return not ('unsigned' in dataTypeName or dataTypeName == 'char')

    def castToCDataType(self, byteSequence, dataTypeName):
        """Cast *byteSequence* to the C type named *dataTypeName*.

        Supports integers, floats/doubles, strings and (recursively)
        single-level pointer types; raises NameError otherwise.
        """
        cDataType = dataType.DataType(dataTypeName)
        category = cDataType.getCategory()
        if category == 'int':
            number = int.from_bytes(byteSequence, byteorder='big',
                                    signed=self.__checkIsSigned(dataTypeName))
            return self.__ffi.cast(cDataType.makeFormalDataType(), number)
        if category == 'floating-point':
            # struct format char: 'f' for float, 'd' for double.
            unpacked = struct.unpack(dataTypeName[0], byteSequence)[0]
            return self.__ffi.cast(cDataType.makeFormalDataType(), unpacked)
        if category == 'string':
            return self.__ffi.new('char []', bytes(byteSequence))
        if '*' in dataTypeName:
            # Pointer type: build the pointee first, then wrap it.
            pointee = self.castToCDataType(byteSequence, dataTypeName[:-1])
            return self.__ffi.new(cDataType.makeFormalDataType(), pointee)
        raise NameError(dataTypeName + ' is not basic C Data Type.')
def cmakerTestCode():
    """Smoke test for CDataMaker: cast a few byte patterns and print them."""
    ffi = cffi.FFI()
    samples = [('int', int.to_bytes(22, byteorder='big', length=4)),
               ('const char*', b'hello world\n'),
               ('float*', bytes(struct.pack("f", 3.14))),
               ('int*', int.to_bytes(85, byteorder='big', length=4))]
    maker = CDataMaker()
    cDataList = [maker.castToCDataType(raw, typeName) for typeName, raw in samples]
    print(ffi.string(cDataList[1], 13))
    print(ffi.unpack(cDataList[-2], 1))
    # Check the value behind the last int* variable round-tripped correctly.
    print(ffi.unpack(cDataList[-1], 1))
    print(ffi.sizeof('float'))
13,293 | 06a1008281483e4e621f1a70daf5900e79ddd33e | from django.conf.urls import url, include
from django.contrib import admin
from . import views
app_name = "inventories"  # URL namespace for reversing, e.g. "inventories:host_list"

# Inventory browser routes.  All detail routes funnel into the same
# generic_view; the distinct names let templates reverse each one.
# Path components (inventory/group/host) allow letters, digits, '_', '.', '-'.
urlpatterns = [
    url(r'^$', views.inventory_list, name="inventory_list"),
    url(r'^/(?P<inventory>[a-zA-Z0-9_.-]+)$', views.generic_view, name="inventory_detail"),
    url(r'^/(?P<inventory>[a-zA-Z0-9_.-]+)/groups$', views.generic_view, name="group_list"),
    url(r'^/(?P<inventory>[a-zA-Z0-9_.-]+)/groups/(?P<group>[a-zA-Z0-9_.-]+)$', views.generic_view, name="group_detail"),
    url(r'^/(?P<inventory>[a-zA-Z0-9_.-]+)/hosts$', views.generic_view, name="host_list"),
    url(r'^/(?P<inventory>[a-zA-Z0-9_.-]+)/hosts/(?P<host>[a-zA-Z0-9_.-]+)$', views.generic_view, name="host_detail"),
]
|
13,294 | 606fc199fbead2a0382089452d85b254e3f5202c | import matplotlib.pyplot as plt
import numpy as np
from uncertainties import ufloat
import scipy.constants as const
import matplotlib as mpl
# Select the pgf backend so figures are rendered through LaTeX (lualatex).
# NOTE(review): pyplot was already imported above — on older matplotlib
# versions mpl.use() must run before the first pyplot import; confirm the
# backend switch actually takes effect here.
mpl.use('pgf')
import matplotlib.pyplot as plt
# LaTeX-driven text rendering with siunitx/unicode-math available in labels.
mpl.rcParams.update({
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
    'pgf.texsystem': 'lualatex',
    'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}',
})
# --- Moseley's-law fit: sqrt(E_K) versus effective nuclear charge (Z - sigma_K) ---
# Elementary charge as a (value, unit, uncertainty) tuple from scipy.constants.
e = const.physical_constants["elementary charge"]
z = np.array([30, 31, 35, 37, 38, 40])  # atomic numbers Z
sig = np.array([3.62, 3.68, 3.84, 4.07, 4.11, 4.33])  # K-shell screening constants sigma_K
E_keV = np.array([9.6154176, 10.32185278, 13.47951301, 15.08974341, 16.00232593, 17.77862823])  # K-edge energies in keV
E_J = E_keV * e[0] * 10**3  # energies in joule
# Linear regression of sqrt(E) against (Z - sigma): slope m, intercept b.
params, covariance_matrix = np.polyfit(z - sig, np.sqrt(E_keV), deg=1, cov=True)
errors = np.sqrt(np.diag(covariance_matrix))
for name, value, error in zip('mb', params, errors):
    # Fixed mojibake: the plus-minus sign was mis-encoded as 'ยฑ'.
    print(f'{name} = {value:.3f} ± {error:.3f}')
achse = np.linspace(25.5, 36.5)
plt.plot(z - sig, np.sqrt(E_keV), "rx", label="Messwerte")
plt.plot(achse, params[0] * achse + params[1], label="Lineare Regression")
plt.xlabel(r'$z-\sigma _\text{K}$')
plt.ylabel(r'$\sqrt{E_\text{K}\,/\,\SI{e3}{\electronvolt}}$')
plt.legend()
plt.grid()
plt.savefig("../plots/regression2.pdf")
13,295 | c52006129e0d9f24ed70d5b99043e3144f6a0196 | import requests
from Tools import requests_tools as rt, geoloc_commercial_db, web_mapping_services, network_measurer, geo_distance_calculator
import re
from bs4 import BeautifulSoup
import socket
import pyprind
import settings, strings
import json
import pyprind
def crawl_planetlab():
    """Scrape the PlanetLab public sites table into landmark dicts.

    Each landmark carries the organization name, homepage URL and site
    coordinates; table rows missing a link or a coordinate are skipped.

    :return: list of dicts with organization/url/longitude/latitude keys.
    """
    res = requests.get("https://www.planet-lab.org/db/pub/sites.php",
                       headers=rt.get_random_headers(),
                       proxies=rt.get_proxies_abroad(), timeout=10)
    soup = BeautifulSoup(res.text, "lxml")
    tbody = soup.select_one("table#sites > tbody")
    landmarks = []
    for row in tbody.select("tr"):
        cells = row.select("td")
        anchor = cells[1].select_one("a")
        lon = cells[2].text
        lat = cells[3].text
        # Keep only sites that publish both a homepage and coordinates.
        if anchor is None or lon == "" or lat == "":
            continue
        landmarks.append({
            "organization": cells[1].text,
            "url": anchor["href"],
            "longitude": lon,
            "latitude": lat,
        })
    return landmarks
# def add_locinfo(landmarks_planetlab):
# '''
# add ipinfo into landmarks
# :param landmarks_planetlab:
# :return:
# '''
# count_different = 0
# count_fail = 0
# for lm in pyprind.prog_bar(landmarks_planetlab):
# ip = lm["ip"]
#
# try:
# json_ipinfo = geoloc_commercial_db.ip_geolocation_ipinfo(ip)
# json_ipip = geoloc_commercial_db.ip_geolocation_ipip(ip)
# city_ipinfo = json_ipinfo["city"]
# region_ipinfo = json_ipinfo["region"]
# country_ipinfo = json_ipinfo["country"]
# city_ipip = json_ipip["city"]
# region_ipip = json_ipip["region"]
# country_ipip = json_ipip["country"]
#
# loc = json_ipinfo["loc"].split(",")
# lm["ipinfo"] = {
# "city": city_ipinfo,
# "region": region_ipinfo,
# "country": country_ipinfo,
# "longitude": loc[1],
# "latitude": loc[0],
# }
# lm["ipip"] = {
# "city": city_ipip,
# "region": region_ipip,
# "country": country_ipip,
# "longitude": json_ipip["longitude"],
# "latitude": json_ipip["latitude"],
# }
#
# lm["city"] = ""
# lm["region"] = ""
# lm["country"] = ""
#
# assert country_ipinfo == "".join(re.findall("[A-Z]", country_ipip))
# lm["country"] = country_ipip
#
# assert region_ipinfo == region_ipip
# lm["region"] = region_ipip
#
# assert city_ipinfo == city_ipip
# lm["city"] = city_ipip
#
# except AssertionError as e:
# count_different += 1
# print("different: %d/%d" % (count_different, len(landmarks_planetlab)))
# # print("different location result, ipinfo: %s, %s, %s; ipip: %s, %s, %s" % (
# # city_ipinfo, region_ipinfo, country_ipinfo, city_ipip, region_ipip, country_ipip))
# except Exception as e:
# count_fail += 1
# print("fail: %d/%d" % (count_fail, len(landmarks_planetlab)))
# print(e)
#
# return landmarks_planetlab
def add_html(landmarks):
    """Fetch and cache the homepage HTML for each landmark.

    A landmark whose cached HTML is a meta http-equiv="refresh" stub is
    re-fetched at the redirect target.  Success/failure counts are
    printed at the end.

    :param landmarks: list of dicts, each with at least a "url" key.
    :return: the same list, with "html" (and possibly "url") updated.
    """
    count_fail = 0
    count_suc = 0
    for lm in pyprind.prog_bar(landmarks):
        # Already have non-redirecting HTML cached: nothing to do.
        if "html" in lm and "http-equiv=\"refresh\"" not in lm["html"].lower():
            count_suc += 1
            continue
        url = lm["url"]
        if "html" in lm:
            # Cached page is a meta-refresh stub: extract the redirect URL.
            soup = BeautifulSoup(lm["html"].lower(), "lxml")
            list_meta = soup.select("meta")
            for meta in list_meta:
                if "http-equiv" in meta.attrs and meta["http-equiv"] == "refresh":
                    content = meta["content"]
                    search_group = re.search("url=(.*)", content)
                    if search_group:
                        url_refresh = search_group.group(1)
                        url = rt.recover_url(url, url_refresh)
                        print("refresh: %s" % str(meta))
        # Bare host names get a scheme and www. prefix bolted on.
        if "http" not in url:
            url = "http://www.%s" % url
        # url_split = url.split("//")
        # path = url_split[1]
        # host = re.sub("/.*", "", path)
        # url = url_split[0] + "//" + host
        try:
            res = requests.get(url, proxies=rt.get_proxies_abroad(), headers=rt.get_random_headers(), timeout=30)
            assert res.status_code == 200
            lm["html"] = res.text
            lm["url"] = url
            count_suc += 1
        except Exception as e:
            # Best-effort crawl: log the failure and move on.
            print(e)
            print("failed: %s" % url)
            count_fail += 1
    print(count_fail)
    print(count_suc)
    return landmarks
def find_ip(landmarks):
    '''
    Resolve and store an IP address for every landmark that has a URL.

    Landmarks already carrying an "ip" key, or lacking a "url", are skipped.

    :param landmarks: list of landmark dicts (mutated in place)
    :return: the same list of landmarks
    '''
    n_unresolved = 0
    for lm in pyprind.prog_bar(landmarks):
        if "ip" in lm or "url" not in lm:
            continue
        # Reduce the URL to a bare host name before DNS resolution.
        host = re.sub("http://", "", lm["url"])
        host = re.sub("https://", "", host)
        if "/" in host:
            host = host.split("/")[0]
        try:
            lm["ip"] = socket.gethostbyname(host)
        except Exception:
            # DNS failure: leave the landmark without an "ip" key.
            n_unresolved += 1
    print("fail_num: %d" % n_unresolved)
    return landmarks
def find_ip_2(samples_planetlab):
    '''
    For each sample, pick the candidate IP whose commercial-DB location is
    geographically closest to the sample's ground-truth coordinate.

    Candidate IPs are every DNS answer for the sample URL's host plus the
    sample's currently stored IP.  The chosen IP and its distance to the
    ground truth are written back onto the sample.

    :param samples_planetlab: list of sample dicts (mutated in place)
    :return: the same list of samples
    '''
    for sample in pyprind.prog_bar(samples_planetlab):
        lon_ground = sample["longitude"]
        lat_ground = sample["latitude"]
        # Extract the bare host: stop at the first "/" after the scheme.
        # The previous pattern "https?://(.*)/?" greedily captured the whole
        # path/trailing slash as part of the host.
        host = re.search(r"https?://([^/]+)", sample["url"]).group(1)
        ip_list = network_measurer.get_all_ips(host)
        ip_list.append(sample["ip"])
        ip_closest = ""
        dis_min = float("inf")  # proper sentinel instead of a magic big int
        for ip in ip_list:
            locations = geoloc_commercial_db.get_locations_info_by_commercial_tools(ip)
            for loc in locations:
                dis = geo_distance_calculator.get_geodistance_btw_2coordinates(
                    lon_ground, lat_ground, loc["longitude"], loc["latitude"])
                if dis < dis_min:
                    dis_min = dis
                    ip_closest = ip
        sample["ip"] = ip_closest
        sample[strings.KEY_DIS_COARSE_LOC_2_GROUND] = dis_min
        print("ip: {}, dis: {}".format(ip_closest, dis_min))
    return samples_planetlab
def geocode_coordinate_2_addr(landmarks_planetlab):
    '''
    Reverse-geocode each landmark's coordinate to a city-level address via
    the Google Maps geocoding API, storing the result under "geo_lnglat".

    :param landmarks_planetlab: list of landmark dicts (mutated in place)
    :return: the same list of landmarks
    '''
    for lm in pyprind.prog_bar(landmarks_planetlab):
        lng = float(lm["longitude"])
        lat = float(lm["latitude"])
        addr = web_mapping_services.google_map_geocode_coordinate2addr(lng, lat)
        # Only record the address when the geocoder returned something.
        if addr:
            lm["geo_lnglat"] = addr
    return landmarks_planetlab
def filer_valid_samples_us(samples_planetlab):
    '''
    Keep only United-States samples that carry a url, an ip and cached html.

    Longitude/latitude of every kept sample are coerced to float in place.
    Samples missing "geo_lnglat" information (or lon/lat) are dropped.

    :param samples_planetlab: list of sample dicts
    :return: new list containing only the valid US samples
    '''
    kept = []
    required = ("url", "ip", "html")
    for sample in samples_planetlab:
        try:
            if sample["geo_lnglat"]["country"] != "United States":
                continue
            if not all(key in sample for key in required):
                continue
            sample["longitude"] = float(sample["longitude"])
            sample["latitude"] = float(sample["latitude"])
        except KeyError:
            # No geo info / coordinates: silently skip this sample.
            continue
        kept.append(sample)
    print(len(kept))
    return kept
if __name__ == "__main__":
    # Re-pick the closest candidate IP for every PlanetLab sample and persist
    # the updated samples back to the same JSON file.
    sample_path = "../Sources/experiments/samples_planetlab_us_0.1.json"
    # Context managers guarantee the handles are closed and the output is
    # flushed even on error (the originals leaked open file objects).
    with open(sample_path, "r", encoding="utf-8") as fin:
        samples = json.load(fin)
    # for sample in samples:
    #     print("{} {}".format(sample["ip"], sample[strings.KEY_POTENTIAL_OWNER_NAMES]))
    samples = find_ip_2(samples)
    with open(sample_path, "w", encoding="utf-8") as fout:
        json.dump(samples, fout)
|
13,296 | 2de4513a7874408f01a0e77f42f51675c2114d0d | from tqdm import tqdm
import torch
import torch.nn as nn
from trainer.eval import eval_net
from pathlib import Path
import os
# In every phase, keep the best model so it can seed training for the next phase.
def train(net: torch.nn, data_train, train_loader, criterion, optimizer, writer, epochs, pre_epoch, n_channels,
          device, global_step, test_loader, n_classes, dir_checkpoint, logging, phase):
    """Train ``net`` for one phase and checkpoint the best model by test IOU.

    If a checkpoint from a previous phase exists it is loaded first.  After
    every epoch the network is evaluated on ``test_loader``; whenever the IOU
    improves, the weights are saved to
    ``dir_checkpoint + 'best_CP_one64th_dropout<flag>.pth'``.

    Args:
        net: network to train (must expose an ``is_dropout`` attribute).
        data_train: training dataset (used only for its length in the bar).
        train_loader: DataLoader yielding dicts with 'image' and 'mask'.
        criterion: loss applied to (prediction, ground-truth mask).
        optimizer: optimizer updating ``net``'s parameters.
        writer: TensorBoard SummaryWriter.
        epochs: number of epochs to run in this phase.
        pre_epoch: epoch offset from a previous run (returned unchanged).
        n_channels: expected number of input image channels.
        device: torch device to train on.
        global_step: running batch counter (incremented per batch).
        test_loader: DataLoader used for evaluation.
        n_classes: number of output classes (1 -> float masks, else long).
        dir_checkpoint: directory prefix for checkpoint files.
        logging: logger instance.
        phase: index of the current training phase (used in TB tags).

    Returns:
        ``pre_epoch``, unchanged.
    """
    best_test_iou_score = 0
    dropout_flag = "dropout" + str(net.is_dropout)
    # Resume from the best checkpoint of the previous phase when present.
    right_previous_ckpt_dir = Path(dir_checkpoint + f'best_CP_one64th_{dropout_flag}.pth')
    if right_previous_ckpt_dir.is_file():
        net.load_state_dict(
            torch.load(dir_checkpoint + f'best_CP_one64th_{dropout_flag}.pth', map_location=device)
        )
    for epoch in range(epochs):
        net.train()
        epoch_loss = 0
        n_train = len(data_train)
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
            for batch in train_loader:
                imgs = batch['image']
                true_masks = batch['mask']
                assert imgs.shape[1] == n_channels, \
                    f'Network has been defined with {n_channels} input channels, ' \
                    f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
                    'the images are loaded correctly.'
                imgs = imgs.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if n_classes == 1 else torch.long
                true_masks = true_masks.to(device=device, dtype=mask_type)
                masks_pred = net(imgs)  # BCHW, e.g. 4x1x256x256
                # NOTE: a leftover debug line used to run a second, unused
                # forward pass here (`_tem = net(imgs)`), doubling compute
                # per batch; it has been removed.
                # Only the first mask channel is the ground truth.
                true_masks = true_masks[:, :1, :, :]
                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()
                pbar.set_postfix(**{'loss (batch)': loss.item()})
                optimizer.zero_grad()
                loss.backward()
                # Clip gradients element-wise to stabilise training.
                nn.utils.clip_grad_value_(net.parameters(), 0.1)
                optimizer.step()
                pbar.update(imgs.shape[0])
                global_step += 1
        # Compute dice and IOU on the test set; log them to TensorBoard.
        test_score_dice, test_score_iou = eval_net(net, test_loader, n_classes, device)
        if test_score_iou > best_test_iou_score:
            best_test_iou_score = test_score_iou
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                # Directory already exists.
                pass
            torch.save(net.state_dict(),
                       dir_checkpoint + f'best_CP_one64th_{dropout_flag}.pth')
            logging.info(f'Checkpoint {epoch + 1} saved !')
        logging.info('Test Dice Coeff: {}'.format(test_score_dice))
        print('Test Dice Coeff: {}'.format(test_score_dice))
        writer.add_scalar(f'Phase_{phase}_Dice_{dropout_flag}/test', test_score_dice, epoch)
        logging.info('Test IOU : {}'.format(test_score_iou))
        print('Test IOU : {}'.format(test_score_iou))
        writer.add_scalar(f'Phase_{phase}_IOU_{dropout_flag}/test', test_score_iou, epoch)
    print(f"Phase_{phase}_best iou: ", best_test_iou_score)
    writer.add_scalar(f'Phases_IOU_{dropout_flag}/test', best_test_iou_score, phase)
    return pre_epoch
|
13,297 | 8641c7baf93e8990d1744e41b34d6f6ec0ba41ce | from .ctc_loss import sequence_ctc_loss_with_logits
from .axe_loss import sequence_axe_loss_with_logits |
13,298 | a7b5dcdefb344bfdb9f2481e009a270214b4d3fb | #!/usr/bin/env python2.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id$
#
# Author: mattmann
# Description: Takes an input file of LARGE size in which each line
# in the file is a full path to some file to ingest. SPLITS collections
# of lines in the file into sub-groups of size chunkSize
import sys
import getopt
def splitAndExecute(chunkFile, chunkSize):
theChunkFile = open(chunkFile, 'r')
numChunks = 0
fileList=[]
currentChunkSize = 0
while True:
line=theChunkFile.readline()
if not line:
chunkfilename = "filelist_chunk_"+str(numChunks)+".txt"
writeFile(chunkfilename, fileList)
# reset
fileList=[]
currentChunkSize = long(0)
numChunks = numChunks + 1
break
fileList.append(line)
currentChunkSize = currentChunkSize+1
if (currentChunkSize == chunkSize):
chunkfilename = "filelist_chunk_"+str(numChunks)+".txt"
writeFile(chunkfilename, fileList)
# reset
fileList=[]
currentChunkSize = long(0)
numChunks = numChunks + 1
print "Total Chunks: "+str(numChunks)
def writeFile(chunkfilename, filelist):
    # Write every entry of filelist, stripped of surrounding whitespace,
    # one per line, to chunkfilename.
    with open(chunkfilename, "w") as out:
        for entry in filelist:
            out.write(entry.strip() + "\n")
def main(argv):
chunkSize=0
chunkFile=None
ingestTaskId=''
usage = 'chunk_file.py -f <file> -c <chunkSize>'
try:
opts, args = getopt.getopt(argv,"hf:c:",["chunkFile=", "chunkSize="])
except getopt.GetoptError:
print usage
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print usage
sys.exit()
elif opt in ("-f", "--chunkFile"):
chunkFile = arg
elif opt in ("-c", "--chunkSize"):
chunkSize = long(arg)
if chunkFile == None or chunkSize == 0:
print usage
sys.exit()
print "Chunk Size: ["+str(chunkSize)+"]"
print "Chunk File: ["+str(chunkFile)+"]"
splitAndExecute(chunkFile, chunkSize)
# Script entry point: forward the command-line arguments (minus the program
# name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
13,299 | 19889965be0281e89e246a65c330a0f3cff56edd | # Lint as: python3
"""Quadruped toe position sensor."""
from typing import Any, Callable, Sequence, Text, Tuple, Type, Union
import gin
import gym
import numpy as np
from pybullet_envs.minitaur.envs_v2.sensors import sensor
from pybullet_envs.minitaur.envs_v2.utilities import noise_generators
def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):
"""Converts the inputs to a numpy array.
Args:
inputs: The input scalar or array.
dim: The dimension of the converted numpy array.
Returns:
The converted numpy array.
Raises:
ValueError: If the inputs is an array whose dimension does not match the
provided dimension.
"""
outputs = None
if isinstance(inputs, (tuple, np.ndarray)):
outputs = np.array(inputs)
else:
outputs = np.full(dim, inputs)
if len(outputs) != dim:
raise ValueError("The inputs array has a different dimension {}"
" than provided, which is {}.".format(len(outputs), dim))
return outputs
@gin.configurable
class ToePositionSensor(sensor.Sensor):
  """A sensor that outputs the toe positions of attached robots or objects."""

  def __init__(
      self,
      name: Text = "toe_position",
      dtype: Type[Any] = np.float64,
      lower_bound: Union[float, Sequence[float]] = -1.0,
      upper_bound: Union[float, Sequence[float]] = 1.0,
      noise_generator: Union[Callable[..., Any],
                             noise_generators.NoiseGenerator] = None,
      sensor_latency: Union[float, Sequence[float]] = 0.0,
  ):
    """Constructor.

    Args:
      name: Name of the sensor.
      dtype: Data type of sensor value.
      lower_bound: The optional lower bounds of the sensor reading.
      upper_bound: The optional upper bounds of the sensor reading.
      noise_generator: Used to add noise to the readings.
      sensor_latency: Single or multiple latency in seconds. See sensor.Sensor
        docstring for details.
    """
    super().__init__(
        name=name,
        sensor_latency=sensor_latency,
        interpolator_fn=sensor.linear_obs_blender)
    self._dtype = dtype
    self._lower_bound = lower_bound
    self._upper_bound = upper_bound
    self._noise_generator = noise_generator

  def set_robot(self, robot):
    """Attaches the robot and derives the observation space from its legs."""
    self._robot = robot
    # Three coordinates (x, y, z) per end effector.
    leg_count = len(robot.urdf_loader.get_end_effector_id_dict().values())
    flat_dim = leg_count * 3
    box = gym.spaces.Box(
        low=_convert_to_np_array(self._lower_bound, flat_dim),
        high=_convert_to_np_array(self._upper_bound, flat_dim),
        dtype=self._dtype)
    self._observation_space = self._stack_space(box)

  def _get_original_observation(self) -> Tuple[float, np.ndarray]:
    """Returns raw observation (flattened toe positions) with timestamp."""
    positions = np.array(self._robot.foot_positions(), dtype=self._dtype)
    return self._robot.timestamp, positions.flatten()

  def get_observation(self) -> np.ndarray:
    """Returns the latency-delayed observation, with optional noise applied."""
    delayed = super().get_observation()
    if not self._noise_generator:
      return delayed
    if callable(self._noise_generator):
      return self._noise_generator(delayed)
    return self._noise_generator.add_noise(delayed)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.