id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
9786411 | <filename>setup.py
from setuptools import setup

# Packaging metadata for the bidsio distribution.
setup(
    name='bidsio',
    version='0.1.0',
    packages=['bidsio'],
    url='https://github.com/npnl/bidsio',
    license='Apache 2.0',
    author='<NAME>',
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'numpy',
        'nibabel',
        'bids'
    ],
    author_email='<EMAIL>',
    description='BIDS IO for working with multiple BIDS datasets.'
)
| StarcoderdataPython |
5095417 | <reponame>manzanero/curso-python-auto<filename>testbdd/steps/renfe.py<gh_stars>1-10
import time
from behave import *
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
# Step patterns are matched as regular expressions.
use_step_matcher("re")


@given(u'estoy en el portal de Renfe')
def step_impl(context):
    """Open the Renfe home page and dismiss the cookie banner."""
    context.driver.get("https://www.renfe.es/")
    time.sleep(5)
    cookies = context.driver.find_element(By.ID, 'onetrust-accept-btn-handler')
    # Poll until the cookie-accept button is visible before clicking it.
    while not cookies.is_displayed():
        time.sleep(1)
    cookies.click()


@when(u'selecciono origen Madrid')
def step_impl(context):
    """Type 'Madrid' into the origin field and pick the first suggestion."""
    origin = context.driver.find_element(By.ID, 'origin')
    actions = ActionChains(context.driver)
    actions.move_to_element(origin)
    actions.click(origin)
    actions.send_keys('Madrid')
    # Arrow-down + Enter selects the first autocomplete suggestion.
    actions.send_keys(Keys.DOWN)
    actions.send_keys(Keys.RETURN)
    actions.perform()


@when(u'selecciono destino Barcelona')
def step_impl(context):
    """Type 'Barcelona' into the destination field and submit the search."""
    destination = context.driver.find_element(By.ID, 'destination')
    actions = ActionChains(context.driver)
    actions.move_to_element(destination).click(destination).send_keys('Barcelona')
    actions.send_keys(Keys.DOWN).send_keys(Keys.RETURN).perform()
    buy = context.driver.find_element(By.CSS_SELECTOR, 'button[type="submit"]')
    buy.click()
    time.sleep(5)


@then(u'llego a la lista de billetes')
def step_impl(context):
    """Assert that at least one ticket price element has loaded."""
    billetes = context.driver.find_elements(By.CSS_SELECTOR, '[class="booking-list-element-price"]')
    if not billetes:
        raise AssertionError('No se ha cargado la lista de billetes')


@when(u'selecciono un billete')
def step_impl(context):
    """Click the first visible ticket, then continue to the next page."""
    billetes = context.driver.find_elements(By.CSS_SELECTOR, '[class="booking-list-element-price"]')
    for billete in billetes:
        if billete.is_displayed():
            billete.click()
            break
    continuar = context.driver.find_element(By.ID, 'buttonBannerContinuar')
    continuar.click()


@then(u'el inicio de sesión es requerido')
def step_impl(context):
    """Wait for the login page."""
    # NOTE(review): this step only sleeps and asserts nothing -- confirm
    # whether a login-required check was intended here.
    time.sleep(5)
| StarcoderdataPython |
9773353 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import models
# About linker maps:
# * "Discarded input sections" include symbols merged with other symbols
#   (aliases), so the information there is not actually a list of unused things.
# * Linker maps include symbols that do not have names (with object path),
#   whereas "nm" skips over these (they don't account for much though).
# * The parse time for compressed linker maps is dominated by ungzipping.


class MapFileParser(object):
  """Parses a linker map file (tested only on files from gold linker)."""
  # Map file writer for gold linker:
  # https://github.com/gittup/binutils/blob/HEAD/gold/mapfile.cc

  def __init__(self):
    self._common_symbols = []  # models.Symbol entries from "Common symbol".
    self._symbols = []         # models.Symbol entries from "Memory map".
    self._section_sizes = {}   # section name -> size in bytes.
    self._lines = None         # Line iterator, set while Parse() runs.

  def Parse(self, lines):
    """Parses a linker map file.
    Args:
      lines: Iterable of lines.
    Returns:
      A tuple of (section_sizes, symbols).
    """
    self._lines = iter(lines)
    logging.debug('Scanning for Header')
    while True:
      line = self._SkipToLineWithPrefix('Common symbol', 'Memory map')
      if line.startswith('Common symbol'):
        self._common_symbols = self._ParseCommonSymbols()
        logging.debug('.bss common entries: %d', len(self._common_symbols))
        continue
      elif line.startswith('Memory map'):
        self._ParseSections()
        break
    return self._section_sizes, self._symbols

  def _SkipToLineWithPrefix(self, prefix, prefix2=None):
    """Advances the line iterator to the next line starting with a prefix.

    Returns the matching line, or None when the input is exhausted.
    """
    for l in self._lines:
      if l.startswith(prefix) or (prefix2 and l.startswith(prefix2)):
        return l

  def _ParsePossiblyWrappedParts(self, line, count):
    """Splits |line| into |count| whitespace-separated parts.

    Long symbol names cause the linker to wrap an entry onto the next line;
    when too few parts are found the following line is consumed as well.
    Returns None for a blank line.
    """
    parts = line.split(None, count - 1)
    if not parts:
      return None
    if len(parts) != count:
      line = next(self._lines)
      parts.extend(line.split(None, count - len(parts) - 1))
      assert len(parts) == count, 'parts: ' + ' '.join(parts)
    parts[-1] = parts[-1].rstrip()
    return parts

  def _ParseCommonSymbols(self):
    """Parses the "Common symbol" table into .bss models.Symbol entries."""
    # Common symbol size file
    #
    # ff_cos_131072 0x40000 obj/third_party/<snip>
    # ff_cos_131072_fixed
    # 0x20000 obj/third_party/<snip>
    ret = []
    next(self._lines)  # Skip past blank line
    name, size_str, path = None, None, None
    for l in self._lines:
      parts = self._ParsePossiblyWrappedParts(l, 3)
      if not parts:
        break
      name, size_str, path = parts
      sym = models.Symbol('.bss', int(size_str[2:], 16), full_name=name,
                          object_path=path)
      ret.append(sym)
    return ret

  def _ParseSections(self):
    """Parses the "Memory map" listing into models.Symbol entries.

    Inserts zero-sized "** symbol gap N" placeholder symbols so that gaps
    between symbols show up as padding.
    """
    # .text 0x0028c600 0x22d3468
    # .text.startup._GLOBAL__sub_I_bbr_sender.cc
    # 0x0028c600 0x38 obj/net/net/bbr_sender.o
    # .text._reset 0x00339d00 0xf0 obj/third_party/icu/icuuc/ucnv.o
    # ** fill 0x0255fb00 0x02
    # .text._ZN4base8AutoLockD2Ev
    # 0x00290710 0xe obj/net/net/file_name.o
    # 0x00290711 base::AutoLock::~AutoLock()
    # 0x00290711 base::AutoLock::~AutoLock()
    # .text._ZNK5blink15LayoutBlockFlow31mustSeparateMarginAfterForChildERK...
    # 0xffffffffffffffff 0x46 obj/...
    # 0x006808e1 blink::LayoutBlockFlow::...
    # .bss
    # .bss._ZGVZN11GrProcessor11initClassIDI10LightingFPEEvvE8kClassID
    # 0x02d4b294 0x4 obj/skia/skia/SkLightingShader.o
    # 0x02d4b294 guard variable for void GrProcessor::initClassID
    # .data 0x0028c600 0x22d3468
    # .data.rel.ro._ZTVN3gvr7android19ScopedJavaGlobalRefIP12_jfloatArrayEE
    # 0x02d1e668 0x10 ../../third_party/.../libfoo.a(bar.o)
    # 0x02d1e668 vtable for gvr::android::GlobalRef<_jfloatArray*>
    # ** merge strings
    # 0x0255fb00 0x1f2424
    # ** merge constants
    # 0x0255fb00 0x8
    # ** common 0x02db5700 0x13ab48
    syms = self._symbols
    while True:
      line = self._SkipToLineWithPrefix('.')
      if not line:
        break
      section_name = None
      try:
        # Parse section name and size.
        parts = self._ParsePossiblyWrappedParts(line, 3)
        if not parts:
          break
        section_name, section_address_str, section_size_str = parts
        section_address = int(section_address_str[2:], 16)
        section_size = int(section_size_str[2:], 16)
        self._section_sizes[section_name] = section_size
        if (section_name in ('.bss', '.rodata', '.text') or
            section_name.startswith('.data')):
          logging.info('Parsing %s', section_name)
          if section_name == '.bss':
            # Common symbols have no address.
            syms.extend(self._common_symbols)
          prefix_len = len(section_name) + 1  # + 1 for the trailing .
          symbol_gap_count = 0
          merge_symbol_start_address = section_address
          sym_count_at_start = len(syms)
          line = next(self._lines)
          # Parse section symbols.
          while True:
            if not line or line.isspace():
              break
            if line.startswith(' **'):
              zero_index = line.find('0')
              if zero_index == -1:
                # Line wraps.
                name = line.strip()
                line = next(self._lines)
              else:
                # Line does not wrap.
                name = line[:zero_index].strip()
                line = line[zero_index:]
              address_str, size_str = self._ParsePossiblyWrappedParts(line, 2)
              line = next(self._lines)
              # These bytes are already accounted for.
              if name == '** common':
                continue
              address = int(address_str[2:], 16)
              size = int(size_str[2:], 16)
              path = None
              sym = models.Symbol(section_name, size, address=address,
                                  full_name=name, object_path=path)
              syms.append(sym)
              if merge_symbol_start_address > 0:
                merge_symbol_start_address += size
            else:
              # A normal symbol entry.
              subsection_name, address_str, size_str, path = (
                  self._ParsePossiblyWrappedParts(line, 4))
              size = int(size_str[2:], 16)
              assert subsection_name.startswith(section_name), (
                  'subsection name was: ' + subsection_name)
              mangled_name = subsection_name[prefix_len:]
              name = None
              address_str2 = None
              while True:
                line = next(self._lines).rstrip()
                if not line or line.startswith(' .'):
                  break
                # clang includes ** fill, but gcc does not.
                if line.startswith(' ** fill'):
                  # Alignment explicitly recorded in map file. Rather than
                  # record padding based on these entries, we calculate it
                  # using addresses. We do this because fill lines are not
                  # present when compiling with gcc (only for clang).
                  continue
                elif line.startswith(' **'):
                  break
                elif name is None:
                  address_str2, name = self._ParsePossiblyWrappedParts(line, 2)
              if address_str == '0xffffffffffffffff':
                # The section needs special handling (e.g., a merge section)
                # It also generally has a large offset after it, so don't
                # penalize the subsequent symbol for this gap (e.g. a 50kb gap).
                # There seems to be no corelation between where these gaps occur
                # and the symbols they come in-between.
                # TODO(agrieve): Learn more about why this happens.
                if address_str2:
                  address = int(address_str2[2:], 16) - 1
                elif syms and syms[-1].address > 0:
                  # Merge sym with no second line showing real address.
                  address = syms[-1].end_address
                else:
                  logging.warning('First symbol of section had address -1')
                  address = 0
                merge_symbol_start_address = address + size
              else:
                address = int(address_str[2:], 16)
                # Finish off active address gap / merge section.
                if merge_symbol_start_address:
                  merge_size = address - merge_symbol_start_address
                  merge_symbol_start_address = 0
                  if merge_size > 0:
                    # merge_size == 0 for the initial symbol generally.
                    logging.debug('Merge symbol of size %d found at:\n %r',
                                  merge_size, syms[-1])
                    # Set size=0 so that it will show up as padding.
                    sym = models.Symbol(
                        section_name, 0,
                        address=address,
                        full_name='** symbol gap %d' % symbol_gap_count)
                    symbol_gap_count += 1
                    syms.append(sym)
              sym = models.Symbol(section_name, size, address=address,
                                  full_name=name or mangled_name,
                                  object_path=path)
              syms.append(sym)
          section_end_address = section_address + section_size
          if section_name != '.bss' and (
              syms[-1].end_address < section_end_address):
            # Set size=0 so that it will show up as padding.
            sym = models.Symbol(
                section_name, 0,
                address=section_end_address,
                full_name=(
                    '** symbol gap %d (end of section)' % symbol_gap_count))
            syms.append(sym)
          logging.debug('Symbol count for %s: %d', section_name,
                        len(syms) - sym_count_at_start)
      except:
        logging.error('Problem line: %r', line)
        logging.error('In section: %r', section_name)
        raise
| StarcoderdataPython |
3433911 | <gh_stars>0
import streamlit as st
import numpy as np
import pandas as pd
import keras
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, load_model
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPool2D
from keras import backend as K
import time
import io
from PIL import Image
from pathlib import Path
import urllib.request
import tensorflow
# ---- Page header -----------------------------------------------------------
st.markdown("<h1 style='text-align: center; color: teal;'>Artificial Intelligence Augmented Skin Imaging using Computer Vision and Neural Networks</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: teal;'> By <NAME></h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: teal;'> Msc Data Science at Newcastle University</h1>", unsafe_allow_html=True)

# ---- Sidebar: patient details form -----------------------------------------
st.sidebar.header("Patient Details")
patient_id = st.sidebar.text_input("Patient ID:", '#')
st.sidebar.text("Title of Patient:")
choice = st.sidebar.radio("Select the title of the patient:",
                          options=['Mr', 'Mrs', 'Ms', 'Miss', 'Master'])
patient_surname = st.sidebar.text_input("Patient Surname:")
patient_first_name = st.sidebar.text_input("Patient First Name:")
patient_middle_name = st.sidebar.text_input("Patient Middle Name:")
patient_age = st.sidebar.date_input("Select Date of Birth:", value=None)
if st.sidebar.button("Submit"):
    st.sidebar.success("Submission Successful")
else:
    st.sidebar.error("Kindly fill in patient details and click 'Submit'")
def data_gen(x):
    """Load an image file, resize it to 28x28, standardise it, and return a
    (1, 28, 28, 3) batch ready for model prediction."""
    resized = Image.open(x).resize((28, 28))
    pixels = np.asarray(np.asarray(resized).tolist())
    # Zero-mean / unit-variance normalisation over the whole image.
    pixels = (pixels - np.mean(pixels)) / np.std(pixels)
    return pixels.reshape(1, 28, 28, 3)
def data_gen_(img):
    """Standardise a raw RGB image array and return it shaped (1, 28, 28, 3).

    Args:
        img: array-like holding 28*28*3 pixel values (any shape).
    Returns:
        Standardised (zero-mean, unit-variance) array of shape (1, 28, 28, 3).
    """
    # BUG FIX: the original reshaped to (28, 28), which holds 784 elements and
    # can therefore never be reshaped to (1, 28, 28, 3) (2352 elements) below.
    # Reshape to the 3-channel image shape instead.
    img = np.asarray(img).reshape(28, 28, 3)
    x_test = np.asarray(img.tolist())
    x_test_mean = np.mean(x_test)
    x_test_std = np.std(x_test)
    x_test = (x_test - x_test_mean) / x_test_std
    x_validate = x_test.reshape(1, 28, 28, 3)
    return x_validate
def load_models():
    """Download (once) and load the trained skin-cancer Keras model.

    The weights file is cached under models/ and only downloaded when
    missing. Returns the loaded tensorflow.keras model.
    """
    with st.spinner("Downloading model... this may take awhile! \n Don't stop it!"):
        url = 'https://github.com/VinitaSilaparasetty/dissertation/releases/download/maiden/skincancer_98.h5'
        save_dest = Path('models')
        save_dest.mkdir(exist_ok=True)
        f_checkpoint = Path('models/skincancer_98.h5')
        if not f_checkpoint.exists():
            # BUG FIX: the original called download_file_from_google_drive(),
            # which is not defined anywhere (the urllib download code was
            # commented out), so a missing model raised NameError. Use the
            # already-imported urllib to fetch the released weights.
            urllib.request.urlretrieve(url, f_checkpoint)
        # BUG FIX: load the cached checkpoint path rather than a file named
        # "skincancer_98.h5" in the current working directory.
        model = tensorflow.keras.models.load_model(str(f_checkpoint))
        return model
def predict(x_test, model):
    """Run the model on a prepared (1, 28, 28, 3) batch.

    Returns:
        y_new: list of per-class probabilities as percentages (2 d.p.).
        Y_pred_classes: array holding the index of the most likely class.
    """
    Y_pred = model.predict(x_test)
    # NOTE(review): Sequential.predict_proba() only exists in older Keras
    # releases -- confirm it is available in the deployed version (Y_pred
    # could likely be reused here instead).
    ynew = model.predict_proba(x_test)
    K.clear_session()
    ynew = np.round(ynew, 2)
    ynew = ynew*100
    y_new = ynew[0].tolist()
    Y_pred_classes = np.argmax(Y_pred, axis=1)
    K.clear_session()
    return y_new, Y_pred_classes
# ---- Main flow: upload image -> preview -> diagnose -------------------------
st.markdown("<h3 style='text-align: left; color: black;'>Upload Image of Problem Area</h1>", unsafe_allow_html=True)
file_path = st.file_uploader('Upload an image', type=['png', 'jpg', 'jpeg'])
if file_path is not None:
    x_test = data_gen(file_path)
    image = Image.open(file_path)
    img_array = np.array(image)
    st.header("Image Preview")
    st.success('Upload Successful')
    st.image(img_array, use_column_width=True)
    model = load_models()
    st.header("Diagnosis")
    with st.spinner('Analyzing Image...'):
        time.sleep(5)
    y_new, Y_pred_classes = predict(x_test, model)
    # Map the predicted class index to a human-readable diagnosis label.
    if Y_pred_classes == 0:
        st.success('Patient has Actinic Keratoses')
    elif Y_pred_classes == 1:
        st.success('Patient has Basal Cell Carcinoma')
    elif Y_pred_classes == 2:
        st.success('Patient has Benign Keratosis-like Lesions')
    elif Y_pred_classes == 3:
        st.success('Patient has Dermatofibroma')
    elif Y_pred_classes == 4:
        st.success('Patient has Melanocytic Nevi')
    elif Y_pred_classes == 5:
        st.success('Patient has Melanoma')
    elif Y_pred_classes == 6:
        st.success('Patient has Vascular Lesions')
    else:
        st.error("Kindly try another image.")
else:
    st.info('Kindly Upload an Image')
| StarcoderdataPython |
4835326 | <reponame>RyanJarv/Pacu2
# generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:50:26+00:00
from __future__ import annotations
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Field, SecretStr
# ---- Error envelopes (all share an untyped root) ----------------------------
class ResourceNotFoundException(BaseModel):
    __root__: Any


class ValidationException(ResourceNotFoundException):
    pass


class AccessDeniedException(ResourceNotFoundException):
    pass


class ThrottlingException(ResourceNotFoundException):
    pass


class InternalServerException(ResourceNotFoundException):
    pass


# ---- Constrained scalar types -----------------------------------------------
class AttributePath(BaseModel):
    __root__: Annotated[
        str,
        Field(
            max_length=255, min_length=1, regex='[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P} \xa0]+'
        ),
    ]


class IdentityStoreId(BaseModel):
    __root__: Annotated[
        str, Field(max_length=12, min_length=1, regex='^d-[0-9a-f]{10}$')
    ]


class ResourceId(BaseModel):
    __root__: Annotated[
        str,
        Field(
            max_length=47,
            min_length=1,
            regex='^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$',
        ),
    ]


class GroupDisplayName(BaseModel):
    __root__: Annotated[
        str,
        Field(
            max_length=1024,
            min_length=1,
            regex='[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r \xa0]+',
        ),
    ]


class UserName(BaseModel):
    __root__: Annotated[
        SecretStr,
        Field(max_length=128, min_length=1, regex='[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+'),
    ]


class SensitiveStringType(BaseModel):
    __root__: Annotated[
        SecretStr,
        Field(
            max_length=1024,
            min_length=1,
            regex='[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r \xa0]+',
        ),
    ]


# ---- Filters ----------------------------------------------------------------
class Filter(BaseModel):
    """
    A query filter used by <code>ListUsers</code> and <code>ListGroup</code>. This filter object provides the attribute name and attribute value to search users or groups.
    """

    AttributePath: AttributePath
    AttributeValue: SensitiveStringType


class Filters(BaseModel):
    __root__: List[Filter]


# ---- Core resources ---------------------------------------------------------
class Group(BaseModel):
    """
    A group object, which contains a specified group’s metadata and attributes.
    """

    GroupId: ResourceId
    DisplayName: GroupDisplayName


class Groups(BaseModel):
    __root__: List[Group]


class MaxResults(BaseModel):
    __root__: Annotated[int, Field(ge=1.0, le=50.0)]


class NextToken(BaseModel):
    __root__: Annotated[
        str, Field(max_length=65535, min_length=1, regex='^[-a-zA-Z0-9+=/:]*')
    ]


class User(BaseModel):
    """
    A user object, which contains a specified user’s metadata and attributes.
    """

    UserName: UserName
    UserId: ResourceId


# ---- Request / response shapes ----------------------------------------------
class DescribeGroupResponse(Group):
    pass


class DescribeGroupRequest(BaseModel):
    IdentityStoreId: IdentityStoreId
    GroupId: ResourceId


class DescribeUserResponse(User):
    pass


class DescribeUserRequest(BaseModel):
    IdentityStoreId: IdentityStoreId
    UserId: ResourceId


class ListGroupsResponse(BaseModel):
    Groups: Groups
    NextToken: Optional[NextToken] = None


class ListGroupsRequest(BaseModel):
    IdentityStoreId: IdentityStoreId
    MaxResults: Optional[MaxResults] = None
    NextToken: Optional[NextToken] = None
    Filters: Optional[Filters] = None


class ListUsersRequest(BaseModel):
    IdentityStoreId: IdentityStoreId
    MaxResults: Optional[MaxResults] = None
    NextToken: Optional[NextToken] = None
    Filters: Optional[Filters] = None


class Users(BaseModel):
    __root__: List[User]


class ListUsersResponse(BaseModel):
    Users: Users
    NextToken: Optional[NextToken] = None
| StarcoderdataPython |
6406146 | <gh_stars>0
# %%
import numpy as np
import matplotlib.pyplot as plt
from scipy.fft import ifft,fft
from scipy.fftpack import fftshift
from scipy.signal import firwin, filtfilt
# %%
def LPF(n, fcut):
    """Design an n-tap FIR low-pass filter using a Hamming window.

    Parameters
    ----------
    n: Filter order (number of taps)
    fcut: cutoff frequency (normalised)
    """
    return firwin(n, fcut, window='hamming')
# %%
def recover_signal(sig, fcut):
    """Recover the baseband message from a received signal.

    Applies a 30-tap Hamming-window FIR low-pass filter forwards and
    backwards (zero-phase filtering).

    Parameters
    ----------
    sig: Received signal
    fcut: Cutoff frequency

    Returns
    -------
    recover: Recovered (zero-phase filtered) signal
    """
    taps = LPF(30, fcut)
    return filtfilt(taps, 1, sig)
# %%
def awgn(sig, snr):
    """Additive White Gaussian Noise.

    Parameters
    ----------
    sig: original signal (1-D array)
    snr: SNR in dB

    Returns
    -------
    The noisy signal, same length as ``sig``.
    """
    # Convert dB to a linear power ratio, then scale unit-variance Gaussian
    # noise so that signal_power / noise_power equals the requested ratio.
    linear_snr = 10 ** (snr / 10)
    signal_power = np.mean(sig ** 2)
    noise_power = signal_power / linear_snr
    return sig + np.sqrt(noise_power) * np.random.randn(len(sig))
# %%
def AM(sig, fm, fc, fs, N):
    '''
    AM modulation: modulate, coherently demodulate, low-pass recover, and
    plot all four stages.
    Parameters
    ----------
    sig: original signal
    fm: original signal frequency
    fc: carrier frequency
    fs: sampling frequency
    N: the number of points on the frequency domain
    ----------
    '''
    t = np.arange(0, 10/fm, 1/fs)
    a0 = max(sig)
    # NOTE(review): the offset is added after mixing (a0 + sig*cos); classic
    # envelope AM would be (a0 + sig)*cos -- confirm which is intended.
    am = a0 + sig*np.cos(2*np.pi*fc*t)
    psd = fftshift(fft(am, n=N))
    # Centred frequency axis, -fs/2 .. fs/2.
    f = np.arange(0, N)/N*fs
    f = f - fs/2
    # Coherent demodulation: mix with the carrier again, then low-pass.
    rec = am*np.cos(2*np.pi*fc*t)
    rec_psd = fftshift(fft(rec, n=N))
    fcut = 1.5*2*np.pi*fm/fs
    recover = recover_signal(rec, fcut)
    recover_fft = fftshift(fft(recover, N))
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.plot(f/fm, np.abs(psd))
    plt.xlabel('Frequency')
    plt.title('AM Modulation')
    plt.subplot(2, 2, 2)
    plt.plot(f/fm, np.abs(rec_psd))
    plt.xlabel('Frequency')
    plt.title('AM Demodulation')
    plt.subplot(2, 2, 3)
    plt.plot(f/fm, np.abs(recover_fft))
    plt.xlabel('Frequency')
    plt.title('AM Recovered Signal')
    plt.subplot(2, 2, 4)
    plt.plot(range(len(recover)), np.abs(recover))
    plt.xlabel('Time')
    plt.tight_layout()
    plt.show()
# %%
def DSB(sig, fm, fc, fs, N):
    '''
    DSB (double-sideband, suppressed carrier) modulation: modulate,
    coherently demodulate, low-pass recover, and plot all four stages.
    Parameters
    ----------
    sig: original signal
    fm: original signal frequency
    fc: carrier frequency
    fs: sampling frequency
    N: the number of points on the frequency domain
    ----------
    '''
    t = np.arange(0, 10/fm, 1/fs)
    # Suppressed-carrier modulation: message times carrier, no DC offset.
    dsb = sig*np.cos(2*np.pi*fc*t)
    psd = fftshift(fft(dsb, n=N))
    # Centred frequency axis, -fs/2 .. fs/2.
    f = np.arange(0, N)/N*fs
    f = f - fs/2
    # Coherent demodulation followed by low-pass recovery.
    rec = dsb*np.cos(2*np.pi*fc*t)
    rec_psd = fftshift(fft(rec, n=N))
    fcut = 1.5*2*np.pi*fm/fs
    recover = recover_signal(rec, fcut)
    recover_fft = fftshift(fft(recover, N))
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.plot(f/fm, np.abs(psd))
    plt.xlabel('Frequency')
    plt.title('DSB Modulation')
    plt.subplot(2, 2, 2)
    plt.plot(f/fm, np.abs(rec_psd))
    plt.xlabel('Frequency')
    plt.title('DSB Demodulation')
    plt.subplot(2, 2, 3)
    plt.plot(f/fm, np.abs(recover_fft))
    plt.xlabel('Frequency')
    plt.title('DSB Recovered Signal')
    plt.subplot(2, 2, 4)
    plt.plot(range(len(recover)), np.abs(recover))
    plt.xlabel('Time')
    plt.tight_layout()
    plt.show()
# %%
def SSB(sig, fm, fc, fs, N):
    '''
    SSB modulation demo.
    Parameters
    ----------
    sig: original signal
    fm: original signal frequency
    fc: carrier frequency
    fs: sampling frequency
    N: the number of points on the frequency domain
    ----------
    '''
    t = np.arange(0, 10/fm, 1/fs)
    # NOTE(review): the transmitted signal here is identical to DSB (message
    # times carrier); only the *recovered* spectrum has its negative half
    # zeroed below, for display. A true SSB modulator would suppress one
    # sideband before transmission -- confirm intent.
    ssb = sig*np.cos(2*np.pi*fc*t)
    psd = fftshift(fft(ssb, n=N))
    f = np.arange(0, N)/N*fs
    f = f - fs/2
    rec = ssb*np.cos(2*np.pi*fc*t)
    rec_psd = fftshift(fft(rec, n=N))
    fcut = 1.5*2*np.pi*fm/fs
    recover = recover_signal(rec, fcut)
    recover_fft = fftshift(fft(recover, N))
    # Zero the lower half of the shifted spectrum (one sideband) for the plot.
    recover_fft[:int(len(recover_fft)/2)] = 0
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.plot(f/fm, np.abs(psd))
    plt.xlabel('Frequency')
    plt.title('SSB Modulation')
    plt.subplot(2, 2, 2)
    plt.plot(f/fm, np.abs(rec_psd))
    plt.xlabel('Frequency')
    plt.title('SSB Demodulation')
    plt.subplot(2, 2, 3)
    plt.plot(f/fm, np.abs(recover_fft))
    plt.xlabel('Frequency')
    plt.title('SSB Recovered Signal')
    plt.subplot(2, 2, 4)
    plt.plot(range(len(recover)), np.abs(recover))
    plt.xlabel('Time')
    plt.tight_layout()
    plt.show()
# %%
def main():
    """Demo: modulate a (slightly) noisy 1 kHz sine with AM, DSB and SSB."""
    fc = 1e4  # carrier frequency (Hz)
    fs = 1e5  # sampling frequency (Hz)
    fm = 1e3  # message frequency (Hz)
    t = np.arange(0, 10/fm, 1/fs)
    A = 5
    m = A*np.sin(2*np.pi*fm*t)
    # Add noise at 100 dB SNR (effectively clean).
    m = awgn(m, 100)
    N = 4096  # FFT length
    AM(m, fm, fc, fs, N)
    DSB(m, fm, fc, fs, N)
    SSB(m, fm, fc, fs, N)


# %%
if __name__ == '__main__':
    main()
| StarcoderdataPython |
288516 | <filename>server/gcalendar.py
"""
Shows basic usage of the Google Calendar API. Creates a Google Calendar API
service object and outputs a list of the next 10 events on the user's calendar.
"""
from googleapiclient import discovery
import datetime
import json
# API key and calendar id are kept outside the repository.
with open("../secrets/keys.json") as f:
    secrets = json.load(f)


def get_events(n=10):
    """Return the next ``n`` upcoming events from the configured calendar.

    Returns a dict {"events": [...]} of Google Calendar API event
    resources ordered by start time.
    """
    build = discovery.build
    service = build('calendar', 'v3', developerKey=secrets["calendar_api_key"])
    # Call the Calendar API
    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    events_result = service.events().list(calendarId=secrets["calendar_id"], timeMin=now,
                                          maxResults=n, singleEvents=True,
                                          orderBy='startTime').execute()
    events = events_result.get('items', [])
    return {"events": events}


if __name__ == "__main__":
    print(get_events(20))
| StarcoderdataPython |
5176261 | <reponame>trcooke/57-exercises-python<filename>src/exercises/Ex03_printing_quotes/test_quotes.py
import unittest
from exercises.Ex03_printing_quotes import quotes
class TestQuotes(unittest.TestCase):
    """Unit tests for quotes.quotation() output formatting."""

    def test_givenPlainQuote_returnQuotationOutput(self):
        # Expect: <who> says, "<what>" with the quote in double quotes.
        self.assertEqual(quotes.quotation('<NAME>', 'If in doubt, git checkout'),
                         "<NAME> says, \"If in doubt, git checkout\"")

    def test_givenTextWithQuotes_returnQuotationOutput(self):
        # Apostrophes inside the quoted text must pass through unchanged.
        self.assertEqual(quotes.quotation('Obi-Wan Kenobi', 'These aren\'t the droids you\'re looking for.'),
                         "Obi-Wan Kenobi says, \"These aren\'t the droids you\'re looking for.\"")
| StarcoderdataPython |
4903669 | <filename>tests/test_wxpython.py
import pytest
import psidialogs
from test_dialogs import check
backend = "wxpython"

# Only generate the parametrised dialog tests when the wxpython backend is
# registered and the wx module itself can be imported.
if backend in psidialogs.backends():
    if psidialogs.util.check_import("wx"):

        @pytest.mark.parametrize("dialogtype", psidialogs.dialog_types())
        def test_wxpython(dialogtype):
            check(backend, dialogtype)
| StarcoderdataPython |
4908818 | <reponame>sah-py/exercises<gh_stars>0
# Class with the coordinates of the shapes
# Existing shapes:
#   Cube
#   Square pyramid
#   Triangular pyramid
#   Triangular prism
#   (plus custom shapes via custom_shape)
class Shapes:
    """Builds 2-D vertex coordinates for simple wireframe shapes and hands
    them to the grid for drawing."""

    def __init__(self, grid):
        self.grid = grid  # drawing surface; must expose _shape and _draw_shape()
        self.shape = {}   # layer index -> {vertex name: (x, y)}

    def cube(self, x=1, y=1, ln=3):
        """Two offset square layers (front and back faces)."""
        self.shape = {
            0: {
                'A': (x + int(ln / 2), y),
                'B': (x + int(ln / 2) + ln, y),
                'C': (x + ln, y + int(ln / 2)),
                'D': (x, y + int(ln / 2))
            },
            1: {
                'A': (x + int(ln / 2), y + ln),
                'B': (x + int(ln / 2) + ln, y + ln),
                'C': (x + ln, y + int(ln / 2) + ln),
                'D': (x, y + int(ln / 2) + ln)
            }
        }
        return self._pack()

    def square_pyramid(self, x=1, y=1, ln=3):
        """Apex layer 'O' above a four-point base layer."""
        self.shape = {
            0: {
                'O': (x + int(ln / 2), y)
            },
            1: {
                'A': (x, y + ln),
                'B': (x + int(ln / 2), y + int(ln/1.3)),
                'C': (x + ln, y + ln),
                'D': (x + int(ln/2), y + ln + int(ln/3))
            }
        }
        return self._pack()

    def triangular_pyramid(self, x=1, y=1, ln=3):
        """Apex layer 'O' above a three-point base layer."""
        self.shape = {
            0: {
                'O': (x + int(ln/2), y)
            },
            1: {
                'A': (x, y + ln),
                'B': (x + ln, y + ln),
                'C': (x + int(ln/2), y + int(ln/3) + ln)
            }
        }
        return self._pack()

    def triangular_prism(self, x=1, y=1, ln=3):
        """Two identical triangles, the second shifted right by 2*ln."""
        self.shape = {
            0: {
                'A': (x + int(ln / 1.5), y),
                'B': (x + ln, y + int(ln/3) + ln),
                'C': (x, y + ln)
            },
            1: {
                'A': (x + int(ln / 1.5) + ln * 2, y),
                'B': (x + ln + ln * 2, y + int(ln / 3) + ln),
                'C': (x + ln * 2, y + ln)
            }
        }
        return self._pack()

    def custom_shape(self, shape):
        """Draw a caller-supplied layer -> {name: point} mapping."""
        self.shape = shape
        return self._pack()

    def _parse(self):
        """Replace vertex names with unique integer ids across layers.

        'O' becomes id 0 (connected with every other point); other names get
        ord(name) shifted by a per-layer offset so ids do not collide
        between layers.
        """
        points_count = 0
        for points in self.shape.values():
            points_count += len(list(points.values()))
        # Edit names to indexes (iterate over copies while mutating).
        for layer, points in self.shape.copy().items():
            for name, point in points.copy().items():
                del self.shape[layer][name]
                # "O" connect with all points
                if name == 'O':
                    self.shape[layer][0] = point
                else:
                    self.shape[layer][ord(name) + int(layer)*(points_count*10)] = point

    def _pack(self):
        """Parse the current shape and forward it to the grid for drawing."""
        self._parse()
        self.grid._shape = self.shape
        return self.grid._draw_shape()
| StarcoderdataPython |
8143511 | <gh_stars>0
from . import domain
from . import poisson
from . import ins
from . import imbound
from . import quantum
from . import io
| StarcoderdataPython |
11382218 | """
Given a word W and a string S, find all starting indices in S which are anagrams of W.
For example, given that W is "ab", and S is "abxaba", return 0, 3, and 4
"""
MAX = 256  # size of the per-character count arrays (extended ASCII)


def compare(arr1, arr2):
    """Return True when the two character-count arrays are identical.

    Replaces a manual element-by-element loop that also printed both arrays
    on every call -- leftover debug output that flooded stdout for every
    window compared by search().
    """
    return arr1 == arr2
# this function searches for all permutations (anagrams) of pat[] in txt[]
def search(pat, txt):
    """Print and return every index in txt where an anagram of pat starts.

    Sliding-window character counts: the window's count vector is updated in
    O(1) per shift, so the whole scan is O(len(txt)). The original console
    output is preserved; additionally the indices are now returned (the
    original only printed them), and a pattern longer than the text no
    longer raises on the initial window fill.

    Returns:
        List of starting indices.
    """
    M = len(pat)
    N = len(txt)
    found = []
    if M > N:
        return found

    # countP: character counts of the pattern.
    # countTW: character counts of the current window of text.
    countP = [0] * 256
    countTW = [0] * 256
    for i in range(M):
        countP[ord(pat[i])] += 1
        countTW[ord(txt[i])] += 1

    # Slide the window across the remaining characters of the text.
    for i in range(M, N):
        if countP == countTW:
            print("Found at Index ", (i - M))
            found.append(i - M)
        # Add current character to current window.
        countTW[ord(txt[i])] += 1
        # Remove the first character of the previous window.
        countTW[ord(txt[i - M])] -= 1

    # Check for the last window in text.
    if countP == countTW:
        print("Found at Index", N - M)
        found.append(N - M)
    return found
if __name__ == "__main__":
    # Demo: anagrams of "ABCD" occur at indices 0, 5 and 6.
    txt = "BACDGABCDA"
    pat = "ABCD"
    search(pat, txt)
| StarcoderdataPython |
3450566 | <filename>train_utils.py<gh_stars>0
import torch
import torch.nn.functional as F
import os
from metrics_check import check_error_equal_rate, check_error_equal_rate2
def checkpoint_(epoch, model, optimizer, path):
    """Serialise a training checkpoint to ``path``.

    The saved dict holds 'epoch', the model 'state_dict' and the
    'optimizer' state.

    Read checkpoint example:
        state = torch.load(filepath)
        model.load_state_dict(state['state_dict'])
        optimizer.load_state_dict(state['optimizer'])
    """
    torch.save(
        {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        },
        path,
    )
def ramp_up(epoch, range_=80, gauss_factor=-5):
    """Gaussian ramp-up weight for the consistency (MSE) loss term.

    Returns a float32 scalar tensor exp(gauss_factor * (1 - t)^2) with
    t = epoch / range_, saturating at 1.0 once ``epoch`` exceeds ``range_``.

    BUG FIX: the original computed torch.mul(1. - t, 1. - t) on two Python
    floats, which raises TypeError on current PyTorch (torch.mul requires a
    Tensor argument); the math is now done in plain Python floats and only
    wrapped in a tensor at the end.
    """
    if epoch > range_:
        return torch.tensor(1.0, dtype=torch.float32)
    t = epoch / range_
    gap = 1.0 - t
    return torch.exp(torch.tensor(gauss_factor * gap * gap, dtype=torch.float32))
def train_(model, optimizer, train_dset, val_dset, train_eer_dset, device, epochs=5, print_every=5):
    """Supervised training loop (binary cross-entropy on logits).

    Tracks per-epoch loss and equal-error-rate on the train/validation sets.

    Returns:
        dict with 'loss', 'eer_train' and 'eer_val' histories, one entry
        per epoch.
    """
    train_history = {
        "loss": [],
        "eer_train": [],
        "eer_val": []
    }
    model = model.to(device=device)
    for e in range(epochs):
        for t, (x, y) in enumerate(train_dset):
            model.train()
            x = x.to(device=device, dtype=torch.float32)
            y = y.to(device=device, dtype=torch.float32)
            scores = model(x)
            loss = F.binary_cross_entropy_with_logits(scores, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if t % print_every == 0:
                print('Iteration %d, loss = %.4f' % (t, loss.item()))
                check_error_equal_rate(val_dset, model, verbose=True)
                print()
        # History is appended once per epoch, using the last batch's loss.
        train_history['loss'].append(loss.item())
        train_history['eer_train'].append(check_error_equal_rate(train_eer_dset, model, device))
        train_history['eer_val'].append(check_error_equal_rate(val_dset, model, device))
    return train_history
def loss_val(model, val_dset, device):
    """Compute BCE-with-logits loss on one validation batch (no gradients)."""
    model.eval()
    with torch.no_grad():
        features, targets = next(iter(val_dset))
        features = features.to(device=device, dtype=torch.float32)
        targets = targets.to(device=device, dtype=torch.float32)
        logits = model(features)
        return F.binary_cross_entropy_with_logits(logits, targets).item()
def train_pi_model(model, optimizer, train_dset, val_dset, train_dset_eer, device, epochs=5, print_every=5):
    """Pi-model semi-supervised training loop.

    Each batch yields two augmentations (x1, x2) of the same inputs plus a
    mask marking labelled examples. The loss is supervised BCE on the
    labelled subset plus a ramped-up MSE consistency term between the two
    prediction vectors.

    Returns:
        dict with per-epoch 'loss_train', 'loss_val', 'eer_train',
        'eer_val' histories.
    """
    train_history = {
        "loss_train": [],
        "loss_val": [],
        "eer_train": [],
        "eer_val": []
    }
    model = model.to(device=device)
    for e in range(epochs):
        # Consistency-loss weight grows with the epoch (Gaussian ramp-up).
        ramp_up_value = ramp_up(e)
        ramp_up_value = ramp_up_value.to(device=device, dtype=torch.float32)
        for t, (x1, x2, y, mask) in enumerate(train_dset):
            model.train()
            x1 = x1.to(device=device, dtype=torch.float32)
            x2 = x2.to(device=device, dtype=torch.float32)
            y = y.to(device=device, dtype=torch.float32)
            mask = mask.to(device=device, dtype=torch.uint8)
            scores1 = model(x1)
            scores2 = model(x2)
            # Supervised term uses only the labelled samples (mask == 1).
            cross_entropy = F.binary_cross_entropy_with_logits(scores1[mask == 1], y[mask == 1])
            mse = ramp_up_value * F.mse_loss(scores1, scores2)
            loss = cross_entropy + mse
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if t % print_every == 0:
                print('Epoch %d, Iteration %d, loss = %.4f' % (e, t, loss.item()))
                check_error_equal_rate(val_dset, model, device=device, verbose=True)
                print()
        # Per-epoch history (loss of the last batch).
        train_history['loss_train'].append(loss.item())
        train_history['loss_val'].append(loss_val(model, val_dset, device))
        train_history['eer_train'].append(check_error_equal_rate(train_dset_eer, model, device))
        train_history['eer_val'].append(check_error_equal_rate(val_dset, model, device))
    return train_history
def loss_val_v2(model, val_dset, device):
    """Validation BCE loss for models returning (encodings, scores) tuples."""
    model.eval()
    with torch.no_grad():
        features, targets = next(iter(val_dset))
        features = features.to(device=device, dtype=torch.float32)
        targets = targets.to(device=device, dtype=torch.float32)
        _, logits = model(features)
        return F.binary_cross_entropy_with_logits(logits, targets).item()
def train_pi_model_v2(model, optimizer, train_dset, val_dset, train_dset_eer, device, start_e=0, epochs=5,
                      print_every=5):
    """Pi-model training loop, v2: consistency loss on *encodings*.

    Unlike train_pi_model, the MSE term compares the intermediate encodings
    returned by the model (which yields (encodings, scores)) rather than the
    final scores, and training can resume from ``start_e``.

    Returns:
        dict with per-epoch 'loss_train', 'loss_val', 'eer_train',
        'eer_val' histories.
    """
    train_history = {
        "loss_train": [],
        "loss_val": [],
        "eer_train": [],
        "eer_val": []
    }
    model = model.to(device=device)
    for e in range(start_e, epochs):
        # Consistency-loss weight grows with the epoch (Gaussian ramp-up).
        ramp_up_value = ramp_up(e)
        ramp_up_value = ramp_up_value.to(device=device, dtype=torch.float32)
        for t, (x1, x2, y, mask) in enumerate(train_dset):
            model.train()
            x1 = x1.to(device=device, dtype=torch.float32)
            x2 = x2.to(device=device, dtype=torch.float32)
            y = y.to(device=device, dtype=torch.float32)
            mask = mask.to(device=device, dtype=torch.uint8)
            encodings1, scores1 = model(x1)
            encodings2, scores2 = model(x2)
            # Supervised BCE on labelled samples + consistency on encodings.
            cross_entropy = F.binary_cross_entropy_with_logits(scores1[mask == 1], y[mask == 1])
            mse = ramp_up_value * F.mse_loss(encodings1, encodings2)
            loss = cross_entropy + mse
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if t % print_every == 0:
                print('Epoch %d, Iteration %d, loss = %.4f' % (e, t, loss.item()))
                check_error_equal_rate2(val_dset, model, device=device, verbose=True)
                print()
        # Per-epoch history (loss of the last batch).
        train_history['loss_train'].append(loss.item())
        train_history['loss_val'].append(loss_val_v2(model, val_dset, device))
        train_history['eer_train'].append(check_error_equal_rate2(train_dset_eer, model, device))
        train_history['eer_val'].append(check_error_equal_rate2(val_dset, model, device))
    return train_history
def train_pi_model_v3(model, optimizer, train_dset, val_dset, train_dset_eer, device, checkpoint_params,
                      start_e=0, epochs=5, checkpoint=True, checkpoint_every=10):
    """Train a Pi-model (as train_pi_model_v2) with periodic checkpointing.

    checkpoint_params: (lr, dr, checkpoint_path) — used only to name and
    locate the checkpoint files.

    Fix: the final checkpoint filename previously omitted the "_" separator
    between the dropout digits and the epoch number, so it did not match the
    periodic checkpoints' naming scheme; filenames are now built by a single
    helper.
    """
    lr, dr, checkpoint_path = checkpoint_params

    def _ckpt_file(epoch):
        # Single source of truth for checkpoint names: "<lr>_<dr digits>_<epoch>.pt".
        return os.path.join(checkpoint_path, str(lr) + "_" + str(dr)[2:6] + "_" + str(epoch) + '.pt')

    train_history = {
        "loss_train": [],
        "loss_val": [],
        "eer_train": [],
        "eer_val": []
    }
    model = model.to(device=device)
    for e in range(start_e, epochs):
        # Weight of the consistency loss grows with the epoch (ramp-up schedule).
        ramp_up_value = ramp_up(e)
        ramp_up_value = ramp_up_value.to(device=device, dtype=torch.float32)
        for t, (x1, x2, y, mask) in enumerate(train_dset):
            model.train()
            x1 = x1.to(device=device, dtype=torch.float32)
            x2 = x2.to(device=device, dtype=torch.float32)
            y = y.to(device=device, dtype=torch.float32)
            # mask == 1 marks labelled samples; only those enter the supervised term.
            mask = mask.to(device=device, dtype=torch.uint8)
            encodings1, scores1 = model(x1)
            encodings2, scores2 = model(x2)
            cross_entropy = F.binary_cross_entropy_with_logits(scores1[mask == 1], y[mask == 1])
            mse = ramp_up_value * F.mse_loss(encodings1, encodings2)
            loss = cross_entropy + mse
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if checkpoint and e % checkpoint_every == 0:
            checkpoint_(e, model, optimizer, path=_ckpt_file(e))
            print(f"Checkpoint was created at epoch: {e}", end="\n")
        # NOTE(review): only the LAST batch's training loss is recorded per epoch.
        train_history['loss_train'].append(loss.item())
        train_history['loss_val'].append(loss_val_v2(model, val_dset, device))
        train_history['eer_train'].append(check_error_equal_rate2(train_dset_eer, model, device))
        train_history['eer_val'].append(check_error_equal_rate2(val_dset, model, device))
    # Final checkpoint after training completes (name now matches the scheme above).
    checkpoint_(e, model, optimizer, path=_ckpt_file(e))
    print()
    return train_history
| StarcoderdataPython |
1962050 | <gh_stars>0
import torch
# Presumably: |log| values below this threshold are treated as zero — TODO confirm usage.
LOG_NORMAL_ZERO_THRESHOLD = 1e-5
# pi computed through torch (acos(0) * 2 == pi) so its precision matches torch tensors.
pi_val = torch.acos(torch.zeros(1)).item() * 2
3382650 | <reponame>issca/inferbeddings<gh_stars>10-100
# -*- coding: utf-8 -*-
import pytest
import inferbeddings.parse.clauses as clauses
@pytest.mark.light
def test_parse_clauses_one():
    """A rule with a three-atom body parses into a Clause with default weight 1.0."""
    tree = clauses.grammar.parse('p(x, y) :- p(x, z), q(z, a), r(a, y)')
    result = clauses.ClauseVisitor().visit(tree)
    assert isinstance(result, clauses.Clause)
    head = result.head
    assert isinstance(head, clauses.Atom)
    assert isinstance(result.body, tuple)
    assert isinstance(head.predicate, clauses.Predicate)
    assert isinstance(head.arguments, tuple)
    assert isinstance(head.negated, bool)
    assert result.weight == 1.0
@pytest.mark.light
def test_parse_atom_clause():
    """A bare atom parses into a body-less Clause with variable/constant arguments."""
    tree = clauses.grammar.parse('p(X, y)')
    result = clauses.ClauseVisitor().visit(tree)
    assert isinstance(result, clauses.Clause)
    head = result.head
    assert isinstance(head, clauses.Atom)
    assert len(result.body) == 0
    assert head.predicate.name == "p"
    # Upper-case argument -> Variable; lower-case -> Constant.
    assert isinstance(head.arguments[0], clauses.Variable)
    assert isinstance(head.arguments[1], clauses.Constant)
    assert head.arguments[1].name == "y"
    assert result.weight == 1.0
@pytest.mark.light
def test_parse_weighted_atom_clause():
    """An explicit '< -1.2 >' annotation sets the clause weight."""
    tree = clauses.grammar.parse('p(X, y) < -1.2 >')
    assert clauses.ClauseVisitor().visit(tree).weight == -1.2
@pytest.mark.light
def test_parse_weighted_arity_2_clause():
    """A weighted rule with a two-atom body carries its annotated weight."""
    tree = clauses.grammar.parse('p(X, y) :- r(X,Z), q(X) < 1.2 >')
    assert clauses.ClauseVisitor().visit(tree).weight == 1.2
@pytest.mark.light
def test_parse_learnable_weight_arity_2_clause():
    """A '< ? >' annotation on a rule leaves the weight unset (learnable)."""
    tree = clauses.grammar.parse('p(X, y) :- r(X,Z), q(X) < ? >')
    assert clauses.ClauseVisitor().visit(tree).weight is None
@pytest.mark.light
def test_parse_learnable_weight_atom_clause():
    """A '< ? >' annotation on a bare atom leaves the weight unset (learnable)."""
    tree = clauses.grammar.parse('p(X, y) < ? >')
    assert clauses.ClauseVisitor().visit(tree).weight is None
@pytest.mark.light
def test_parse_clauses_two():
    """Quoted predicate names parse into ordinary string predicate names."""
    tree = clauses.grammar.parse('"P"(x, y) :- p(x, z), q(z, a), "R"(a, y)')
    result = clauses.ClauseVisitor().visit(tree)
    assert isinstance(result, clauses.Clause)
    head = result.head
    assert isinstance(head, clauses.Atom)
    assert isinstance(head.predicate.name, str)
    assert isinstance(result.body, tuple)
    assert isinstance(head.predicate, clauses.Predicate)
    assert isinstance(head.arguments, tuple)
    assert isinstance(head.negated, bool)
    assert result.weight == 1.0
# Allow running this test module directly, outside a pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
| StarcoderdataPython |
12811722 | from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spin_basis_1d, boson_basis_1d, spinless_fermion_basis_1d, spinful_fermion_basis_1d
from quspin.basis import spin_basis_general, boson_basis_general, spinless_fermion_basis_general, spinful_fermion_basis_general
from itertools import product
import numpy as np
# Smoke test: for even and odd system sizes, every 1d basis and its "general"
# counterpart must be constructible over the same even particle-number sectors,
# first without symmetries and then in the zero-momentum (kblock) sector.
for L in [6,7]:
    # symmetry-free
    basis_1=spin_basis_1d(L=L,Nup=range(0,L,2))
    basis_1g=spin_basis_general(N=L,Nup=range(0,L,2))
    basis_2=boson_basis_1d(L=L,Nb=range(0,L,2))
    basis_2g=boson_basis_general(N=L,Nb=range(0,L,2))
    basis_3=spinless_fermion_basis_1d(L=L,Nf=range(0,L,2))
    basis_3g=spinless_fermion_basis_general(N=L,Nf=range(0,L,2))
    basis_4=spinful_fermion_basis_1d(L=L,Nf=product(range(0,L,2),range(0,L,2)) )
    basis_4g=spinful_fermion_basis_general(N=L,Nf=product(range(0,L,2),range(0,L,2)) )
    # symmetry-ful: t is the translation permutation used for the momentum block
    t = (np.arange(L)+1)%L
    basis_1=spin_basis_1d(L=L,Nup=range(0,L,2),kblock=0)
    basis_1g=spin_basis_general(N=L,Nup=range(0,L,2),kblock=(t,0))
    basis_2=boson_basis_1d(L=L,Nb=range(0,L,2),kblock=0)
    basis_2g=boson_basis_general(N=L,Nb=range(0,L,2),kblock=(t,0))
    basis_3=spinless_fermion_basis_1d(L=L,Nf=range(0,L,2),kblock=0)
    basis_3g=spinless_fermion_basis_general(N=L,Nf=range(0,L,2),kblock=(t,0))
    basis_4=spinful_fermion_basis_1d(L=L,Nf=product(range(0,L,2),range(0,L,2)),kblock=0 )
    basis_4g=spinful_fermion_basis_general(N=L,Nf=product(range(0,L,2),range(0,L,2)),kblock=(t,0))
print("passed particle number sectors test")
| StarcoderdataPython |
379394 | <reponame>FurkanOzkaya/ParkApi
from gc import get_objects
from math import atan2, cos, radians, sin, sqrt
from app.models import ParkModel
from app.api.serializers import ParkSerializer
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
# Approximate radius of Earth in km; used by the haversine distance below.
R = 6373.0
class ParkApiList(APIView):
    """List parking areas within a requested distance (km) of a coordinate."""

    def get_object(self):
        # Returns the full queryset, or the sentinel string "NoContent".
        # NOTE(review): .all() never raises DoesNotExist; the try/except is
        # kept only so the sentinel contract stays unchanged for callers.
        try:
            return ParkModel.objects.all()
        except ParkModel.DoesNotExist:
            return "NoContent"

    def post(self, request, format=None):
        """Return parking areas closer than request.data['distance'] km.

        Fix: the original bare `except:` swallowed every exception (including
        SystemExit/KeyboardInterrupt); only payload-parsing errors now map to
        400.
        """
        try:
            lat1 = float(request.data["latitude"])
            lon1 = float(request.data["longitude"])
            req_distance = float(request.data["distance"])
        except (KeyError, TypeError, ValueError):
            # Missing keys or non-numeric values in the payload.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        parking_areas = self.get_object()
        if isinstance(parking_areas, str):
            return Response(status=status.HTTP_204_NO_CONTENT)
        available_parking_areas = [
            area for area in parking_areas
            if self.get_distance(lat1, lon1, area.latitude, area.longitude) < req_distance
        ]
        serializer = ParkSerializer(available_parking_areas, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def get_distance(self, lat1, lon1, lat2, lon2):
        """Great-circle (haversine) distance in km between two lat/lon points."""
        lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
        c = 2 * atan2(sqrt(a), sqrt(1 - a))
        return R * c
class ParkApi(APIView):
    """CRUD endpoints for a single parking area."""

    def get_object(self, pk):
        # Returns the model instance, or the sentinel string "NoContent".
        try:
            return ParkModel.objects.get(id=pk)
        except ParkModel.DoesNotExist:
            return "NoContent"

    def get(self, request, pk, format=None):
        """Retrieve one parking area by primary key."""
        parking_area = self.get_object(pk)
        if isinstance(parking_area, str):
            return Response(status=status.HTTP_204_NO_CONTENT)
        serializer = ParkSerializer(parking_area)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """Create a new parking area."""
        serializer = ParkSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete a parking area; 204 when it does not exist."""
        parking_area = self.get_object(pk)
        if isinstance(parking_area, str):
            return Response(status=status.HTTP_204_NO_CONTENT)
        parking_area.delete()
        return Response(status=status.HTTP_202_ACCEPTED)

    def put(self, request, pk, format=None):
        """Update an existing parking area.

        Fix: the serializer is now bound to the fetched instance, so save()
        performs an UPDATE; previously it created a brand-new row and the
        looked-up `parking_area` was ignored.
        """
        parking_area = self.get_object(pk)
        if isinstance(parking_area, str):
            return Response(status=status.HTTP_204_NO_CONTENT)
        serializer = ParkSerializer(parking_area, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| StarcoderdataPython |
# Rabbit feeding game: read a board and a move list from stdin, walk the
# rabbit ('*') around collecting food, then print the final board and score.
#
# Cell legend (from the scoring rules below): C=+10, A=+5, M=-5, X=visited,
# W=wall (move ignored), P=end point (move onto it and stop).
#
# This rewrite removes the four near-identical U/D/L/R branches and fixes two
# defects: (a) the D/R boundary checks compared row/cell VALUES against the
# last row/cell (misfiring on duplicate content) and now compare indices;
# (b) stepping onto an unknown cell aborted the whole move loop only in the
# 'R' branch — it is now consistently ignored in every direction.
print("Please enter feeding map as a list:")
harita = input()
# Strip list punctuation from the textual representation, e.g.
# "[['X', 'C'], ['W', '*']]" -> "X, C ,  W, *" (rows separated by " , ").
cleaned = harita.replace("[", " ").replace("]", " ").strip().replace("'", "")
Map_list = [line.split(", ") for line in cleaned.split(" , ")]
# Human-readable board, built with the same transform chain as before so the
# printed layout is unchanged.
Show_map = cleaned.replace(" , ", "---").replace("  ", " ").replace(",", "").replace("---", "\n")

# Locate the rabbit (assumes exactly one '*'; keeps the last match, as before).
for r, cells in enumerate(Map_list):
    for c, cell in enumerate(cells):
        if cell == "*":
            row, columns = r, c

print("Please enter direction of movements as a list:")
yonler = input()
Movements_list = yonler.replace("[", "").replace("]", "").replace("'", "").split(", ")

point = 0
print("Your board is:")
print(Show_map)

DELTAS = {"U": (-1, 0), "D": (1, 0), "L": (0, -1), "R": (0, 1)}
GAINS = {"C": 10, "A": 5, "M": -5, "X": 0}  # score change when stepping onto a cell

for move in Movements_list:
    if move not in DELTAS:
        continue
    dr, dc = DELTAS[move]
    new_row, new_col = row + dr, columns + dc
    # Index-based boundary test (defect (a) fixed).
    if not (0 <= new_row < len(Map_list) and 0 <= new_col < len(Map_list[row])):
        continue
    target = Map_list[new_row][new_col]
    if target == "W":
        continue  # wall: this move is ignored
    if target not in GAINS and target != "P":
        continue  # unknown cell: ignored in every direction (defect (b) fixed)
    point += GAINS.get(target, 0)
    Map_list[new_row][new_col] = "*"
    Map_list[row][columns] = "X"
    row, columns = new_row, new_col
    if target == "P":
        break  # reached the end point: stop processing moves

# Reproduce the original output format exactly, including the single leading
# space on every row after the first (an artefact of the original str() munging).
Last_map = "\n ".join(" ".join(cells) for cells in Map_list)
print("Your output should be like this:")
print(Last_map)
print("Your score is : {}".format(point))
| StarcoderdataPython |
5080705 | <reponame>SolidStateGroup/Bullet-Train-API
# Generated by Django 2.2.25 on 2022-01-14 17:49
from django.db import migrations, models
import django.db.models.deletion
import django_lifecycle.mixins
import environments.api_keys
class Migration(migrations.Migration):
    """Regenerate the environment api_key default and add EnvironmentAPIKey.

    NOTE: generated migration — keep the operation list stable; edits here can
    desynchronise applied databases.
    """

    dependencies = [
        ('environments', '0016_webhook_secret'),
    ]

    operations = [
        # Client-side API key now comes from a dedicated generator function.
        migrations.AlterField(
            model_name='environment',
            name='api_key',
            field=models.CharField(default=environments.api_keys.generate_client_api_key, max_length=100, unique=True),
        ),
        # Server-side keys: named, optionally expiring, deactivatable.
        migrations.CreateModel(
            name='EnvironmentAPIKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(default=environments.api_keys.generate_server_api_key, max_length=100, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=100)),
                ('expires_at', models.DateTimeField(blank=True, null=True)),
                ('active', models.BooleanField(default=True)),
                ('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='api_keys', to='environments.Environment')),
            ],
            options={
                'abstract': False,
            },
            bases=(django_lifecycle.mixins.LifecycleModelMixin, models.Model),
        ),
    ]
| StarcoderdataPython |
1602213 | <filename>InterviewCake/StockPrice.py
class StockPrice:
    """Best achievable profit from one buy followed by one sell."""

    def get_max_profit(self, stock_prices_yesterday):
        """Return the maximum profit from buying then selling once.

        Requires at least two prices; the result may be negative when every
        later price is lower than every earlier one.
        """
        if len(stock_prices_yesterday) < 2:
            raise IndexError('Getting a profit requires at least 2 prices')
        lowest_so_far = stock_prices_yesterday[0]
        best_profit = stock_prices_yesterday[1] - stock_prices_yesterday[0]
        # One pass: for each price, the best sale uses the cheapest EARLIER buy.
        for price in stock_prices_yesterday[1:]:
            best_profit = max(best_profit, price - lowest_so_far)
            lowest_so_far = min(lowest_so_far, price)
        return best_profit
| StarcoderdataPython |
5094080 | <reponame>parrisma/TicTacToe-DeepLearning
import logging
import random
import numpy as np
from reflrn.Interface.ReplayMemory import ReplayMemory
from reflrn.Interface.State import State
#
# Manage the shared replay memory between {n} actors in an Actor/Critic model.
#
# ToDo: Consider https://github.com/robtandy/randomdict as a non functional improvement
#
class DictReplayMemory(ReplayMemory):
    """Shared replay memory for {n} actors in an Actor/Critic model.

    Memories are keyed by the state's string form, so revisiting a state
    replaces the older memory. Each entry is tagged with an episode id so
    whole episodes could later be recovered for training. Capacity is
    bounded; at capacity a random entry is evicted.
    """
    # Offsets into a stored memory tuple.
    mem_episode_id = 0
    mem_state = 1
    mem_next_state = 2
    mem_action = 3
    mem_reward = 4
    mem_complete = 5

    def __init__(self,
                 lg: logging,
                 replay_mem_size: int
                 ):
        """:param lg: logger (kept for interface compatibility).
        :param replay_mem_size: maximum number of memories held."""
        self.__replay_memory = dict()
        self.__replay_mem_size = replay_mem_size
        self.__episode_id = 0
        self.__lg = lg
        return

    def append_memory(self,
                      state: State,
                      next_state: State,
                      action: int,
                      reward: float,
                      episode_complete: bool) -> None:
        """Store a transition, tagged with the current episode id."""
        # If the same memory (by state string) exists, drop it so the more
        # recent memory wins.
        sas = state.state_as_string()
        if sas in self.__replay_memory:
            del self.__replay_memory[sas]
        if len(self.__replay_memory) >= self.__replay_mem_size:
            # At capacity: evict a random element.
            rndk = random.choice(list(self.__replay_memory.keys()))
            del self.__replay_memory[rndk]
        self.__replay_memory[sas] = (self.__episode_id, state, next_state, action, reward, episode_complete)
        if episode_complete:
            self.__episode_id += 1
        return

    def len(self) -> int:
        """Number of memories currently held."""
        return len(self.__replay_memory)

    def get_random_memories(self,
                            sample_size: int,
                            whole_episodes: bool = False) -> [[int, State, State, int, float, bool]]:
        """Return up to sample_size DISTINCT memories in random order.

        Fix: np.random.choice previously sampled keys WITH replacement (its
        default), so the result could contain duplicate memories;
        replace=False now guarantees distinct samples. The sample size is
        capped at the current memory size, so replace=False is always valid.
        ToDo: whole_episodes=True is not implemented.
        """
        ln = self.len()
        samples = list()
        for k in np.random.choice(list(self.__replay_memory.keys()), min(ln, sample_size), replace=False):
            samples.append(self.__replay_memory[k])
        # Shuffle so callers never depend on key ordering.
        return random.sample(samples, len(samples))

    def get_last_memory(self, state: State = None) -> [int, State, State, int, float, bool]:
        raise RuntimeError("get_last_memory, method not implemented")
| StarcoderdataPython |
# Static fixture: image records (Imgur URLs); presumably consumed by an API or
# seed script elsewhere — TODO confirm the consumer.
data = [
    {
        "img": "https://i.imgur.com/CidvAPT.png",
    },
    {
        "img": "https://i.imgur.com/oiT1TNx.png",
    },
    {
        "img": "https://i.imgur.com/W2ox2xI.png",
    },
    {
        "img": "https://i.imgur.com/WOamDpN.png",
    },
    {
        "img": "https://i.imgur.com/MSRh0eP.jpg",
    },
    {
        "img": "https://i.imgur.com/k0doLk1.png",
    },
    {
        "img": "https://i.imgur.com/2WEQyRR.png",
    }
]
| StarcoderdataPython |
6415203 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
* <NAME> <<EMAIL>>
"""
import unittest
from ma.xml_loader.asm_to_ppc import AssemblyReplacer
class AssemblyReplacerTest(unittest.TestCase):
    """Test cases for the Assembly replacer.

    NOTE(review): the method names do not start with 'test', so plain unittest
    discovery will not run them; presumably the project's runner (e.g. nose's
    '*_test' matching) collects them — confirm before renaming.
    """

    def get_replace_test(self):
        '''get_replace() must map x86 asm snippets to their PPC replacements.'''
        asm_replacer = AssemblyReplacer()
        # assertEqual gives a useful diff on failure, unlike assertTrue(a == b).
        self.assertEqual(asm_replacer.get_replace('lock;xchgl;'),
                         "__atomic_test_and_set(/*void *ptr, int memmodel*/);")
        self.assertEqual(asm_replacer.get_replace('lock;orl;'),
                         "__atomic_fetch_or(/*type *ptr, type val, int memmodel*/);")
        self.assertEqual(asm_replacer.get_replace('pause;'), "\"or 27,27,27; isync\"")

    def get_type_test(self):
        '''get_type() must classify snippets as 'builtin' or 'asm'.'''
        asm_replacer = AssemblyReplacer()
        self.assertEqual(asm_replacer.get_type('lock;xchgl;'), "builtin")
        self.assertEqual(asm_replacer.get_type('lock;orl;'), "builtin")
        self.assertEqual(asm_replacer.get_type('rdtsc;'), "asm")
        self.assertEqual(asm_replacer.get_type('pause;'), "asm")
| StarcoderdataPython |
8115124 | # BE THE JODI MAKER
# BE THE JODI MAKER
# Contest solution. For each test case: read n (name, partner) pairs into d,
# then follow the partner chain from the first name. If the chain visits all
# n names and returns to the start (a single cycle, n > 2) print "YES";
# otherwise print "NO" plus the number of mutual (two-person) pairs.
for _ in range(int(input())):
    d = {}
    for i in range(int(input())):
        A = [a for a in input().split()]
        d[A[0]] = A[1]
        if i==0:
            s = A[0]  # chain start: first name read
    #print(d)
    n = i+1
    ct = 0
    s1 = s
    f = 1  # flag: chain is a single full-length cycle
    #print(s1,s,n)
    while ct<n:
        if s not in d:
            # Chain leads to a name with no partner entry: not a cycle.
            f = 0
            break
        s = d[s]
        ct += 1
        if s==s1 and ct<n:
            # Returned to the start too early: cycle shorter than n.
            f = 0
            break
    if s==s1 and f and n>2:
        print("YES")
        print()
    else:
        # Count mutual pairs (a -> b and b -> a); each found pair is marked
        # '0' so it is not counted twice.
        ct = 0
        for key in d:
            if d[key] in d:
                if key==d[d[key]]:
                    d[d[key]] = '0'
                    ct += 1
        print("NO")
        print(ct)
        print()
| StarcoderdataPython |
12804939 | #!/usr/bin/env python
from setuptools import find_packages, setup
# Python 2/3 compatibility shim for the configparser module name.
try:
    import ConfigParser as configparser
except ImportError:
    import configparser

# Long description comes straight from the README.
with open("README.rst") as f:
    LONG_DESCRIPTION = f.read()

# The package version is kept in setup.cfg under [src] version.
config = configparser.ConfigParser()
config.read("setup.cfg")

setup(
    name="gits",
    version=config.get("src", "version"),
    license="MIT",
    description="One command to interact with multiple git repositories",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/x-rst",
    author="Peilonrayz",
    author_email="<EMAIL>",
    url="https://peilonrayz.github.io/gits",
    project_urls={
        "Bug Tracker": "https://github.com/Peilonrayz/gits/issues",
        "Documentation": "https://peilonrayz.github.io/gits",
        "Source Code": "https://github.com/Peilonrayz/gits",
    },
    packages=find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    zip_safe=False,
    install_requires=[],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    keywords="",
    # Installs the `gits` console command.
    entry_points={"console_scripts": ["gits=gits.__main__:main"]},
)
| StarcoderdataPython |
1729457 | <filename>surprise/genre_similarities.py
import numpy as np
import pandas as pd
import rdkit.Chem as Chem
def compute_f_matrix(user_based, trainset, genre_file):
    """
    Compute (n_x)-by-(n_g) matrix of relative frequencies in pd.Data.Frame, where
    n_x is the size of users if user_based is True;
    n_x is the size of items if user_based is not True.
    n_g is the number of genres.
    :param user_based: True to indicate user-based collaborative filtering method.
    :param trainset: a Trainset class instance.
    :param genre_file: a csv file that consists of
    genre.columns[0] is the items column if user_based is True; users column if user_based is False.
    genre.columns[1] is an unused column.
    genre.columns[2:] are the genre levels.
    :returns: (n_x, f), where f is a DataFrame of size (n_x)-by-(n_g)
    """
    if user_based:
        n_x = trainset.n_users
    else:
        n_x = trainset.n_items
    # Build genre and ratings in pandas data frame
    genre = pd.read_csv(genre_file)  # item-by-genre
    # Keys are coerced to str so the merge below joins on matching dtypes.
    genre[genre.columns[0]] = genre[genre.columns[0]].apply(str)
    # 'x' is the axis we aggregate over (user or item); 'y' is the other one.
    if user_based:
        ratings = pd.DataFrame(trainset.build_testset(), columns=['x','y','r'])
    else:
        ratings = pd.DataFrame(trainset.build_testset(), columns=['y','x','r'])
    # Merge ratings and genre
    ratings_genre = pd.merge(ratings, genre, how='left', left_on='y', right_on=genre.columns[0])
    ratings_genre = ratings_genre.drop(['y','r',genre.columns[0],genre.columns[1]], axis=1)
    # Compute relative genre frequency matrix: per-x genre counts / row totals.
    z = ratings_genre.groupby('x').sum()
    denom = z.sum(axis=1)
    # NOTE(review): a zero row total yields NaN entries here, not zeros —
    # the original "Check div by zero" TODO still stands.
    f = z.div(denom, axis=0)  ## Check div by zero
    # Sort rows of f by x's inner id so row i corresponds to inner id i.
    if user_based:
        inner_id_f = [trainset.to_inner_uid(x) for x in f.index]
    else:
        inner_id_f = [trainset.to_inner_iid(x) for x in f.index]
    f = f.set_index([inner_id_f])
    f.sort_index(inplace=True, ascending=True)
    return n_x, f
def squared_deviance(n_x, f):
    """
    Genre similarity matrix from squared deviations of relative frequencies.

    For each genre column, pairs are penalised by max(f_i, f_j) * (f_i - f_j)^2
    and normalised by the summed max weights; similarity = 1 - penalty.
    Pairs with an all-zero denominator get similarity 0.
    """
    freqs = np.array(f)
    numerator = np.zeros((n_x, n_x))
    denominator = np.zeros((n_x, n_x))
    for col in freqs.T:
        pairwise_max = np.maximum.outer(col, col)
        diff = np.subtract.outer(col, col)
        numerator += pairwise_max * diff ** 2
        denominator += pairwise_max
    # Where denominator == 0 the quotient stays at its initial value 1,
    # so the returned similarity there is 0 (matches the original behaviour).
    return 1 - np.divide(numerator, denominator,
                         out=np.ones_like(numerator),
                         where=denominator != 0)
def absolute_deviance(n_x, f):
    """
    Genre similarity matrix from absolute deviations of relative frequencies.

    Identical to squared_deviance but penalises pairs with
    max(f_i, f_j) * |f_i - f_j| per genre column. Pairs with an all-zero
    denominator get similarity 0.
    """
    freqs = np.array(f)
    numerator = np.zeros((n_x, n_x))
    denominator = np.zeros((n_x, n_x))
    for col in freqs.T:
        pairwise_max = np.maximum.outer(col, col)
        diff = np.subtract.outer(col, col)
        numerator += pairwise_max * np.abs(diff)
        denominator += pairwise_max
    # Where denominator == 0 the quotient stays 1 -> similarity 0 (as before).
    return 1 - np.divide(numerator, denominator,
                         out=np.ones_like(numerator),
                         where=denominator != 0)
def compute_fpsim(user_based, trainset, fpsim_file):
    """
    Compute (n_x)-by-(n_x) matrix of fingerprint similarity
    n_x is the size of users if user_based is True, and "CID" indicates user;
    n_x is the size of items if user_based is not True, and "CID" indicates item.
    :param user_based: True to indicate user-based collaborative filtering method.
    :param trainset: a Trainset class instance.
    :param fpsim_file: a pickle file containing dataframe including columns "CID" and "fp".
    :returns: (n_x, n_x) numpy array
    """
    # Read fp (RDKit fingerprint objects, one per compound)
    fp_df = pd.read_pickle(fpsim_file)  # with columns "CID" and "fp"
    # Keep only the compounds present in the trainset, mapped to inner ids.
    if user_based:
        n_x = trainset.n_users
        raw_uid_list = [trainset.to_raw_uid(x) for x in range(n_x)]
        fp_df_subset = fp_df[fp_df["CID"].isin(raw_uid_list)].copy()
        fp_df_subset["inner_id"] = fp_df_subset["CID"].apply(trainset.to_inner_uid)
    else:
        n_x = trainset.n_items
        raw_iid_list = [trainset.to_raw_iid(x) for x in range(n_x)]
        fp_df_subset = fp_df[fp_df["CID"].isin(raw_iid_list)].copy()
        fp_df_subset["inner_id"] = fp_df_subset["CID"].apply(trainset.to_inner_iid)
    # Sort fp by inner-id of trainset so row i corresponds to inner id i.
    # NOTE(review): assumes every trainset id has a fingerprint row; a missing
    # CID would silently misalign rows — TODO confirm upstream guarantees.
    fp_df_subset.sort_values(by='inner_id', inplace=True)
    # Compute pairwise Tanimoto similarity, one row at a time.
    fp_sim = np.ones(shape=(n_x, n_x), dtype=np.float32)
    for i in range(n_x):
        fp_sim[i, :] = Chem.DataStructs.BulkTanimotoSimilarity(
            fp_df_subset["fp"].iloc[i], fp_df_subset["fp"].to_list())
    return fp_sim
| StarcoderdataPython |
3412251 | <filename>run.py
import sc2, sys
from __init__ import run_ladder_game
from sc2 import Race, Difficulty
from sc2.player import Bot, Computer, Human
import random
# Load bot
from Overmind import Overmind
# The ladder/local entry bot: Zerg, driven by the Overmind AI.
bot = Bot(Race.Zerg, Overmind())

# Start game
if __name__ == '__main__':
    if "--LadderServer" in sys.argv:
        # Ladder game started by LadderManager
        print("Starting ladder game...")
        run_ladder_game(bot)
    else:
        # Local game against the built-in AI on a random map from the pool.
        print("Starting local game...")
        map_name = random.choice(["CatalystLE"])
        #map_name = random.choice(["ProximaStationLE", "NewkirkPrecinctTE", "OdysseyLE", "MechDepotLE", "AscensiontoAiurLE", "BelShirVestigeLE"])
        #map_name = "(2)16-BitLE"
        sc2.run_game(sc2.maps.get(map_name), [
            #Human(Race.Terran),
            bot,
            Computer(Race.Random, Difficulty.VeryHard)  # CheatInsane VeryHard
        ], realtime=False, save_replay_as="Example.SC2Replay")
| StarcoderdataPython |
117857 | <reponame>cariad/stackwhy<filename>tests/test_cli.py
from io import StringIO
from mock import Mock
from stackwhy.cli import entry
# Syntactically valid CloudFormation stack ARN reused as a fixture by the tests below.
valid_arn = "arn:aws:cloudformation:eu-west-2:000000000000:stack/X/00000000-0000-0000-0000-000000000000"
def test_help() -> None:
    """With no arguments the CLI prints usage text and exits 0."""
    out = StringIO()
    assert entry([], session=Mock(), writer=out) == 0
    assert out.getvalue().startswith("usage:")
def test_render() -> None:
    """Rendering stack events writes a coloured, newest-first table."""
    # Two stubbed CloudFormation events; the second has a stack-ARN physical id
    # (elided in the output) and an in-progress status.
    response = {
        "StackEvents": [
            {
                "LogicalResourceId": "LogicalResourceId1",
                "PhysicalResourceId": "PhysicalResourceId1",
                "ResourceStatus": "ResourceStatus1",
                "ResourceStatusReason": "ResourceStatusReason1",
                "ResourceType": "ResourceType1",
            },
            {
                "LogicalResourceId": "LogicalResourceId2",
                "PhysicalResourceId": valid_arn,
                "ResourceStatus": "CREATE_IN_PROGRESS",
                "ResourceStatusReason": "ResourceStatusReason2",
                "ResourceType": "ResourceType2",
            },
        ],
    }
    # Mock chain: session.client(...).describe_stack_events(...) -> response.
    describe_stack_events = Mock(return_value=response)
    client = Mock()
    client.describe_stack_events = describe_stack_events
    session = Mock()
    session.client = Mock(return_value=client)
    writer = StringIO()
    assert (
        entry(
            [valid_arn],
            session=session,
            writer=writer,
        )
        == 0
    )
    # Expected output embeds ANSI bold/colour escape codes verbatim.
    assert (
        writer.getvalue()
        == """\x1b[1mLogical ID\x1b[22m \x1b[1mPhysical ID\x1b[22m \x1b[1mResource Type\x1b[22m \x1b[1mStatus\x1b[22m \x1b[1mReason\x1b[22m
\x1b[38;5;10mLogicalResourceId2\x1b[39m \x1b[38;5;10mResourceType2\x1b[39m \x1b[38;5;10mCREATE IN PROGRESS\x1b[39m \x1b[38;5;10mResourceStatusReason2\x1b[39m
\x1b[38;5;10mLogicalResourceId1\x1b[39m \x1b[38;5;10mPhysicalResourceId1\x1b[39m \x1b[38;5;10mResourceType1\x1b[39m \x1b[38;5;10mResourceStatus1\x1b[39m \x1b[38;5;10mResourceStatusReason1\x1b[39m
"""
    )
def test_version() -> None:
    """--version prints the (placeholder) version string and exits 0."""
    out = StringIO()
    assert entry(["--version"], session=Mock(), writer=out) == 0
    assert out.getvalue() == "-1.-1.-1\n"
| StarcoderdataPython |
11364759 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# abstract base class for inheritance
class Base(db.Model):
    """Abstract base model: auto-increment PK plus created/modified timestamps."""
    __abstract__ = True

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    # Refreshed automatically on every UPDATE.
    date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
                              onupdate=db.func.current_timestamp())
# question data model
# question data model
class Questions(Base):
    """A poll question; answer options are attached through the Polls model."""
    question = db.Column(db.String(5000))

    def __repr__(self):
        return self.question

    def to_json(self):
        # NOTE(review): reads self.options and self.status, but this model only
        # declares 'question', and Polls' backref is named 'answers' — confirm
        # whether these attributes exist at runtime or this method is dead code.
        return {
            'question': self.question,
            #'options': [Polls(answer=Answers(name=answer))
            #if answers_query(answer).count() == 0
            #else Polls(answer=answers_query(answer).first()) for answer in poll['answers']
            #],
            'options':
                [{'name': option.option.name, 'vote_count': option.vote_count}
                 for option in self.options.all()],
            'status': self.status
        }
# answer data model
# answer data model
class Answers(Base):
    """A single answer choice text."""
    # NOTE(review): Polls.__repr__ reads self.answer.name, but this model only
    # defines 'answer' — confirm the intended attribute name.
    answer = db.Column(db.String(500))
# poll (question + answer choices) data model
# poll (question + answer choices) data model
class Polls(Base):
    """Join table linking a question to one answer choice, with its vote count."""
    question_id = db.Column(db.Integer, db.ForeignKey('questions.id'))
    answer_id = db.Column(db.Integer, db.ForeignKey('answers.id'))
    vote_count = db.Column(db.Integer, default=0)
    status = db.Column(db.Boolean)

    # relations
    question = db.relationship('Questions', foreign_keys=[question_id],
                               backref=db.backref('answers', lazy='dynamic'))
    answer = db.relationship('Answers', foreign_keys=[answer_id])

    def __repr__(self):
        # NOTE(review): Answers has no 'name' column (only 'answer') — this
        # would raise AttributeError if ever called; confirm.
        return self.answer.name
# user data model
# user data model
class Users(Base):
    """Application user."""
    username = db.Column(db.String(50), unique=True)
    password = db.Column(db.String(200))  # presumably a password hash — TODO confirm
    role = db.Column(db.String(7))  # short role label (max 7 chars)
# attendance data model
# attendance data model
class Attendance(Base):
    """One attendance record: a user checking in to a course with a keyword."""
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    keyword = db.Column(db.String(100))
    course = db.Column(db.String(7))

    def to_json(self):
        return {
            'user': self.user_id,
            'course': self.course,
            'keyword': self.keyword
        }
1968109 | <gh_stars>0
from pymongo import MongoClient
import sys
solution = {
'coder_id': 123,
'task_id': 12,
'solution': 'def subtract(x, y):\n return x - y\n',
'test_cases': 'assert subtract(5, 3) == 2\nassert subtract(7, 4) == 3\n',
'status': 'edited',
'language': 'Python'
}
def populate_db(solutions):
    """Insert every document in `solutions` into the module-level db's 'solution' collection."""
    for document in solutions:
        db.solution.insert_one(document)
if __name__ == '__main__':
    # NOTE(review): credentials are hard-coded; move them to environment variables.
    MONGODB_USER = 'mongo-ad'
    MONGODB_USER_PASS = '<PASSWORD>'
    MONGODB_HOST = 'localhost'
    url = f'mongodb://{MONGODB_USER}:{MONGODB_USER_PASS}@{MONGODB_HOST}/admin?retryWrites=true&w=majority'
    db = MongoClient(url).codearena_mdb
    solutions = []
    # argv[1]: how many copies of the template document to insert.
    for i in range(int(sys.argv[1])):
        solutions.append(solution.copy())
    populate_db(solutions)
| StarcoderdataPython |
9777265 | <gh_stars>100-1000
import os
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Reshape, Dropout
from tensorflow.keras import Model
def get_hyperparams():
    """Return the local training hyperparameters used by this party."""
    local_params = {'training': {'epochs': 3}}
    return local_params
def get_model_config(folder_configs, dataset, is_agg=False, party_id=0):
    """Build and save a compiled Keras MNIST MLP, returning its IBM-FL model spec.

    The compiled model is written to folder_configs as an h5 file; the
    returned dict points IBM-FL's TensorFlowFLModel wrapper at that file.
    (dataset / is_agg / party_id are unused here but kept for the framework's
    config-generator interface.)
    """
    import tensorflow as tf
    from tensorflow import keras
    from tensorflow.keras.layers import Dense, Flatten, Conv2D, Reshape, Dropout
    import copy

    num_classes = 10
    img_rows, img_cols = 28, 28
    input_shape = (img_rows, img_cols, 1)

    # Simple MLP: flatten 28x28 images, two dense+dropout layers, softmax head.
    model = tf.keras.Sequential()
    model.add(Reshape((784,), input_shape=input_shape))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    # NOTE(review): from_logits=True with a softmax output layer — the outputs
    # are probabilities, not logits; confirm this is intentional.
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True)
    model.compile(loss=loss_object,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    # Save model
    fname = os.path.join(folder_configs, 'compiled_keras_global.h5')
    # set save_format = 'h5'
    model.save(fname, save_format='h5')

    spec = {'model_name': 'tf-cnn',
            'model_definition': fname}
    model = {
        'name': 'TensorFlowFLModel',
        'path': 'ibmfl.model.tensorflow_fl_model',
        'spec': spec
    }
    return model
| StarcoderdataPython |
269497 | #!/usr/bin/python
# API Gateway Ansible Modules
#
# Modules in this project allow management of the AWS API Gateway service.
#
# Authors:
# - <NAME> <github: bjfelton>
#
# apigw_resource
# Manage creation, update, and removal of API Gateway Resource resources
#
# MIT License
#
# Copyright (c) 2016 <NAME>, Emerson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
DOCUMENTATION='''
module: apigw_resource
author: <NAME> (@bjfelton)
short_description: Add or remove Resource resources
description:
- An Ansible module to add or remove Resource resources for AWS API Gateway.
version_added: "2.2"
options:
name:
description:
- The name of the resource on which to operate
required: True
rest_api_id:
description:
- The id of the parent rest api
required: True
state:
description:
- Determine whether to assert if resource should exist or not
choices: ['present', 'absent']
default: 'present'
required: False
requirements:
- python = 2.7
- boto
- boto3
notes:
- This module requires that you have boto and boto3 installed and that your credentials are created or stored in a way that is compatible (see U(https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration)).
'''
EXAMPLES = '''
- name: Add resource to Api Gateway
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create resource
apigw_resource:
name: '/thing/{param}/awesomeness'
rest_api_id: 'abcd1234'
state: present
register: resource
- name: debug
debug: var=resource
- name: Rest api from Api Gateway
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete resource
apigw_rest_api:
name: '/thing/not-awesome'
rest_api_id: 'abcd1234'
state: absent
register: resource
- name: debug
debug: var=resource
'''
RETURN = '''
# Sample create output
{
"changed": true,
"invocation": {
"module_args": {
"name": "/test",
"rest_api_id": "abc123def567",
"state": "present"
}
},
"resource": {
"ResponseMetadata": {
"HTTPHeaders": {
"content-length": "73",
"content-type": "application/json",
"date": "Wed, 02 Nov 2016 20:47:23 GMT",
"x-amzn-requestid": "an id was here"
},
"HTTPStatusCode": 201,
"RequestId": "an id was here",
"RetryAttempts": 0
},
"id": "abc55tda",
"parentId": "xyz123",
"path": "/test",
"pathPart": "test"
}
}
'''
__version__ = '${version}'
import copy
try:
import boto3
import boto
from botocore.exceptions import BotoCoreError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
class ApiGwResource:
    """Manages creation and deletion of AWS API Gateway Resource objects.

    Builds a path -> id map of all existing resources for the target REST API,
    then creates or deletes the requested path depending on ``state``.
    """
    def __init__(self, module):
        """
        Constructor

        :param module: the AnsibleAWSModule wrapping user arguments and AWS auth
        """
        self.module = module
        if (not HAS_BOTO3):
            self.module.fail_json(msg="boto and boto3 are required for this module")
        self.client = module.client('apigateway')
        # Maps full resource paths to {'id': ..., 'parentId': ...} entries.
        self.path_map = {
            'paths': {}
        }

    @staticmethod
    def _define_module_argument_spec():
        """
        Defines the module's argument spec
        :return: Dictionary defining module arguments
        """
        return dict( name=dict(required=True),
                     rest_api_id=dict(required=True),
                     state=dict(default='present', choices=['present', 'absent'])
        )

    def _build_resource_dictionary(self):
        """Populate self.path_map from the API's existing resources.

        NOTE(review): get_resources is called once with limit=500 and no
        `position` pagination loop — APIs with more than 500 resources would
        be truncated. Confirm whether pagination is needed here.
        """
        try:
            resources = self.client.get_resources(restApiId=self.module.params.get('rest_api_id'), limit=500)
            for res in resources.get('items'):
                self.path_map['paths'][res.get('path')] = {'id': res.get('id')}
                if 'parentId' in res:
                    self.path_map['paths'][res.get('path')]['parentId'] = res.get('parentId')
        except BotoCoreError as e:
            self.module.fail_json(msg="Error calling boto3 get_resources: {}".format(e))

    @staticmethod
    def _build_create_resources_list(path_map, resource):
        """
        Splits resource and builds a list of create operations
        :param path_map: A map containing path parts
        :param resource: The url to create
        :return: Ordered list of resources to create
        """
        operations = []
        last_part = ''
        # Skip the leading empty string produced by the leading '/'.
        parts = resource.split('/')[1:]
        for part in parts:
            new_part = "{0}/{1}".format(last_part, part)
            # Only paths that do not already exist need a create operation.
            if new_part not in path_map['paths']:
                operations.append({'part': part, 'path': new_part, 'parent': '/' if last_part == '' else last_part})
            last_part = new_part
        return operations

    def _create_resource(self):
        """
        Create an API Gateway Resource

        :return: (changed, result)
                  changed: Boolean indicating whether or not a change occurred
                  result: Output of the create_resource call
        """
        changed = False
        result = None
        if self.module.params.get('name') not in self.path_map['paths']:
            changed = True
            # In check mode we report changed=True but perform no AWS calls,
            # so result stays None.
            if not self.module.check_mode:
                try:
                    operations = ApiGwResource._build_create_resources_list(self.path_map, self.module.params.get('name'))
                    # Create each missing ancestor in order, recording new ids
                    # so subsequent children can reference their parent.
                    for op in operations:
                        part = op['part']
                        result = self.client.create_resource(
                            restApiId=self.module.params.get('rest_api_id'),
                            parentId=self.path_map['paths'][op['parent']]['id'],
                            pathPart=part
                        )
                        self.path_map['paths'][op['path']] = {'id': result.get('id')}
                except BotoCoreError as e:
                    self.module.fail_json(msg="Error calling boto3 create_resource: {}".format(e))
        else:
            # Resource already exists: return its cached info unchanged.
            result = copy.deepcopy(self.path_map['paths'][self.module.params.get('name')])
            result['path'] = self.module.params.get('name')

        return changed, result

    def _delete_resource(self):
        """
        Delete an API Gateway Resource

        :return: (changed, result)
                  changed: Boolean indicating whether or not a change occurred
                  result: Output of the delete_resource call
        """
        changed = False
        if self.module.params.get('name') in self.path_map['paths']:
            try:
                changed = True
                if not self.module.check_mode:
                    self.client.delete_resource(
                        restApiId=self.module.params.get('rest_api_id'),
                        resourceId=self.path_map['paths'][self.module.params.get('name')]['id']
                    )
            except BotoCoreError as e:
                self.module.fail_json(msg="Error calling boto3 delete_resource: {}".format(e))

        return changed, None

    def process_request(self):
        """
        Process the user's request -- the primary code path

        :return: Returns either fail_json or exit_json
        """
        changed = False
        result = None
        self._build_resource_dictionary()
        if self.module.params.get('state') == 'absent':
            (changed, result) = self._delete_resource()
        else:
            (changed, result) = self._create_resource()

        self.module.exit_json(changed=changed, resource=result)
def main():
    """Entry point: build the Ansible module wrapper and run the request."""
    ansible_module = AnsibleAWSModule(
        argument_spec=ApiGwResource._define_module_argument_spec(),
        supports_check_mode=True,
    )
    ApiGwResource(ansible_module).process_request()

from ansible.module_utils.basic import *  # pylint: disable=W0614

if __name__ == '__main__':
    main()
| StarcoderdataPython |
11246938 | <filename>tests/demos/demo_phrase_extractor.py<gh_stars>1000+
# # -*- coding:utf-8 -*-
# Author:wancong
# Date: 2018-04-30
from pyhanlp import *
def demo_phrase_extractor(text):
    """ 短语提取

    >>> text = '''
    ... 算法工程师
    ... 算法(Algorithm)是一系列解决问题的清晰指令,也就是说,能够对一定规范的输入,在有限时间内获得所要求的输出。
    ... 如果一个算法有缺陷,或不适合于某个问题,执行这个算法将不会解决这个问题。不同的算法可能用不同的时间、
    ... 空间或效率来完成同样的任务。一个算法的优劣可以用空间复杂度与时间复杂度来衡量。算法工程师就是利用算法处理事物的人。
    ...
    ... 1职位简介
    ... 算法工程师是一个非常高端的职位;
    ... 专业要求:计算机、电子、通信、数学等相关专业;
    ... 学历要求:本科及其以上的学历,大多数是硕士学历及其以上;
    ... 语言要求:英语要求是熟练,基本上能阅读国外专业书刊;
    ... 必须掌握计算机相关知识,熟练使用仿真工具MATLAB等,必须会一门编程语言。
    ...
    ... 2研究方向
    ... 视频算法工程师、图像处理算法工程师、音频算法工程师 通信基带算法工程师
    ...
    ... 3目前国内外状况
    ... 目前国内从事算法研究的工程师不少,但是高级算法工程师却很少,是一个非常紧缺的专业工程师。
    ... 算法工程师根据研究领域来分主要有音频/视频算法处理、图像技术方面的二维信息算法处理和通信物理层、
    ... 雷达信号处理、生物医学信号处理等领域的一维信息算法处理。
    ... 在计算机音视频和图形图像技术等二维信息算法处理方面目前比较先进的视频处理算法:机器视觉成为此类算法研究的核心;
    ... 另外还有2D转3D算法(2D-to-3D conversion),去隔行算法(de-interlacing),运动估计运动补偿算法
    ... (Motion estimation/Motion Compensation),去噪算法(Noise Reduction),缩放算法(scaling),
    ... 锐化处理算法(Sharpness),超分辨率算法(Super Resolution) 手势识别(gesture recognition) 人脸识别(face recognition)。
    ... 在通信物理层等一维信息领域目前常用的算法:无线领域的RRM、RTT,传送领域的调制解调、信道均衡、信号检测、网络优化、信号分解等。
    ... 另外数据挖掘、互联网搜索算法也成为当今的热门方向。
    ... 算法工程师逐渐往人工智能方向发展。
    ... '''
    >>> demo_phrase_extractor(text)
    [算法工程师, 算法处理, 一维信息, 算法研究, 信号处理]
    """
    # Docstring above is executed by doctest.testmod in __main__, so its
    # (Chinese) content is behavior and is kept verbatim.
    # Extract the top 5 key phrases from the text via HanLP.
    phrase_list = HanLP.extractPhrase(text, 5)
    print(phrase_list)
if __name__ == "__main__":
    # Run the doctest embedded in demo_phrase_extractor's docstring.
    import doctest
    doctest.testmod(verbose=True)
| StarcoderdataPython |
6620488 | <gh_stars>1-10
import ast
import collections
import re
def read_text_file(filename, encoding='utf-8'):
    """Return the full contents of *filename* decoded with *encoding*."""
    with open(filename, 'r', encoding=encoding) as handle:
        contents = handle.read()
    return contents
def parse_version_requirement(text, expect_major):
    """Parse a ``major.minor`` or ``major.minor.micro`` version string.

    Returns a ``(major, minor)`` tuple; a ``1.x`` version is normalized to
    ``(2, 0)``. Raises ValueError for malformed input, out-of-range
    components, or a major version different from *expect_major*.
    """
    fields = text.split('.')
    if not all(re.fullmatch(r'[0-9]+', piece) for piece in fields):
        raise ValueError(f'invalid version {text!r}')
    if len(fields) not in (2, 3):
        raise ValueError(f'invalid version {text!r}, should be major.minor or major.minor.micro')
    major, minor = int(fields[0]), int(fields[1])
    micro = None if len(fields) == 2 else int(fields[2])
    if major not in (1, 2, 3):
        raise ValueError(f'invalid major version {major}')
    if major == 2 and not (0 <= minor <= 7):
        raise ValueError(f'minor version {minor} out of range for major version 2')
    if micro is not None and not (0 <= micro < 20):
        raise ValueError(f'micro version {micro} out of range')
    if major == 1:
        # Python 1.x rules are folded into the 2.0 bucket.
        major, minor = 2, 0
    if major != expect_major:
        raise ValueError(f'major version {major} does not match expected major version {expect_major}')
    return (major, minor)
def is_removed_in_py3(name):
    """True if dotted *name* falls under any prefix in removed_in_py3_list."""
    parts = name.split('.')
    for prefix in removed_in_py3_list:
        if parts[:len(prefix)] == prefix:
            return True
    return False
def validate_identifier_name(name):
    """Raise ValueError unless every dotted component of *name* is a valid
    ASCII identifier (letter/underscore first, then letters/digits/underscores).
    """
    ident = re.compile('[_A-Za-z][_0-9A-Za-z]*')
    for component in name.split('.'):
        if not ident.fullmatch(component):
            raise ValueError('invalid identifier name')
# Rule files are blank-line-separated sections; the first section is a header
# and is dropped with [1:].
py3_rules = read_text_file('py3_rules.txt').rstrip('\n').split('\n\n')[1:]
py2_rules = read_text_file('py2_rules.txt').rstrip('\n').split('\n\n')[1:]
removed_in_py3_list = [x.split('.') for x in read_text_file('removed_in_py3.txt').splitlines() if not x.startswith('#')]

# Each map: name -> [py2 version requirement, py3 version requirement].
modules_rules = collections.defaultdict(lambda: [None, None])
classes_rules = collections.defaultdict(lambda: [None, None])
exceptions_rules = collections.defaultdict(lambda: [None, None])
functions_rules = collections.defaultdict(lambda: [None, None])
variables_and_constants_rules = collections.defaultdict(lambda: [None, None])
decorators_rules = collections.defaultdict(lambda: [None, None])
kwargs_rules = collections.defaultdict(lambda: [None, None])

for ruleset, major_version in ((py2_rules, 2), (py3_rules, 3)):
    for part in ruleset:
        rules = part.split('\n')
        # First line of a section names the rule type and must end with ':'.
        rule_type = rules[0]
        if not rule_type.endswith(':'):
            raise ValueError('rule type line should end with ":"')
        rule_type = rule_type[:-1]
        if rule_type == 'misc': # skip misc part
            continue
        elif rule_type == 'module':
            target = 'modules_rules'
        elif rule_type in ('data', 'attribute'):
            target = 'variables_and_constants_rules'
        elif rule_type == 'class':
            target = 'classes_rules'
        elif rule_type == 'exception':
            target = 'exceptions_rules'
        elif rule_type in ('function', 'method'):
            target = 'functions_rules'
        elif rule_type == 'decorator':
            target = 'decorators_rules'
        elif rule_type == 'argument':
            target = 'kwargs_rules'
        else:
            raise ValueError(f'unknown rule type {rule_type!r}')
        rules = rules[1:]
        for rule in rules:
            # Each rule line: "<version> <content>".
            rule_version, rule_content = rule.split(' ', 1)
            rule_version = parse_version_requirement(rule_version, major_version)
            if target == 'kwargs_rules':
                # Content is a literal tuple: (function_name, [kwarg, ...]).
                func, kwargs = ast.literal_eval(rule_content)
                validate_identifier_name(func)
                for kwarg in kwargs:
                    validate_identifier_name(kwarg)
                    kwargs_rules[(func, kwarg)][major_version - 2] = rule_version
            else:
                validate_identifier_name(rule_content)
                # Index 0 holds the py2 requirement, index 1 the py3 one.
                globals()[target][rule_content][major_version - 2] = rule_version

modules_rules = sorted(modules_rules.items())
classes_rules = sorted(classes_rules.items())
exceptions_rules = sorted(exceptions_rules.items())
functions_rules = sorted(functions_rules.items())
variables_and_constants_rules = sorted(variables_and_constants_rules.items())
decorators_rules = sorted(decorators_rules.items())
kwargs_rules = sorted(kwargs_rules.items())

# Emit the collected rules as Python source for vermin to import.
with open('vermin_rules_generated.py', 'w', encoding='utf-8') as rulefile:
    for rule_type in ('modules_rules', 'classes_rules', 'exceptions_rules', 'functions_rules', 'variables_and_constants_rules', 'decorators_rules'):
        rulefile.write(f'{rule_type} = {{\n')
        for name, versions in globals()[rule_type]:
            if not any(versions):
                raise ValueError('invalid versions tuple')
            # A name with no py3 rule that still exists in py3 is assumed
            # available since 3.0.
            if versions[1] is None and not is_removed_in_py3(name):
                versions[1] = (3, 0)
            rulefile.write(f' "{name}": {tuple(versions)!r},\n')
        rulefile.write('}\n\n')
    rulefile.write('kwargs_rules = {\n')
    for name, versions in kwargs_rules:
        if not any(versions):
            raise ValueError('invalid versions tuple')
        if versions[1] is None and not is_removed_in_py3(name[0]):
            versions[1] = (3, 0)
        rulefile.write(f' ("{name[0]}", "{name[1]}"): {tuple(versions)!r},\n')
    rulefile.write('}\n')
| StarcoderdataPython |
12849603 | import geoflow1D
from geoflow1D.GridModule import *
from geoflow1D.FieldsModule import *
from geoflow1D.LinearSystemModule import *
from geoflow1D.GeoModule import *
from geoflow1D.SolverModule import *
import numpy as np
from matplotlib import pyplot as plt
# -------------- PROBLEM ILLUSTRATION -----------------
# | sigma
# |
# +---V---+ ---
# | | |
# | | |
# | | |
# | | |
# | | |
# | | | H
# | | |
# | | |
# | | |
# x ^ | | |
# | | | |
# _|_ |_______| _|_
# -----------------------------------------------------
class SolidProps(object):
    """Material properties defined per grid region: constrained modulus M and
    solid density rho, each stored as a ScalarField over the grid's regions."""
    def __init__(self, grid, M, rho):
        # Assumes a single-region grid: values are set on regions[0] only.
        self.M = ScalarField(grid.getNumberOfRegions())
        self.M.setValue(grid.getRegions()[0], M)
        self.rho = ScalarField(grid.getNumberOfRegions())
        self.rho.setValue(grid.getRegions()[0], rho)
# Meters -> millimeters conversion factor used for plotting.
mm = 1000.

# -------------- GRID DATA ----------------------------
H = 10           # column height (m)
nVertices = 15   # number of grid vertices
nodesCoord, elemConn = createGridData(H, nVertices)
gridData = GridData()
gridData.setElementConnectivity(elemConn)
gridData.setNodeCoordinates(nodesCoord)
grid = Grid_1D(gridData)
grid.buildStencil()
# -----------------------------------------------------

# -------------- PROPERTIES ----------------------------
M = 1.3e8 # Constrained modulus
rho = 2300. # Solid density
props = SolidProps(grid, M, rho)
g = -9.81  # gravitational acceleration (negative = downward)
# -----------------------------------------------------

# ------------- CREATE LINEAR SYSTEM ------------------
nDOF = 1  # one degree of freedom (displacement) per vertex
ls = LinearSystemCOO(grid.stencil, nDOF)
ls.initialize()
# -----------------------------------------------------

# -------------- NUMERICAL SOLUTION -------------------
AssemblyStiffnessMatrix(ls, grid, props, 0)
AssemblyGravityToVector(ls, grid, props, g, 0)
# -----------------------------------------------------

# ------------- BOUNDARY CONDITIONS -------------------
ls.applyDirichlet(0, 0)       # fixed displacement at the base
sigma = -5e4                  # traction applied at the top boundary
ls.applyNeumann(-1, sigma)
# -----------------------------------------------------

# ----------------- DEFINE SOLVER ---------------------
solver = Solver(tol=1e-8, maxiter=500)
solver.solve(ls.matrix, ls.rhs)
# -----------------------------------------------------
# ------------- ANALYTICAL SOLUTION -------------------
def analyticalSolution(M, stress, L, x, gravity, rho):
    """Analytical displacement of a 1D elastic column under self-weight and a
    top traction.

    :param M: constrained modulus
    :param stress: traction applied at the top boundary (x = L)
    :param L: column height
    :param x: coordinate(s) where the displacement is evaluated (array_like)
    :param gravity: gravitational acceleration (negative = downward)
    :param rho: solid density
    :return: numpy array of displacements at each x
    """
    # BUG FIX: the body previously read the module-level global ``g`` instead
    # of the ``gravity`` parameter, so the argument was silently ignored.
    # The existing call site passes g, so its results are unchanged.
    x = np.array(x)
    return x * (-stress + rho * gravity * L) / M - rho * gravity * x * x / (2 * M)
# Evaluate the analytical solution on a fine grid for comparison.
x_a = np.linspace(0, H, 100)
u_a = analyticalSolution(M, sigma, H, x_a, g, rho)
# -----------------------------------------------------

# -------------- PLOT SOLUTION ------------------------
# Numerical displacements at the grid vertices vs. the analytical curve.
x_n = [v.getCoordinate() for v in grid.getVertices()]
u_n = solver.solution
plt.plot(u_n*mm, x_n, 'o', label='Numeric')
plt.plot(u_a*mm, x_a, '-', label='Analytic')
plt.grid(True)
plt.xlabel('Displacement (mm)')
plt.ylabel('Coordinate X (m)')
plt.show()
# -----------------------------------------------------
| StarcoderdataPython |
3360171 | # This script is used as a bitbake task to create a new python manifest
# $ bitbake python -c create_manifest
#
# Our goal is to keep python-core as small as posible and add other python
# packages only when the user needs them, hence why we split upstream python
# into several packages.
#
# In a very simplistic way what this does is:
# Launch python and see specifically what is required for it to run at a minimum
#
# Go through the python-manifest file and launch a separate task for every single
# one of the files on each package, this task will check what was required for that
# specific module to run, these modules will be called dependencies.
# The output of such task will be a list of the modules or dependencies that were
# found for that file.
#
# Such output will be parsed by this script, we will look for each dependency on the
# manifest and if we find that another package already includes it, then we will add
# that package as an RDEPENDS to the package we are currently checking; in case we dont
# find the current dependency on any other package we will add it to the current package
# as part of FILES.
#
#
# This way we will create a new manifest from the data structure that was built during
# this process, on this new manifest each package will contain specifically only
# what it needs to run.
#
# There are some caveats which we try to deal with, such as repeated files on different
# packages, packages that include folders, wildcards, and special packages.
# Its also important to note that this method only works for python files, and shared
# libraries. Static libraries, header files and binaries need to be dealt with manually.
#
# This script differs from its python2 version mostly on how shared libraries are handled
# The manifest file for python3 has an extra field which contains the cached files for
# each package.
# Tha method to handle cached files does not work when a module includes a folder which
# itself contains the pycache folder, gladly this is almost never the case.
#
# Author: <NAME> "aehs29" <aehs29 at gmail dot com>
import sys
import subprocess
import json
import os
import collections
# Get python version from ${PYTHON_MAJMIN}
pyversion = str(sys.argv[1])

# Hack to get native python search path (for folders), not fond of it but it works for now
pivot = 'recipe-sysroot-native'
for p in sys.path:
    if pivot in p:
        nativelibfolder = p[:p.find(pivot)+len(pivot)]

# Empty dict to hold the whole manifest
new_manifest = collections.OrderedDict()

# Check for repeated files, folders and wildcards
allfiles = []
repeated = []
wildcards = []
hasfolders = []
allfolders = []
def isFolder(value):
    """True if *value* (a manifest path template) names a directory under the
    native sysroot, checking ${libdir} against lib, lib64 and lib32."""
    expanded = value.replace('${PYTHON_MAJMIN}', pyversion)
    libdirs = ('/usr/lib', '/usr/lib64', '/usr/lib32')
    return any(
        os.path.isdir(expanded.replace('${libdir}', nativelibfolder + libdir))
        for libdir in libdirs
    )
def isCached(item):
    """True when *item* is a path inside a __pycache__ directory."""
    return '__pycache__' in item
def prepend_comments(comments, json_manifest):
    """Rewrite *json_manifest* in place so its contents start with *comments*."""
    with open(json_manifest, 'r+') as fh:
        body = fh.read()
        fh.seek(0)
        fh.write(comments + body)
# Read existing JSON manifest
with open('python3-manifest.json') as manifest:
    # The JSON format doesn't allow comments so we hack the call to keep the comments using a marker
    manifest_str = manifest.read()
    json_start = manifest_str.find('# EOC') + 6 # EOC + \n
    manifest.seek(0)
    comments = manifest.read(json_start)
    manifest_str = manifest.read()
    old_manifest = json.loads(manifest_str, object_pairs_hook=collections.OrderedDict)

#
# First pass to get core-package functionality, because we base everything on the fact that core is actually working
# Not exactly the same so it should not be a function
#

print ('Getting dependencies for package: core')

# This special call gets the core dependencies and
# appends to the old manifest so it doesnt hurt what it
# currently holds.
# This way when other packages check for dependencies
# on the new core package, they will still find them
# even when checking the old_manifest
output = subprocess.check_output([sys.executable, 'get_module_deps3.py', 'python-core-package']).decode('utf8')
for coredep in output.split():
    coredep = coredep.replace(pyversion,'${PYTHON_MAJMIN}')
    if isCached(coredep):
        if coredep not in old_manifest['core']['cached']:
            old_manifest['core']['cached'].append(coredep)
    else:
        if coredep not in old_manifest['core']['files']:
            old_manifest['core']['files'].append(coredep)

# The second step is to loop through the existing files contained in the core package
# according to the old manifest, identify if they are modules, or some other type
# of file that we cant import (directories, binaries, configs) in which case we
# can only assume they were added correctly (manually) so we ignore those and
# pass them to the manifest directly.
for filedep in old_manifest['core']['files']:
    if isFolder(filedep):
        if isCached(filedep):
            if filedep not in old_manifest['core']['cached']:
                old_manifest['core']['cached'].append(filedep)
        else:
            if filedep not in old_manifest['core']['files']:
                old_manifest['core']['files'].append(filedep)
        continue
    if '${bindir}' in filedep:
        if filedep not in old_manifest['core']['files']:
            old_manifest['core']['files'].append(filedep)
        continue
    if filedep == '':
        continue
    if '${includedir}' in filedep:
        if filedep not in old_manifest['core']['files']:
            old_manifest['core']['files'].append(filedep)
        continue

    # Get actual module name , shouldnt be affected by libdir/bindir, etc.
    pymodule = os.path.splitext(os.path.basename(os.path.normpath(filedep)))[0]

    # We now know that were dealing with a python module, so we can import it
    # and check what its dependencies are.
    # We launch a separate task for each module for deterministic behavior.
    # Each module will only import what is necessary for it to work in specific.
    # The output of each task will contain each module's dependencies
    print ('Getting dependencies for module: %s' % pymodule)
    output = subprocess.check_output([sys.executable, 'get_module_deps3.py', '%s' % pymodule]).decode('utf8')
    print ('The following dependencies were found for module %s:\n' % pymodule)
    print (output)

    for pymodule_dep in output.split():
        pymodule_dep = pymodule_dep.replace(pyversion,'${PYTHON_MAJMIN}')
        if isCached(pymodule_dep):
            if pymodule_dep not in old_manifest['core']['cached']:
                old_manifest['core']['cached'].append(pymodule_dep)
        else:
            if pymodule_dep not in old_manifest['core']['files']:
                old_manifest['core']['files'].append(pymodule_dep)

# At this point we are done with the core package.
# The old_manifest dictionary is updated only for the core package because
# all others will use this a base.

# To improve the script speed, we check which packages contain directories
# since we will be looping through (only) those later.
for pypkg in old_manifest:
    for filedep in old_manifest[pypkg]['files']:
        if isFolder(filedep):
            print ('%s is a folder' % filedep)
            if pypkg not in hasfolders:
                hasfolders.append(pypkg)
            if filedep not in allfolders:
                allfolders.append(filedep)
# This is the main loop that will handle each package.
# It works in a similar fashion than the step before, but
# we will now be updating a new dictionary that will eventually
# become the new manifest.
#
# The following loops though all packages in the manifest,
# through all files on each of them, and checks whether or not
# they are modules and can be imported.
# If they can be imported, then it checks for dependencies for
# each of them by launching a separate task.
# The output of that task is then parsed and the manifest is updated
# accordingly, wether it should add the module on FILES for the current package
# or if that module already belongs to another package then the current one
# will RDEPEND on it

for pypkg in old_manifest:
    # Use an empty dict as data structure to hold data for each package and fill it up
    new_manifest[pypkg] = collections.OrderedDict()
    new_manifest[pypkg]['summary'] = old_manifest[pypkg]['summary']
    new_manifest[pypkg]['rdepends'] = []
    new_manifest[pypkg]['files'] = []
    new_manifest[pypkg]['cached'] = old_manifest[pypkg]['cached']

    # All packages should depend on core
    if pypkg != 'core':
        new_manifest[pypkg]['rdepends'].append('core')
        new_manifest[pypkg]['cached'] = []

    print('\n')
    print('--------------------------')
    print ('Handling package %s' % pypkg)
    print('--------------------------')

    # Handle special cases, we assume that when they were manually added
    # to the manifest we knew what we were doing.
    special_packages = ['misc', 'modules', 'dev', 'tests']
    if pypkg in special_packages or 'staticdev' in pypkg:
        print('Passing %s package directly' % pypkg)
        new_manifest[pypkg] = old_manifest[pypkg]
        continue

    for filedep in old_manifest[pypkg]['files']:
        # We already handled core on the first pass, we can ignore it now
        if pypkg == 'core':
            if filedep not in new_manifest[pypkg]['files']:
                new_manifest[pypkg]['files'].append(filedep)
            continue

        # Handle/ignore what we cant import
        if isFolder(filedep):
            new_manifest[pypkg]['files'].append(filedep)
            # Asyncio (and others) are both the package and the folder name, we should not skip those...
            path,mod = os.path.split(filedep)
            if mod != pypkg:
                continue
        if '${bindir}' in filedep:
            if filedep not in new_manifest[pypkg]['files']:
                new_manifest[pypkg]['files'].append(filedep)
            continue
        if filedep == '':
            continue
        if '${includedir}' in filedep:
            if filedep not in new_manifest[pypkg]['files']:
                new_manifest[pypkg]['files'].append(filedep)
            continue

        # Get actual module name , shouldnt be affected by libdir/bindir, etc.
        # We need to check if the imported module comes from another (e.g. sqlite3.dump)
        path,pymodule = os.path.split(filedep)
        path = os.path.basename(path)
        pymodule = os.path.splitext(os.path.basename(pymodule))[0]

        # If this condition is met, it means we need to import it from another module
        # or its the folder itself (e.g. unittest)
        if path == pypkg:
            if pymodule:
                pymodule = path + '.' + pymodule
            else:
                pymodule = path

        # We now know that were dealing with a python module, so we can import it
        # and check what its dependencies are.
        # We launch a separate task for each module for deterministic behavior.
        # Each module will only import what is necessary for it to work in specific.
        # The output of each task will contain each module's dependencies
        print ('\nGetting dependencies for module: %s' % pymodule)
        output = subprocess.check_output([sys.executable, 'get_module_deps3.py', '%s' % pymodule]).decode('utf8')
        print ('The following dependencies were found for module %s:\n' % pymodule)
        print (output)

        reportFILES = []
        reportRDEPS = []

        for pymodule_dep in output.split():
            # Warning: This first part is ugly
            # One of the dependencies that was found, could be inside of one of the folders included by another package
            # We need to check if this happens so we can add the package containing the folder as an rdependency
            # e.g. Folder encodings contained in codecs
            # This would be solved if no packages included any folders
            # This can be done in two ways:
            # 1 - We assume that if we take out the filename from the path we would get
            #   the folder string, then we would check if folder string is in the list of folders
            #   This would not work if a package contains a folder which contains another folder
            #   e.g. path/folder1/folder2/filename  folder_string= path/folder1/folder2
            #   folder_string would not match any value contained in the list of folders
            #
            # 2 - We do it the other way around, checking if the folder is contained in the path
            #   e.g. path/folder1/folder2/filename  folder_string= path/folder1/folder2
            #   is folder_string inside path/folder1/folder2/filename?,
            #   Yes, it works, but we waste a couple of milliseconds.
            pymodule_dep = pymodule_dep.replace(pyversion,'${PYTHON_MAJMIN}')
            inFolders = False
            for folder in allfolders:
                # The module could have a directory named after it, e.g. xml, if we take out the filename from the path
                # we'll end up with ${libdir}, and we want ${libdir}/xml
                if isFolder(pymodule_dep):
                    check_path = pymodule_dep
                else:
                    check_path = os.path.dirname(pymodule_dep)
                if folder in check_path :
                    inFolders = True # Did we find a folder?
                    folderFound = False # Second flag to break inner for
                    # Loop only through packages which contain folders
                    for pypkg_with_folder in hasfolders:
                        if (folderFound == False):
                            # print('Checking folder %s on package %s' % (pymodule_dep,pypkg_with_folder))
                            # NOTE(review): the `or folder_dep in ...['cached']` below is an
                            # expression operand, not a second iterable — as written the loop
                            # iterates only ['files'] (or a bool when 'files' is empty).
                            # Likely intended: itertools.chain of 'files' and 'cached'. Confirm.
                            for folder_dep in old_manifest[pypkg_with_folder]['files'] or folder_dep in old_manifest[pypkg_with_folder]['cached']:
                                if folder_dep == folder:
                                    print ('%s folder found in %s' % (folder, pypkg_with_folder))
                                    folderFound = True
                                    if pypkg_with_folder not in new_manifest[pypkg]['rdepends'] and pypkg_with_folder != pypkg:
                                        new_manifest[pypkg]['rdepends'].append(pypkg_with_folder)
                        else:
                            break

            # A folder was found so we're done with this item, we can go on
            if inFolders:
                continue

            # No directories beyond this point
            # We might already have this module on the dictionary since it could depend on a (previously checked) module
            if pymodule_dep not in new_manifest[pypkg]['files'] and pymodule_dep not in new_manifest[pypkg]['cached']:
                # Handle core as a special package, we already did it so we pass it to NEW data structure directly
                if pypkg == 'core':
                    print('Adding %s to %s FILES' % (pymodule_dep, pypkg))
                    if pymodule_dep.endswith('*'):
                        wildcards.append(pymodule_dep)
                    if isCached(pymodule_dep):
                        new_manifest[pypkg]['cached'].append(pymodule_dep)
                    else:
                        new_manifest[pypkg]['files'].append(pymodule_dep)

                    # Check for repeated files
                    if pymodule_dep not in allfiles:
                        allfiles.append(pymodule_dep)
                    else:
                        if pymodule_dep not in repeated:
                            repeated.append(pymodule_dep)
                else:
                    # Last step: Figure out if we this belongs to FILES or RDEPENDS
                    # We check if this module is already contained on another package, so we add that one
                    # as an RDEPENDS, or if its not, it means it should be contained on the current
                    # package, and we should add it to FILES
                    for possible_rdep in old_manifest:
                        # Debug
                        # print('Checking %s ' % pymodule_dep + ' in %s' % possible_rdep)
                        if pymodule_dep in old_manifest[possible_rdep]['files'] or pymodule_dep in old_manifest[possible_rdep]['cached']:
                            # Since were nesting, we need to check its not the same pypkg
                            if(possible_rdep != pypkg):
                                if possible_rdep not in new_manifest[pypkg]['rdepends']:
                                    # Add it to the new manifest data struct as RDEPENDS since it contains something this module needs
                                    reportRDEPS.append('Adding %s to %s RDEPENDS, because it contains %s\n' % (possible_rdep, pypkg, pymodule_dep))
                                    new_manifest[pypkg]['rdepends'].append(possible_rdep)
                                break
                    else:
                        # Since this module wasnt found on another package, it is not an RDEP,
                        # so we add it to FILES for this package.
                        # A module shouldn't contain itself (${libdir}/python3/sqlite3 shouldnt be on sqlite3 files)
                        if os.path.basename(pymodule_dep) != pypkg:
                            reportFILES.append(('Adding %s to %s FILES\n' % (pymodule_dep, pypkg)))
                            if isCached(pymodule_dep):
                                new_manifest[pypkg]['cached'].append(pymodule_dep)
                            else:
                                new_manifest[pypkg]['files'].append(pymodule_dep)
                            if pymodule_dep.endswith('*'):
                                wildcards.append(pymodule_dep)
                            if pymodule_dep not in allfiles:
                                allfiles.append(pymodule_dep)
                            else:
                                if pymodule_dep not in repeated:
                                    repeated.append(pymodule_dep)

        print('\n')
        print('#################################')
        print('Summary for module %s' % pymodule)
        print('FILES found for module %s:' % pymodule)
        print(''.join(reportFILES))
        print('RDEPENDS found for module %s:' % pymodule)
        print(''.join(reportRDEPS))
        print('#################################')

print('The following FILES contain wildcards, please check if they are necessary')
print(wildcards)
print('The following FILES contain folders, please check if they are necessary')
print(hasfolders)

# Sort it just so it looks nicer
for pypkg in new_manifest:
    new_manifest[pypkg]['files'].sort()
    new_manifest[pypkg]['cached'].sort()
    new_manifest[pypkg]['rdepends'].sort()

# Create the manifest from the data structure that was built
with open('python3-manifest.json.new','w') as outfile:
    json.dump(new_manifest,outfile, indent=4)
    outfile.write('\n')

prepend_comments(comments,'python3-manifest.json.new')

if (repeated):
    error_msg = '\n\nERROR:\n'
    error_msg += 'The following files are repeated (contained in more than one package),\n'
    error_msg += 'this is likely to happen when new files are introduced after an upgrade,\n'
    error_msg += 'please check which package should get it,\n modify the manifest accordingly and re-run the create_manifest task:\n'
    error_msg += '\n'.join(repeated)
    error_msg += '\n'
    sys.exit(error_msg)
| StarcoderdataPython |
5178443 | <reponame>cpezzato/discrete_active_inference
#! /usr/bin/env python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from visualization_msgs.msg import Marker
from tiago_moveit import tiagoMoveit
from tiago_gripper import GripperControl
from aruco_msgs.msg import Marker
from aruco_msgs.msg import MarkerArray
class detectMarker(object):
    """Tracks an ArUco marker on the target object and republishes its pose.

    Subscribes to the aruco marker publisher and forwards the pose of the
    tracked marker (with a fixed, hard-coded orientation) on
    ``/objects_poses/object``.
    """

    def __init__(self):
        # Wire up ROS I/O: listen for detections, republish the tracked pose.
        self._aruco_sub = rospy.Subscriber("aruco_marker_publisher/markers", MarkerArray, self.aruco_cb)
        self._aruco_pub = rospy.Publisher("/objects_poses/object", geometry_msgs.msg.Pose, queue_size=10)
        self._aruco_pose = geometry_msgs.msg.PoseStamped()
        # Alternative marker id used previously: 1008
        self._aruco_id = 333
        self._aruco_found = False

    def aruco_cb(self, msg):
        """Update and republish the pose for every detection of a tracked marker."""
        tracked_ids = (self._aruco_id, 111)
        for detection in msg.markers:
            if detection.id not in tracked_ids:
                continue
            self._aruco_found = True
            self._aruco_pose = detection.pose
            # Fixed orientation: the current skills do not use the marker's
            # real rotation, so publish a constant quaternion instead.
            orientation = self._aruco_pose.pose.orientation
            orientation.x = 0.707
            orientation.y = 0.0
            orientation.z = 0.0
            orientation.w = 0.707
            self._aruco_pub.publish(self._aruco_pose.pose)
3548193 | n, t = map(int, input().split())
nums = set(map(int, input().split()))
years = set(map(int, input().split()))
for i in years:
if i in nums:
print("Yes")
else:
print("No")
| StarcoderdataPython |
158950 | <reponame>JojoReikun/ClimbingLizardDLCAnalysis<filename>lizardanalysis/calculations/hip_and_shoulder_angles.py<gh_stars>1-10
def hip_and_shoulder_angles(**kwargs):
    """
    Calculates the shoulder and hip angles for every frame.
    Shoulder angle: angle between shoulder vector (FORE: Shoulder<->Shoulder_foot or HIND: Hip<->Shoulder_foot)
    and limb vector (Shoulder_foot<->foot_knee)
    :param kwargs: different parameters needed for calculation
    :return: results dataframe with 4 key value pairs (list of frame-wise angles for every foot)
    """
    import numpy as np
    from lizardanalysis.utils import auxiliaryfunctions
    from lizardanalysis.utils import animal_settings

    data = kwargs.get('data')
    data_rows_count = kwargs.get('data_rows_count')
    likelihood = kwargs.get('likelihood')
    filename = kwargs.get('filename')
    animal = kwargs.get('animal')

    scorer = data.columns[1][0]
    feet = animal_settings.get_list_of_feet(animal)

    def coord(row, label, axis):
        # Shorthand for one value of the DLC multi-index frame.
        return data.loc[row, (scorer, label, axis)]

    def vector_between(row, tip_label, base_label):
        # 2D vector pointing from base_label towards tip_label.
        return (coord(row, tip_label, "x") - coord(row, base_label, "x"),
                coord(row, tip_label, "y") - coord(row, base_label, "y"))

    results = {foot: np.full((data_rows_count,), np.NAN) for foot in feet}
    front_feet = ("FR", "FL")

    for foot in feet:
        shoulder_label = "Shoulder_{}".format(foot)
        knee_label = "{}_knee".format(foot)
        for row in range(data_rows_count):
            # Tracking confidence for every involved point in this frame.
            shoulder_ok = coord(row, "Shoulder", "likelihood") >= likelihood
            shoulder_foot_ok = coord(row, shoulder_label, "likelihood") >= likelihood
            knee_ok = coord(row, knee_label, "likelihood") >= likelihood
            hip_ok = coord(row, "Hip", "likelihood") >= likelihood

            # Front limbs anchor at the shoulder, hind limbs at the hip.
            if foot in front_feet:
                anchor_ok, anchor_label = shoulder_ok, "Shoulder"
            else:
                anchor_ok, anchor_label = hip_ok, "Hip"

            # Vectors degrade to NaN whenever a required point is unreliable.
            if anchor_ok and shoulder_foot_ok:
                shoulder_vector = vector_between(row, anchor_label, shoulder_label)
            else:
                shoulder_vector = (np.nan, np.nan)

            if shoulder_foot_ok and knee_ok:
                limb_vector = vector_between(row, shoulder_label, knee_label)
            else:
                limb_vector = (np.nan, np.nan)

            angle = auxiliaryfunctions.py_angle_betw_2vectors(shoulder_vector, limb_vector)
            results[foot][row] = 180.0 - angle

    # rename dictionary keys of results
    return {'shoulder_angle_' + foot: angles for (foot, angles) in results.items()}
11286236 | #%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for global atmospheric SF6 concentration from NOAA GML data
data = pd.read_csv('../processed/monthly_global_sf6_data_processed.csv')
data['date'] = pd.to_datetime(data['year'].astype(str) + data['month'].astype(str), format='%Y%m', errors='coerce')
agg_data = pd.DataFrame()
agg_data['date'] = (data[data['Reported value']=='monthly mean'])['date']
agg_data['concentration'] = (data[data['Reported value']=='monthly mean'])['Concentration (ppt)']
agg_data['total error std'] = ((data[data['Reported value']=='mon. mean 1-sigma unc.'])['Concentration (ppt)']).to_numpy()
agg_data['lower bound'] = agg_data['concentration'] - 1.96*agg_data['total error std'] # 95% confidence interval
agg_data['upper bound'] = agg_data['concentration'] + 1.96*agg_data['total error std'] # 95% confidence interval
chart = alt.Chart(agg_data).encode(
x=alt.X(field='date', type='temporal', timeUnit='yearmonth', title='date'),
y=alt.Y(field=r'concentration', type='quantitative', title=r'[SF6] (ppt)',
scale=alt.Scale(domain=[3, 11])),
tooltip=[alt.Tooltip(field='date', type='temporal', title='date', format='%Y, %m'),
alt.Tooltip(field=r'concentration', type='nominal', title=r'concentration')]
).properties(width='container', height=300)
# Add uncertainty bands
bands = chart.mark_area(color='dodgerblue', fillOpacity=0.4).encode(
x=alt.X(field='date', type='temporal', timeUnit='yearmonth', title='date'),
y=alt.Y('lower bound:Q', scale=alt.Scale(zero=False)),
y2='upper bound:Q'
).properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
layer = alt.layer(bands, l, p)
layer.save('atmospheric_SF6.json')
| StarcoderdataPython |
284569 | <reponame>david-soto-m/Pycodoc
#!/usr/bin/python3
# An installer that will create a dektop file and add it to your apps menu
from pathlib import Path
from os import geteuid
def main():
    """Write a freedesktop .desktop launcher for Pycodoc.

    Installs system-wide (/usr/share/applications) when run as root,
    otherwise into the invoking user's ~/.local/share/applications.
    """
    app_dir = Path(__file__).parent.resolve()
    exec_path = str(app_dir) + '/main.py'
    icon_path = str(app_dir / 'data' / 'AppIcon' / 'AppIcon.svg')
    entry = '''\
[Desktop Entry]
Encoding=UTF-8
Type=Application
Terminal=false
Categories=Development;Documentation
Exec=%s
Name=Pycodoc
Icon=%s''' % (exec_path, icon_path)
    # Root installs for all users; everyone else gets a per-user entry.
    if geteuid() == 0:
        target = '/usr/share/applications/Pycodoc.desktop'
    else:
        target = str(
            Path.home()
            / '.local'
            / 'share'
            / 'applications'
            / 'Pycodoc.desktop'
        )
    with open(target, 'w+') as desktop_file:
        desktop_file.write(entry)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5101727 | """
Models of energy storage systems
Two types of energy storage systems are supported.
1) Battery energy storage system (BESS)
2) Thermal energy storage system (TESS)
"""
import configuration.configuration_default_ess as default_parameters
# Battery energy storage system model: a flat configuration/measurement record
# seeded from the project-wide defaults in configuration_default_ess.
BESS =\
    {   #1) Static information
        # NOTE(review): "ID" is populated from the default "AREA" field —
        # confirm this cross-key mapping is intentional and not a typo.
        "ID": default_parameters.BESS["AREA"],
        "CAP": default_parameters.BESS["CAP"],
        "PMAX_DIS": default_parameters.BESS["PMAX_DIS"],
        "PMAX_CH": default_parameters.BESS["PMAX_CH"],
        "EFF_DIS":default_parameters.BESS["EFF_DIS"],
        "EFF_CH":default_parameters.BESS["EFF_CH"],
        "SOC_MAX":default_parameters.BESS["SOC_MAX"],
        "SOC_MIN":default_parameters.BESS["SOC_MIN"],
        # Cost model: polynomial cost coefficients for discharge/charge.
        "COST_MODEL":default_parameters.BESS["COST_MODEL"],
        "NCOST_DIS":default_parameters.BESS["NCOST_DIS"],
        "COST_DIS":default_parameters.BESS["COST_DIS"],
        "NCOST_CH":default_parameters.BESS["NCOST_CH"],
        "COST_CH":default_parameters.BESS["COST_CH"],
        # 2) Measurement information
        "STATUS": default_parameters.BESS["STATUS"],
        "SOC":default_parameters.BESS["SOC"],
        "PG": default_parameters.BESS["PG"],
        "RG": default_parameters.BESS["RG"],
        # 3) Scheduling information
        "TIME_GENERATED": default_parameters.BESS["TIME_GENERATED"],
        "TIME_APPLIED": default_parameters.BESS["TIME_APPLIED"],
        "TIME_COMMANDED": default_parameters.BESS["TIME_COMMANDED"],
        "COMMAND_PG":default_parameters.BESS["COMMAND_PG"],
        "COMMAND_RG":default_parameters.BESS["COMMAND_RG"],
    }
3534142 | <reponame>mc18g13/teensy-drone
import matplotlib.pyplot as plt
import numpy as np
import csv
import sys

# Plot time/frequency column pairs from a space-separated motor-data file.
# Usage: python script.py <datafile>
commandLineArg = sys.argv[1]

with open(commandLineArg, newline = '') as motorData:
    reader = csv.reader(motorData, delimiter=' ')
    # Consume the first row only to learn the column count; it is skipped
    # again below on every pass — presumably a header row, TODO confirm.
    columnCount = len(next(reader))
    pairCount = int(columnCount/2)
    print(pairCount)
    # One subplot per column: pairs of (amplitude-over-time, spectrum) axes.
    fig, ax = plt.subplots(columnCount, 1)
    for pairIndex in range(pairCount):
        freq=[]
        amplitude=[]
        # Re-read the whole file for each pair, skipping the first row.
        motorData.seek(0)
        next(reader, None)
        for row in reader:
            # assumes columns alternate frequency/amplitude per pair — TODO confirm
            freq.append(float(row[2*pairIndex]))
            amplitude.append(float(row[2*pairIndex+1]))
        ax[2*pairIndex].plot(amplitude)
        ax[2*pairIndex].set_xlabel('Time ' + str(pairIndex))
        ax[2*pairIndex].set_ylabel('Amplitude ' + str(pairIndex))
        # freq[1:] drops the first sample (likely the DC bin) from the spectrum.
        ax[2*pairIndex + 1].plot(freq[1:], 'r') # plotting the spectrum
        ax[2*pairIndex + 1].set_xlabel('Freq (Hz) ' + str(pairIndex + 1))
        ax[2*pairIndex + 1].set_ylabel('|Y(freq)| ' + str(pairIndex + 1))
    plt.show()
3558295 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import find_packages, setup
# Long description for PyPI is taken verbatim from the README.
with open("README.md") as readme_file:
    readme = readme_file.read()

test_requirements = [
    "codecov",
    "flake8",
    "black",
    "pytest",
    "pytest-cov",
    "pytest-raises",
    # NOTE(review): pins quilt3==3.1.5 here but examples_requirements pins
    # quilt3==3.1.10 below — confirm the mismatch is intentional.
    "quilt3==3.1.5",
    "python-dateutil==2.8.0",
]

setup_requirements = [
    "pytest-runner",
]

examples_requirements = [
    "quilt3==3.1.10",
    "python-dateutil==2.8.0",
]

dev_requirements = [
    "bumpversion>=0.5.3",
    "coverage>=5.0a4",
    "flake8>=3.7.7",
    "ipython>=7.5.0",
    "m2r>=0.2.1",
    "pytest>=4.3.0",
    "pytest-cov==2.6.1",
    "pytest-raises>=0.10",
    "pytest-runner>=4.4",
    "Sphinx>=2.0.0b1",
    "sphinx_rtd_theme>=0.1.2",
    "tox>=3.5.2",
    "twine>=1.13.0",
    "wheel>=0.33.1",
]

interactive_requirements = [
    "altair",
    "jupyterlab",
    "matplotlib",
]

# Core runtime dependencies.
requirements = [
    "matplotlib",
    "numpy",
    "pandas",
    "scipy",
    "tifffile>=2021.7.30",
    "torch>=1.0",
    "tqdm",
    "scikit-image>=0.18.0",
    "aicsimageio==4.0.5",
]

extra_requirements = {
    "test": test_requirements,
    "setup": setup_requirements,
    "dev": dev_requirements,
    "interactive": interactive_requirements,
    "examples": examples_requirements,
    # NOTE(review): "all" omits examples_requirements — verify whether the
    # examples extra should be included here as well.
    "all": [
        *requirements,
        *test_requirements,
        *setup_requirements,
        *dev_requirements,
        *interactive_requirements
    ]
}

setup(
    author="<NAME> and <NAME>. and <NAME> and "
           "<NAME>. and <NAME> and Johnson, <NAME>.",
    author_email="<EMAIL>",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: Free for non-commercial use",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    description="A machine learning model for transforming microsocpy images between "
                "modalities",
    entry_points={
        "console_scripts": ["fnet = fnet.cli.main:main"],
    },
    install_requires=requirements,
    license="Allen Institute Software License",
    long_description=readme,
    long_description_content_type="text/markdown",
    include_package_data=True,
    keywords="fnet",
    name="fnet",
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*"]),
    python_requires=">=3.6",
    setup_requires=setup_requirements,
    test_suite="fnet/tests",
    tests_require=test_requirements,
    extras_require=extra_requirements,
    url="https://github.com/AllenCellModeling/pytorch_fnet",
    # Do not edit this string manually, always use bumpversion
    # Details in CONTRIBUTING.rst
    version="0.2.0",
    zip_safe=False,
)
11360709 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.postgres.search import SearchQuery
from django.contrib.postgres.search import SearchVector
from django.core.files.storage import get_storage_class
from django.db import models
from django.db.models import Q, F, Case, When, Value, Sum, Min, Max, OuterRef, Subquery, Count, CharField
from django.db.models.functions import Length
from django.views import View
from django.views.generic.detail import SingleObjectMixin
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import get_language_info
from django.utils.decorators import method_decorator
from django.utils.text import slugify
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.http import HttpResponseForbidden
from django.views.decorators.cache import cache_control
from collections import Counter
import json
import datetime
import re
import pytz
import logging
from studygroups.decorators import user_is_group_facilitator
from studygroups.decorators import user_is_team_organizer
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import Application
from studygroups.models import Meeting
from studygroups.models import Team
from studygroups.models import TeamMembership
from studygroups.models import TeamInvitation
from studygroups.models import Announcement
from studygroups.models import generate_meetings_from_dates
from studygroups.models import get_json_response
from studygroups.models.course import course_platform_from_url
from studygroups.models.team import eligible_team_by_email_domain
from uxhelpers.utils import json_response
from api.geo import getLatLonDelta
from api import schema
from api.forms import ImageForm
logger = logging.getLogger(__name__)
def studygroups(request):
    """Legacy JSON list of published study groups, optionally filtered by course."""
    # TODO remove this API endpoint, where is it currently being used??
    study_groups = StudyGroup.objects.published()
    if 'course_id' in request.GET:
        study_groups = study_groups.filter(course_id=request.GET.get('course_id'))

    origin = f"{settings.PROTOCOL}://{settings.DOMAIN}"

    def to_json(sg):
        # Absolute signup URL derived from the venue slug and primary key.
        signup_path = reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,))
        data = {
            "name": sg.name,
            "course_title": sg.course.title,
            "facilitator": "{} {}".format(sg.facilitator.first_name, sg.facilitator.last_name),
            "venue": sg.venue_name,
            "venue_address": "{}, {}".format(sg.venue_address, sg.city),
            "city": sg.city,
            "day": sg.day(),
            "start_date": sg.start_date,
            "meeting_time": sg.meeting_time,
            "time_zone": sg.timezone_display(),
            "end_time": sg.end_time(),
            "weeks": sg.meeting_set.active().count(),
            "url": origin + signup_path,
        }
        if sg.image:
            data["image_url"] = origin + sg.image.url
        # TODO else set default image URL
        return data

    return json_response(request, [to_json(sg) for sg in study_groups])
class CustomSearchQuery(SearchQuery):
    """ use to_tsquery to support partial matches """
    """ NOTE: This is potentially unsafe!!"""
    def as_sql(self, compiler, connection):
        # Strip every character significant to tsquery syntax so the remaining
        # terms can be concatenated into a tsquery expression below.
        query = re.sub(r'[!\'()|&\:=,\.\ \-\<\>@]+', ' ', self.value).strip().lower()
        # "foo bar" -> "foo:* & bar:*": AND the terms, each as a prefix match.
        tsquery = ":* & ".join(query.split(' '))
        tsquery += ":*"
        params = [tsquery]
        if self.config:
            # A text-search configuration (e.g. 'simple') was supplied:
            # compile it and pass it as the first to_tsquery argument.
            config_sql, config_params = compiler.compile(self.config)
            template = 'to_tsquery({}::regconfig, %s)'.format(config_sql)
            params = config_params + [tsquery]
        else:
            template = 'to_tsquery(%s)'
        if self.invert:
            # Negate the whole match for inverted (exclude) queries.
            template = '!!({})'.format(template)
        return template, params
def serialize_learning_circle(sg):
    """Serialize a StudyGroup into the JSON payload used by the learning circle API."""
    origin = f"{settings.PROTOCOL}://{settings.DOMAIN}"
    course = sg.course
    # Fall back to a generated discourse topic URL when none is stored.
    discourse_url = course.discourse_topic_url
    if not discourse_url:
        discourse_url = origin + reverse("studygroups_generate_course_discourse_topic", args=(course.id,))
    data = {
        "course": {
            "id": course.pk,
            "title": course.title,
            "provider": course.provider,
            "link": course.link,
            "course_page_url": origin + reverse('studygroups_course_page', args=(course.id,)),
            "discourse_topic_url": discourse_url,
        },
        "id": sg.id,
        "name": sg.name,
        "facilitator": sg.facilitator.first_name,
        "venue": sg.venue_name,
        "venue_address": "{}, {}".format(sg.venue_address, sg.city),
        "venue_website": sg.venue_website,
        "city": sg.city,
        "region": sg.region,
        "country": sg.country,
        "country_en": sg.country_en,
        "latitude": sg.latitude,
        "longitude": sg.longitude,
        "place_id": sg.place_id,
        "online": sg.online,
        "language": sg.language,
        "day": sg.day(),
        "start_date": sg.start_date,
        "start_datetime": sg.local_start_date(),
        "meeting_time": sg.meeting_time,
        "time_zone": sg.timezone_display(),
        # TODO rename to end_date or last_meeting_date - ie make consistent
        "last_meeting_date": sg.end_date,
        "end_time": sg.end_time(),
        # Drafts have no generated meetings yet, so use the planned weeks.  # TODO
        "weeks": sg.weeks if sg.draft else sg.meeting_set.active().count(),
        "url": origin + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,)),
        "report_url": sg.report_url(),
        "studygroup_path": reverse('studygroups_view_study_group', args=(sg.id,)),
        "draft": sg.draft,
        "signup_count": sg.application_set.active().count(),
        "signup_open": sg.signup_open and sg.end_date > datetime.date.today(),
    }
    if sg.image:
        data["image_url"] = origin + sg.image.url
    # TODO else set default image URL
    if sg.signup_question:
        data["signup_question"] = sg.signup_question
    # Optional queryset annotations, present only on some code paths.
    if hasattr(sg, 'next_meeting_date'):
        data["next_meeting_date"] = sg.next_meeting_date
    if hasattr(sg, 'status'):
        data["status"] = sg.status
    return data
def _intCommaList(csv):
values = csv.split(',') if csv else []
cleaned = []
for value in values:
try:
v = int(value)
cleaned += [v]
except ValueError:
return None, 'Not a list of integers seperated by commas'
return cleaned, None
def _limit_offset(request):
if 'offset' in request.GET or 'limit' in request.GET:
try:
offset = int(request.GET.get('offset', 0))
except ValueError as e:
offset = 0
try:
limit = int(request.GET.get('limit', 100))
except ValueError as e:
limit = 100
return limit, offset
@method_decorator(cache_control(max_age=15*60), name='dispatch')
class LearningCircleListView(View):
    """Public, cached (15 min) JSON listing of learning circles.

    Supports free-text search plus filtering by draft status, user, scope,
    team, geography, topics, weekdays and signup status, with optional
    pagination via 'offset'/'limit'.
    """

    def get(self, request):
        # Declarative validation of the supported query parameters.
        query_schema = {
            "latitude": schema.floating_point(),
            "longitude": schema.floating_point(),
            "distance": schema.floating_point(),
            "offset": schema.integer(),
            "limit": schema.integer(),
            "weekdays": _intCommaList,
            "user": schema.boolean(),
            "scope": schema.text(),
            "draft": schema.boolean(),
            "team_id": schema.integer(),
            "order": lambda v: (v, None) if v in ['name', 'start_date', 'created_at', 'first_meeting_date', 'last_meeting_date', None] else (None, "must be 'name', 'created_at', 'first_meeting_date', 'last_meeting_date', or 'start_date'"),
        }
        data = schema.django_get_to_dict(request.GET)
        clean_data, errors = schema.validate(query_schema, data)
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})
        study_groups = StudyGroup.objects.published().prefetch_related('course', 'meeting_set', 'application_set').order_by('id')
        # 'draft' widens the base queryset to also include unpublished drafts.
        if 'draft' in request.GET:
            study_groups = StudyGroup.objects.active().order_by('id')
        if 'id' in request.GET:
            id = request.GET.get('id')
            study_groups = StudyGroup.objects.filter(pk=int(id))
        if 'user' in request.GET:
            # Restrict to circles facilitated by the requesting user.
            user_id = request.user.id
            study_groups = study_groups.filter(facilitator=user_id)
        today = datetime.date.today()
        # TODO status is being used by the learning circle search page?
        # Coarse lifecycle status derived from the signup flag and dates.
        study_groups = study_groups.annotate(
            status=Case(
                When(signup_open=True, start_date__gt=today, then=Value('upcoming')),
                When(signup_open=True, start_date__lte=today, end_date__gte=today, then=Value('in_progress')),
                When(signup_open=False, end_date__gte=today, then=Value('closed')),
                default=Value('completed'),
                output_field=CharField(),
            ),
        )
        # TODO scope is used by dashboard?
        if 'scope' in request.GET:
            scope = request.GET.get('scope')
            upcoming_meetings = Meeting.objects.filter(study_group=OuterRef('pk'), deleted_at__isnull=True, meeting_date__gte=today).order_by('meeting_date')
            if scope == "active":
                study_groups = study_groups\
                    .annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
                    .filter(Q(end_date__gte=today) | Q(draft=True))
            elif scope == "upcoming":
                study_groups = study_groups\
                    .annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
                    .filter(Q(start_date__gt=today) | Q(draft=True))
            elif scope == "current":
                study_groups = study_groups\
                    .annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
                    .filter(start_date__lte=today, end_date__gte=today)
            elif scope == "completed":
                study_groups = study_groups\
                    .filter(end_date__lt=today)
        q = request.GET.get('q', '').strip()
        if q:
            # Prefix-matching full-text search over location, course and
            # facilitator fields (see CustomSearchQuery).
            tsquery = CustomSearchQuery(q, config='simple')
            study_groups = study_groups.annotate(
                search = SearchVector(
                    'city',
                    'name',
                    'course__title',
                    'course__provider',
                    'course__topics',
                    'venue_name',
                    'venue_address',
                    'venue_details',
                    'facilitator__first_name',
                    'facilitator__last_name',
                    config='simple'
                )
            ).filter(search=tsquery)
        if 'course_id' in request.GET:
            study_groups = study_groups.filter(
                course_id=request.GET.get('course_id')
            )
        city = request.GET.get('city')
        if city is not None:
            study_groups = study_groups.filter(city=city)
        team_id = request.GET.get('team_id')
        if team_id is not None:
            # Circles facilitated by any active member of the given team.
            team = Team.objects.get(pk=team_id)
            members = team.teammembership_set.active().values('user')
            team_users = User.objects.filter(pk__in=members)
            study_groups = study_groups.filter(facilitator__in=team_users)
        # TODO How is this different from scope=active?
        if 'active' in request.GET:
            active = request.GET.get('active') == 'true'
            if active:
                study_groups = study_groups.filter(end_date__gte=today)
            else:
                study_groups = study_groups.filter(end_date__lt=today)
        if 'latitude' in request.GET and 'longitude' in request.GET:
            # work with floats for ease
            latitude = float(request.GET.get('latitude'))
            longitude = float(request.GET.get('longitude'))
            distance = float(request.GET.get('distance', False) or 50)
            lat_delta, lon_delta = getLatLonDelta(latitude, longitude, distance)
            lat_min = max(-90, latitude - lat_delta)
            lat_max = min(90, latitude + lat_delta)
            lon_min = max(-180, longitude - lon_delta)
            lon_max = min(180, longitude + lon_delta)
            # NOTE doesn't wrap around,
            # iow, something at lat=45, lon=-189 and distance=1000 won't match
            # lat=45, lon=189 even though they are only 222 km apart.
            study_groups = study_groups.filter(
                latitude__gte=lat_min,
                latitude__lte=lat_max,
                longitude__gte=lon_min,
                longitude__lte=lon_max
            )
            # NOTE could use haversine approximation to filter more accurately
        if 'topics' in request.GET:
            # OR together a case-insensitive containment match per topic.
            topics = request.GET.get('topics').split(',')
            query = Q(course__topics__icontains=topics[0])
            for topic in topics[1:]:
                query = Q(course__topics__icontains=topic) | query
            study_groups = study_groups.filter(query)
        if 'weekdays' in request.GET:
            weekdays = request.GET.get('weekdays').split(',')
            query = None
            for weekday in weekdays:
                # Client sends weekdays with Monday == 0 (datetime.weekday()),
                # while Django's __week_day lookup uses Sunday == 1 through
                # Saturday == 7.
                # BUG FIX: the previous expression `int(weekday) + 2 % 7`
                # evaluated as `int(weekday) + (2 % 7)` and produced the
                # invalid value 8 for Sunday (input 6), so Sunday circles were
                # never matched.  Wrap correctly into Django's 1..7 range.
                weekday = (int(weekday) + 1) % 7 + 1
                query = query | Q(start_date__week_day=weekday) if query else Q(start_date__week_day=weekday)
            study_groups = study_groups.filter(query)
        # TODO this conflates signup open and active
        study_groups_signup_open = study_groups.filter(signup_open=True, end_date__gte=today)
        study_groups_signup_closed = study_groups.filter(Q(signup_open=False) | Q(end_date__lt=today))
        if 'signup' in request.GET:
            signup_open = request.GET.get('signup') == 'open'
            if signup_open:
                study_groups = study_groups_signup_open
            else:
                study_groups = study_groups_signup_closed
        order = request.GET.get('order', None)
        if order == 'name':
            study_groups = study_groups.order_by('name')
        elif order == 'start_date':
            study_groups = study_groups.order_by('-start_date')
        elif order == 'created_at':
            study_groups = study_groups.order_by('-created_at')
        elif order == 'first_meeting_date':
            study_groups = study_groups.order_by('start_date')
        elif order == 'last_meeting_date':
            study_groups = study_groups.order_by('-end_date')
        data = {
            'count': study_groups.count(),
            'signup_open_count': study_groups_signup_open.count(),
            'signup_closed_count': study_groups_signup_closed.count(),
        }
        if 'offset' in request.GET or 'limit' in request.GET:
            limit, offset = _limit_offset(request)
            data['offset'] = offset
            data['limit'] = limit
            study_groups = study_groups[offset:offset+limit]
        data['items'] = [ serialize_learning_circle(sg) for sg in study_groups ]
        return json_response(request, data)
class LearningCircleTopicListView(View):
    """ Return topics for listed courses """

    def get(self, request):
        # Only consider courses used by published circles with a future meeting.
        upcoming_groups = Meeting.objects.active().filter(
            meeting_date__gte=timezone.now()
        ).values('study_group')
        course_ids = StudyGroup.objects.published().filter(
            id__in=upcoming_groups
        ).values('course')
        topic_rows = Course.objects.active()\
                .filter(unlisted=False)\
                .filter(id__in=course_ids)\
                .exclude(topics='')\
                .values_list('topics')
        # Flatten the comma-separated topic strings, normalised to lower case.
        flattened = [
            topic.strip().lower()
            for row in topic_rows
            for topic in row[0].split(',')
        ]
        # Counter is imported at module level; map each topic to its frequency.
        return json_response(request, {'topics': dict(Counter(flattened))})
def _studygroup_object_for_map(sg):
    """Serialize a study group into the minimal payload used for map markers."""
    is_active = sg.end_date > datetime.date.today()
    has_report = sg.learnersurveyresponse_set.count() > 0
    data = {
        "id": sg.id,
        "title": sg.name,
        "latitude": sg.latitude,
        "longitude": sg.longitude,
        "city": sg.city,
        "start_date": sg.start_date,
        "active": is_active
    }
    # Active circles link to signup; finished ones link to the report, if any.
    if is_active:
        data["url"] = settings.PROTOCOL + '://' + settings.DOMAIN + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,))
    elif has_report:
        data["report_url"] = sg.report_url()
    return data
class LearningCirclesMapView(View):
    """Read-only endpoint listing every published learning circle as a map marker."""

    def get(self, request):
        queryset = StudyGroup.objects.published().select_related('course').prefetch_related("learnersurveyresponse_set")
        payload = {'items': [_studygroup_object_for_map(sg) for sg in queryset]}
        return json_response(request, payload)
def _course_check(course_id):
    """Validator: resolve a course id to a Course instance.

    Returns ``(course, None)`` on success or ``(None, error_message)`` when no
    course matches.  Uses a single ``.get()`` instead of the previous
    ``.exists()`` + ``.get()`` pair, halving the queries and removing the gap
    between the existence check and the fetch.
    """
    try:
        return Course.objects.get(pk=int(course_id)), None
    except Course.DoesNotExist:
        return None, 'Course matching ID not found'
def serialize_course(course):
    """Serialize a Course into the JSON payload returned by the course API."""
    origin = settings.PROTOCOL + '://' + settings.DOMAIN
    topic_list = [t.strip() for t in course.topics.split(',')] if course.topics else []
    # Fall back to a generated discourse topic URL when none is stored.
    discourse_url = course.discourse_topic_url
    if not discourse_url:
        discourse_url = origin + reverse("studygroups_generate_course_discourse_topic", args=(course.id,))
    data = {
        "id": course.id,
        "title": course.title,
        "provider": course.provider,
        "platform": course.platform,
        "link": course.link,
        "caption": course.caption,
        "on_demand": course.on_demand,
        "topics": topic_list,
        "language": course.language,
        "overall_rating": course.overall_rating,
        "total_ratings": course.total_ratings,
        "rating_step_counts": course.rating_step_counts,
        "course_page_url": origin + reverse("studygroups_course_page", args=(course.id,)),
        "course_page_path": reverse("studygroups_course_page", args=(course.id,)),
        "course_edit_path": reverse("studygroups_course_edit", args=(course.id,)),
        "created_at": course.created_at,
        "unlisted": course.unlisted,
        "discourse_topic_url": discourse_url,
    }
    # Present only when the queryset annotated usage counts.
    if hasattr(course, 'num_learning_circles'):
        data["learning_circles"] = course.num_learning_circles
    return data
class CourseListView(View):
    """JSON listing of active courses with search, filtering, ordering and pagination."""

    def get(self, request):
        # Declarative validation of the supported query parameters.
        query_schema = {
            "offset": schema.integer(),
            "limit": schema.integer(),
            "order": lambda v: (v, None) if v in ['title', 'usage', 'overall_rating', 'created_at', None] else (None, "must be 'title', 'usage', 'created_at', or 'overall_rating'"),
            "user": schema.boolean(),
            "include_unlisted": schema.boolean(),
        }
        data = schema.django_get_to_dict(request.GET)
        clean_data, errors = schema.validate(query_schema, data)
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})
        courses = Course.objects.active().filter(archived=False)
        # include_unlisted must be != false and the query must be scoped
        # by user to avoid filtering out unlisted courses
        if request.GET.get('include_unlisted', "false") == "false" or 'user' not in request.GET:
            # return only courses that is not unlisted
            # if the user is part of a team, include unlisted courses from the team
            if request.user.is_authenticated:
                team_query = TeamMembership.objects.active().filter(user=request.user).values('team')
                team_ids = TeamMembership.objects.active().filter(team__in=team_query).values('user')
                courses = courses.filter(Q(unlisted=False) | Q(unlisted=True, created_by__in=team_ids))
            else:
                courses = courses.filter(unlisted=False)
        # Count non-deleted learning circles per course (used for 'usage' ordering).
        courses = courses.annotate(
            num_learning_circles=Sum(
                Case(
                    When(
                        studygroup__deleted_at__isnull=True, then=Value(1),
                        studygroup__course__id=F('id')
                    ),
                    default=Value(0), output_field=models.IntegerField()
                )
            )
        )
        if 'user' in request.GET:
            # Restrict to courses created by the requesting user.
            user_id = request.user.id
            courses = courses.filter(created_by=user_id)
        if 'course_id' in request.GET:
            course_id = request.GET.get('course_id')
            courses = courses.filter(pk=int(course_id))
        # Ordering: title (default), rating, recency, or usage count.
        order = request.GET.get('order', None)
        if order in ['title', None]:
            courses = courses.order_by('title')
        elif order == 'overall_rating':
            courses = courses.order_by('-overall_rating', '-total_ratings', 'title')
        elif order == 'created_at':
            courses = courses.order_by('-created_at')
        else:
            courses = courses.order_by('-num_learning_circles', 'title')
        query = request.GET.get('q', '').strip()
        if query:
            # Prefix-matching full-text search (see CustomSearchQuery).
            tsquery = CustomSearchQuery(query, config='simple')
            courses = courses.annotate(
                search=SearchVector('topics', 'title', 'caption', 'provider', config='simple')
            ).filter(search=tsquery)
        if 'topics' in request.GET:
            # OR together a case-insensitive containment match per topic.
            topics = request.GET.get('topics').split(',')
            query = Q(topics__icontains=topics[0])
            for topic in topics[1:]:
                query = Q(topics__icontains=topic) | query
            courses = courses.filter(query)
        if 'languages' in request.GET:
            languages = request.GET.get('languages').split(',')
            courses = courses.filter(language__in=languages)
        if 'oer' in request.GET and request.GET.get('oer', False) == 'true':
            # Open educational resources only (license allow-list on Course).
            courses = courses.filter(license__in=Course.OER_LICENSES)
        if 'active' in request.GET:
            # 'active' = used by a published circle with a future meeting.
            active = request.GET.get('active') == 'true'
            study_group_ids = Meeting.objects.active().filter(
                meeting_date__gte=timezone.now()
            ).values('study_group')
            course_ids = None
            if active:
                course_ids = StudyGroup.objects.published().filter(id__in=study_group_ids).values('course')
            else:
                course_ids = StudyGroup.objects.published().exclude(id__in=study_group_ids).values('course')
            courses = courses.filter(id__in=course_ids)
        data = {
            'count': courses.count()
        }
        if 'offset' in request.GET or 'limit' in request.GET:
            limit, offset = _limit_offset(request)
            data['offset'] = offset
            data['limit'] = limit
            courses = courses[offset:offset+limit]
        data['items'] = [ serialize_course(course) for course in courses ]
        return json_response(request, data)
class CourseTopicListView(View):
    """ Return topics for listed courses """

    def get(self, request):
        topic_rows = Course.objects.active()\
                .filter(unlisted=False)\
                .exclude(topics='')\
                .values_list('topics')
        # Flatten the comma-separated topic strings, normalised to lower case.
        flattened = [
            topic.strip().lower()
            for row in topic_rows
            for topic in row[0].split(',')
        ]
        # Counter is already imported at module level; map topic -> frequency.
        return json_response(request, {'topics': dict(Counter(flattened))})
def _image_check():
    """Build a validator accepting only URLs below MEDIA_URL.

    The validator returns the path relative to MEDIA_URL on success, or
    ``(None, error_message)`` otherwise.
    """
    def _validate(value):
        if not value.startswith(settings.MEDIA_URL):
            return None, 'Image must be a valid URL for an existing file'
        return value.replace(settings.MEDIA_URL, '', 1), None
    return _validate
def _user_check(user):
def _validate(value):
if value is False:
if user.profile.email_confirmed_at is None:
return None, 'Users with unconfirmed email addresses cannot publish courses'
return value, None
return _validate
def _studygroup_check(studygroup_id):
    """Validate that a learning circle with the given id exists.

    Returns (StudyGroup, None) on success or (None, error_message) if no
    matching record is found.  Uses a single query via first() instead of
    the previous exists()/get() pair, which hit the database twice.
    """
    study_group = StudyGroup.objects.filter(pk=int(studygroup_id)).first()
    if study_group is None:
        return None, 'Learning circle matching ID not found'
    return study_group, None
def _venue_name_check(venue_name):
    """Validate that a venue name slugifies to a non-empty string.

    Guards against names made up entirely of punctuation/whitespace, which
    would produce an empty URL slug.
    """
    if not slugify(venue_name, allow_unicode=True):
        return None, 'Venue name should include at least one alpha-numeric character.'
    return venue_name, None
def _meetings_validator(meetings):
    """Validate a list of meeting dicts, each with meeting_date and meeting_time.

    Returns (validated_meetings, None) on success or (None, error) when the
    list is empty or any entry fails schema validation.
    """
    if not meetings:
        return None, 'Need to specify at least one meeting'

    meeting_schema = schema.schema({
        "meeting_date": schema.date(),
        "meeting_time": schema.time()
    })
    validated = []
    any_errors = False
    for value, error in map(meeting_schema, meetings):
        validated.append(value)
        if error:
            any_errors = True
    if any_errors:
        return None, 'Invalid meeting data'
    return validated, None
def _make_learning_circle_schema(request):
    """Build the validation schema for creating/updating a learning circle.

    Field validators follow the (value, error) convention; schema.chain
    runs validators in order, feeding each the previous cleaned value.
    """
    post_schema = {
        "name": schema.text(length=128, required=False),
        # course id is resolved to a Course instance by _course_check
        "course": schema.chain([
            schema.integer(),
            _course_check,
        ], required=True),
        "description": schema.text(required=True, length=2000),
        "course_description": schema.text(required=False, length=2000),
        # venue name must slugify to something non-empty (see _venue_name_check)
        "venue_name": schema.chain([
            schema.text(required=True, length=256),
            _venue_name_check,
        ], required=True),
        "venue_details": schema.text(required=True, length=128),
        "venue_address": schema.text(required=True, length=256),
        "venue_website": schema.text(length=256),
        "city": schema.text(required=True, length=256),
        "region": schema.text(required=True, length=256),
        "country": schema.text(required=True, length=256),
        "country_en": schema.text(required=True, length=256),
        "latitude": schema.floating_point(),
        "longitude": schema.floating_point(),
        "place_id": schema.text(length=256),
        "language": schema.text(required=True, length=6),
        "online": schema.boolean(),
        "meeting_time": schema.time(required=True),
        "duration": schema.integer(required=True),
        "timezone": schema.text(required=True, length=128),
        "signup_question": schema.text(length=256),
        "facilitator_goal": schema.text(length=256),
        "facilitator_concerns": schema.text(length=256),
        # image URL is rewritten to a storage-relative path (see _image_check)
        "image_url": schema.chain([
            schema.text(),
            _image_check(),
        ], required=False),
        "draft": schema.boolean(),
        "meetings": _meetings_validator,
    }
    return post_schema
@method_decorator(login_required, name='dispatch')
class LearningCircleCreateView(View):
    """Create a new learning circle (study group) from a JSON POST body."""

    def post(self, request):
        post_schema = _make_learning_circle_schema(request)
        data = json.loads(request.body)
        data, errors = schema.validate(post_schema, data)
        if errors != {}:
            logger.debug('schema error {0}'.format(json.dumps(errors)))
            return json_response(request, {"status": "error", "errors": errors})

        # start and end dates need to be set for db model to be valid;
        # derived from the first/last validated meeting
        start_date = data.get('meetings')[0].get('meeting_date')
        end_date = data.get('meetings')[-1].get('meeting_date')

        # create learning circle
        study_group = StudyGroup(
            name=data.get('name', None),
            course=data.get('course'),
            course_description=data.get('course_description', None),
            facilitator=request.user,
            description=data.get('description'),
            venue_name=data.get('venue_name'),
            venue_address=data.get('venue_address'),
            venue_details=data.get('venue_details'),
            venue_website=data.get('venue_website', ''),
            city=data.get('city'),
            region=data.get('region'),
            country=data.get('country'),
            country_en=data.get('country_en'),
            latitude=data.get('latitude'),
            longitude=data.get('longitude'),
            place_id=data.get('place_id', ''),
            online=data.get('online', False),
            language=data.get('language'),
            start_date=start_date,
            end_date=end_date,
            meeting_time=data.get('meeting_time'),
            duration=data.get('duration'),
            timezone=data.get('timezone'),
            image=data.get('image_url'),
            signup_question=data.get('signup_question', ''),
            facilitator_goal=data.get('facilitator_goal', ''),
            facilitator_concerns=data.get('facilitator_concerns', '')
        )
        # use course.caption if course_description is not set
        if study_group.course_description is None:
            study_group.course_description = study_group.course.caption
        # use course.title if name is not set
        if study_group.name is None:
            study_group.name = study_group.course.title
        # only update value for draft if the user verified their email address
        if request.user.profile.email_confirmed_at is not None:
            study_group.draft = data.get('draft', True)
        study_group.save()
        # notification about new study group is sent at this point, but no
        # associated meetings exist yet, which implies that the reminder
        # can't use the date of the first meeting
        generate_meetings_from_dates(study_group, data.get('meetings', []))
        studygroup_url = f"{settings.PROTOCOL}://{settings.DOMAIN}" + reverse('studygroups_view_study_group', args=(study_group.id,))
        return json_response(request, { "status": "created", "studygroup_url": studygroup_url })
@method_decorator(user_is_group_facilitator, name='dispatch')
@method_decorator(login_required, name='dispatch')
class LearningCircleUpdateView(SingleObjectMixin, View):
    """Update an existing learning circle; restricted to its facilitator."""
    model = StudyGroup
    pk_url_kwarg = 'study_group_id'

    def post(self, request, *args, **kwargs):
        study_group = self.get_object()
        post_schema = _make_learning_circle_schema(request)
        data = json.loads(request.body)
        data, errors = schema.validate(post_schema, data)
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})

        # update learning circle
        published = False
        draft = data.get('draft', True)
        # only publish a learning circle for a user with a verified email address
        if draft is False and request.user.profile.email_confirmed_at is not None:
            # records a draft -> live transition
            # NOTE(review): 'published' is computed but never read below —
            # presumably intended for a notification hook; confirm.
            published = study_group.draft is True
            study_group.draft = False

        study_group.name = data.get('name', None)
        study_group.course = data.get('course')
        study_group.description = data.get('description')
        study_group.course_description = data.get('course_description', None)
        study_group.venue_name = data.get('venue_name')
        study_group.venue_address = data.get('venue_address')
        study_group.venue_details = data.get('venue_details')
        study_group.venue_website = data.get('venue_website', '')
        study_group.city = data.get('city')
        study_group.region = data.get('region')
        study_group.country = data.get('country')
        study_group.country_en = data.get('country_en')
        study_group.latitude = data.get('latitude')
        study_group.longitude = data.get('longitude')
        study_group.place_id = data.get('place_id', '')
        study_group.language = data.get('language')
        study_group.online = data.get('online')
        study_group.meeting_time = data.get('meeting_time')
        study_group.duration = data.get('duration')
        study_group.timezone = data.get('timezone')
        study_group.image = data.get('image_url')
        study_group.signup_question = data.get('signup_question', '')
        study_group.facilitator_goal = data.get('facilitator_goal', '')
        study_group.facilitator_concerns = data.get('facilitator_concerns', '')
        study_group.save()

        generate_meetings_from_dates(study_group, data.get('meetings', []))

        studygroup_url = f"{settings.PROTOCOL}://{settings.DOMAIN}" + reverse('studygroups_view_study_group', args=(study_group.id,))
        return json_response(request, { "status": "updated", "studygroup_url": studygroup_url })
@method_decorator(csrf_exempt, name="dispatch")
class SignupView(View):
    """Public (CSRF-exempt) endpoint for learners signing up to a learning circle."""

    def post(self, request):
        signup_questions = {
            "goals": schema.text(required=True),
            "support": schema.text(required=True),
            "custom_question": schema.text(),
        }
        post_schema = {
            # learning circle id is resolved to a StudyGroup instance
            "learning_circle": schema.chain([
                schema.integer(),
                lambda x: (None, 'No matching learning circle exists') if not StudyGroup.objects.filter(pk=int(x)).exists() else (StudyGroup.objects.get(pk=int(x)), None),
            ], required=True),
            "name": schema.text(required=True),
            "email": schema.email(required=True),
            "communications_opt_in": schema.boolean(),
            "consent": schema.chain([
                schema.boolean(),
                lambda consent: (None, 'Consent is needed to sign up') if not consent else (consent, None),
            ], required=True),
            "mobile": schema.mobile(),
            "signup_questions": schema.schema(signup_questions, required=True)
        }
        data = json.loads(request.body)
        clean_data, errors = schema.validate(post_schema, data)
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})

        study_group = clean_data.get('learning_circle')
        # Not sure how to cleanly implement validation like this using the schema?
        # A custom answer is mandatory when the learning circle defines a signup question.
        if study_group.signup_question:
            if not clean_data.get('signup_questions').get('custom_question'):
                return json_response(request, {"status": "error", "errors": { "signup_questions": [{"custom_question": ["Field is required"]}]}})

        # Re-use an existing active application for the same email (case-insensitive),
        # otherwise create a new one accepted immediately.
        if Application.objects.active().filter(email__iexact=clean_data.get('email'), study_group=study_group).exists():
            application = Application.objects.active().get(email__iexact=clean_data.get('email'), study_group=study_group)
        else:
            application = Application(
                study_group=study_group,
                name=clean_data.get('name'),
                email=clean_data.get('email'),
                accepted_at=timezone.now()
            )
        # always refresh name/answers, even on re-signup
        application.name = clean_data.get('name')
        application.signup_questions = json.dumps(clean_data.get('signup_questions'))
        if clean_data.get('mobile'):
            application.mobile = clean_data.get('mobile')
        application.communications_opt_in = clean_data.get('communications_opt_in', False)
        application.save()
        return json_response(request, {"status": "created"})
class LandingPageLearningCirclesView(View):
    """ return upcoming learning circles for landing page """

    def get(self, request):
        query_schema = {
            "scope": schema.text(),
        }
        data = schema.django_get_to_dict(request.GET)
        clean_data, errors = schema.validate(query_schema, data)
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})

        study_groups_unsliced = StudyGroup.objects.published()
        # scope=team limits results to learning circles run by the user's teammates
        if 'scope' in request.GET and request.GET.get('scope') == "team":
            user = request.user
            team_ids = TeamMembership.objects.active().filter(user=user).values("team")
            if team_ids.count() == 0:
                return json_response(request, { "status": "error", "errors": ["User is not on a team."] })
            team_members = TeamMembership.objects.active().filter(team__in=team_ids).values("user")
            study_groups_unsliced = study_groups_unsliced.filter(facilitator__in=team_members)

        # get up to 3 learning circles with upcoming meetings, soonest first
        study_groups = study_groups_unsliced.filter(
            meeting__meeting_date__gte=timezone.now(),
        ).annotate(
            next_meeting_date=Min('meeting__meeting_date')
        ).order_by('next_meeting_date')[:3]

        # if there are less than 3 with upcoming meetings
        if study_groups.count() < 3:
            # pad with learning circles whose last meeting was most recent
            past_study_groups = study_groups_unsliced.filter(
                meeting__meeting_date__lt=timezone.now(),
            ).annotate(
                next_meeting_date=Max('meeting__meeting_date')
            ).order_by('-next_meeting_date')
            study_groups = list(study_groups) + list(past_study_groups[:3-study_groups.count()])

        data = {
            'items': [ serialize_learning_circle(sg) for sg in study_groups ]
        }
        return json_response(request, data)
class LandingPageStatsView(View):
    """Return stats for the landing page.

    - Number of active learning circles (with an upcoming meeting)
    - Number of cities where learning circles happened
    - Number of facilitators who ran at least 1 learning circle
    - Number of learning circles to date
    """

    def get(self, request):
        # learning circles with at least one upcoming meeting
        study_groups = StudyGroup.objects.published().filter(
            meeting__meeting_date__gte=timezone.now()
        ).annotate(
            next_meeting_date=Min('meeting__meeting_date')
        )
        cities = StudyGroup.objects.published().filter(
            latitude__isnull=False,
            longitude__isnull=False,
        ).distinct('city').values('city')
        learning_circle_count = StudyGroup.objects.published().count()
        facilitators = StudyGroup.objects.active().distinct('facilitator').values('facilitator')
        # dedupe on the city-name part before the first comma ("City, Region, Country")
        cities_s = list(set([c['city'].split(',')[0].strip() for c in cities]))
        data = {
            "active_learning_circles": study_groups.count(),
            "cities": len(cities_s),
            "facilitators": facilitators.count(),
            "learning_circle_count": learning_circle_count
        }
        return json_response(request, data)
class ImageUploadView(View):
    """Accept an image upload and return the media URL where it was stored."""

    def post(self, request):
        form = ImageForm(request.POST, request.FILES)
        if form.is_valid():
            image = form.cleaned_data['image']
            # instantiate the configured default storage backend
            storage = get_storage_class()()
            filename = storage.save(image.name, image)
            # TODO - get full URL
            image_url = ''.join([settings.MEDIA_URL, filename])
            return json_response(request, {"image_url": image_url})
        else:
            return json_response(request, {'error': 'not a valid image'})
def detect_platform_from_url(request):
    """Return the course platform detected from the 'url' query parameter."""
    platform = course_platform_from_url(request.GET.get('url', ""))
    return json_response(request, {"platform": platform})
class CourseLanguageListView(View):
    """ Return languages for listed courses """

    def get(self, request):
        # distinct language codes across active, listed courses
        distinct_codes = set(
            Course.objects.active().filter(unlisted=False).values_list('language', flat=True)
        )
        data = {
            "languages": [get_language_info(code) for code in distinct_codes]
        }
        return json_response(request, data)
class FinalReportListView(View):
    """List ended learning circles that received at least one learner survey response."""

    def get(self, request):
        today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
        # published groups that ended before today and have survey responses, newest first
        studygroups = StudyGroup.objects.published().annotate(surveys=Count('learnersurveyresponse')).filter(surveys__gt=0, end_date__lt=today).order_by('-end_date')
        data = {}
        if 'offset' in request.GET or 'limit' in request.GET:
            limit, offset = _limit_offset(request)
            data['offset'] = offset
            data['limit'] = limit
            studygroups = studygroups[offset:offset+limit]

        def _map(sg):
            # signup counts are only exposed to authenticated users
            data = serialize_learning_circle(sg)
            if request.user.is_authenticated:
                data['signup_count'] = sg.application_set.active().count()
            return data
        data['items'] = [ _map(sg) for sg in studygroups ]
        return json_response(request, data)
class InstagramFeed(View):
    """Proxy the authenticated user's media list from the Instagram Basic Display API.

    https://developers.facebook.com/docs/instagram-basic-display-api/reference/media
    """

    def get(self, request):
        url = "https://graph.instagram.com/me/media?fields=id,permalink&access_token={}".format(settings.INSTAGRAM_TOKEN)
        try:
            response = get_json_response(url)
            if response.get("data", None):
                return json_response(request, { "items": response["data"] })
            if response.get("error", None):
                # Instagram returned an explicit error payload; log it before responding.
                # (Previously the log line sat after this return and was unreachable.)
                logger.error('Could not make request to Instagram: {}'.format(response["error"]["message"]))
                return json_response(request, { "status": "error", "errors": response["error"]["message"] })
            # Neither "data" nor "error" present.  The previous code assumed
            # response["error"] existed here and raised a KeyError.
            logger.error('Unexpected response from Instagram: {}'.format(response))
            return json_response(request, { "status": "error", "errors": "Could not make request to Instagram" })
        except ConnectionError as e:
            logger.error('Could not make request to Instagram')
            return json_response(request, { "status": "error", "errors": str(e) })
def serialize_team_data(team):
    """Serialize a team's public profile, including its facilitators and stats."""
    serialized_team = {
        "id": team.pk,
        "name": team.name,
        "subtitle": team.subtitle,
        "page_slug": team.page_slug,
        "member_count": team.teammembership_set.active().count(),
        "zoom": team.zoom,
        "date_established": team.created_at.strftime("%B %Y"),
        "intro_text": team.intro_text,
        "website": team.website,
        "email_address": team.email_address,
        "location": team.location,
        "facilitators": [],
    }
    # count published learning circles run by any active team member
    members = team.teammembership_set.active().values('user')
    studygroup_count = StudyGroup.objects.published().filter(facilitator__in=members).count()
    serialized_team["studygroup_count"] = studygroup_count

    facilitators = team.teammembership_set.active()
    for facilitator in facilitators:
        # plain members are presented with the FACILITATOR role in the public profile
        facilitator_role = "FACILITATOR" if facilitator.role == TeamMembership.MEMBER else facilitator.role
        serialized_facilitator = {
            "first_name": facilitator.user.first_name,
            "city": facilitator.user.profile.city,
            "bio": facilitator.user.profile.bio,
            "contact_url": facilitator.user.profile.contact_url,
            "role": facilitator_role,
        }
        if facilitator.user.profile.avatar:
            serialized_facilitator["avatar_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + facilitator.user.profile.avatar.url
        serialized_team["facilitators"].append(serialized_facilitator)

    # optional media and geo fields are only included when present
    if team.page_image:
        serialized_team["image_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + team.page_image.url

    if team.logo:
        serialized_team["logo_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + team.logo.url

    if team.latitude and team.longitude:
        serialized_team["coordinates"] = {
            "longitude": team.longitude,
            "latitude": team.latitude,
        }

    return serialized_team
class TeamListView(View):
    """List teams, optionally filtered to those with a page image, with pagination."""

    def get(self, request):
        teams = Team.objects.all().order_by('name')
        if 'image' in request.GET and request.GET.get('image') == "true":
            teams = teams.exclude(page_image="")
        data = {}
        # Count after filtering so 'count' matches the item set being paginated.
        # (Previously the count was taken before the image filter, so it could
        # exceed the number of items actually returned.)
        data["count"] = teams.count()
        if 'offset' in request.GET or 'limit' in request.GET:
            limit, offset = _limit_offset(request)
            data['offset'] = offset
            data['limit'] = limit
            teams = teams[offset:offset+limit]
        data['items'] = [ serialize_team_data(team) for team in teams ]
        return json_response(request, data)
class TeamDetailView(SingleObjectMixin, View):
    """Return a single team's public profile; organizers also get the invitation URL."""
    model = Team
    pk_url_kwarg = 'team_id'

    def get(self, request, **kwargs):
        data = {}
        team = self.get_object()
        serialized_team = serialize_team_data(team)
        # ensure user is team organizer before exposing the invitation URL
        if request.user.is_authenticated and team.teammembership_set.active().filter(user=request.user, role=TeamMembership.ORGANIZER).exists():
            serialized_team['team_invitation_url'] = team.team_invitation_url()
        data['item'] = serialized_team
        return json_response(request, data)
def serialize_team_membership(tm):
    """Serialize a TeamMembership, including the member's email-confirmation date."""
    has_confirmed_email = hasattr(tm.user, 'profile') and tm.user.profile.email_confirmed_at is not None
    if has_confirmed_email:
        email_confirmed_at = tm.user.profile.email_confirmed_at.strftime("%-d %B %Y")
    else:
        email_confirmed_at = "--"
    return {
        "facilitator": {
            "first_name": tm.user.first_name,
            "last_name": tm.user.last_name,
            "email": tm.user.email,
            "email_confirmed_at": email_confirmed_at
        },
        "role": dict(TeamMembership.ROLES)[tm.role],
        "id": tm.id,
    }
def serialize_team_invitation(ti):
    """Serialize a pending TeamInvitation for API responses."""
    return {
        "facilitator": {
            "email": ti.email,
        },
        "created_at": ti.created_at.strftime("%-d %B %Y"),
        "role": dict(TeamMembership.ROLES)[ti.role],
        "id": ti.id,
    }
@method_decorator(login_required, name="dispatch")
class TeamMembershipListView(View):
    """List active memberships for a team; restricted to that team's organizers."""

    def get(self, request, **kwargs):
        query_schema = {
            "offset": schema.integer(),
            "limit": schema.integer(),
            "team_id": schema.integer(required=True),
        }
        data = schema.django_get_to_dict(request.GET)
        clean_data, errors = schema.validate(query_schema, data)
        # Check validation errors BEFORE reading clean_data: previously a
        # missing/invalid team_id raised a KeyError before the error response
        # could be returned.
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})
        team_id = clean_data["team_id"]
        user_is_team_organizer = TeamMembership.objects.active().filter(team=team_id, user=request.user, role=TeamMembership.ORGANIZER).exists()
        if not user_is_team_organizer:
            return HttpResponseForbidden()
        team_memberships = TeamMembership.objects.active().filter(team=team_id)
        data = {
            'count': team_memberships.count()
        }
        if 'offset' in request.GET or 'limit' in request.GET:
            limit, offset = _limit_offset(request)
            data['offset'] = offset
            data['limit'] = limit
            team_memberships = team_memberships[offset:offset+limit]
        data['items'] = [serialize_team_membership(m) for m in team_memberships]
        return json_response(request, data)
@method_decorator(login_required, name="dispatch")
class TeamInvitationListView(View):
    """List a team's unanswered invitations; restricted to that team's organizers."""

    def get(self, request, **kwargs):
        query_schema = {
            "offset": schema.integer(),
            "limit": schema.integer(),
            "team_id": schema.integer(required=True)
        }
        data = schema.django_get_to_dict(request.GET)
        clean_data, errors = schema.validate(query_schema, data)
        # Check validation errors BEFORE reading clean_data: previously a
        # missing/invalid team_id raised a KeyError before the error response
        # could be returned.
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})
        team_id = clean_data["team_id"]
        user_is_team_organizer = TeamMembership.objects.active().filter(team=team_id, user=request.user, role=TeamMembership.ORGANIZER).exists()
        if not user_is_team_organizer:
            return HttpResponseForbidden()
        # pending invitations only (responded_at still null)
        team_invitations = TeamInvitation.objects.filter(team=team_id, responded_at__isnull=True)
        data = {
            'count': team_invitations.count()
        }
        if 'offset' in request.GET or 'limit' in request.GET:
            limit, offset = _limit_offset(request)
            data['offset'] = offset
            data['limit'] = limit
            team_invitations = team_invitations[offset:offset+limit]
        data['items'] = [serialize_team_invitation(i) for i in team_invitations]
        return json_response(request, data)
def serialize_invitation_notification(invitation):
    """Serialize a pending TeamInvitation into a notification payload."""
    confirm_url = reverse(
        "studygroups_facilitator_invitation_confirm", args=(invitation.id,)
    )
    return {
        "team_name": invitation.team.name,
        "team_organizer_name": invitation.organizer.first_name,
        "team_invitation_confirmation_url": confirm_url,
    }
@login_required
def facilitator_invitation_notifications(request):
    """Return pending team invitations for the current user.

    Includes explicit email invitations plus an implicit invitation when the
    user's confirmed email domain matches a team's eligibility rule.
    """
    email_validated = hasattr(request.user, 'profile') and request.user.profile.email_confirmed_at is not None
    pending_invitations = TeamInvitation.objects.filter(email__iexact=request.user.email, responded_at__isnull=True)
    eligible_team = eligible_team_by_email_domain(request.user)
    invitation_notifications = [ serialize_invitation_notification(i) for i in pending_invitations]
    # the implicit invitation has no TeamInvitation record, so no args on the URL
    if email_validated and eligible_team:
        implicit_invitation_notification = {
            'team_name': eligible_team.name,
            'team_invitation_confirmation_url': reverse("studygroups_facilitator_invitation_confirm")
        }
        invitation_notifications.append(implicit_invitation_notification)
    data = {
        "items": invitation_notifications
    }
    return json_response(request, data)
@user_is_team_organizer
@login_required
@require_http_methods(["POST"])
def create_team_invitation_url(request, team_id):
    """(Re)generate a team's shareable invitation token and return its URL."""
    team = Team.objects.get(pk=team_id)
    team.generate_invitation_token()
    return json_response(request, { "status": "updated", "team_invitation_url": team.team_invitation_url() })


@user_is_team_organizer
@login_required
@require_http_methods(["POST"])
def delete_team_invitation_url(request, team_id):
    """Revoke a team's shareable invitation URL by clearing its token."""
    team = Team.objects.get(pk=team_id)
    team.invitation_token = None
    team.save()
    return json_response(request, { "status": "deleted", "team_invitation_url": None })
def serialize_announcement(announcement):
    """Serialize an Announcement banner for API responses."""
    fields = ("text", "link", "link_text", "color")
    return {name: getattr(announcement, name) for name in fields}
class AnnouncementListView(View):
    """Return all announcements currently flagged for display."""

    def get(self, request):
        announcements = Announcement.objects.filter(display=True)
        data = {
            "count": announcements.count(),
            "items": [ serialize_announcement(announcement) for announcement in announcements ]
        }
        return json_response(request, data)
def cities(request):
    """Return distinct non-empty cities of published learning circles.

    Each item's value is a slug derived from the part of the city name
    before the first comma (city names look like "City, Region, Country").
    """
    cities = StudyGroup.objects.published().annotate(city_len=Length('city')).filter(city_len__gt=1).values_list('city', flat=True).distinct('city')
    data = {
        "count": cities.count(),
        "items": [{ "label": city, "value": city.split(',')[0].lower().replace(' ', '_') } for city in cities]
    }
    return json_response(request, data)
| StarcoderdataPython |
130113 | from typing import Any
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
import attr
from xsdata.exceptions import XmlContextError
from xsdata.formats.dataclass.compat import ClassType
from xsdata.formats.dataclass.models.elements import XmlType
T = TypeVar("T", bound=object)
@attr.s
class AnyElement:
    """
    Generic model to bind xml document data to wildcard fields.

    :param qname: The element's qualified name
    :param text: The element's text content
    :param tail: The element's tail content
    :param children: The element's list of child elements.
    :param attributes: The element's key-value attribute mappings.
    """

    qname: Optional[str] = attr.ib(default=None)
    text: Optional[str] = attr.ib(default=None)
    tail: Optional[str] = attr.ib(default=None)
    # metadata marks these fields for xsdata's wildcard/attributes binding
    children: List[object] = attr.ib(factory=list, metadata={"type": XmlType.WILDCARD})
    attributes: Dict[str, str] = attr.ib(
        factory=dict, metadata={"type": XmlType.ATTRIBUTES}
    )
@attr.s
class DerivedElement(Generic[T]):
    """
    Generic model wrapper for type substituted elements.

    Example: eg. <b xsi:type="a">...</b>

    :param qname: The element's qualified name
    :param value: The wrapped value
    :param type: The real xsi:type
    """

    qname: str = attr.ib()
    value: T = attr.ib()
    type: Optional[str] = attr.ib(default=None)
class Attrs(ClassType):
    """ClassType implementation that lets xsdata bind attrs models."""

    @property
    def any_element(self) -> Type:
        """Generic element model used for wildcard content."""
        return AnyElement

    @property
    def derived_element(self) -> Type:
        """Wrapper model used for xsi:type substituted elements."""
        return DerivedElement

    def is_model(self, obj: Any) -> bool:
        """Return whether obj (instance or class) is an attrs-decorated model."""
        return attr.has(obj if isinstance(obj, type) else type(obj))

    def verify_model(self, obj: Any):
        """Raise XmlContextError if obj is not an attrs model."""
        if not self.is_model(obj):
            raise XmlContextError(f"Type '{obj}' is not an attrs model.")

    def get_fields(self, obj: Any) -> Tuple[Any, ...]:
        """Return the model's attrs fields with dataclasses-like ordering."""
        if not isinstance(obj, type):
            return self.get_fields(type(obj))

        # Emulate dataclasses fields ordering: walk the MRO base-first so a
        # field redefined in a subclass overwrites the base entry in place.
        fields = {}
        for b in obj.__mro__[-1:0:-1]:
            if self.is_model(b):
                for f in self.get_fields(b):
                    fields[f.name] = f
        for f in attr.fields(obj):
            fields[f.name] = f

        return tuple(fields.values())

    def default_value(self, field: attr.Attribute) -> Any:
        """Return the field's plain default, its factory callable, or None."""
        res = field.default
        if res is attr.NOTHING:
            return None

        if isinstance(res, attr.Factory):  # type: ignore
            return res.factory  # type: ignore

        return res

    def default_choice_value(self, choice: Dict) -> Any:
        """Return a compound-field choice's factory callable or plain default."""
        factory = choice.get("factory")
        if callable(factory):
            return factory

        return choice.get("default")
| StarcoderdataPython |
3211091 | # Importar spacy e criar o objeto nlp do Português
import ____
nlp = ____
# Processar o texto
doc = ____("Eu gosto de gatos e cachorros.")
# Selecionar o primeiro token
first_token = doc[____]
# Imprimir o texto do primeito token
print(first_token.____)
| StarcoderdataPython |
1821195 | <reponame>DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur<gh_stars>1-10
'''
Copyright (C) 2017-2020 <NAME> - <EMAIL>
Please see the LICENSE file for the terms and conditions
associated with this software.
Pair generation code for exchanges
'''
import logging
import requests
from cryptofeed.defines import (BINANCE, BINANCE_FUTURES, BINANCE_JERSEY, BINANCE_US, BITCOINCOM, BITFINEX, BITMAX,
BITSTAMP, BITTREX, BLOCKCHAIN, BYBIT, COINBASE, COINBENE, EXX, FTX, FTX_US, GEMINI,
HITBTC, HUOBI, HUOBI_DM, HUOBI_SWAP, KRAKEN, OKCOIN, OKEX, POLONIEX, UPBIT, GATEIO)
LOG = logging.getLogger('feedhandler')
PAIR_SEP = '-'
_pairs_retrieval_cache = dict()
def set_pair_separator(symbol: str):
    """Set the separator placed between base and quote in normalized pair names."""
    global PAIR_SEP
    PAIR_SEP = symbol
def gen_pairs(exchange):
    """Return the normalized-pair -> exchange-symbol mapping for an exchange.

    Pairs are fetched at most once per process and cached in
    _pairs_retrieval_cache.
    """
    if exchange not in _pairs_retrieval_cache:
        LOG.info("%s: Getting list of pairs", exchange)
        pairs = _exchange_function_map[exchange]()
        LOG.info("%s: %s pairs", exchange, len(pairs))
        _pairs_retrieval_cache[exchange] = pairs

    return _pairs_retrieval_cache[exchange]
def _binance_pairs(endpoint: str):
    """Fetch exchange info from a Binance-style endpoint and build the
    normalized-pair -> exchange-symbol mapping.

    The raw symbol is base+quote concatenated; the normalized form inserts
    PAIR_SEP between them, e.g. 'BTCUSDT' -> 'BTC-USDT'.
    """
    ret = {}
    pairs = requests.get(endpoint).json()
    for symbol in pairs['symbols']:
        # split on the base asset length to find the base/quote boundary
        split = len(symbol['baseAsset'])
        normalized = symbol['symbol'][:split] + PAIR_SEP + symbol['symbol'][split:]
        ret[normalized] = symbol['symbol']
    return ret
def binance_pairs():
    # global spot exchange (binance.com)
    return _binance_pairs('https://api.binance.com/api/v1/exchangeInfo')


def binance_us_pairs():
    # US exchange (binance.us)
    return _binance_pairs('https://api.binance.us/api/v1/exchangeInfo')


def binance_jersey_pairs():
    # Jersey exchange (binance.je)
    return _binance_pairs('https://api.binance.je/api/v1/exchangeInfo')


def binance_futures_pairs():
    # coin-margined futures (dapi.binance.com)
    return _binance_pairs('https://dapi.binance.com/dapi/v1/exchangeInfo')
def bitfinex_pairs():
    """Build the Bitfinex normalized-pair -> exchange-symbol mapping."""
    ret = {}
    r = requests.get('https://api.bitfinex.com/v2/tickers?symbols=ALL').json()
    for data in r:
        pair = data[0]
        if pair[0] == 'f':
            # skip 'f'-prefixed symbols (funding markets, not trading pairs)
            continue
        # drop the leading prefix char; the last 3 chars are the quote currency
        normalized = pair[1:-3] + PAIR_SEP + pair[-3:]
        # Bitfinex calls Tether 'UST'; normalize to the common USDT code
        normalized = normalized.replace('UST', 'USDT')
        ret[normalized] = pair
    return ret
def bybit_pairs():
    """Build the Bybit normalized-pair -> exchange-symbol mapping."""
    ret = {}
    r = requests.get('https://api.bybit.com/v2/public/tickers').json()
    for pair in r['result']:
        symbol = pair['symbol']
        # e.g. 'BTCUSD' -> 'BTC-USD' (assumes quote starts with 'USD')
        normalized = symbol.replace("USD", f"{PAIR_SEP}USD")
        ret[normalized] = symbol
    return ret
def _ftx_pairs(endpoint: str):
    """Shared implementation for FTX and FTX US (mirrors _binance_pairs).

    Maps normalized names ('BTC-USD') to the exchange names ('BTC/USD')
    returned by the /markets endpoint.
    """
    ret = {}
    r = requests.get(endpoint).json()
    for data in r['result']:
        ret[data['name'].replace("/", PAIR_SEP)] = data['name']
    return ret


def ftx_pairs():
    # ftx.com (global)
    return _ftx_pairs('https://ftx.com/api/markets')


def ftx_us_pairs():
    # ftx.us (US)
    return _ftx_pairs('https://ftx.us/api/markets')
def coinbase_pairs():
    """Coinbase ids are already 'BASE-QUOTE'; just swap in the configured separator."""
    r = requests.get('https://api.pro.coinbase.com/products').json()
    return {data['id'].replace("-", PAIR_SEP): data['id'] for data in r}


def gemini_pairs():
    """Gemini symbols are lowercase concatenated; the last 3 chars are the quote currency."""
    ret = {}
    r = requests.get('https://api.gemini.com/v1/symbols').json()
    for pair in r:
        std = f"{pair[:-3]}{PAIR_SEP}{pair[-3:]}"
        std = std.upper()
        ret[std] = pair.upper()
    return ret


def hitbtc_pairs():
    """HitBTC ids are base+quote concatenated; split on the base currency length."""
    ret = {}
    pairs = requests.get('https://api.hitbtc.com/api/2/public/symbol').json()
    for symbol in pairs:
        split = len(symbol['baseCurrency'])
        normalized = symbol['id'][:split] + PAIR_SEP + symbol['id'][split:]
        ret[normalized] = symbol['id']
    return ret
def poloniex_id_pair_mapping():
    """Map Poloniex numeric market ids to their 'QUOTE_BASE' pair strings."""
    ret = {}
    pairs = requests.get('https://poloniex.com/public?command=returnTicker').json()
    for pair in pairs:
        ret[pairs[pair]['id']] = pair
    return ret


def poloniex_pairs():
    """Poloniex pairs are 'QUOTE_BASE'; the normalized form is 'BASE<sep>QUOTE'."""
    return {value.split("_")[1] + PAIR_SEP + value.split("_")[0]: value for _, value in poloniex_id_pair_mapping().items()}


def bitstamp_pairs():
    """Map 'BASE/QUOTE' display names to Bitstamp's lowercase url symbols."""
    ret = {}
    r = requests.get('https://www.bitstamp.net/api/v2/trading-pairs-info/').json()
    for data in r:
        normalized = data['name'].replace("/", PAIR_SEP)
        pair = data['url_symbol']
        ret[normalized] = pair
    return ret
def kraken_pairs():
    """Build the Kraken normalized-pair -> websocket-name mapping."""
    ret = {}
    r = requests.get('https://api.kraken.com/0/public/AssetPairs')
    data = r.json()
    for pair in data['result']:
        if 'wsname' not in data['result'][pair] or '.d' in pair:
            # https://blog.kraken.com/post/259/introducing-the-kraken-dark-pool/
            # .d is for dark pool pairs
            continue
        base, quote = data['result'][pair]['wsname'].split("/")
        normalized = f"{base}{PAIR_SEP}{quote}"
        exch = data['result'][pair]['wsname']
        # translate Kraken's currency codes to the common ones
        normalized = normalized.replace('XBT', 'BTC')
        normalized = normalized.replace('XDG', 'DOG')
        ret[normalized] = exch
    return ret


def kraken_rest_pairs():
    """Same mapping as kraken_pairs() but with '/' stripped for REST endpoints."""
    return {normalized: exchange.replace("/", "") for normalized, exchange in kraken_pairs().items()}
def exx_pairs():
    """EXX keys are lowercase 'base_quote'; uppercase and swap in the separator."""
    r = requests.get('https://api.exx.com/data/v1/tickers').json()
    exchange = [key.upper() for key in r.keys()]
    pairs = [key.replace("_", PAIR_SEP) for key in exchange]
    return dict(zip(pairs, exchange))


def huobi_common_pairs(url: str):
    """Shared Huobi implementation: map 'BASE<sep>QUOTE' to concatenated lowercase symbols."""
    r = requests.get(url).json()
    return {'{}{}{}'.format(e['base-currency'].upper(), PAIR_SEP, e['quote-currency'].upper()): '{}{}'.format(e['base-currency'], e['quote-currency']) for e in r['data']}
def huobi_pairs():
    # Huobi Pro spot (huobi.pro)
    return huobi_common_pairs('https://api.huobi.pro/v1/common/symbols')


def huobi_us_pairs():
    # huobi.com endpoint
    return huobi_common_pairs('https://api.huobi.com/v1/common/symbols')


def huobi_dm_pairs():
    """
    Mapping is, for instance: {"BTC_CW":"BTC190816"}
    See comments in exchange/huobi_dm.py
    """
    # contract-type suffixes: current/next week, current/next quarter
    mapping = {
        "this_week": "CW",
        "next_week": "NW",
        "quarter": "CQ",
        "next_quarter": "NQ"
    }
    r = requests.get('https://www.hbdm.com/api/v1/contract_contract_info').json()
    pairs = {}

    for e in r['data']:
        pairs[f"{e['symbol']}_{mapping[e['contract_type']]}"] = e['contract_code']

    return pairs


def huobi_swap_pairs():
    """Huobi swap contract codes are used verbatim on both sides of the mapping."""
    r = requests.get('https://api.hbdm.com/swap-api/v1/swap_contract_info').json()
    pairs = {}
    for e in r['data']:
        pairs[e['contract_code']] = e['contract_code']
    return pairs


def okcoin_pairs():
    """OKCoin instrument ids are already normalized; keys equal values."""
    r = requests.get('https://www.okcoin.com/api/spot/v3/instruments').json()
    return {e['instrument_id']: e['instrument_id'] for e in r}
def okex_pairs():
    """Build the OKEx instrument mapping across spot, swap and futures.

    All three instrument classes are merged into a single dict.  OKEx
    instrument ids are used verbatim, so keys equal values.
    (Removed an unused 'futs' accumulator and commented-out debug prints.)
    """
    r = requests.get('https://www.okex.com/api/spot/v3/instruments').json()
    data = {e['instrument_id']: e['instrument_id'] for e in r}
    # swaps
    r = requests.get('https://www.okex.com/api/swap/v3/instruments/ticker').json()
    for update in r:
        data[update['instrument_id']] = update['instrument_id']
    # futures
    r = requests.get('https://www.okex.com/api/futures/v3/instruments/ticker').json()
    for update in r:
        data[update['instrument_id']] = update['instrument_id']
    return data
def coinbene_pairs():
r = requests.get('http://api.coinbene.com/v1/market/symbol').json()
return {f"{e['baseAsset']}{PAIR_SEP}{e['quoteAsset']}": e['ticker'] for e in r['symbol']}
def bittrex_pairs():
r = requests.get('https://api.bittrex.com/api/v1.1/public/getmarkets').json()
r = r['result']
return {f"{e['MarketCurrency']}{PAIR_SEP}{e['BaseCurrency']}": e['MarketName'] for e in r if e['IsActive']}
def bitcoincom_pairs():
r = requests.get('https://api.exchange.bitcoin.com/api/2/public/symbol').json()
return {f"{data['baseCurrency']}{PAIR_SEP}{data['quoteCurrency'].replace('USD', 'USDT')}": data['id'] for data in r}
def bitmax_pairs():
    """Map '<base><PAIR_SEP><quote>' to the BitMax product symbol."""
    products = requests.get('https://bitmax.io/api/v1/products').json()
    pairs = {}
    for product in products:
        pairs[f"{product['baseAsset']}{PAIR_SEP}{product['quoteAsset']}"] = product['symbol']
    return pairs
def upbit_pairs():
    """Map '<coin><PAIR_SEP><fiat>' to the Upbit market code.

    Upbit market codes look like 'KRW-BTC' (quote currency first); split
    each code once instead of twice per entry.
    """
    r = requests.get('https://api.upbit.com/v1/market/all').json()
    pairs = {}
    for data in r:
        quote, base = data['market'].split('-')[:2]
        pairs[f"{base}{PAIR_SEP}{quote}"] = data['market']
    return pairs
def blockchain_pairs():
    """Map 'BASE<PAIR_SEP>QUOTE' to the Blockchain.com symbol (dash-separated upstream)."""
    instruments = requests.get("https://api.blockchain.com/mercury-gateway/v1/instruments").json()
    pairs = {}
    for instrument in instruments:
        pairs[instrument["symbol"].replace("-", PAIR_SEP)] = instrument["symbol"]
    return pairs
def gateio_pairs():
    """Map 'BASE<PAIR_SEP>QUOTE' to the gate.io currency-pair id (underscore-separated upstream)."""
    currency_pairs = requests.get("https://api.gateio.ws/api/v4/spot/currency_pairs").json()
    pairs = {}
    for currency_pair in currency_pairs:
        pairs[currency_pair['id'].replace("_", PAIR_SEP)] = currency_pair['id']
    return pairs
# Dispatch table: exchange identifier -> callable returning that exchange's
# {normalised pair: native symbol} mapping. Note KRAKEN has two entries —
# its websocket feed and a separate symbol set for the REST API.
_exchange_function_map = {
    BITFINEX: bitfinex_pairs,
    COINBASE: coinbase_pairs,
    GEMINI: gemini_pairs,
    HITBTC: hitbtc_pairs,
    POLONIEX: poloniex_pairs,
    BITSTAMP: bitstamp_pairs,
    KRAKEN: kraken_pairs,
    KRAKEN + 'REST': kraken_rest_pairs,
    BINANCE: binance_pairs,
    BINANCE_US: binance_us_pairs,
    BINANCE_JERSEY: binance_jersey_pairs,
    BINANCE_FUTURES: binance_futures_pairs,
    BLOCKCHAIN: blockchain_pairs,
    EXX: exx_pairs,
    HUOBI: huobi_pairs,
    HUOBI_DM: huobi_dm_pairs,
    HUOBI_SWAP: huobi_swap_pairs,
    OKCOIN: okcoin_pairs,
    OKEX: okex_pairs,
    COINBENE: coinbene_pairs,
    BYBIT: bybit_pairs,
    FTX: ftx_pairs,
    FTX_US: ftx_us_pairs,
    BITTREX: bittrex_pairs,
    BITCOINCOM: bitcoincom_pairs,
    BITMAX: bitmax_pairs,
    UPBIT: upbit_pairs,
    GATEIO: gateio_pairs
}
| StarcoderdataPython |
6633389 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Goal
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import goal
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class GoalTests(unittest.TestCase):
    """Round-trip tests for the FHIR STU3 Goal resource.

    Each test loads a bundled example JSON fixture, asserts its field
    values, then serialises the instance back to JSON and re-parses it to
    verify the round trip is lossless.
    """

    def instantiate_from(self, filename):
        """Load *filename* from FHIR_UNITTEST_DATADIR and return a Goal."""
        datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
        with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
            js = json.load(handle)
            self.assertEqual("Goal", js["resourceType"])
        return goal.Goal(js)

    def testGoal1(self):
        """goal-example.json parses and survives a JSON round trip."""
        inst = self.instantiate_from("goal-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Goal instance")
        self.implGoal1(inst)
        # Re-serialise and re-parse: field checks must hold on the copy too.
        js = inst.as_json()
        self.assertEqual("Goal", js["resourceType"])
        inst2 = goal.Goal(js)
        self.implGoal1(inst2)

    def implGoal1(self, inst):
        """Assert the field values of the weight-loss Goal example."""
        self.assertEqual(
            force_bytes(inst.category[0].coding[0].code), force_bytes("dietary")
        )
        self.assertEqual(
            force_bytes(inst.category[0].coding[0].system),
            force_bytes("http://hl7.org/fhir/goal-category"),
        )
        self.assertEqual(
            force_bytes(inst.description.text),
            force_bytes("Target weight is 160 to 180 lbs."),
        )
        self.assertEqual(force_bytes(inst.id), force_bytes("example"))
        self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123"))
        self.assertEqual(
            force_bytes(inst.priority.coding[0].code), force_bytes("high-priority")
        )
        self.assertEqual(
            force_bytes(inst.priority.coding[0].display), force_bytes("High Priority")
        )
        self.assertEqual(
            force_bytes(inst.priority.coding[0].system),
            force_bytes("http://hl7.org/fhir/goal-priority"),
        )
        self.assertEqual(force_bytes(inst.priority.text), force_bytes("high"))
        self.assertEqual(inst.startDate.date, FHIRDate("2015-04-05").date)
        self.assertEqual(inst.startDate.as_json(), "2015-04-05")
        self.assertEqual(force_bytes(inst.status), force_bytes("on-hold"))
        self.assertEqual(inst.statusDate.date, FHIRDate("2016-02-14").date)
        self.assertEqual(inst.statusDate.as_json(), "2016-02-14")
        self.assertEqual(
            force_bytes(inst.statusReason),
            force_bytes("Patient wants to defer weight loss until after honeymoon."),
        )
        self.assertEqual(
            force_bytes(inst.target.detailRange.high.code), force_bytes("[lb_av]")
        )
        self.assertEqual(
            force_bytes(inst.target.detailRange.high.system),
            force_bytes("http://unitsofmeasure.org"),
        )
        self.assertEqual(
            force_bytes(inst.target.detailRange.high.unit), force_bytes("lbs")
        )
        self.assertEqual(inst.target.detailRange.high.value, 180)
        self.assertEqual(
            force_bytes(inst.target.detailRange.low.code), force_bytes("[lb_av]")
        )
        self.assertEqual(
            force_bytes(inst.target.detailRange.low.system),
            force_bytes("http://unitsofmeasure.org"),
        )
        self.assertEqual(
            force_bytes(inst.target.detailRange.low.unit), force_bytes("lbs")
        )
        self.assertEqual(inst.target.detailRange.low.value, 160)
        self.assertEqual(inst.target.dueDate.date, FHIRDate("2016-04-05").date)
        self.assertEqual(inst.target.dueDate.as_json(), "2016-04-05")
        self.assertEqual(
            force_bytes(inst.target.measure.coding[0].code), force_bytes("3141-9")
        )
        self.assertEqual(
            force_bytes(inst.target.measure.coding[0].display),
            force_bytes("Weight Measured"),
        )
        self.assertEqual(
            force_bytes(inst.target.measure.coding[0].system),
            force_bytes("http://loinc.org"),
        )
        self.assertEqual(force_bytes(inst.text.status), force_bytes("additional"))

    def testGoal2(self):
        """goal-example-stop-smoking.json parses and survives a JSON round trip."""
        inst = self.instantiate_from("goal-example-stop-smoking.json")
        self.assertIsNotNone(inst, "Must have instantiated a Goal instance")
        self.implGoal2(inst)
        # Re-serialise and re-parse: field checks must hold on the copy too.
        js = inst.as_json()
        self.assertEqual("Goal", js["resourceType"])
        inst2 = goal.Goal(js)
        self.implGoal2(inst2)

    def implGoal2(self, inst):
        """Assert the field values of the stop-smoking Goal example."""
        self.assertEqual(
            force_bytes(inst.description.text), force_bytes("Stop smoking")
        )
        self.assertEqual(force_bytes(inst.id), force_bytes("stop-smoking"))
        self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123"))
        self.assertEqual(
            force_bytes(inst.outcomeCode[0].coding[0].code), force_bytes("8517006")
        )
        self.assertEqual(
            force_bytes(inst.outcomeCode[0].coding[0].display),
            force_bytes("Ex-smoker (finding)"),
        )
        self.assertEqual(
            force_bytes(inst.outcomeCode[0].coding[0].system),
            force_bytes("http://snomed.info/sct"),
        )
        self.assertEqual(
            force_bytes(inst.outcomeCode[0].text), force_bytes("Former smoker")
        )
        self.assertEqual(inst.startDate.date, FHIRDate("2015-04-05").date)
        self.assertEqual(inst.startDate.as_json(), "2015-04-05")
        self.assertEqual(force_bytes(inst.status), force_bytes("achieved"))
        self.assertEqual(force_bytes(inst.text.status), force_bytes("additional"))
| StarcoderdataPython |
3527185 | <filename>dod/__init__.py<gh_stars>1-10
from .character_sheet import CharacterSheet
__all__ = ["CharacterSheet"]
| StarcoderdataPython |
4999532 | <filename>EpcisIoT/documentdb.py
import logging
from pymongo import DESCENDING
# NOTE(review): this configures the *root* logger at import time, which
# affects every module in the process — callers may not expect DEBUG output.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)  # module-level logger
def obtain_time_duration(collection, new_document):
    """Compute the time span between the two most recent events of an EPC.

    Fetches the two newest documents (by eventTime, descending) that share
    the first EPC of *new_document*. The newest is *new_document* itself
    (just inserted and detected via change streams); the second-newest is
    the previous event whose location/time we report.

    :param collection: MongoDB collection of EPCIS events to query
    :type collection: pymongo.collection.Collection
    :param new_document: newly inserted document detected by change streams
    :type new_document: dict
    :returns: previous/present bizLocation ids plus ISO-8601 'from_time'
        and 'to_time' timestamps, or None when no previous event exists
    :rtype: dict or None
    """
    # Obtain the two most recent documents for the incoming EPC, sorted
    # descending by eventTime. The first in the list is the newly inserted
    # document detected by change streams; the second is of interest.
    prev_documents = collection.find({'epcList.epc': new_document['epcList'][0]['epc']}).limit(2).sort([("eventTime", DESCENDING)])
    if prev_documents is not None:
        # materialise the cursor so we can count and index it
        prev_doc_list = list(prev_documents)
        # print(prev_doc_list)
        if len(prev_doc_list) == 1:
            # only the freshly inserted event exists: a brand-new product
            logger.info('Only Single entry exists for Product.. It implies it is the a new product with no previous events.')
            return None
        else:
            logger.debug('Previous BizLocation of Product: {}, Present BizLocation of Product: {}'.format(
                prev_doc_list[1]['bizLocation']['id'], new_document['bizLocation']['id']))
            logger.debug('Time Duration: From {} to {}'.format(prev_doc_list[1]['eventTime'], new_document['eventTime']))
            # build the dictionary to return; timestamps are rendered as
            # millisecond-precision ISO-8601 with a literal 'Z' suffix
            # (assumes eventTime values are naive UTC datetimes — TODO confirm)
            duration = {
                'bizLocation': {
                    'prev': prev_doc_list[1]['bizLocation']['id'],
                    'present': new_document['bizLocation']['id']
                },
                'from_time': prev_doc_list[1]['eventTime'].isoformat(timespec='milliseconds') + 'Z',
                'to_time': new_document['eventTime'].isoformat(timespec='milliseconds') + 'Z'
            }
            # print(duration)
            return duration
    else:
        logger.info('No Previous Information of Event Found')
        return None
| StarcoderdataPython |
3426002 | password_database = """15-16 l: klfbblslvjclmlnqklvg
6-13 h: pghjchdxhnjhjd
4-13 n: nnznntzznqnzbtzj
10-16 r: nrrrrkrjtxwrrrwx
1-6 t: rttftttttttttmdttttt
4-12 l: zhllfxlmvqtnhx
6-8 d: wxpwgdbjtffddkb
7-9 q: rqcqxjqhsm
6-8 x: xxxfxdxxx
5-9 d: dwnwnbsddfmc
2-6 j: jvdrrjchml
8-10 x: xxxcxxxzxxxxx
15-16 f: ffffffffffffffnfc
3-12 v: vgvvvvvvvvfqvvvv
2-5 p: tknppm
11-12 r: frrnrfqtshrgw
13-16 f: rfjmkrqkqrxmfnqj
3-5 f: ffffxffn
2-4 f: fffwf
11-13 s: srslssshssssx
1-6 c: cccdcn
2-6 t: tthhnc
12-17 w: wwwwwwwwwwwmwwwwg
5-6 k: kqkksskkp
4-5 p: ppppx
5-12 h: hbjhqhdwnqxhhsh
6-8 p: ppgzpppw
3-4 z: zzcz
6-15 x: hfvxxsqbfxxlhpxtb
4-7 m: qmxzhgt
18-20 b: fvzbbrbcvskxrpkwnmwb
1-11 c: ccccccccccggccc
1-3 p: dppp
2-6 j: fjbmljmdb
2-4 l: slpmqpk
3-5 b: stbpbbndtgzxlsnbdk
6-11 h: hlzkdhhhrhchmhbhl
1-4 w: wwpv
10-12 p: pmpppkpppgph
11-14 f: fbdxffpffffffkfffjvf
4-9 v: vkvfhfvvrvvv
16-17 t: tmtbztrptdlvttvkt
2-6 w: wwwwphwwwwnwwj
6-11 x: xxxxxxxxxxkxx
9-10 t: ttftchttnd
2-9 w: wkwwwwwwlww
2-4 g: ggvcgggggggggg
5-6 d: dldddsddddddd
5-8 j: jddjjnld
9-12 l: xldxlklvlkll
2-3 w: xwnrwdwndlvl
16-18 m: mmmmrmmmmmrvvmmvmm
3-4 t: ctts
11-12 b: ndgjdbppcdbhhssw
2-11 s: nssksssnssxvq
2-5 g: qggwcvf
10-14 l: llxlllrjlmlllwllll
8-9 w: wwwwfpwtvtjwjw
8-10 m: mmmmmmmmmmm
1-11 z: rldjzzzpzzs
16-17 v: qvsrnvvvwxgmvjjcvhcx
14-18 k: kkkkkkkkkkkkkkkkklk
2-3 q: qqrqqqxqqqq
4-9 c: lclckhcgn
2-3 b: nbbbbldlbbb
15-18 v: vtvvrvxvvnvxhvnvvv
4-7 q: qqqnqqqq
3-13 j: svjjdcnkwnbpl
4-11 q: blnqvqsngwj
3-4 j: dxdkj
5-7 c: dzxbsctqwclcgc
3-4 f: jfhffff
3-5 t: sttwk
7-8 t: tnqtvsztqwpvtftknt
1-12 r: grrrrrrrrrrrrrrr
7-8 k: kkkkkkqkk
3-4 l: llls
1-14 h: khhhjhhhhhhhhh
3-5 d: ttcdddd
1-7 w: dqwhwkww
4-5 p: pppqppppp
10-14 l: lqlltwlllllllzjk
15-17 d: ddddddddddddddddcdd
1-8 z: xpbbjxrzlzznxlz
1-2 v: vrvv
7-11 l: llllllclllslll
3-14 h: hhvhhhhhhhhhhhh
6-7 m: mmgpbjmgqmm
9-13 h: hhhhhhhhhhhhfnhhhhwh
12-17 c: cccpfcccccnccccqscc
6-12 x: kqfxbhxbdxxxl
6-8 z: zqkbtzzb
6-8 l: llllllllllllll
6-13 d: wdzdxdddddlzdtbd
3-8 h: gzcknrxkjgtggnqwqjs
1-6 t: tttttvtt
5-8 x: fjxnxrtn
6-9 z: zzzzzzzzszzz
2-4 v: vpvvtv
1-7 w: wcwrxgf
2-3 t: vxftld
1-2 n: nnxmntsqzk
5-11 l: nmvllllnlckl
7-13 c: cwchccjcrfsccc
2-3 z: dzdz
7-8 s: sddgbhws
3-11 w: qqsqrgmwwmwgfdhpsct
16-17 q: qqqqzqqqqqqqqqqqh
7-15 h: thqthdphmbhhjphhh
6-10 k: gcflljjwkk
9-17 f: ffffffffffffffffdf
10-12 q: qqqqbqlpqqfdqq
3-16 x: dbxmwtlqdmvldxlgxdhk
1-2 x: xxjxh
10-11 p: ppqpppkpptp
3-5 w: wwwwwwwww
10-11 j: xjmhqgdznjf
9-11 n: dxnnnnnnnnmnnn
1-5 l: mlllvllwlrblllll
14-16 q: tqqqqqqwqqqqqmqqqqq
2-3 b: nvbbbcf
5-8 x: xmkxxgxq
1-4 x: xxxfx
1-6 n: nnnrnm
9-17 p: pppppppppppppppppp
2-4 d: ddqd
13-14 p: fmpkxpprppppzhpp
5-6 t: tttttgrttt
10-12 r: rrrrrrrrrlrrrrrrrr
4-7 h: gnmqvqrhwhh
4-5 m: smsmmmm
4-5 q: qgzqb
6-12 c: rdnnsccnrjkcnm
4-16 k: gfkxktpwhxzkgcdnb
4-5 f: ffgvf
6-13 n: nfslcrnkksfmnmvcfnnz
15-18 s: sssqsssssmshmsmsss
1-15 v: vvvvwvvxvvvcvvwvvvvv
2-8 h: gphlnhph
4-13 t: vtjvrcxqwbsrk
2-10 t: tvttftzttttqttt
12-13 t: dvsbstlkfpztdqs
7-8 w: rvwncwww
13-16 v: vlqzjlvxljrvvjvvzp
12-13 w: wwwwwwwwnwdwwgwwwx
14-15 d: dcpdddddddddddmdbd
1-3 b: bbdbbbb
11-12 t: rkdtncbtkktzmvtghb
3-7 n: nzbfgbnnx
5-13 v: vsvbkvvvvvvvv
3-8 n: nltdrmbn
14-15 p: mzkmmqdfpcptzbzc
4-6 q: qtqkgqfmqwm
9-10 v: vvdvvxvvxvwd
5-7 m: mmmmmmkm
6-7 k: kkkkkgkk
8-9 h: shhhhhhhh
8-11 p: pppppppppczplbwcpb
11-17 k: kkkkkkkkkkkkkktkc
9-17 w: mwtfbvsfnhwxqqvxw
8-12 j: jhjjjjjrkjnjjjjjjjj
3-5 g: sgfgqgg
12-14 j: jjjjjjjjjjjjjjjjjjjj
1-11 f: jsffffzfffffxffftf
16-17 f: fffffffffftfffffff
2-4 k: klkwkk
5-8 g: gtjngzkhgb
4-7 b: bbbmbbbb
4-5 w: wwwdw
5-15 g: tplhhgmggbhggrllqg
9-13 v: jvvbqvsbvzhxvh
1-3 d: ddsddn
2-5 w: cfwds
7-11 x: xxxxjxvvxvxhxvml
5-6 v: jwqxvb
3-15 s: ssqsssssssssssss
5-9 d: dddddddddddddddd
5-8 l: vklnlclj
10-13 s: sltdlfbssxsss
11-18 j: hxjxjnjcpmkppnpjpjk
4-6 t: ttbzhvt
2-17 v: rzmhrssrjvpncwqwl
2-4 x: mtcxz
11-12 d: dddddddwddbd
2-13 h: whgpdxvkfxrhg
5-7 d: ddddddxdtddddddhdddq
4-8 b: lhcfbkbbdmb
8-15 l: lvtmkgwllqttgplbgjj
3-5 n: nqngjvw
12-19 m: mmmmmmmmmmmmmmmmmmgm
5-9 g: grhgsdcwgmdt
6-9 j: msgrcrfvzksfxxjq
6-10 s: xdssssssssssssssg
8-9 h: hhhnhhnfhh
12-15 f: fffffffffffffffwfff
3-7 p: pzzpbjkfcczbmnpkf
6-7 r: cnjlbhr
2-5 f: dwxcftrbljgstf
5-11 h: hhhhjhhhhhqh
16-18 r: rrrrrrrrrrrrrrrrrmrr
2-4 r: rxmrd
7-9 c: ccccczccccc
7-13 d: ddddddddddddfd
3-11 k: mmkspkltjwm
8-11 c: rvccccgtclc
1-3 v: vvhhv
4-7 t: jttnttgx
4-16 n: nnnjnfnnnnnnnnnnnn
5-6 m: mmmdmqmm
9-10 x: xxxxxxxkxrx
2-4 m: tmcmzjmvtcghlgm
6-7 q: lqqjqqnzqqmq
16-19 k: kjwkhkkjhkskskpnkfk
19-20 b: pbbpsbnbbmnntbnbbbbn
4-5 p: ppppmp
3-4 d: grlqndd
4-7 n: nnnnnnsn
9-12 k: kkkkkkkkfkkkkk
4-10 h: hfhhhxhfhd
4-9 t: ttttxbbbcdmxsrhjj
4-11 l: npxwbsmklbls
17-20 d: dddddwdhdkddjdddtfdd
3-4 s: nldssq
4-8 s: sssdsssgs
5-16 t: ttttprtcttttttzt
9-17 j: tjjtjjjjgljjjcjjfj
7-8 b: bbbbtbkbb
2-13 l: szllllllllxllll
8-11 n: bmpnwrnngnmsfw
1-5 d: jddqtdwddxddjd
10-11 s: hqgnvslssstqdnvs
14-16 p: ppppppppvpplpgpp
2-8 h: khhjfcqp
6-7 d: fxdpdwvdjkd
5-7 n: bnnprnf
9-13 f: vfffwfffffffkfxff
2-10 j: vjhcggzrjqt
5-7 g: gggfggzwgg
5-6 d: ddbsjddfsdnddxfpgdd
12-13 g: gggqjgxgzpgsggxggxk
1-8 l: hlslllllll
12-13 f: sbfftpvbjsmff
6-7 s: hqcthsb
5-11 s: qsstnssssqsrsskdss
2-6 x: hxrfnjfclx
1-2 w: swwwwwwwwwwwjwmwwwww
7-12 s: whsbkgsmvnfjnnkq
10-12 l: fllllllllsllll
8-9 s: lsssssssqs
1-7 p: pdpppghqppxpppppp
1-3 x: jhxmjhkdrxgd
1-7 b: lbxbbbvbwbbbbt
4-5 c: cdckcbcmqcc
5-9 k: kkmklhlmkkgkrld
12-18 x: dvjxqxxxxxdxxxzxhp
5-8 h: nhfhnfqx
15-16 k: kkkkktrkkkkkpkhqxkkd
1-13 j: dcnbwjxllpksj
11-13 v: wvqhznrrfgmnvz
2-5 m: bpjbmbrmfh
10-15 t: mgtttttqspthnttkktt
9-10 n: nnpnnnnpnh
8-10 b: bjbzhbbbcq
4-12 n: nnnvnnnnnnnnnn
13-14 v: zbkchmmvjvsgkvhcz
2-9 j: jdqjgnnjjjvjt
11-16 g: ggggxqglgglfhvgggn
1-2 b: bsbm
8-11 v: lvvvjhvmxvvvv
5-7 x: xxsxxxpr
2-15 t: stwqdkrqcwbmngtd
5-8 r: rcrrvndr
5-6 v: vvvvvr
4-5 x: qpxxr
1-2 k: nkkkkkd
10-13 r: znlqhvwdvrqbrrrfzwc
1-2 h: hwhhhchhhhhhhhhhhh
3-6 b: bbbbbvbbbvbb
13-14 s: ssssssssmssssdss
7-11 x: cwxhmnhzxrxdrzzxbw
5-16 m: rgjtmrqqmfmmckxm
17-18 b: bbbbbbbbbbbbbbbbbk
11-20 k: kppkqkftkgkkknbtglkb
13-14 t: rtcfrkjvtgnrtf
5-6 z: kdzjxzjgmd
2-3 p: rxppj
9-12 b: mtpbbbbbbbbgp
8-9 b: rbbbbbsbbbbxb
13-17 r: rxgbrrnrrrrrrkrrlrrr
8-11 w: pzqswqwcfln
3-4 x: xxxx
6-9 j: fjhkjjklc
6-7 m: ctmzmfm
8-9 l: lllllllgllpts
8-9 t: hgpwblqkvqtb
4-7 d: dddpddd
4-5 c: ccccwcccg
1-7 q: lvvtpqqvnsqbzqpr
8-15 q: qqqqqqqqqqpqqqlqqq
4-8 f: frfffffffxf
7-12 r: rrrrrrrrrrrkprrtrrrp
14-15 j: jsjjjjjjjjjjjjl
6-17 b: khgflbrbrjbgzjtjt
5-7 j: jjknjpw
2-9 h: rhlqbdklh
5-7 j: lrtvccjbzgjh
12-14 t: tltwtxtctttnttttft
8-9 q: qqqqqqqqx
4-15 p: ppppppppppppppc
2-3 v: vvvvv
5-7 h: djvplxb
11-12 g: gjxlvggsgggxsgfgp
5-6 g: gggggfg
1-6 x: wxtxxxxxxxxxx
1-4 r: rqrcrvrzbrxrwjr
9-10 w: wwwmwwwwpww
7-15 z: pzrbrxpzzzzzntzv
4-5 q: qjqwqqnlqhclq
12-16 g: ggccgzggggsggjgg
11-13 b: phbmbsgkvszbbrbb
2-3 l: xhlbhl
1-7 q: qwdqmtv
5-9 x: xxxxjxxxxx
6-8 p: kvpwqbdp
11-13 t: tqttttttttttmt
9-16 l: wzdxdmlxlkfjknnz
1-11 j: vjjjjjjjjjjj
2-18 t: tsttttttttttttttthtt
6-7 n: fnnncnl
3-5 s: lssvgfssgsshsvstsss
3-5 h: hhhbhhh
4-10 x: tsxxxxxxbls
2-4 n: nnjw
17-18 p: bptpftnxjchrwthtppk
14-15 d: dddddddddddddddd
10-14 m: mmmwnmmgmdmmmmmmmmm
8-9 k: pkpqffpkktbgkbk
7-11 l: lllllllllqplllllllll
11-15 n: nnnzngknbnzdnsnnn
2-4 l: vbqm
10-11 w: wwwwwwwwwwq
3-5 x: xxtxxxxx
3-15 b: spbcxjlbbbrbbbbb
4-5 n: nvfsn
13-16 n: bppjnncnbgjndnnntwgs
1-8 t: mttttttttt
6-9 d: jvmssdkdhxsd
9-10 d: gdrllcfdnd
7-11 c: cscxccccnztccccz
15-16 p: pppppkphpppppspg
2-4 n: vnkjlwwsswscmxktnj
9-11 r: rrrrrkrzslr
7-10 t: tttgqtnttttttttt
4-10 b: bbrzbzpbsbnbbjnbsb
7-8 j: jlljxtsj
5-15 z: lzzzfqzzzzzqzzzzzz
10-11 r: rrrrrrrrrrcrrrr
6-8 t: tztpbttttt
2-9 c: vcgtfxfbr
9-16 r: vgjrwcxhrrhbzxnj
17-18 r: bczjtmjwrtzwvmgbrl
5-6 d: dqgddvd
1-2 r: krzrkr
8-9 j: rxjjjjgbj
8-9 c: cjccccccc
13-14 b: bbbbgbbbbbbbbvb
5-6 p: hbhwtp
1-2 w: wfhrfsfbjtpkqcjrwdb
10-13 q: qqqqqqqnqrqqzqq
3-4 m: xcmcz
11-15 d: dldddcddddjddqd
4-10 h: khhlhhhhhhhhhhhh
2-4 d: bdfpmxzqzbzmxmmwg
4-5 l: lgrbl
9-13 s: ssssssssfsssss
7-8 p: pplkpmkppppppxp
3-8 j: jjjjjjjj
7-8 f: jfffffhzfff
2-5 p: pppppppp
8-9 l: tlclllmlz
4-5 m: mtrmx
13-16 x: xxxxsxmxxxxvwxxw
5-8 n: jknnngnv
4-7 p: ppptppgpppp
12-15 r: rrrrrrrrrrrxrrrhr
2-9 s: sssssssssss
5-6 z: zphzzr
13-17 m: mmlmmsmzmsddbxmkb
5-7 g: gggggggg
8-9 f: ndzfvfkfz
2-10 g: wtngwdkcxgrth
6-13 r: rrqrrqrrqrrrcrrrr
1-6 z: hfzzzz
10-13 j: jjjjjjjjjjjjdj
5-11 m: wfqvmhbjhhnmzdlmpjc
6-8 d: clddzxcdghn
8-12 n: qfqbbvkpjbznrz
3-6 r: rrbrrrrrrx
9-10 r: rrrrrrrrrrr
4-5 m: chltmcm
7-8 r: rrrnprrzrhr
3-4 j: jfcj
16-17 n: nnnnnnnnnnnnnnnnwnnn
7-8 r: rrrrrrnfr
4-12 n: nnnnspnlcnmsnnnn
11-12 v: vvvvvvfvvvhvvv
2-8 h: hlhhkhhchhhghh
4-5 t: stntztttmp
3-5 v: vvvmbvhvvvlvn
9-14 x: srxmtbtgxgmqxxxc
4-5 z: zrlgz
10-11 p: phxkvklpmdp
8-15 m: mmmmmmmpmmmmmmm
1-13 h: mzwqkhhhphsgh
17-19 s: qssssmssgfmssmmpssk
6-8 b: bbbbbbbjbb
3-4 l: rllr
2-4 j: kvjj
2-5 n: lnnkrm
15-16 w: wwwkwwwwwwwwwwwc
1-3 c: ccncc
2-5 s: bhppmsttzcscss
1-4 t: tttxtttt
1-9 r: rfrrrbhtxrrbrmrmr
12-13 s: qplrssssmfswrsswss
2-4 h: zrhhhhzhzhch
1-5 g: rgjgg
11-14 t: ttttvftttttqlsttt
3-4 s: sskss
5-15 m: mmmmnmmmmmvmmmm
1-8 j: sjjjjjjj
1-4 c: ccclcc
5-7 d: ddddhdd
2-8 z: tczzjzzzzzzzgz
1-5 s: qsqdz
8-11 z: zhzpzrvzhzkzzgm
5-9 g: nvdfgshgbgltlqggbp
1-12 r: rrrrrrrrrrrrr
3-4 m: xmmb
8-10 j: jjjjdjmjshqjzk
2-4 t: tptttftt
4-13 t: ggqscfggftnxgjpn
4-12 q: hvzjzqsqwqjlqx
7-8 q: qqqlqqqkqq
9-10 f: ffffffffdfff
2-7 k: kgkkkkkk
7-11 t: xbpdkrtplmfbtkwgp
4-7 c: ccccccccc
12-14 d: dddddddddddqdddddd
6-7 k: xhnntkpnbkfwlr
3-5 q: qqxxq
13-17 d: ddddddddddddvddddd
2-3 g: smglpgpflsrbwxcjctb
5-9 z: zjzzznfzf
7-13 l: lmrkvvxtltcnj
15-16 r: vjrrrkzxrrrqrtrpzv
2-7 x: xsxxxxxx
7-14 m: npmnmgdvnqmgrmm
2-4 x: pxxl
7-15 k: nkkkckgwdkffkmb
7-11 n: dznnmcnssmcn
8-10 s: skssgswssmsss
14-16 n: nnnnnnnncdnjnnhwnnn
4-7 g: kggmgggbggggwggcqgng
4-7 g: gggqggggg
1-2 c: cccc
4-8 j: mzzvwjgrbrv
11-12 x: pksjnwlltskxlj
1-9 k: zfkkkksrkkkkkkz
1-6 k: xkkkkknkk
1-6 c: gccccncbccccc
14-15 h: hhhhhrhhgkhhhmhh
7-8 f: qffgnfhf
2-6 m: vmqwxzgsm
5-8 k: kkzjkwkvk
3-4 s: dcsss
2-4 p: jjbps
2-11 z: zjzzzzzzzzzz
4-7 f: bnkhvjfff
13-16 m: mmmmmmmmmmmmjmmmmmm
5-10 t: kdwjmcgtvtdttmtvw
3-12 j: jjljwjfjjjjjnhjjjjc
13-14 f: fffffflffxfffbf
2-6 m: zmhmsm
4-18 n: nnnvnnnnnhnnnfnnnhnn
3-6 n: xznnsnn
7-11 g: ggggpgggggggsg
6-8 l: llllltllll
4-5 s: ssnttsrs
6-14 w: wwwwwwwwwwwpwlwwww
9-12 q: pxqvswnqqbsqvgsd
1-4 j: mjjjjz
9-19 f: fffffffflfffffffffff
2-6 t: rttqtstct
6-9 d: dddddtddddd
7-12 l: lllllllllllllll
1-12 p: gppppppplpppppp
1-4 s: fslssssssfcd
6-7 v: wvvzffggdfvbvvv
2-7 t: kkwgspt
6-16 n: nncbnnnntwrxlzzwdrsj
3-4 z: zzszzhrqwngprjt
4-5 v: vkvvvcfjc
4-5 j: hcjtj
2-4 k: kqkkkkkkkksk
7-9 g: gzgmgpgkv
5-9 s: kjnwsrjrzssssdlnsqs
2-8 j: jtjxxgmjkjx
5-12 r: bfrcrtjkzqffsjtvwj
1-2 v: xvvvmvvvv
5-9 v: vzrgdldfvx
3-5 m: mqsfmmlmvdfj
12-15 v: wswvvbqqhlvqvwx
6-7 b: bbmjdhb
2-4 b: hbjmjr
6-7 n: pnnnnln
13-15 z: zzlgzpmztfzqzzl
5-6 c: pcppscccccc
6-7 z: zzzzzzz
6-14 l: lllllllllllllqllqlll
13-16 r: rrrrrwrrrrrrhrrr
11-12 r: rrrrhrvhqmqrwr
15-19 w: wwwwwwwwwwwwwwwwwwdw
3-6 w: wwqxkw
17-18 z: zzzzzzzzszzzzzzczvz
3-4 g: lgxx
4-5 x: nsdxdskjxtrz
11-12 w: wvdgqppfwwdf
2-3 w: qgrz
7-8 w: wwrppwfwwrcwr
2-6 n: gtspnngz
3-7 j: jjpjjjzjjjjj
1-2 l: pllblvgmfjvv
15-18 r: rrlgctrrrcrqqgldpr
3-6 v: vvvvvt
2-10 s: vslltcxpbg
5-8 j: blvzjtjnfmjnjtsrzmc
9-13 b: bbbbbbbbbbbbsb
3-4 w: wwfjw
6-7 x: xxxxxxmx
7-15 g: ggggggrgggggggfggg
16-18 f: zffffffffffffffwfff
3-4 q: qcqn
3-4 k: kvzqk
4-18 z: wpclkbsxwbmbmkdpzzcm
1-8 z: zkzzzzzgzzzzz
7-16 x: xxxxxxxxxxxxxxjkxxx
3-6 t: xwcjztz
8-9 r: rdrprrtrs
1-7 v: dvvvvvcvvd
6-8 t: tttttkttt
1-5 w: xzwww
6-7 c: ctdjbwcc
3-4 t: ttnttt
9-10 q: qmcqqqsqqqvqqq
11-12 d: dpftdmdmdmld
7-8 j: tbjmbjlj
3-6 t: tttttqtt
6-8 t: tdttthbnqt
2-5 q: qqqqp
1-3 t: ttht
13-17 n: nnnnnnnnnnnnnnnndnn
4-6 j: ftkvjj
5-10 c: hvccfcsccb
7-11 v: vmqvqnnnvzglvvvv
5-8 k: kkkkkksckkk
1-12 z: rzzzzzzzzzzzzzzzvzz
17-18 t: tttttmtttmttttttpttt
16-17 z: xljjnngmjmhprcqzw
4-11 d: dtddbpddlhn
17-18 t: tptttttttttttttttbtt
9-12 w: wwwwwwwwwwwcw
3-4 z: zdzj
1-8 q: qlqqkqqhqtqhxqbbwcp
6-8 r: rkrrlrrmzrcnnrk
13-17 b: dbsbbbbbbbsbmkbbb
5-16 q: gqnvqwwgqshmtrdbplfb
7-9 c: mcvccccczc
10-11 p: pppxppppppc
1-6 v: vvvvvmvvvvv
5-6 s: ssssks
6-14 r: prgrbvrjrgrkrrr
2-11 c: qbnxcvxgldcvdd
11-15 d: ddddddddddsddddd
13-14 w: rwwwwwwwwwwwwsw
12-15 t: ttttttttttttttt
5-8 g: skjgxsjpgvsgr
5-11 g: ggddglggxgqgg
2-4 c: cccc
2-3 h: rvdg
9-13 r: rrrrrrrrsrrrjrrr
2-7 f: dflmgzsdftgkgc
1-5 f: jffffffnfcff
3-4 c: cccrqc
2-7 m: kblcghm
3-4 r: rscrnbbpfkr
2-3 d: wdtqpd
5-6 q: qqqpqnqq
13-15 d: ddddsdddddddddjd
3-4 c: cmcw
2-7 q: qqqqqqjqqqqbqfqq
11-14 x: txxxxpxdxxkxqcv
10-11 b: bbbbbbbbbkbb
7-11 p: ppppppppppmpv
5-6 r: dnxrcvrrr
1-4 d: rdddddd
5-9 f: lxgfvffdf
8-16 k: vmfksjkwkrkkkhkkvknk
15-16 v: vvvvvqvvvvvvvvgv
2-5 p: xpctr
11-15 w: wfwwwwwkwwwwwpr
11-13 x: xxxxxxxlxrfxxs
18-19 b: bbbbbbbbbbqbbbbbbbb
2-4 l: fgwl
8-11 m: xmjnmmmmnmh
4-6 g: lgvpksgq
5-7 t: jtgtvttjlfhpljtzzstd
7-11 p: mjcgvcmxgqpntxkpb
2-3 r: rrgrrnr
6-7 r: rrrrrcr
4-14 d: dddjdddddddddpdddd
5-7 q: qqqqkqqqq
1-4 t: qttqt
4-10 f: hbfjbzrfgvffq
16-17 j: wjjwjwpsjjdvfjnhv
3-4 v: vvtwv
5-6 s: ssdswspdss
1-2 r: rnrrmskrr
4-7 h: rqxbjjhsfh
4-9 l: llllllllhl
4-6 w: sgtwwmcpwd
4-11 l: fjdllznlllmdnll
4-5 k: kkklk
4-6 s: sszrssssssss
12-14 w: wwwwwwwwwwwvwww
2-4 b: vnzb
7-18 g: ggggggggggggggggggg
1-13 s: srjdvvpqnrsnnkzfj
14-15 g: bmhbclmqcgtqxgg
8-12 g: gggfhxgglgzb
3-5 c: scrkcshctccmhm
7-12 q: qklqqqhqvxqqqqw
1-6 m: mlmxzwkmm
3-16 r: rswggrrrrczxfpjt
14-16 l: llldlblllqllllns
2-5 w: wwwwmw
7-9 r: rrrhrprrr
2-3 j: zjmdfcth
3-8 w: xwgwwwwww
1-5 l: plllllll
2-4 b: fbqp
2-3 p: jwpmp
3-4 c: kscm
2-5 b: mbpbnfkvh
4-15 m: qrswmmmgmvmfqtmrrn
12-15 r: rxrrrrrdrhpfrrrrcrrr
10-13 j: jjljjjjjjnjjjjj
8-20 w: gjccptmwtrwxjtskrdfp
11-14 j: jjjjjxjjjjgjjk
1-4 g: gggzzgg
4-8 q: wqqqqblsmsk
1-11 h: lhhhfhhhhbvhhtjwrbh
4-8 x: xlxqxxtxxxxx
13-14 x: xxxxsxxxxxxxqxxx
12-16 b: bxvbbbqbbbbfzbblb
1-4 j: jzbd
5-6 q: qqqqwqkkqqq
2-5 x: xxxxgx
8-10 n: nnnnnnnnnhn
5-10 m: mmmmrmmmmmmmm
2-3 c: qwcz
3-5 n: cckvz
18-19 q: qqqqqqlqtqqqqqqqjqv
5-13 k: kkpkkkkkkkkklkkk
5-10 q: jfjgqllmqzv
15-17 l: llllllllllllllhll
4-10 b: pgqbfrsnvbhzfmtbtw
1-4 c: mccc
6-16 f: fffffpfffffffcfffff
4-9 j: qjbjlcsgljtbvv
11-12 z: dstqbtxxvdchsg
3-8 d: jvdpnvqs
2-3 n: nfvnnnn
6-12 r: zdbfdrvtmpkrjkr
1-15 k: kbdckwtkfhvrbjk
6-8 v: fvhsvgcv
2-3 p: ppkppp
2-14 r: drclhnrdhtcbmnr
7-10 p: vchpmlpjfxbfnwsdxjr
1-3 v: lvvv
1-3 b: bbjbb
2-12 q: qmxbbbgqlscqvdtpq
7-9 l: lllllkllqlllclll
16-17 r: hrrrrrrrrrrrrrrrr
8-9 g: gggggggfg
6-8 v: zvqffmkv
9-12 t: tttttznttjtfrtsjxtd
5-12 q: bqhbltqnjlhqqhtkq
8-9 d: ddddddddkd
2-8 q: tvpqmrrq
13-16 q: qqqgmqqqqqsqmqqpqqqs
7-9 p: pppppplppp
4-6 h: hhxhlhhhxhp
6-8 g: gggggdggggg
4-5 j: jjjkjj
4-5 x: xxxfg
2-13 x: xxhrhxwbrgrdskghtkj
12-19 x: cxxxxxxxxxxhxxxxxxx
1-4 m: tmmmm
5-11 x: mgqnkjxxscxdxr
3-4 t: tttctt
4-13 b: bfkbfbbwbcbpvcbkdqww
5-7 d: dwddddqd
4-15 l: lvvlfclwbssvsdl
9-14 d: ndkfnddtdddfjpvdqt
8-9 s: ssssssssk
9-13 l: dlljknlsllbslhlml
4-5 n: lnnwn
8-12 v: rlvbqlwvfhvv
6-7 f: rcpfwfvffqfcpf
10-11 g: rnqgxbtnxqfzgggtn
8-11 d: ntdzzddkbdhcdddrgdld
5-6 s: xkmssxscvbpwj
1-3 r: rrmr
9-14 p: pvppppppppppppppp
11-15 t: vhftnbtmwpgbcvt
2-7 t: pmbwnrj
10-11 c: cccccccdcvcc
9-14 s: ssksssssssssslsss
2-5 t: ctrtqpn
4-6 t: mttjftttt
4-5 z: zfbzz
2-3 x: xwxlwstzmvvt
4-5 b: vbsbjwpbsbbb
3-4 x: cwxxlmdf
5-6 z: zzzqszzzzzz
1-4 c: ncczc
3-6 k: kkkkkkk
2-3 z: rjzkpxxdvzlzxjzz
8-10 z: fzjzszzdnzbzkzzfz
1-9 c: ccccxccjpccccc
3-12 j: jjjjjjjjjjjcj
10-14 l: llplllvbllllsp
8-12 r: hnrcrxfzrkcjcprrzjbr
17-20 q: qqkqqqqqqqqqqqqbqqqh
6-9 l: lllljlrlpxllllll
8-9 v: vzzmtwfgv
3-7 v: fqdbsshw
12-13 f: fwffnjfbfffzzqfxfffx
1-3 m: zjmdvjjg
5-7 r: nrswrlbw
8-11 c: crzfvzpcbcbchgxbln
9-17 g: ggggggzgwwjpggpgvgrk
6-9 k: hhkrvkkqfhqwbdb
10-16 q: mzzjqfmtkqwvgdfttj
11-13 m: mmmmmmmmmmtmm
1-2 j: jjhrrj
3-12 h: hhhhhhhhhhhhh
8-11 w: zwkrwznrppw
7-14 d: dpxjnddbvdbcllqr
5-8 c: ccccfcccccc
12-16 p: tnptbppgwxxpppjb
1-14 t: qktttttttttttttt
9-12 t: tttttcttgjtt
3-5 p: tcpdgwhrkxkzjjhvsgqp
13-14 k: kkkkkkkkkkkkklk
11-13 x: xxxxxxsxxxxxt
5-12 n: nxxnnznnnnmlnnn
12-14 h: hhlhhhhhhhzcnnhhhhkh
8-10 j: jbjbjmjvjj
6-9 c: ccccccccsc
5-12 j: jjjjjjjjjjjcj
1-2 f: fhffff
13-17 x: xnlxxxxsxxxxxxxxp
2-5 d: sddzn
2-4 q: qqqt
2-6 p: fmxfxnkp
9-11 k: nkdgncqkkgwjp
2-5 p: hpmgcfcmz
1-3 f: ffczf
5-9 r: rtrrprrrrrr
2-4 p: pnpprdn
9-11 l: ltllbllvllc
4-5 x: xxxmxxx
5-9 x: xxxxxxxxxx
16-20 b: hgcbhbbhbgbbdxhdgjwb
10-15 c: qcccpcccdcccccfccwcf
10-11 x: xndsgkwqpxb
15-17 f: wwbqxrffgfvffdmdffw
5-6 q: qqqhqvqqqq
3-4 h: hhhchc
7-9 x: xxxxxxxxgx
7-8 f: fmfflfgf
12-13 v: vvvvvvvvvvvcvvvv
3-7 g: gggbggsgg
1-5 w: wpswsggtw
3-5 h: hhhhkhhhhhdhw
6-7 w: wwwwwmwwwww
5-11 m: mfdmcwmlvpmfc
14-15 j: jjjjjjjjjjjjjjrjj
3-5 v: rvtvkdvqv
3-6 p: frppxpdpg
3-5 t: ttttrt
2-7 x: xxwxxkfxxxxxxxxxxgxr
6-8 k: kkkkkpkgkkkk
10-11 f: fffffpfffkfdf
11-12 g: gggghgnggflg
1-7 x: xxnxxxxkxx
3-4 h: rhhx
6-8 r: bcwrrrrrhrxmqgrhrzm
2-6 l: mpdlnnp
2-4 j: jbjjdhx
2-15 d: dfdddddddnddddfzddjd
5-12 h: gbngwvffshph
1-2 t: txcpth
14-15 f: ffffkfflfffffgf
8-9 b: bbbbbbpbj
5-6 f: zffhfz
8-15 j: qjjfrtjllqfjjcj
3-5 l: llxllll
10-15 z: tpdzhnzkzrtzrzzzrfrz
3-5 q: qqlqq
3-4 d: vmzddpm
8-11 c: cmvzlxtqxcvckxqm
2-13 r: drpppdsqsmvnlkjb
7-9 w: wwwwwwkww
3-6 s: svfspfnpzq
3-4 b: fqpbbbbzpgqf
2-4 s: smssm
8-9 q: qqqqqlqbq
4-7 s: stcssbw
15-16 k: kkkkkkkkkkkkkkkk
10-13 x: xxsxxzxzxxsxphxxxxx
4-5 p: drpsx
7-11 v: vvvvvvvvvvvvv
14-15 n: nnnnnnnnnpnnnbnn
5-8 k: blgkkkkc
12-16 g: ggggggggggvggggggg
1-8 c: cccccccdc
3-9 p: ppqppppppp
7-9 q: cxqqqbhqqzhjqv
1-12 m: mrkvwmmsjmkmw
2-4 s: xrrj
2-4 w: mbww
7-14 v: vvvvvvvvvvvvvbv
3-8 p: kppjprpxppb
4-10 n: znncndnnjncnrnkn
5-7 k: kktnkks
10-13 r: rrrrrrrrrrrrnr
1-6 k: kkkkkrvk
3-5 k: jhjzkbqsjwkxsvd
5-8 p: pcqpprckrp
2-5 f: fsjwf
6-9 l: jxhtplclr
4-13 x: gdxdcdmmxlfxxs
2-10 v: vvvrvvvvvhvvvvvvvv
11-12 v: wqnwrlvjsxvs
1-4 m: mfqm
3-5 d: bmddv
8-10 h: hbhhdhfchh
4-5 s: sssdb
2-3 l: lsll
10-11 k: kkkkdcpnkkgkk
6-8 r: vrrrrxrrrrr
3-4 s: gxsbfr
17-18 p: ptppppppbbpbprpcqpp
16-17 s: gpgwvvnjbdqpksnsd
5-10 f: gfkfzqflfzffvf
6-18 m: kxxfrxjmwmltdvxwmmb
5-6 d: ddntbd
11-15 b: bbbrbhbgbbltbbhb
6-7 r: krbrrrp
5-6 q: qqqqqkqqqqq
3-6 k: kfkmjt
8-10 d: drbwtrdshdbg
13-15 n: tqnprfnndhplcjn
2-4 w: wjwg
10-11 p: xprpnpvhppp
9-10 r: rrrrrrrrqrrr
2-8 w: wjwnwhwwwwwww
12-13 g: gggggggggggwggg
1-6 s: sssssss
1-9 k: xkkkkkkkkkkkkkv
2-4 c: ccccc
3-7 s: ftssfrm
12-13 k: kkkkkkkkbkksk
9-16 q: qqqmqbbqhzqwtlwqb
7-9 p: ppppppppdppp
4-6 b: pcvbvkhb
9-10 g: qmchkfggmgpqgxxgxq
2-9 t: sthtnttthtlsq
3-4 x: xxxx
8-14 s: ssssssssdssssssss
1-3 m: gmbmvbmqbxvm
10-15 s: sgwkdvnsswsszwrn
3-4 s: gmgs
5-11 f: fffffffffffffffff
7-9 v: vvfvsvzdhvv
4-8 l: lllllllpllllllllll
11-16 x: xxxplxxxxxpxxxxxxxxx
9-13 t: tdtftttjdtttc
9-13 g: rknsckngkvdxg
3-4 d: sdqwddmr
1-3 l: lljl
7-17 r: trhrpmrqswrmnrbrrhb
7-8 g: ghkxwgttgk
5-7 b: fbnbtbndb
6-7 v: vnxlvgv
1-6 t: txtkttg
2-13 n: ljnnnnlpsnscfnnnnnnf
4-11 b: bbbbbbbbbbfb
6-9 x: wxdkxxnnnx
2-4 w: wswww
5-13 j: jjsjjjjjjjjjkjjjj
1-4 f: fszhf
5-9 z: wzgfpzzzzjszfvfpwz
5-9 x: xxxxhxxxsx
15-17 b: bbbbbbbbbbbbbbkbqb
2-7 x: xvdmgxxwgxx
3-4 q: qqqq
6-7 m: mqmlltc
8-10 k: kkkfkhkkkpkkkk
2-6 n: cnxnts
1-6 s: gsssssssssscssss
1-9 n: bbvkmnkdn
2-5 p: nmlhpv
4-6 g: vnggdpgm
7-12 b: bbbbbbvwsbbbbbbbbbbx
2-3 b: lgbjjr
7-12 n: chqccgnwsznngz
4-6 v: nntkvvv
2-5 g: qkrjgkqgljh
1-5 f: rffffsfzxfjfzffff
15-16 z: zpzzzzzzzzzhzzkszz
3-4 l: lmflkl
7-8 f: fffvzxff
2-4 r: rrrgrrrr
12-14 w: tjwgwkkwwwwwwwwww
2-12 m: mzmkmmmmmhmmcmmmrmf
5-7 q: nfqbgqqqpqqgjq
1-4 r: ktpr
5-6 j: jjjgbj
9-13 d: ddpbdddwbbdmd
8-9 r: rphfrrrsrr
10-12 f: fffffffffffbffpff
5-12 b: xhbbcbbbbxtbbb
9-10 j: jjjjjjjjqjjj
3-4 q: qqqn
3-9 r: srrtqqprjmqgnp
2-3 c: gcqc
3-4 c: bclc
3-6 n: jnxhshghnnqncgncn
2-4 h: ckhh
9-12 t: tdjjqcdbtkbtmb
11-13 x: cxxkxbxktxxxkxxxpxxh
4-14 m: hjqmqjvqgjdjzsssdkm
6-12 r: rrrrrlrrrrrrrr
14-16 g: gngggggkfglzgggv
9-13 h: hvhhdzhhmhhhh
3-15 x: xxjxxxxxxxxxxxbxx
10-12 z: zzzzzzzzzpzzz
3-4 h: dnhv
13-14 g: gggggggggggggg
7-11 r: rrrrrrrrrrr
6-9 x: czxxlsxxx
3-5 h: hhchh
1-9 j: jjjjjgdjjjjjjjf
7-10 b: ctsbpxrvwbkbjklghnbn
10-11 g: gggggggggglg
1-7 x: xnpsxfm
9-10 q: xxxzqbpdqj
4-10 q: pqsqbqcvllfn
11-12 x: xxxkxxxxxxgxxxx
3-12 s: pstrsssntsssssbssj
9-12 s: wnsdfrtssllsfbsssccb
1-6 r: nrrrrmrc
11-13 k: kkskkkbkkkzkkk
4-5 h: hhhhshh
5-19 f: ffffffffffffffffffcf
3-4 z: zczzz
5-8 l: ghlllsllfw
3-9 g: ggpngglkkf
7-9 b: xwbbbhhbmgjhbbbhnbb
4-5 c: cjmgczcccpzqr
15-16 j: jqfqsnnvffpjjjqj
5-6 z: zzzztvws
3-7 t: tftxtvfzs
1-5 l: tlllll
5-18 n: nnnngnnnnnnnnnnnnnn
5-6 m: bchzqm
9-10 p: lmpsbqgzpxggltl
2-9 h: hqdpjwpxhg
4-6 n: nqbnbnknnzd
2-7 q: qbqqqvq
7-8 c: cccccclc
8-12 r: rrkrrrprvrcqnr
14-15 j: jkpjjnjjjwjjjjjjhjwj
7-9 s: stsstspsn
8-11 h: hhhhbhhhhjhhhh
5-6 d: dddtpdd"""
def _count_valid_passwords(database):
    """Count lines whose password satisfies its letter-count policy.

    Each line looks like '15-16 l: klfbbl...': the letter must occur
    between min and max times (inclusive) in the password.
    """
    valid = 0
    for line in database.split("\n"):
        policy, password = line.split(": ")
        length, letter = policy.split(" ")
        min_len, max_len = length.split("-")
        if int(min_len) <= password.count(letter) <= int(max_len):
            valid += 1
    return valid


valid_passwords = _count_valid_passwords(password_database)
print(valid_passwords)
| StarcoderdataPython |
9625584 | <gh_stars>100-1000
"""
Save a bunch of random samples of each of our models to put up on the website while we figure out
how to host the actual TensorFlow models.
"""
import tensorflow as tf
from os.path import join
import getopt
import sys
from LSTMModel import LSTMModel
from data_reader import DataReader
import constants as c
def process_sample(string):
    """Trim a generated lyric sample down to clean display text.

    Drops everything before the first '*break*' token, strips any leading
    '*break*' tokens, and converts the remaining break markers to newlines.
    """
    words = string.split()
    # remove everything before the first line break token
    words = words[words.index('*break*'):]
    # remove opening line breaks; the original compared against the
    # misspelled token '*break' and therefore never stripped them, and
    # would IndexError if the sample were nothing but break tokens
    while words and words[0] == '*break*':
        words = words[1:]
    newString = ' '.join(words)
    newString = newString.replace('*break*', '\n')
    return newString
def save(artist, model_path, num_save):
    """Restore a trained LSTM checkpoint and write num_save generated
    samples for *artist* to ../save/samples/<artist>/<i>.txt.

    NOTE(review): Python 2 code (print statements, xrange) using the old
    TensorFlow graph-mode API (tf.Session, initialize_all_variables).
    """
    sample_save_dir = c.get_dir('../save/samples/')
    sess = tf.Session()
    print artist
    data_reader = DataReader(artist)
    vocab = data_reader.get_vocab()
    print 'Init model...'
    # test=True presumably puts the model in sampling mode — confirm in LSTMModel
    model = LSTMModel(sess,
                      vocab,
                      c.BATCH_SIZE,
                      c.SEQ_LEN,
                      c.CELL_SIZE,
                      c.NUM_LAYERS,
                      test=True)
    saver = tf.train.Saver()
    sess.run(tf.initialize_all_variables())
    # restoring after init overwrites the random initial values
    saver.restore(sess, model_path)
    print 'Model restored from ' + model_path
    artist_save_dir = c.get_dir(join(sample_save_dir, artist))
    for i in xrange(num_save):
        print i
        path = join(artist_save_dir, str(i) + '.txt')
        sample = model.generate()
        processed_sample = process_sample(sample)
        with open(path, 'w') as f:
            f.write(processed_sample)
def main():
    """Parse CLI options and dump generated samples for the chosen artist.

    Options:
        -l/--load_path    checkpoint to restore the model from
        -a/--artist_name  artist whose data/vocabulary to use
        -N/--num_save     number of samples to generate
    """
    artist = 'kanye_west'
    model_path = '../save/models/kanye_west/kanye_west.ckpt-30000'
    num_save = 1000

    try:
        opts, _ = getopt.getopt(sys.argv[1:], 'l:a:N:', ['load_path=', 'artist_name=', 'num_save='])
    except getopt.GetoptError:
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-l', '--load_path'):
            model_path = arg
        if opt in ('-a', '--artist_name'):
            artist = arg
        # The option string declares a capital -N, so match '-N' here;
        # the original tested '-n', which getopt never produces, so the
        # short flag silently did nothing.
        if opt in ('-N', '--num_save'):
            num_save = int(arg)

    save(artist, model_path, num_save)
save(artist, model_path, num_save)
if __name__ == '__main__':
main() | StarcoderdataPython |
1794049 | #!/usr/bin/env python3
import argparse
def set_input_args(logger):
    """
    Parse the command-line options for the hash_api runner, log them
    (except the password), and return the parsed namespace.
    """
    arg_parser = argparse.ArgumentParser(
        usage="python3 run.py --path=/tmp/foo/ --scheme=http --host=127.0.0.1 --port=8000 \
--api_version=1 --api_user=exampleUser --api_password=<PASSWORD>",
        description="Recursively hashes all files discovered within the path dir."
    )
    # Every option is mandatory; declare them from a single table.
    option_specs = (
        ('-p', '--path', "directory from which file recursion will begin."),
        ('-s', '--scheme', "possible values: http or https."),
        ('-a', '--host', "host name of hash_api service."),
        ('-d', '--port', "network port of hash_api service."),
        ('-r', '--api_version', "version of hash_api to run."),
        ('-u', '--api_user', "username for access to hash_api."),
        ('-w', '--api_password', "password for access to hash_api."),
    )
    for short_flag, long_flag, help_text in option_specs:
        arg_parser.add_argument(short_flag, long_flag, help=help_text, required=True)
    parsed = arg_parser.parse_args()
    # Echo everything except the password for debugging.
    for label, value in (
        ('path', parsed.path),
        ('scheme', parsed.scheme),
        ('host', parsed.host),
        ('port', parsed.port),
        ('version', parsed.api_version),
        ('username', parsed.api_user),
    ):
        logger.debug(f'[!] Received {label}: {value}')
    return parsed
| StarcoderdataPython |
1911496 | #!/usr/bin/env python3
import base64
import os
import io
import re
import sqlite3
import json
import requests
import xml.etree.ElementTree as ET
from datetime import datetime
from collections import deque
from urllib.parse import quote_plus, unquote_plus
from flask import Flask, request, redirect, url_for, flash, Response, g, make_response, send_file, abort
from pprint import pprint
from gevent.wsgi import WSGIServer
app = Flask(__name__)
###### Configuration Begin ######
app.config['DEBUG'] = True
WEB_PORT = 21958
WEBROOT = 'http://127.0.0.1:{}'.format(WEB_PORT)
# SQLite cache database lives next to this file.
DB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cache.db')
###### Configuration End ######
# Release-name tokens that mark the start of technical metadata in a file
# name; everything from the first such token onward is discarded when
# extracting the title.
CLEAR_SUFFIX = [
    'repack', 'unrated',
    '480p', '720p', '1080i', '1080p', '4k',
    'web', 'web-dl', 'bluray', 'blu-ray', 'hdtv',
    'dd5.1', 'dts', 'ddp5.1', 'avc',
    'x264', 'x.264', 'h264', 'h.264',
]
# Matches '.sNN' optionally followed by 'eNN' (season/episode markers).
# BUG FIX: use a raw string -- '\.' in a plain string is an invalid escape
# sequence (DeprecationWarning, and a SyntaxError in future Python versions).
REGEX_SEASON_EPISODE = re.compile(r'\.s([0-9]+)(e([0-9]+))?')
# Chinese numerals for 0..19 ('零' .. '十九'), used to normalize season
# numbers such as '第三季' <-> '第03季'.  (Simplified from the original
# sum(map(lambda ...)) construction; the resulting list is identical.)
DIGITS_TO_CHINESE_NUMBER = list('零一二三四五六七八九十') + list(map(lambda s: '十'+s, '一二三四五六七八九'))
CHINESE_NUMBER_TO_DIGITS = dict(zip(DIGITS_TO_CHINESE_NUMBER, map(str, range(len(DIGITS_TO_CHINESE_NUMBER)))))
def get_db():
    """Return the request-scoped SQLite connection, creating it (and the
    schema) on first use.

    The connection is stashed on flask.g so each request reuses one handle;
    close_connection() tears it down at the end of the app context.
    """
    db = getattr(g, '_database', None)
    if db is None:
        conn = sqlite3.connect(DB_PATH)
        conn.executescript('''
        CREATE TABLE IF NOT EXISTS cache (
            key TEXT UNIQUE,
            value TEXT NOT NULL
        );
        CREATE TABLE IF NOT EXISTS stats (
            key TEXT UNIQUE,
            value INT NOT NULL
        );
        -- BUG FIX: seed the counters so the "UPDATE stats ... value=value+1"
        -- statements in cache_get() have rows to increment; previously they
        -- were silent no-ops on a fresh database.
        INSERT OR IGNORE INTO stats (key, value) VALUES ('num_query', 0);
        INSERT OR IGNORE INTO stats (key, value) VALUES ('num_hit', 0);
        ''')
        conn.row_factory = sqlite3.Row
        db = g._database = conn
    return db
@app.teardown_appcontext
def close_connection(exception):
    """Close the per-request SQLite connection, if one was opened."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
def cache_get(key, func, type='json'):
    """Fetch *key* from the SQLite cache, computing it with *func* on a miss.

    *func* must return a requests-style response object; *type* selects the
    codec: 'json' (stored as JSON text), 'bytes' (stored base64-encoded) or
    'str' (stored verbatim).  The num_query / num_hit counters in the
    ``stats`` table are updated as a side effect.
    """
    # BUG FIX: 'str' is handled by both branches below but this assertion
    # rejected it, making cache_get(..., type='str') unusable.
    assert type in ['json', 'bytes', 'str']
    db = get_db()
    cur = db.cursor()
    cur.execute('SELECT value FROM cache WHERE key=?', (key, ))
    row = cur.fetchone()
    cur.execute('UPDATE stats SET value=value+1 WHERE key=?', ('num_query', ))
    if row:
        # Cache hit: bump the hit counter and decode the stored value.
        cur.execute('UPDATE stats SET value=value+1 WHERE key=?', ('num_hit', ))
        db.commit()
        if type == 'json':
            return json.loads(row['value'])
        elif type == 'bytes':
            return base64.decodebytes(bytes(row['value'], 'utf-8'))
        elif type == 'str':
            return row['value']
        else:
            assert False
    else:
        # Cache miss: perform the request and persist the encoded payload.
        r = func()
        if type == 'json':
            value = r.json()
            value_str = json.dumps(value, indent=2)
        elif type == 'bytes':
            value = r.content
            value_str = str(base64.encodebytes(value), 'ascii')
        elif type == 'str':
            value = r.text
            value_str = r.text
        else:
            assert False
        cur.execute('INSERT OR REPLACE INTO cache (key, value) VALUES (?, ?)', (key, value_str))
        db.commit()
        return value
def xmlify(root):
    """Serialize an ElementTree element into a text/xml Flask response."""
    buffer = io.StringIO()
    ET.ElementTree(root).write(buffer, xml_declaration=True, encoding='unicode')
    payload = buffer.getvalue()
    if app.config['DEBUG']:
        # Pretty-print for easier debugging; note this re-encodes to bytes.
        import xml.dom.minidom as minidom
        payload = minidom.parseString(payload).toprettyxml(indent=' ', encoding='utf-8')
    return Response(payload, mimetype='text/xml')
def get_title_from_filename(filename):
    """Split a release-style file name into ``(title, year, season, episode)``.

    The name is lower-cased and dot-normalized, then truncated at the first
    season/episode marker (``.sNN`` / ``.sNNeNN``) or known release tag
    (resolution, source, codec, ... from CLEAR_SUFFIX).  A trailing number in
    1900-2100 is treated as the release year.  Missing parts are ``None``.

    >>> get_title_from_filename('Atomic.Blonde.2017.1080p.WEB-DL.DD5.1.H264-FGT.mkv')
    ('atomic blonde', 2017, None, None)
    >>> get_title_from_filename('Sense8.S00E02.Amor.Vincit.Omnia.1080p.NF.WEB-DL.DD5.1.x264-NTb.mkv')
    ('sense8', None, 0, 2)
    """
    normalized = filename.lower().replace(' ', '.')
    season = None
    episode = None
    cutoff = len(normalized)
    marker = REGEX_SEASON_EPISODE.search(normalized)
    if marker:
        season_digits, _, episode_digits = marker.groups()
        season = int(season_digits)
        if episode_digits:
            episode = int(episode_digits)
        cutoff = min(cutoff, marker.start())
    for tag in CLEAR_SUFFIX:
        tag_pos = normalized.find(tag)
        if tag_pos != -1:
            cutoff = min(cutoff, tag_pos)
    words = normalized[:cutoff].replace('.', ' ').strip().split()
    year = None
    if len(words) > 1:
        try:
            year = int(words[-1])
        except ValueError:
            pass
        if year is not None and 1900 <= year <= 2100:
            words = words[:-1]
    return ' '.join(words), year, season, episode
def replace_chinese_season_number(title):
    """Rewrite Chinese-numeral season markers as zero-padded digits.

    Example: '第三季' -> '第03季', '第十一季' -> '第11季'.
    """
    for number, numeral in enumerate(DIGITS_TO_CHINESE_NUMBER):
        title = title.replace(f'第{numeral}季', f'第{number:02d}季')
    return title
@app.route('/GetSearchResults/<filename>')
def GetSearchResults(filename):
    """Search Douban for a movie/show parsed out of *filename* and return
    the matches as an XML <results> document of <entity> elements, each
    linking to the local /GetDetails endpoint."""
    title, year, season, episode = get_title_from_filename(filename)
    print('(title, year, season, episode) =', repr((title, year, season, episode)))
    # Search responses are memoized per title in the SQLite cache.
    value = cache_get('search:' + title, lambda: requests.get('https://api.douban.com/v2/movie/search', params=dict(q=title)))
    # pprint(value)
    subjects = deque()
    for subject in value['subjects']:
        try:
            subject_year = int(subject['year'])
        except:
            subject_year = None
        # Keep only subjects whose year is unknown or within +-1 of the
        # year parsed from the file name.
        if not (subject_year is None or year is None or subject_year-1 <= year <= subject_year+1):
            continue
        prepend = False
        if season is not None:
            # Prioritize subjects whose title mentions the matching season
            # (e.g. '第三季' for season 3).
            str_chinese_season = '第{}季'.format(DIGITS_TO_CHINESE_NUMBER[season])
            prepend = str_chinese_season in subject['title']
        # NOTE: appendleft means later season matches end up before earlier
        # ones, i.e. prioritized matches appear in reverse discovery order.
        if prepend:
            subjects.appendleft(subject)
        else:
            subjects.append(subject)
    root = ET.Element('results')
    root.attrib['sorted'] = 'yes'
    for subject in subjects:
        entity = ET.SubElement(root, 'entity')
        ET.SubElement(entity, 'title').text = replace_chinese_season_number(subject['title'])
        url = '{}/GetDetails/{}'.format(WEBROOT, subject['id'])
        if episode is not None:
            # Propagate the episode number so GetDetails can title it.
            url += '?episode={}'.format(episode)
        ET.SubElement(entity, 'url').text = url
    return xmlify(root)
@app.route('/GetDetails/<int:subject_id>')
def GetDetails(subject_id):
    """Render one Douban subject as an XML <details> document (title,
    rating, plot, cast, ...), proxying all images through /GetImage."""
    # Subject responses are memoized per id in the SQLite cache.
    value = cache_get('subject:{}'.format(subject_id), lambda: requests.get('https://api.douban.com/v2/movie/subject/{}'.format(subject_id)))
    try:
        episode = int(request.args['episode'])
    except:
        episode = None
    title = replace_chinese_season_number(value['title'])
    if episode is not None:
        # Append a zero-padded episode marker, e.g. ' 第02集'.
        title += ' 第{:02d}集'.format(episode)
    root = ET.Element('details')
    ET.SubElement(root, 'title').text = title
    ET.SubElement(root, 'rating').text = '{:.1f}'.format(value['rating']['average'])
    if 'ratings_count' in value:
        ET.SubElement(root, 'votes').text = '{}'.format(value['ratings_count'])
    if 'year' in value:
        ET.SubElement(root, 'year').text = value['year']
    if 'summary' in value:
        ET.SubElement(root, 'plot').text = value['summary']
    if 'originaltitle' in value:
        ET.SubElement(root, 'original_title').text = value['originaltitle']
    if 'directors' in value:
        for director in value['directors']:
            ET.SubElement(root, 'director').text = director.get('name', '')
    # Episode entries get no poster; full subjects proxy the large poster.
    if episode is None and 'images' in value and 'large' in value['images']:
        ET.SubElement(root, 'thumb').text = '{}/GetImage?url={}'.format(WEBROOT, quote_plus(value['images']['large']))
    if 'genres' in value:
        for genre in value['genres']:
            ET.SubElement(root, 'genre').text = genre
    if 'casts' in value:
        for cast in value['casts']:
            actor = ET.SubElement(root, 'actor')
            ET.SubElement(actor, 'name').text = cast['name']
            if 'avatars' in cast and 'large' in cast['avatars']:
                ET.SubElement(actor, 'thumb').text = '{}/GetImage?url={}'.format(WEBROOT, quote_plus(cast['avatars']['large']))
    if 'countries' in value:
        for country in value['countries']:
            ET.SubElement(root, 'country').text = country
    return xmlify(root)
@app.route('/GetImage')
def GetImage():
    """Fetch a remote image through the cache and serve it as a JPEG."""
    url = request.args['url']
    print('GetImage', url)
    payload = cache_get('image:' + url, lambda: requests.get(url), type='bytes')
    return send_file(io.BytesIO(payload), mimetype='image/jpeg', as_attachment=False)
if __name__ == '__main__':
    if app.config['DEBUG']:
        # Flask's built-in development server.
        app.run(port=WEB_PORT)
    else:
        # NOTE(review): gevent.wsgi was renamed gevent.pywsgi in modern
        # gevent releases -- the import at the top of the file may need
        # updating; confirm against the installed gevent version.
        http_server = WSGIServer(('127.0.0.1', WEB_PORT), app)
        try:
            print('WSGIServer start')
            http_server.serve_forever()
        except KeyboardInterrupt:
            print('WSGIServer stopped')
| StarcoderdataPython |
5042746 | <reponame>xolynrac/examen_final_4c<filename>src/structurizr/model/perspective.py
# Copyright (c) 2020, <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide an architectural perspective model."""
from pydantic import Field
from ..abstract_base import AbstractBase
from ..base_model import BaseModel
__all__ = ("Perspective", "PerspectiveIO")
# Pydantic serialization model: the IO counterpart of the Perspective class.
class PerspectiveIO(BaseModel):
    """
    Represent an architectural perspective.
    Architectural perspectives can be applied to elements and relationships.
    Notes:
        See https://www.viewpoints-and-perspectives.info/home/perspectives/ for more
        details of this concept.
    Attributes:
        name (str): The name of the perspective, e.g., 'Security'.
        description (str): A longer description of the architectural perspective.
    """
    # Both fields are required (Field(..., ...)); pydantic validates them as str.
    name: str = Field(..., description="The name of the perspective, e.g., 'Security'.")
    description: str = Field(
        ..., description="A longer description of the architectural perspective."
    )
class Perspective(AbstractBase):
    """
    Represent an architectural perspective that can be applied to elements
    and relationships.

    See https://www.viewpoints-and-perspectives.info/home/perspectives/ for
    background on the concept.

    Attributes:
        name (str): The name of the perspective, e.g., 'Security'.
        description (str): A longer description of the architectural perspective.
    """

    def __init__(self, *, name: str, description: str, **kwargs) -> None:
        """Create a perspective with the given name and description."""
        super().__init__(**kwargs)
        self.name, self.description = name, description

    @classmethod
    def hydrate(cls, perspective_io: PerspectiveIO) -> "Perspective":
        """Build a Perspective from its serialized IO representation."""
        return cls(
            name=perspective_io.name,
            description=perspective_io.description,
        )
| StarcoderdataPython |
11484 | <reponame>spacerunaway/world_recoder
import sys
sys.path.append('../utils')
from utils import *
from doubly_linkedlist import *
def link_chords(chordprogression):
    """Build a doubly-linked list of LinkedChord nodes from a progression.

    A progression mixes three kinds of entries:
      * key objects (Major_Scale / minor_Scale instances) -- these set the
        current key for every chord that follows, and produce no node;
      * chord names found in the CHORD dict -- resolved to the chord value;
      * anything else (e.g. START/END markers) -- stored as-is.

    Each produced node records the resolved chord, the key in effect, and
    the original entry.

    >>> link_chords([START, C_Major, 'C', 'Am', 'F', 'G', END])
    start - C - Am - F - G - end
    """
    current_key = None
    linked = LinkedList()
    for entry in chordprogression:
        # Keys change state but are not appended to the list.
        if type(entry) in (Major_Scale, minor_Scale):
            current_key = entry
            continue
        chord = CHORD[entry] if entry in CHORD else entry
        linked.append(LinkedChord(chord, current_key, entry))
    return linked
def parse_chordprogression(chordprogression):
    # NOTE(review): the linked list built here is discarded, and `cpd`
    # comes from a star import -- presumably a work-in-progress entry
    # point; confirm the intended behaviour before relying on it.
    link_chords(chordprogression)
    cpd(chordprogression)
class Music(object):
    """A piece of music: metadata plus melody/chord/rhythm content.

    Attributes:
        title, composer, arranger (str): identifying metadata.
        key (str): key signature of the piece.
        metre: time signature.
        melody, chordprogression, rhythm (list): musical content.
    """
    # Class-level defaults kept for backward compatibility with any code
    # reading them off the class; __init__ gives every instance its own
    # copies so instances never share mutable state.
    melody = []
    chordprogression = []
    rhythm = []

    def __init__(self, title, composer, key_signature, metre, arranger=''):
        self.title = title
        self.composer = composer
        self.arranger = arranger
        # BUG FIX: previously assigned the undefined name `key` (the
        # parameter is `key_signature`), raising NameError on construction.
        self.key = key_signature
        self.metre = metre
        # Fresh per-instance containers (BUG FIX: the class-level lists
        # were shared by all instances).
        self.melody = []
        self.chordprogression = []
        self.rhythm = []

    def add_subtitle(self, subtitle):
        """Attach a subtitle to the piece."""
        self.subtitle = subtitle

    def add_chordprogression(self, chordprogression):
        """Replace the piece's chord progression."""
        self.chordprogression = chordprogression

    def add_tags(self, tags):
        """Attach free-form tags to the piece."""
        self.tags = tags
class Info(object):
    """Performance metadata: key, beat, tempo (default 90 BPM) and an
    optional rhythm-type label."""

    def __init__(self, key, beat, tempo=90, rhythmtype=''):
        self.key, self.beat = key, beat
        self.tempo, self.rhythmtype = tempo, rhythmtype
| StarcoderdataPython |
6425192 | from django.urls import path
from . import views
from django.contrib.auth import views as auth
# URL routes for the authentication views of this Django app.
urlpatterns = [
    path('login/', views.mylogin, name="login"),
    path('register/', views.myregister, name="register"),
    # Django's built-in LogoutView; redirects to the "home" route afterwards.
    path('logout/', auth.LogoutView.as_view(next_page="home"), name="logout"),
]
| StarcoderdataPython |
255667 | from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests
from selenium import webdriver
# Initialize browser
def init_browser():
    """Create a splinter-driven Chrome browser for scraping.

    NOTE(review): assumes `chromedriver` is resolvable from the working
    directory / PATH -- confirm on the deployment machine.
    """
    # @NOTE: Replace the path with your actual path to the chromedriver
    executable_path = {"executable_path": "chromedriver"}
    return Browser("chrome", **executable_path, headless=False)
def _scrape_news(browser):
    """Return the latest NASA Mars news headline, date and teaser text."""
    browser.visit("https://mars.nasa.gov/news/")
    soup = bs(browser.html, "html.parser")
    return {
        "news_date": soup.find("div", class_="list_date").text,
        "news_title": soup.find("div", class_="content_title").text,
        "information": soup.find("div", class_="article_teaser_body").text,
    }


def _scrape_featured_image(browser):
    """Return the URL of JPL's current featured Mars image."""
    browser.visit("https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars")
    soup = bs(browser.html, "html.parser")
    thumb_src = soup.find('img', class_='thumb')["src"]
    return {"featured_image": 'https://jpl.nasa.gov' + thumb_src}


def _scrape_weather(browser):
    """Return the text of the most recent @MarsWxReport tweet."""
    browser.visit("https://twitter.com/marswxreport?lang=en")
    soup = bs(browser.html, "html.parser")
    tweets = [
        tweet.text
        for tweet in soup.find_all(
            'p', class_="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text")
    ]
    # Raises IndexError when no tweets are found (e.g. layout change),
    # matching the original behaviour of indexing the first tweet.
    return {"mars_weather": tweets[0]}


def _scrape_facts():
    """Return the Mars facts table rendered as a single-line HTML string."""
    url = "https://space-facts.com/mars/"
    facts = pd.DataFrame(pd.read_html(url)[0])
    facts.columns = ['Details', 'Measurements']
    # BUG FIX: set_index(..., inplace=False) was called and its result
    # discarded (a no-op); reassign so the index is actually applied.
    facts = facts.set_index('Details')
    # Preserve the side effect of writing the rendered table to disk.
    facts.to_html('mars_table_pd.html')
    # BUG FIX: DataFrame.to_html(path) returns None, so the original code
    # crashed with AttributeError on `.replace`. Render to a string first.
    return {"mars_info": facts.to_html().replace('\n', ' ')}


def _scrape_hemispheres():
    """Return full-resolution image URLs for the four Mars hemispheres."""
    base = 'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking'
    # BUG FIX: the original dict literal repeated the placeholder key
    # "<NAME>" twice, silently dropping one hemisphere; use the hemisphere
    # names implied by the image URLs instead.
    return {"hemi_pics": {
        "Cerberus": base + '/cerberus_enhanced.tif/full.jpg',
        "Schiaparelli": base + '/schiaparelli_enhanced.tif/full.jpg',
        "Syrtis Major": base + '/syrtis_major_enhanced.tif/full.jpg',
        "Valles Marineris": base + '/valles_marineris_enhanced.tif/full.jpg',
    }}


def scrape():
    """Scrape every Mars data source and return the combined results.

    Returns a dict with keys: news_date, news_title, information,
    featured_image, mars_weather, mars_info and hemi_pics.
    """
    browser = init_browser()
    mars_data = {}
    try:
        mars_data.update(_scrape_news(browser))
        mars_data.update(_scrape_featured_image(browser))
        mars_data.update(_scrape_weather(browser))
        mars_data.update(_scrape_facts())
        mars_data.update(_scrape_hemispheres())
    finally:
        # BUG FIX: the browser/chromedriver process was never shut down,
        # leaking one process per call.
        browser.quit()
    return mars_data
6533989 | <filename>dimdrop/layers/__init__.py
from .clustering_layer import ClusteringLayer
# Public layer classes re-exported at package level.
__all__ = [
    'ClusteringLayer'
]
| StarcoderdataPython |
3324726 | __all__ = ["Settings", "AppCommonContainer", "App", "Server", "BasicResponse"] | StarcoderdataPython |
3468430 | <filename>sdk/bento/example/mountcar.py
#
# bentobox-sdk
# mountain car example simulation
#
from bento import types
from bento.graph.plotter import Plotter
from bento.spec.ecs import ComponentDef, EntityDef
from bento.example.specs import Velocity, Position
from bento.sim import Simulation
from bento.spec.sim import SimulationDef
Action = ComponentDef(
name="action",
schema={
"accelerate": types.int32,
},
)
"""
Action allows the agent to control the car in the Simulation via its acceleration.
Attributes:
**accelerate** (`types.int32`): Set the acceleration of the car.
- 0: Accelerate to the Left
- 1: Don't accelerate
- 2: Accelerate to the Right
"""
State = ComponentDef(
name="state",
schema={
"reward": types.int32,
"ended": types.boolean,
},
)
"""
State tracks the current state of the Simulation.
Attributes:
**reward** (`types.int32`): Reward given to agent for the current Simulation step:
- Reward of 0 is awarded if the agent reached the flag (`Position.x >= 0.5`) on top of the mountain.
- Reward of -1 is penalized if the position of the agent `Position.x < 0.5`
**ended** (`types.boolean`): Whether the Simulation is has ended (`Position.x > 0.5`).
"""
MountainCar = SimulationDef(
name="mountain_car",
components=[Velocity, Position, Action, State],
entities=[
EntityDef(components=[Velocity, Position]),
EntityDef(components=[Action, State]),
],
)
"""
Mountain Car Simulation implemented using `bento`
A car is started at random position the bottom of a valley.
The agent may choose to accelerate the car to the left, right or cease any acceleration.
The objective of the Simulation is to reach the flag on top of the mountain at `Position.x > 0.5`
The simulation ends when the car's `Position.x > 0.5`
"""
@MountainCar.init
def init_fn(g: Plotter):
    """Place the car at a random start position, at rest, and reset the
    agent-facing episode state."""
    # Car starts somewhere near the bottom of the valley with no velocity.
    car = g.entity(components=[Velocity, Position])
    car[Velocity].x = 0.0
    car[Position].x = g.random(-0.6, -0.4)
    env = g.entity(components=[Action, State])
    env[State].reward = 0
    env[State].ended = False
    env[Action].accelerate = 1  # 1 == "don't accelerate"
@MountainCar.system
def sim_fn(g: Plotter):
    """Advance the car physics by one step and update reward/termination."""
    env = g.entity(components=[Action, State])
    car = g.entity(components=[Velocity, Position])
    # process car physics
    # compute velocity based on acceleration action & deceleration due to gravity
    acceleration, gravity, max_speed = 0.001, 0.0025, 0.07
    # apply acceleration based on accelerate action:
    # 0: Accelerate to the Left
    # 1: Don't accelerate
    # 2: Accelerate to the Right
    car[Velocity].x += (env[Action].accelerate - 1) * acceleration
    # apply gravity inverse to the mountain path used by the car
    # the mountain is defined by y = sin(3*x)
    # as such we apply gravity inversely using y = cos(3*x)
    # apply negative gravity as gravity works in the opposite direction of movement
    car[Velocity].x += g.cos(3 * car[Position].x) * (-gravity)
    car[Velocity].x = g.clip(car[Velocity].x, min_x=-max_speed, max_x=max_speed)
    # compute new position from current velocity
    min_position, max_position = -1.2, 0.6
    car[Position].x += car[Velocity].x
    car[Position].x = g.clip(car[Position].x, min_position, max_position)
    # collision: stop car when colliding with min_position
    if car[Position].x <= min_position:
        car[Velocity].x = 0.0
    # resolve simulation state: reward and simulation completion
    # (0 at the flag, -1 otherwise; episode ends past x = 0.5)
    env[State].reward = 0 if car[Position].x >= 0.5 else -1
    env[State].ended = True if car[Position].x > 0.5 else False
| StarcoderdataPython |
1793097 | <reponame>WilliamMayor/scytale.xyz<gh_stars>1-10
from scytale.ciphers.base import Cipher
from scytale.exceptions import ScytaleError
class RailFence(Cipher):
    """Rail Fence transposition cipher.

    The plaintext is written in a zig-zag across `key` rails and read off
    row by row. `decrypt` inverts that permutation.
    """
    name = "RailFence"
    default = 5

    def __init__(self, key=None):
        self.key = self.validate(key)

    def validate(self, key):
        """Coerce *key* to a positive rail count.

        Raises:
            ScytaleError: if *key* is not a number or is less than 1.
        """
        if key is None:
            key = self.default
        try:
            # BUG FIX: narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            key = int(key)
        except (TypeError, ValueError):
            raise ScytaleError("The Rail Fence key should be a number")
        # BUG FIX: keys < 1 previously crashed later with ZeroDivisionError
        # inside fence(); reject them up front with a clear error.
        if key < 1:
            raise ScytaleError("The Rail Fence key should be a positive number")
        return key

    def fence(self, text):
        """Return the elements of *text* permuted into rail-fence order."""
        # Rail index cycle, e.g. key=3 -> 0,1,2,1 repeated.
        # BUG FIX: `or [0]` makes a single rail the identity permutation
        # instead of dividing by zero below.
        rail_cycle = list(range(self.key - 1)) + list(range(self.key - 1, 0, -1)) or [0]
        fence = [[None] * len(text) for n in range(self.key)]
        for n, x in enumerate(text):
            fence[rail_cycle[n % len(rail_cycle)]][n] = x
        return [c for rail in fence for c in rail if c is not None]

    def encrypt(self, plaintext):
        """Encrypt *plaintext* (upper-cased and cleaned) row by row."""
        plaintext = self.clean(plaintext.upper())
        return "".join(self.fence(plaintext))

    def decrypt(self, ciphertext):
        """Invert the fence permutation.

        fence(range(n)) yields, for each ciphertext index, the plaintext
        index it came from; scatter accordingly.  (Replaces the original
        O(n^2) `pos.index(n)` lookup with an O(n) inverse mapping.)
        """
        positions = self.fence(range(len(ciphertext)))
        plaintext = [None] * len(ciphertext)
        for cipher_index, plain_index in enumerate(positions):
            plaintext[plain_index] = ciphertext[cipher_index]
        return "".join(plaintext)
| StarcoderdataPython |
5069081 | #!/usr/bin/env python
#coding=utf-8
import os
import time
from instapush import Instapush, App
class InstaPushNotify():
    """Send push notifications through the Instapush service.

    Credentials are read from the ``instapush_id`` / ``instapush_secret``
    environment variables.
    """
    @staticmethod
    def notify(title, check_num=0, type_info=1):
        """Push a notification.

        Args:
            title: notification title/tracker value.
            check_num: extra counter sent with the 'get_list' event.
            type_info: 1 -> 'get_list' event, anything else -> 'zhihufav'
                event stamped with the current local time.
        """
        app = App(appid=os.getenv('instapush_id'), secret=os.getenv('instapush_secret'))
        try:
            if type_info == 1:
                res = app.notify(event_name='get_list', trackers={'title': title, 'check_num': check_num})
            else:
                date_info = time.strftime('%Y-%m-%d %H:%M:%S')
                res = app.notify(event_name='zhihufav', trackers={'title': title, 'date': date_info})
            print(res)
        except Exception as e:  # BUG FIX: `except Exception, e` is Python-2-only syntax
            print(Exception)
            print(e)
# Manual smoke test: send a zhihufav-style notification when run directly.
if __name__ == '__main__':
    InstaPushNotify.notify('收藏', type_info=2)
6469220 | <filename>mamba/sema/types.py
class Type(object):
    """Base class for all semantic types.

    Subclasses override ``to_string``/``equals``; the default notion of
    equality is object identity.
    """

    def __init__(self, description=None):
        self._description = description

    def specialized(self, args: dict):
        """Return this type with the given placeholder bindings applied."""
        return SpecializedType(type=self, args=args)

    def equals(self, other, memo: dict = None) -> bool:
        # Identity comparison; *memo* is threaded through by subclasses
        # to break cycles between recursive types.
        return self is other

    def to_string(self, memo: set) -> str:
        return f'<Type at {hex(id(self))}>'

    def __str__(self) -> str:
        # Prefer the explicit description when one was supplied.
        return self._description if self._description is not None else self.to_string(set())

    def __repr__(self) -> str:
        return str(self)
class SpecializedType(Type):
    """A type together with bindings for some of its placeholders."""

    def __init__(self, type: Type, args: dict):
        super().__init__()
        self.type = type
        self.args = args

    def to_string(self, memo: set) -> str:
        # Render as '[ name = binding, ... ]' prefixed to the base type.
        bindings = ', '.join('{} = {}'.format(name, bound) for name, bound in self.args.items())
        return '[ ' + bindings + ' ]' + self.type.to_string(memo)
class GroundType(Type):
    """A primitive, atomic type identified purely by its name (e.g. 'Int')."""
    def __init__(self, name: str):
        super().__init__()
        self.name = name
    def to_string(self, memo: set) -> str:
        # Ground types render as their bare name.
        return self.name
class ListType(Type):
    """The generic list type, parameterized by an 'Element' placeholder."""
    def __init__(self, element_type=None):
        super().__init__()
        # A fresh placeholder per instance; element_type defaults to it
        # when the list is still generic.
        self.placeholder = TypePlaceholder(name='Element')
        self.element_type = element_type or self.placeholder
    @property
    def placeholders(self):
        # Only a still-generic list (element is its own placeholder)
        # exposes placeholders; identity check is deliberate.
        return [self.placeholder] if (self.element_type is self.placeholder) else None
    def equals(self, other, memo: dict = None) -> bool:
        if not isinstance(other, ListType):
            return False
        # NOTE(review): element_type is never None given __init__'s default;
        # this branch looks defensive -- confirm whether it is reachable.
        if self.element_type is None:
            return other.element_type is None
        return self.element_type.equals(other.element_type, memo=memo)
    def specialized(self, args: dict):
        # '_0' is the positional-argument spelling of the first placeholder;
        # otherwise the named 'Element' binding is expected.
        if set(args.keys()) == { '_0' }:
            return ListType(element_type=args['_0'])
        else:
            return ListType(element_type=args['Element'])
    def to_string(self, memo: set) -> str:
        if self.element_type:
            return f'List[ Element = {self.element_type} ]'
        else:
            return 'List'
class TypeVariable(Type):
    """A fresh inference variable with a process-wide unique id."""
    # Monotonically increasing counter shared by all instances.
    next_id = 0
    def __init__(self):
        super().__init__()
        self.id = TypeVariable.next_id
        TypeVariable.next_id += 1
    def to_string(self, memo: set) -> str:
        # Rendered as a double-underscore name, e.g. '__3'.
        return f'__{self.id}'
class TypeAlias(object):
    """An alias that forwards to its *subject* type.
    NOTE(review): unlike the other wrappers this derives from ``object``,
    not ``Type`` -- confirm whether that is intentional.
    """
    def __init__(self, subject):
        super().__init__()
        self.subject = subject
    def equals(self, other, memo: dict = None) -> bool:
        # An alias only equals another alias with an equal subject.
        if not isinstance(other, TypeAlias):
            return False
        return self.subject.equals(other.subject, memo=memo)
    def to_string(self, memo: set) -> str:
        # A leading '~' marks the alias in rendered type expressions.
        return f'~{self.subject.to_string(memo)}'
class TypePlaceholder(Type):
    """A named generic placeholder (e.g. 'Element') awaiting specialization."""
    def __init__(self, name: str):
        super().__init__()
        self.name = name
    def to_string(self, memo: set) -> str:
        return self.name
class ObjectType(Type):
    """A structural object type: named properties plus optional placeholders."""

    def __init__(self, properties=None, placeholders=None):
        super().__init__()
        self.properties = properties or {}
        self.placeholders = placeholders or []
        for ph in self.placeholders:
            assert isinstance(ph, TypePlaceholder)

    def equals(self, other, memo: dict = None) -> bool:
        """Structural equality; *memo* breaks cycles between recursive types."""
        memo = memo if memo is not None else {}
        pair = (self, other)
        if pair in memo:
            return memo[pair]
        # Optimistically mark the pair equal so recursive references terminate.
        memo[pair] = True
        if (
            not isinstance(other, ObjectType) or
            len(self.properties) != len(other.properties) or
            len(self.placeholders) != len(other.placeholders)
        ):
            memo[pair] = False
            return False
        for prop_name in self.properties:
            # BUG FIX: the original returned False when the property types
            # *did* match (missing `not` on the recursive equals call),
            # making any two non-empty object types compare unequal.
            if (
                (prop_name not in other.properties) or
                not self.properties[prop_name].equals(other.properties[prop_name], memo=memo)
            ):
                memo[pair] = False
                return False
        for i in range(len(self.placeholders)):
            # BUG FIX: same inversion for the placeholder comparisons.
            if not self.placeholders[i].equals(other.placeholders[i], memo=memo):
                memo[pair] = False
                return False
        return True

    def to_string(self, memo: set) -> str:
        if self.placeholders:
            # BUG FIX: str.join requires strings; joining the placeholder
            # objects directly raised TypeError.
            placeholders = '[ ' + ', '.join(str(ph) for ph in self.placeholders) + ' ]'
        else:
            placeholders = ''
        # Recursive types render as '{ ... }' once already being printed.
        if self in memo:
            return placeholders + '{ ... }'
        memo.add(self)
        props = [f'{key}: {value.to_string(memo)}' for key, value in self.properties.items()]
        return placeholders + '{ ' + ', '.join(props) + ' }'

    def __len__(self):
        return len(self.properties)

    def __iter__(self):
        return iter(self.properties)

    def __getitem__(self, item):
        return self.properties[item]
class UnionType(Type):
    """A union of alternative types, rendered 'A | B | ...'.
    NOTE(review): no ``equals`` override -- unions compare by identity via
    ``Type.equals``; confirm that is intended.
    """
    def __init__(self, types):
        super().__init__()
        self.types = types
    def to_string(self, memo: set) -> str:
        return ' | '.join(str(t) for t in self.types)
class FunctionType(Type):
    """A function type ``domain -> codomain`` with optional placeholders."""
    def __init__(self, domain, codomain, placeholders=None):
        super().__init__()
        self.domain = domain
        self.codomain = codomain
        self.placeholders = placeholders or []
        for ph in self.placeholders:
            assert isinstance(ph, TypePlaceholder)
    def to_string(self, memo: set) -> str:
        # Placeholders render as a '[ A, B ]' prefix before the arrow type.
        if self.placeholders:
            placeholders = '[ ' + ', '.join(map(str, self.placeholders)) + ' ]'
        else:
            placeholders = ''
        return placeholders + f'{self.domain} -> {self.codomain}'
# Singleton instances for the built-in ground types and the generic list.
Nothing = GroundType('Nothing')
Bool = GroundType('Bool')
Int = GroundType('Int')
Float = GroundType('Float')
String = GroundType('String')
List = ListType()
| StarcoderdataPython |
1719107 | class dots(object):
    def get_keyword_names(self):
        # Robot Framework dynamic-library API: names of the keywords this
        # library provides (here, a single dotted name).
        return ['In.name.conflict']
    def run_keyword(self, name, args):
        # Dynamic-library dispatch entry point; joins the arguments with '-'.
        return '-'.join(args)
| StarcoderdataPython |
4978213 | import unittest
from rover import Rover, Move, Turn
from rover import Orientation
class RoverTests(unittest.TestCase):
    """Unit tests for Rover construction, movement and turning.

    FIX: uses assertEqual throughout -- assertEquals is a deprecated alias
    that was removed in Python 3.12.
    """

    def test_should_create_new_rover_with_default_coordinates(self):
        r = Rover()
        self.assertEqual(r.position, (0, 0))

    def test_should_create_new_rover_with_default_direction(self):
        r = Rover()
        self.assertEqual(r.orientation, Orientation.North)

    def test_should_create_new_rover_with_given_coordinates(self):
        r = Rover((1, 2))
        self.assertEqual(r.position, (1, 2))

    def test_should_create_new_rover_with_given_coordinates_and_direction(self):
        r = Rover((1, 2), orientation=Orientation.South)
        self.assertEqual(r.position, (1, 2))
        self.assertEqual(r.orientation, Orientation.South)

    def test_should_be_able_to_move_one_forward(self):
        r = Rover()
        r.move(Move.FORWARD)
        self.assertEqual(r.position, (0, 1))

    def test_should_be_able_to_move_one_backward(self):
        r = Rover((1, 2))
        r.move(Move.BACKWARD)
        self.assertEqual(r.position, (1, 1))

    def test_should_be_able_to_move_left(self):
        r = Rover(orientation=Orientation.North)
        r.turn(Turn.LEFT)
        # msg=None dropped: it is the default and added no information.
        self.assertEqual(r.orientation, Orientation.West)

    def test_should_be_able_to_turn_around(self):
        r = Rover(orientation=Orientation.North)
        r.turn(Turn.LEFT)
        r.turn(Turn.LEFT)
        r.turn(Turn.LEFT)
        r.turn(Turn.LEFT)
        self.assertEqual(r.orientation, Orientation.North)
| StarcoderdataPython |
8025267 | '''Process callbacks from users' interactions with keyboards'''
from __future__ import annotations
from contextlib import suppress
from aiogram.utils.exceptions import MessageNotModified
from .markups import sections_kb
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from aiogram import Dispatcher
from aiogram.types import CallbackQuery
from aiogram.dispatcher import FSMContext
def register_callbacks(dispatcher: Dispatcher):
    """Wire this module's callback-query handlers into *dispatcher*."""
    # Only react to callbacks whose payload starts with 'add' or 'del'.
    dispatcher.register_callback_query_handler(
        update_sections_info,
        lambda query: query.data[:3] in ('del', 'add'))
async def update_sections_info(callback_query: CallbackQuery, state: FSMContext):
    '''Toggle a section in the user's FSM state and refresh the keyboard.
    Callback payloads look like "add_<section>" / "del_<section>".
    '''
    # The section name is everything after the last underscore.
    section = callback_query.data.split("_")[-1]
    sections = (await state.get_data())['sections']
    if callback_query.data.startswith("add"):
        sections.append(section)
    elif len(sections) > 1:
        # Removal is only allowed while more than one section remains.
        sections.remove(section)
    await state.update_data(sections=sections)
    # Telegram raises MessageNotModified when the markup is unchanged;
    # that is harmless here, so ignore it.
    with suppress(MessageNotModified):
        await callback_query.message.edit_reply_markup(
            reply_markup=await sections_kb(state))
    await callback_query.answer()
| StarcoderdataPython |
9737282 | import time
from collections import Iterable
import pytest
from pydent.base import ModelBase
from pydent.browser import Browser
from pydent.browser import BrowserException
from pydent.exceptions import ForbiddenRequestError
NUM_MODELS = 10
def check_model_in_cache(model, cache):
    """Return '' if *model* is cached under its class name and id,
    otherwise a human-readable error string describing what is missing."""
    data = cache.get(model.__class__.__name__, {})
    if not data:
        return "Cache does not have model class: {}".format(model.__class__.__name__)
    m = data.get(model.id, None)
    if not m:
        return "Cache for {} does not have model id {}".format(
            model.__class__.__name__, model.id
        )
    if m is not model:
        # BUG FIX: the original message had no format placeholders, so the
        # two object ids passed to .format() were silently dropped.
        return "Model in cache has a different id: {} != {}".format(id(m), id(model))
    return ""
def is_model(v):
    """Return True when *v*'s class is (a subclass of) ModelBase.

    Checks the concrete class via issubclass(type(v), ...); for ordinary
    classes this behaves like isinstance(v, ModelBase).
    """
    return issubclass(type(v), ModelBase)
def collect_deserialized_samples(model):
    """Collect (key, model) pairs from *model*'s deserialized data.

    Values that are models are collected directly; iterable values have
    their model elements collected individually.  Strings are skipped
    explicitly because they are iterable but never contain models.
    """
    data = model._get_deserialized_data()
    deserialized_models = []
    for k, v in data.items():
        if is_model(v):
            deserialized_models.append((k, v))
        elif issubclass(type(v), str):
            # Strings are Iterable; exclude them before the Iterable branch.
            pass
        elif issubclass(type(v), Iterable):
            for _v in v:
                if is_model(_v):
                    deserialized_models.append((k, _v))
    return deserialized_models
def check_model_deserialized_data(model, cache):
    """Check that every model nested in *model*'s deserialized data is in
    the cache, returning one '<child>.<key>: <error>' message per miss."""
    messages = []
    for key, child in collect_deserialized_samples(model):
        err = check_model_in_cache(child, cache)
        if err:
            # Messages are keyed by the nested (child) model's repr.
            messages.append("{model}.{key}: {err}".format(model=child, key=key, err=err))
    return messages
def check_model(model, cache):
    """Check that ``model`` itself is cached and, if so, that all of its
    deserialized models are cached as well.

    Returns a list of error messages (empty on success).
    """
    top_level_err = check_model_in_cache(model, cache)
    if top_level_err:
        return [top_level_err]
    return check_model_deserialized_data(model, cache)
def check_cache_consistency(cache):
    """Validate the whole cache: every cached model, and everything attached
    to it, must itself be cached. Returns a list of error messages."""
    messages = []
    for model_dict in cache.values():
        for model in model_dict.values():
            for err in check_model(model, cache):
                messages.append("{}: {}".format(model, err))
    return messages
def check_models_have(models, key):
    """Return an error string if any model has NOT deserialized ``key``;
    return ``None`` when every model has it."""
    missing = sum(1 for m in models if not m.is_deserialized(key))
    if missing:
        return "errors: {}/{} models {} don't have key {}".format(
            missing, len(models), models[0].__class__.__name__, key
        )
def check_models_dont_have(models, key):
    """Return an error string if any model HAS deserialized ``key``;
    return ``None`` when no model has it."""
    present = sum(1 for m in models if m.is_deserialized(key))
    if present:
        return "errors: {}/{} models {} have key {}".format(
            present, len(models), models[0].__class__.__name__, key
        )
class TestRaisesExceptions:
    """Misspelled relation names should raise BrowserException with a
    'Did you mean' suggestion."""

    def _browser_with_items(self, session):
        """Return a Browser whose cache holds the last NUM_MODELS items."""
        browser = Browser(session)
        browser.last(NUM_MODELS, "Item")
        return browser

    def test_get_raises_keyword_err(self, session):
        browser = self._browser_with_items(session)
        with pytest.raises(BrowserException) as excinfo:
            browser.get("Item", "data_assoc")
        assert "Did you mean" in str(excinfo.value)

    def test_get_raises_keyword_err_in_dict(self, session):
        browser = self._browser_with_items(session)
        with pytest.raises(BrowserException) as excinfo:
            browser.get("Item", {"sample": "sample_typ"})
        assert "Did you mean" in str(excinfo.value)
class TestGetAPI:
    """'get' is a dispatching function with a few options.

    These tests exercise each dispatch form.
    """

    def test_get(self, session):
        """A bare model name returns whatever is currently in the cache."""
        browser = Browser(session)
        assert browser.get("Sample") == []
        cached = browser.last(NUM_MODELS, "Sample")
        assert browser.get("Sample") == cached

    def test_get_relation(self, session):
        """A model name plus a relation string returns the related models."""
        browser = Browser(session)
        assert browser.get("Sample", "sample_type") == []
        browser.last(NUM_MODELS, "Sample")
        related = browser.get("Sample", "sample_type")
        assert related
        assert all(r.__class__.__name__ == "SampleType" for r in related)

    def test_get_relation_with_list_of_models(self, session):
        """A list of models plus a relation string also returns the related
        models."""
        browser = Browser(session)
        assert browser.get("Sample", "sample_type") == []
        fetched = browser.last(NUM_MODELS, "Sample")
        related = browser.get(fetched, "sample_type")
        assert related
        assert all(r.__class__.__name__ == "SampleType" for r in related)

    def test_get_nested_relation_from_dict(self, session):
        """A nested relation dict returns a dict keyed by relation name, and
        leaves the cache consistent."""
        browser = Browser(session)
        relations = {"sample": {"sample_type"}, "object_type": {}}
        assert browser.get("Item", relations)
        browser.last(NUM_MODELS, "Item")
        results = browser.get("Item", relations)
        for relation_key in ("sample", "sample_type", "object_type"):
            assert relation_key in results
            assert results[relation_key]
        assert not check_cache_consistency(browser.model_cache)
@pytest.mark.parametrize(
    "using_cache",
    [True, False],
    ids=["Using cache (consistent)", "No cache (expect inconsistent)"],
)
def test_consistent_cache_with_attribute_access(session, using_cache):
    """When we do not use the cache, the cache will remain inconsistent when
    attributes are called."""

    def make_session():
        # Cached sessions share one model cache across requests; plain
        # sessions do not.
        return session.with_cache() if using_cache else session()

    with make_session() as sess:
        browser = sess.browser
        samples = browser.last(NUM_MODELS, "Sample")
        assert not check_models_dont_have(samples, "sample_type")

        browser.retrieve(samples, "sample_type")
        for sample in samples:
            assert sample.is_deserialized("sample_type")
            assert not check_model_in_cache(sample, browser.model_cache)

            # Reset the relation and touch the attribute again; with a cached
            # session this should transparently restore the cached result.
            sample.reset_field("sample_type")
            assert not sample.is_deserialized("sample_type")
            sample.sample_type

        inconsistencies = check_cache_consistency(browser.model_cache)
        if using_cache:
            assert not inconsistencies
        else:
            assert inconsistencies
@pytest.mark.parametrize("method", ["get", "retrieve"])
def test_consistency_with_has_many(session, method):
browser = Browser(session)
samples = browser.last(NUM_MODELS, "Sample")
sample_types = getattr(browser, method)(samples, "sample_type")
new_samples = getattr(browser, method)(sample_types[:1], "samples")
# test at least one sample is in the model
passes = False
for s in new_samples:
errs = check_model_in_cache(s, browser.model_cache)
if not errs:
passes = True
assert passes, "At least one of the recalled samples should be in the model cache"
@pytest.mark.parametrize("method", ["get", "retrieve"])
def test_consistency_with_has_many_for_session_cache(session, method):
with session.with_cache() as sess:
browser = sess.browser
samples = browser.last(NUM_MODELS, "Sample")
sample_types = getattr(browser, method)(samples, "sample_type")
new_samples = sample_types[0].samples
# test at least one sample is in the model
passes = False
for s in new_samples:
errs = check_model_in_cache(s, browser.model_cache)
if not errs:
passes = True
assert passes, (
"At least one of the recalled samples should be in the " "model cache"
)
class TestGet:
    """Verify that ``get``/``retrieve`` keep the browser's model cache
    consistent, with and without ``force_refresh``."""

    def _check_consistent_cache(self, session, method, force_refresh):
        """Shared body for the two tests below (their bodies were previously
        duplicated verbatim): fetch sample_types via the named browser method
        and assert full cache consistency."""
        browser = session.browser
        samples = browser.last(NUM_MODELS, "Sample")
        errs = check_models_dont_have(samples, "sample_type")
        assert not errs

        method = getattr(browser, method)
        if force_refresh is None:
            sample_types = method(samples, "sample_type")
        else:
            sample_types = method(samples, "sample_type", force_refresh=force_refresh)
        assert sample_types, "sample_types_should_be_returned"

        # the browser DOES attach the deserialized data to the models
        errs = check_models_have(samples, "sample_type")
        assert not errs

        for sample in samples:
            assert sample.is_deserialized("sample_type")
            errs = check_model_deserialized_data(sample, browser.model_cache)
            assert not errs

        for sample_type in sample_types:
            errs = check_model_in_cache(sample_type, browser.model_cache)
            assert not errs

        for sample in samples:
            errs = check_model_in_cache(sample, browser.model_cache)
            assert not errs

        errs = check_cache_consistency(browser.model_cache)
        assert not errs

    @pytest.mark.parametrize("method", ["get", "retrieve"], ids=["get()", "retrieve()"])
    @pytest.mark.parametrize(
        "force_refresh",
        [True, False, None],
        ids=["force_refresh", "no_refresh", "default"],
    )
    def test_consistent_cache_with_get_or_retrieve(
        self, session, method, force_refresh
    ):
        """We expect 'retrieve' or 'get' to correctly gather the sample_types
        and place them in the model cache."""
        self._check_consistent_cache(session, method, force_refresh)

    @pytest.mark.parametrize("method", ["get", "retrieve"], ids=["get()", "retrieve()"])
    @pytest.mark.parametrize(
        "force_refresh",
        [True, False, None],
        ids=["force_refresh", "no_refresh", "default"],
    )
    def test_consistent_cache_with_get_or_recursive_retrieve(
        self, session, method, force_refresh
    ):
        """We expect 'retrieve' or 'get' to correctly gather the sample_types
        and place them in the model cache."""
        self._check_consistent_cache(session, method, force_refresh)
class TestForceRefresh:
    """Behavior of the ``force_refresh`` flag on ``get``/``retrieve``."""

    def test_retrieve_same_n_samples(self, session):
        """A force-refreshed retrieve returns the same number of sample_types
        as a plain retrieve on a fresh browser."""
        browser = Browser(session)
        samples = browser.last(NUM_MODELS, "Sample")
        sample_types = browser.retrieve(samples, "sample_type")
        assert sample_types
        browser2 = Browser(session)
        samples2 = browser2.last(NUM_MODELS, "Sample")
        sample_types2 = browser2.retrieve(samples2, "sample_type", force_refresh=True)
        assert len(sample_types) == len(sample_types2)

    def test_get_same_n_samples(self, session):
        """Same as above, but using ``get`` instead of ``retrieve``."""
        browser = Browser(session)
        samples = browser.last(NUM_MODELS, "Sample")
        sample_types = browser.get(samples, "sample_type")
        assert sample_types
        browser2 = Browser(session)
        samples2 = browser2.last(NUM_MODELS, "Sample")
        sample_types2 = browser2.get(samples2, "sample_type", force_refresh=True)
        assert len(sample_types) == len(sample_types2)

    @pytest.mark.parametrize(
        "force_refresh", [False, None], ids=["no_refresh", "default"]
    )
    @pytest.mark.parametrize(
        "func_name", ["get", "retrieve"], ids=["get()", "retrieve()"]
    )
    def test_retrieve_get_refresh(self, session, force_refresh, func_name):
        """Without force_refresh (explicit False or the default), repeated
        calls should return the EXACT same model instances every time."""
        browser = Browser(session)
        samples = browser.last(NUM_MODELS, "Sample")
        def method():
            if force_refresh is None:
                return getattr(browser, func_name)(samples, "sample_type")
            else:
                return getattr(browser, func_name)(
                    samples, "sample_type", force_refresh=force_refresh
                )
        sample_types = method()
        sample_types2 = method()
        assert sample_types
        assert len(sample_types) == len(sample_types2)
        # no new items: both calls must return the identical objects, so the
        # union of object identities is no larger than a single result set
        total_num_items = len({id(x) for x in sample_types + sample_types2})
        assert total_num_items == len(sample_types)
        for st in sample_types:
            errs = check_model_in_cache(st, browser.model_cache)
            assert not errs
        for st in sample_types2:
            errs = check_model_in_cache(st, browser.model_cache)
            assert not errs
    # NOTE(review): the tests below are intentionally disabled; kept for
    # reference on intended force_refresh semantics.
    # @pytest.mark.parametrize('func_name', ['get', 'retrieve'],
    # ids=['get()', 'retrieve()'])
    # def test_retrieve_get_no_refresh(self, session, func_name):
    #     """If force refresh is ON, then retrieve should get the EXACT same models
    #     every time."""
    #     browser = Browser(session)
    #     samples = browser.last(NUM_MODELS, 'Sample')
    #
    #     def method():
    #         return getattr(browser, func_name)(samples, 'sample_type', force_refresh=True)
    #     sample_types = method()
    #     sample_types2 = method()
    #     assert sample_types
    #     assert len(sample_types) == len(sample_types2)
    #
    #     # no new items
    #     total_num_items = len(set([id(x) for x in sample_types + sample_types2]))
    #     assert total_num_items == 2 * len(sample_types)
    # @pytest.mark.parametrize('force_refresh', [False, True],
    # ids=['no_refresh', 'force_refresh'])
    # @pytest.mark.parametrize('func_name', ['get', 'retrieve'],
    # ids=['get()', 'retrieve()'])
    # def test_no_refresh_requires_no_requests(self, session, func_name, force_refresh):
    #     """If force refresh is ON, then retrieve should get the EXACT same models
    #     every time."""
    #
    #     with session() as new_session:
    #         browser = Browser(new_session)
    #         samples = browser.last(NUM_MODELS, 'Sample')
    #
    #         def method():
    #             return getattr(browser, func_name)(samples, 'sample_type', force_refresh=True)
    #         method()
    #
    #         browser.session.using_requests = False
    #         if force_refresh:
    #             with pytest.raises(ForbiddenRequestError):
    #                 method()
def test_speed_improvements(session):
    """The browser's batched queries should beat naive nested attribute
    loops in wall-clock time."""
    n = 10

    # Naive approach: one request per attribute access.
    loop_start = time.time()
    loop_requests_before = session._aqhttp.num_requests
    items = session.Item.last(n)
    samples = [item.sample for item in items]
    object_types = [item.object_type for item in items]
    sample_types = [sample.sample_type for sample in samples if sample]
    for st in sample_types:
        st.field_types
    loop_elapsed = time.time() - loop_start
    loop_requests = session._aqhttp.num_requests - loop_requests_before

    # Browser approach: batched retrieval of the same relations.
    browser_start = time.time()
    browser_requests_before = session._aqhttp.num_requests
    browser = Browser(session)
    items = browser.last(n, "Item")
    browser.get(items, {"sample": {"sample_type": "field_types"}, "object_type": []})
    browser_elapsed = time.time() - browser_start
    browser_requests = session._aqhttp.num_requests - browser_requests_before

    fold_diff = loop_elapsed / browser_elapsed
    print("Browser is {} times faster than nested for-loops".format(fold_diff))
    print("Browser uses {} requests, while for-loops use {}".format(
        browser_requests, loop_requests))
    assert fold_diff > 1
| StarcoderdataPython |
3580394 | from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import object
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist, FieldError, ValidationError
from django.contrib.contenttypes.models import ContentType
from django.db.utils import IntegrityError
from isisdata.models import *
import datetime, iso8601, string
import xml.etree.ElementTree as ET
import os
import copy
import json
import re
import pprint
from collections import Counter
def _update_or_create(model, pk, data):
try:
instance = model.objects.get(pk=pk)
for key, value in list(data.items()):
if key == 'id':
pass
setattr(instance, key, value)
instance.save()
except model.DoesNotExist:
instance = model.objects.create(**data)
for key, value in list(data.items()):
print(key, value)
if getattr(instance, key) != value:
setattr(instance, key, value)
instance.save()
return instance
# Build a case-insensitive lookup from the language fixture: both the
# language's display name and its primary key map (lower-cased) to the pk.
# NOTE(review): this runs at import time and assumes the fixture path is
# valid relative to the current working directory — confirm for deployments.
with open('isisdata/fixtures/language.json', 'r') as f:
    languages = json.load(f)
languageLookup = {l['fields']['name'].lower(): l['pk'] for l in languages}
languageLookup.update({l['pk'].lower(): l['pk'] for l in languages})
def fast_iter(context, func, tag, *extra):
    """Stream elements from an :func:`ElementTree.iterparse` context,
    applying ``func(elem, *extra)`` to each and clearing every element whose
    (namespace-stripped) tag equals ``tag`` to keep memory bounded."""
    namespace = '{http://www.filemaker.com/fmpdsoresult}'
    for _event, element in context:
        func(element, *extra)
        if element.tag.replace(namespace, '') == tag:
            element.clear()
    del context
def _strip_non_numbers(s):
transmap = string.maketrans('','')
nodigs = transmap.translate(transmap, string.digits)
return s.translate(transmap, nodigs)
class FMPDSOParser(object):
    """
    Parses FileMaker's FMPDSO XML format into field-data that can be ingested
    into the IsisCB Explore ORM.
    """
    fm_namespace = '{http://www.filemaker.com/fmpdsoresult}'
    # Timestamp formats observed in the FileMaker export, tried in order.
    datetime_formats = [
        '%m/%d/%Y %I:%M:%S %p',
        '%m/%d/%Y %I:%M %p'
    ]
    date_formats = [
        '%m/%d/%Y',
        '%Y'
    ]
    chunk_size = 10000  # Number of instances to include in each fixture file.
    # NOTE(review): the two lambdas below appear unused within this module;
    # kept for backward compatibility.
    as_int = lambda x: int(x)
    as_upper = lambda x: x.upper()

    @staticmethod
    def _as_datetime(model_name, fm_field, fm_value):
        """
        Attempt to coerce a value to ``datetime``.

        Tries each known datetime/date format in turn, then falls back to
        ISO-8601 parsing. Raises ``ValueError`` if nothing matches.
        """
        if len(fm_value) < 4:
            # Zero-pad short year values (e.g. '87' -> '0087') so '%Y' can
            # parse them. BUG FIX: ``string.zfill`` is Python 2 only; use
            # the equivalent ``str.zfill`` method instead.
            fm_value = fm_value.zfill(4)
        for format in FMPDSOParser.datetime_formats + FMPDSOParser.date_formats:
            try:
                return datetime.datetime.strptime(fm_value, format)
            except ValueError:
                pass
        try:
            return iso8601.parse_date(fm_value)
        except ValueError:
            pass
        raise ValueError('Could not coerce value to datetime: %s' % fm_value)

    @staticmethod
    def _to_int(model_name, fm_field, fm_value):
        """Coerce to int; NOTE the quirk that TypeError (e.g. None) yields 1."""
        try:
            return int(fm_value)
        except TypeError:
            return 1

    @staticmethod
    def _to_float(model_name, fm_field, fm_value):
        """Coerce to float (ValueError propagates)."""
        return float(fm_value)

    @staticmethod
    def _to_date(model_name, fm_field, fm_value):
        """Coerce to a ``date`` via :meth:`_as_datetime`."""
        return FMPDSOParser._as_datetime(model_name, fm_field, fm_value).date()

    @staticmethod
    def _try_int(model_name, fm_field, fm_value):
        """Coerce to int if possible; otherwise return the raw value."""
        try:
            return int(fm_value)
        except ValueError:
            return fm_value

    @staticmethod
    def _try_positive_int(model_name, fm_field, fm_value):
        """Coerce to a non-negative int, stripping non-digits if necessary;
        fall back to the raw value."""
        attempts = [
            lambda x: abs(int(x)),
            lambda x: abs(int(_strip_non_numbers(x)))
        ]
        for func in attempts:
            try:
                return func(fm_value)
            except ValueError:
                pass
        return fm_value

    @staticmethod
    def _handle_record_status(model_name, fm_field, fm_value):
        """
        Interpret the free-form RecordStatus field.

        Returns a ``(public, status, explanation)`` tuple. Empty values
        default to an active, public record.
        """
        fm_value = fm_value.title()
        if not fm_value:
            return True, u'Active', u'Set active by default'
        match = re.match('(In)?[aA]ctive(.*)', fm_value)
        if match:
            public_raw, explanation_raw = match.groups()
            public = False if public_raw else True
            explanation = explanation_raw.strip()
            status = 'Active' if public else 'Inactive'
        else:
            match = re.match('Redirect(.*)', fm_value)
            if match:
                explanation_raw = match.groups()[0].strip()
                public, status, explanation = False, u'Redirect', explanation_raw
            else:
                public, status, explanation = True, u'Active', u''
        return public, status, explanation

    @staticmethod
    def _handle_attribute_value(model_name, fm_field, fm_value):
        """Pair a date value with its qualifier ('BGN' begin / 'END' end)."""
        if fm_field == 'DateBegin':
            return (fm_value, 'BGN')
        elif fm_field == 'DateEnd':
            return (fm_value, 'END')

    @staticmethod
    def _handle_language(model_name, fm_field, fm_value):
        """Resolve a language name or code via the module-level lookup."""
        return languageLookup.get(fm_value.lower(), None)

    @staticmethod
    def _handle_citation_fk(model_name, fm_field, fm_value):
        """Treat the 'CBB0' sentinel as a missing citation reference."""
        if fm_value == 'CBB0':
            return None
        return fm_value

    # Maps FileMaker field names onto ORM field names. Top-level string keys
    # apply to all models; the nested dicts (keyed by lowercase model name)
    # take precedence, and an explicit ``None`` value means "skip this field
    # for this model". Tuple values fan a single FM field out to several ORM
    # fields.
    fields = {
        'StaffNotes': 'administrator_notes',
        'RecordStatus': ('public',
                         'record_status_value',
                         'record_status_explanation'),
        'RecordHistory': 'record_history',
        'ID': 'id',
        'Dataset': 'dataset',
        'CreatedBy': 'created_by_fm',
        'CreatedOn': 'created_on_fm',
        'ModifiedBy': 'modified_by_fm',
        'ModifiedOn': 'modified_on_fm',
        'Description': 'description',
        'Name': 'name',
        'Type.free': 'type_free',
        'Type.controlled': 'type_controlled',
        'DataDisplayOrder': 'data_display_order',
        'ConfidenceMeasure': 'confidence_measure',
        'RelationshipWeight': 'relationship_weight',
        'citation': {
            'Abstract': 'abstract',
            'Title': 'title',
            'Type.controlled': 'type_controlled',
            'AdditionalTitles': 'additional_titles',
            'BookSeries': 'book_series',
            'EditionDetails': 'edition_details',
            'PhysicalDetails': 'physical_details',
            'RecordHistory': 'record_history',
            'NotesOnContent.notpublished': 'administrator_notes',
            'NotesOnProvenance': 'record_history',
            'Language': 'language',
        },
        'partdetails': {
            'IssueBegin': 'issue_begin',
            'IssueEnd': 'issue_end',
            'IssueFreeText': 'issue_free_text',
            'PageBegin': 'page_begin',
            'PageEnd': 'page_end',
            'PagesFreeText': 'pages_free_text',
            'VolumeEnd': 'volume_end',
            'VolumeBegin': 'volume_begin',
            'VolumeFreeText': 'volume_free_text',
            'Extent': 'extent',
            'ExtentNote': 'extent_note',
            'ID': None,  # These are all fields from Citation that we
            'CreatedBy': None,  # don't want in PartDetails.
            'CreatedOn': None,
            'ModifiedBy': None,
            'ModifiedOn': None,
            'Description': None,
            'Dataset': None,
            'RecordStatus': None,
            'Type.controlled': None,
            'RecordHistory': None,
            'StaffNotes': None,
        },
        'authority': {
            'ClassificationSystem': 'classification_system',
            'ClassificationCode': 'classification_code',
            'ClassificationHierarchy': 'classification_hierarchy',
            'RedirectTo': 'redirect_to',
        },
        'person': {
            'PersonalNameFirst': 'personal_name_first',
            'PersonalNameLast': 'personal_name_last',
            'PersonalNameSuffix': 'personal_name_suffix',
            'PersonalNamePreferredForm': 'personal_name_preferred'
        },
        'acrelation': {
            'ID.Authority.link': 'authority',
            'ID.Citation.link': 'citation',
            'Type.Broad.controlled': 'type_broad_controlled',
            'PersonalNameFirst': 'personal_name_first',
            'PersonalNameLast': 'personal_name_last',
            'PersonalNameSuffix': 'personal_name_suffix',
        },
        'ccrelation': {
            'ID.Subject.link': 'subject',
            'ID.Object.link': 'object',
        },
        'tracking': {
            'ID.Subject.link': 'subject',
            'TrackingInfo': 'tracking_info',
            'Notes': 'notes',
        },
        'attribute': {
            'ID.Subject.link': 'source',
            'DateAttribute.free': 'value_freeform',
            'DateBegin': ('value', 'type_qualifier'),
            'DateEnd': ('value', 'type_qualifier'),
            'Type.Broad.controlled': 'type_controlled_broad',
            'Type.controlled': 'type_controlled',
        },
        'linkeddata': {
            'AccessStatus': 'access_status',
            'AccessStatusDateVerified': 'access_status_date_verified',
            'ID.Subject.link': 'subject',
            'Type.controlled': 'type_controlled',
            'Type.Broad.controlled': 'type_controlled_broad',
            'UniversalResourceName.link': 'universal_resource_name',
            'NameOfResource': 'resource_name',
            'URLOfResource': 'url',
        }
    }

    # Maps ORM field names onto value converters. A value may be a
    # staticmethod (applied to all models), a dict keyed by upper-cased raw
    # value (a simple translation table), or a dict keyed by model name whose
    # entries are either of the above. See :meth:`_map_field_value`.
    mappings = {
        'classification_system': {
            'WELDON THESAURUS TERMS (2002-PRESENT)': 'SPWT',
            'WELDON THESAURUS': 'SPWT',
            'WELDON CLASSIFICATION SYSTEM (2002-PRESENT)': 'SPWC',
            'SWP': 'SPWC',
            'NEU': 'NEU',
            'MW': 'MW',
            'SHOT': 'SHOT',
            'SHOT THESAURUS TERMS': 'SHOT',
            'GUERLAC COMMITTEE CLASSIFICATION SYSTEM (1953-2001)': 'GUE',
            'WHITROW CLASSIFICATION SYSTEM (1913-1999)': 'MW',
            'FORUM FOR THE HISTORY OF SCIENCE IN AMERICA': 'FHSA',
            'SEARCH APP CONCEPT': 'SAC',
            'PROPER NAME': 'PN',
        },
        'type_broad_controlled': {
            'acrelation': {
                'HASPERSONALRESPONSIBILITYFOR': 'PR',
                'PROVIDESSUBJECTCONTENTABOUT': 'SC',
                'ISINSTITUTIONALHOSTOF': 'IH',
                'ISPUBLICATIONHOSTOF': 'PH',
            }
        },
        'created_on_fm': _as_datetime,
        'modified_on_fm': _as_datetime,
        'extent': _try_positive_int,
        'issue_begin': _try_int,
        'issue_end': _try_int,
        'page_begin': _try_int,
        'page_end': _try_int,
        'volume_begin': _try_int,
        'volume_end': _try_int,
        'data_display_order': _to_float,
        'access_status_date_verified': _as_datetime,
        ('public',
         'record_status_value',
         'record_status_explanation'): _handle_record_status,
        ('value', 'type_qualifier'): {
            'attribute': _handle_attribute_value,
        },
        'language': _handle_language,
        'subject': {
            'ccrelation': _handle_citation_fk,
        },
        'type_controlled': {
            'citation': {
                'BOOK': 'BO',
                'ARTICLE': 'AR',
                'CHAPTER': 'CH',
                'REVIEW': 'RE',
                'ESSAYREVIEW': 'ES',
                'ESSAY REVIEW': 'ES',
                'THESIS': 'TH',
                'EVENT': 'EV',
                'PRESENTATION': 'PR',
                'INTERACTIVERESOURCE': 'IN',
                'INTERACTIVE RESOURCE': 'IN',
                'WEBSITE': 'WE',
                'APPLICATION': 'AP',
            },
            'authority': {
                'PERSON': 'PE',
                'PUBLISHER': 'PU',
                'INSTITUTION': 'IN',
                'TIMEPERIOD': 'TI',
                'GEOGRAPHICTERM': 'GE',
                'SERIALPUBLICATION': 'SE',
                'CLASSIFICATIONTERM': 'CT',
                'CONCEPT': 'CO',
                'CREATIVEWORK': 'CW',
                'EVENT': 'EV',
                'PUBLISHERS': 'PU',
                'CROSS-REFERENCE': 'CR',
            },
            'person': {
                'PERSON': 'PE',
                'INSTITUTION': 'IN',
                'TIMEPERIOD': 'TI',
                'GEOGRAPHICTERM': 'GE',
                'SERIALPUBLICATION': 'SE',
                'CLASSIFICATIONTERM': 'CT',
                'CONCEPT': 'CO',
                'CREATIVEWORK': 'CW',
                'EVENT': 'EV',
                'PUBLISHERS': 'PU',
                'PUBLISHER': 'PU',
                'CROSS-REFERENCE': 'CR',
            },
            'acrelation': {
                'AUTHOR': 'AU',
                'EDITOR': 'ED',
                'ADVISOR': 'AD',
                'CONTRIBUTOR': 'CO',
                'TRANSLATOR': 'TR',
                'SUBJECT': 'SU',
                'CATEGORY': 'CA',
                'PUBLISHER': 'PU',
                'SCHOOL': 'SC',
                'INSTITUTION': 'IN',
                'MEETING': 'ME',
                'PERIODICAL': 'PE',
                'BOOKSERIES': 'BS',
                'COMMITTEE MEMBER': 'CM',
            },
            'ccrelation': {
                'INCLUDESCHAPTER': 'IC',
                'INCLUDESSERIESARTICLE': 'ISA',
                'ISREVIEWOF': 'RO',
                'ISREVIEWEDBY': 'RB',
                'RESPONDSTO': 'RE',
                'ISASSOCIATEDWITH': 'AS'
            },
            'tracking': {
                'HSTMUPLOAD': 'HS',
                'PRINTED': 'PT',
                'AUTHORIZED': 'AU',
                'PROOFED': 'PD',
                'FULLYENTERED': 'FU',
                'BULK DATA UPDATE': 'BD'
            }
        }
    }

    def __init__(self, handler):
        """
        Parameters
        ----------
        handler : object
            Defines a ``handle_<model_name>`` method per model; see
            :meth:`_get_handler`.
        """
        self.handler = handler

    def _map_field_value(self, model_name, fm_field, fm_value):
        """
        Given a model and a filemaker field/value pair, obtain the correct
        model field and value.
        The configuration in FMPDSOParser.mappings is used to convert
        ``fm_value`` to the correct Python type for the identified model field.
        Parameters
        ----------
        model_name : str
            Must be the (lowercase normed) name of a model in
            :mod:`isiscb.isisdata.models`\.
        fm_field : str
            Name of a field in the FileMaker database.
        fm_value : str
            Raw value from the FileMaker database.
        Returns
        -------
        list
            ``(model_field, value)`` pairs; empty when the field is skipped
            or the value is empty. The value type depends on the model field.
        """
        if not fm_value:
            return []
        # Correct a recurring typo in the source data's field names.
        fm_field = fm_field.replace('Modiefied', 'Modified')
        model_field = self.fields[model_name].get(fm_field, False)
        if model_field is None:
            # Explicit None means "skip this field for this model".
            return []
        if not model_field:
            # Fall back to the model-agnostic, top-level field mapping.
            model_field = self.fields.get(fm_field, False)
            if not model_field:
                return []    # Skip the field.
        # ``mapper`` is a function (staticmethod) or dict. See
        # :prop:`FMPDSOParser.mappings`.
        mapper = self.mappings.get(model_field, None)
        # This might not be necessary, but I'm paranoid.
        value = copy.copy(fm_value).strip()
        if mapper:
            attrs = (model_name, fm_field, value.upper())
            # If the mapper is a method of some kind, it applies to all models
            # with this field.
            if type(mapper) is staticmethod:
                value = self.mappings[model_field].__func__(*attrs)
            # Otherwise, it may be model-specific or not. If it's
            # model-specific, then we should find an entry for the model name
            # in the mapper.
            elif hasattr(mapper, 'get'):
                # If there is a model-specific mapping, then we prefer that
                # over a more general mapping.
                model_mapper = mapper.get(model_name, mapper)
                # The mapper itself may be a static method...
                if type(model_mapper) is staticmethod:
                    value = model_mapper.__func__(*attrs)
                # ...or a hashmap (dict).
                elif hasattr(model_mapper, 'get'):
                    value = model_mapper.get(value.upper(), value)
        # This should only be falsey if it is set explicitly.
        if not value:
            return []
        # A single field/value in FM may map to two or more fields/values in
        # IsisCB Explore.
        if type(model_field) is tuple and type(value) is tuple:
            return list(zip(model_field, value))
        return [(model_field, value)]

    def _get_handler(self, model_name):
        """
        The class of the handler instance (passed to constructor, and assigned
        to ``self.handler``) should define a handler method for each model,
        named ``handle_[model_name]``.
        Parameters
        ----------
        model_name : str
            Must be the (lowercase normed) name of a model in
            :mod:`isiscb.isisdata.models`\.
        Returns
        -------
        instancemethod
            Or ``None`` when the handler defines no such method.
        """
        return getattr(self.handler, 'handle_%s' % model_name, None)

    def _tag(self, element):
        """Strip the FileMaker namespace from an element's tag name."""
        return copy.copy(element.tag).replace(self.fm_namespace, '')

    def parse_record(self, record, model_name, parse_also=None):
        """
        Parse a single row of data from FMPDSO XML and dispatch it to the
        appropriate handler.
        """
        # There are some strange elements early in the XML document that we
        # don't care about. <ROW>s hold the data that we're after.
        if self._tag(record) != 'ROW':
            return
        fielddata = []
        # BUG FIX: guard the default ``parse_also=None``, which previously
        # raised TypeError in this comprehension.
        extra = [[] for _ in parse_also] if parse_also else []
        # BUG FIX: Element.getchildren() was removed in Python 3.9; iterating
        # the element directly is equivalent.
        for element in record:
            fm_field = self._tag(element)
            fm_value = copy.copy(element.text)
            fielddata += self._map_field_value(model_name, fm_field, fm_value)
            # Data for some models (e.g. Citation, Authority) need to be
            # handled at the same time as data for other models (e.g.
            # PartDetails, Person).
            if parse_also:
                for i, extra_model in enumerate(parse_also):
                    args = (extra_model, fm_field, fm_value)
                    extra[i] += self._map_field_value(*args)
        # The class of the handler instance (passed to constructor, and set to
        # self.handler) should define a handler method for each model.
        handler = self._get_handler(model_name)
        if not handler:
            return
        return handler(fielddata, extra)

    def parse(self, model_name, data_path, parse_also):
        """
        Kick off parsing for a single FMPDSO XML document.
        Parameters
        ----------
        model_name : str
            Must be the (lowercase-normed) name of a
            :class:`django.db.models.Model` subclass in
            :mod:`isiscb.isisdata.models`\.
        data_path : str
            Location of the XML document.
        parse_also : list
            Names of other models that should be parsed at the same time.
        """
        # This is a much more memory-friendly approach -- ET does all kinds of
        # crazy copying otherwise. The trade-off is that we only get one crack
        # each element that streams through.
        fast_iter(ET.iterparse(data_path),    # Iterator.
                  self.parse_record,    # Method.
                  'ROW',
                  model_name,    # Extra...
                  parse_also)
class VerboseHandler(object):
    """
    Just for testing.
    """
    def handle_citation(self, fielddata, extra):
        """Pretty-print parsed citation field data instead of persisting it."""
        pprint.pprint(fielddata)
    def handle_authority(self, fielddata, extra):
        """Pretty-print parsed authority field data instead of persisting it."""
        pprint.pprint(fielddata)
class DatabaseHandler(object):
    """
    Updates the IsisCB Explore database using data yielded by the
    :class:`.FMPDSOParser`\.
    """
    # Foreign-key-like fields; see :meth:`_prepare_data`.
    pk_fields = ['language', 'subject', 'object', 'citation', 'authority',
                 'redirect_to', 'source']
    """
    When these fields are encountered, `_id` will be appended to the field
    name.
    """
    # NOTE(review): 'AAR' maps to 'aarelation', but no handle_aarelation
    # method is visible in this module — confirm it exists elsewhere.
    id_prefixes = {
        'CBB': 'citation',
        'CBA': 'authority',
        'ACR': 'acrelation',
        'AAR': 'aarelation',
        'CCR': 'ccrelation',
    }
    """
    Maps ID prefixes onto model names.
    """
def __init__(self, print_every=200):
self.model_counts = Counter()
self.print_every = print_every
self.errors = []
try:
with open('/home/ec2-user/ingest_errors.pickle', 'r') as f:
self.errors += pickle.load(f)
except:
pass
def _tick(self, model_name):
self.model_counts[model_name] += 1
N = self.model_counts[model_name]
if N % self.print_every == 0:
pprint.pprint("handled %i %s records" % (N, model_name))
def _get_subject(self, subject_id):
"""
Obtain the ID of the ContentType instance corresponding to the object
with ID ``subject_id``.
Parameters
----------
subject_id : str
Returns
-------
int
Primary key ID for the ContentType instance for the object's
model class.
"""
model_name = self.id_prefixes[subject_id[:3]]
return ContentType.objects.get(model=model_name).id
def _update_with(self, instance, data):
"""
Update a db model ``instance`` with values in ``data``.
Parameters
----------
instance : :class:`django.db.models.Model`
data : dict
"""
for field, value in list(data.items()):
setattr(instance, field, value)
instance.save()
def _prepare_data(self, model, data):
"""
Converts ``data`` to a dict, and makes any necessary modifications to
field names.
Parameters
----------
data : list
A list of (fieldname, value) tuples.
Returns
-------
dict
"""
prepped_data = {}
for field, value in list(dict(data).items()):
if field in self.pk_fields:
field += '_id'
prepped_data[field] = value
return prepped_data
def _fix_partdetails(self, partdetails_data):
"""
Occassionally non-int data will be entered in int-only fields for
:class:`.PartDetails`\. If so, we pass the value off to the
corresponding ``free_text`` field, and remove the non-conforming field.
"""
int_fields = [
'issue_end', 'issue_begin',
'page_begin', 'page_end',
'volume_begin', 'volume_end'
]
partdetails_data_fixed = {}
for key, value in list(partdetails_data.items()):
if key in int_fields and type(value) is not int:
prefix = key.split('_')[0]
freetext_key = prefix + u'_free_text'
# Don't overwrite existing data.
if freetext_key in partdetails_data:
continue
key = freetext_key
partdetails_data_fixed[key] = value
return partdetails_data_fixed
def _handle_dataset(self, literal):
if type(literal) in [str, str]:
match = re.search('([^(]+)[(](.+)[)]', literal)
if match:
datasetname, editorname = match.groups()
dataset, _ = Dataset.objects.get_or_create(name=datasetname)
subdataset, _ = Dataset.objects.get_or_create(name=literal,
defaults={'belongs_to': dataset})
return 'belongs_to', subdataset
else:
dataset, _ = Dataset.objects.get_or_create(name=literal)
return 'belongs_to', dataset
return 'dataset_literal', literal
    def handle_citation(self, fielddata, extra):
        """
        Create or update a :class:`.Citation` with ``fielddata``.
        Parameters
        ----------
        fielddata : list
            A list of (fieldname, value) tuples.
        extra : list
            Items are lists in the same format as ``fielddata``; the first
            item holds the PartDetails data parsed alongside the citation.
        """
        citation_data = self._prepare_data(Citation, fielddata)
        citation_id = citation_data.pop('id') # Don't want this in update.
        # Language is many-to-many; handled separately after save.
        language_id = citation_data.pop('language_id', None)
        dataset = citation_data.pop('dataset', None)
        if dataset:
            # Keep the raw literal, and also attach the resolved Dataset (or
            # re-store the literal) under whichever key _handle_dataset picks.
            key, value = self._handle_dataset(dataset)
            citation_data['dataset_literal'] = dataset
            citation_data[key] = value
        try:
            citation, created = Citation.objects.update_or_create(
                pk=citation_id,
                defaults=citation_data
            )
        except Exception as E:
            # Dump the offending payload before re-raising, for debugging.
            print(citation_data, citation_id)
            raise E
        if language_id:
            citation.language.add(language_id)
        partdetails_data = self._prepare_data(PartDetails, extra[0])
        partdetails_data = self._fix_partdetails(partdetails_data)
        # Existing citation with existing part details: update in place.
        if not created:
            if citation.part_details and len(partdetails_data) > 0:
                self._update_with(citation.part_details, partdetails_data)
        # New citation, or existing one without part details: create and link.
        if (created or not citation.part_details) and len(partdetails_data) > 0:
            try:
                part_details = PartDetails.objects.create(**partdetails_data)
            except Exception as E:
                print(partdetails_data)
                raise E
            citation.part_details = part_details
            citation.save()
        self._tick('citation')
def handle_authority(self, fielddata, extra):
"""
Create or update an :class:`.Authority` with ``fielddata``.
Parameters
----------
fielddata : list
A list of (fieldname, value) tuples.
extra : list
Items are lists in the same format as ``fielddata``.
"""
authority_data = self._prepare_data(Authority, fielddata)
person_data = self._prepare_data(Person, extra[0])
if authority_data['record_status_value'] == CuratedMixin.ACTIVE:
authority_data['public'] = True
if person_data and authority_data.get('type_controlled') == 'PE':
model = Person
authority_data.update(person_data)
else:
model = Authority
try:
authority, created = model.objects.update_or_create(
pk=authority_data['id'],
defaults=authority_data
)
except Exception as E:
# If this record redirects to a record that has not yet been
# created, update_or_create() will throw an IntegrityError.
if type(E) is IntegrityError and authority_data.get('record_status_value').lower() == 'redirect':
redirect_to = authority_data.get('redirect_to_id')
Authority.objects.create(**{
'pk': redirect_to,
'type_controlled': authority_data.get('type_controlled'),
'public': True,
'record_status_value': 'Active',
})
try:
authority, created = model.objects.update_or_create(
pk=authority_data['id'],
defaults=authority_data
)
except Exception as E:
self.errors.append(('authority', E.__repr__(), authority_data['id'], authority_data))
print(authority_data)
raise E
self._tick('authority')
def handle_ccrelation(self, fielddata, extra):
"""
Create or update a :class:`.CCRelation` with ``fielddata``.
Parameters
----------
fielddata : list
A list of (fieldname, value) tuples.
extra : list
Items are lists in the same format as ``fielddata``.
"""
ccrelation_data = self._prepare_data(CCRelation, fielddata)
ccrelation_id = ccrelation_data.pop('id')
try:
ccrelation, created = CCRelation.objects.update_or_create(
pk=ccrelation_id,
defaults=ccrelation_data
)
except Exception as E:
self.errors.append(('ccrelation', E.__repr__(), ccrelation_id, ccrelation_data))
self._tick('ccrelation')
def handle_acrelation(self, fielddata, extra):
"""
Create or update a :class:`.ACRelation` with ``fielddata``.
Parameters
----------
fielddata : list
A list of (fieldname, value) tuples.
extra : list
Items are lists in the same format as ``fielddata``.
"""
acrelation_data = self._prepare_data(ACRelation, fielddata)
acrelation_id = acrelation_data.pop('id')
try:
acrelation, created = ACRelation.objects.update_or_create(
pk=acrelation_id,
defaults=acrelation_data
)
except Exception as E:
print(E.__repr__(), acrelation_id, acrelation_data)
self.errors.append(('acrelation', E.__repr__(), acrelation_id, acrelation_data))
self._tick('acrelation')
def handle_attribute(self, fielddata, extra):
"""
Create or update an :class:`.Attribute` with ``fielddata``.
Parameters
----------
fielddata : list
A list of (fieldname, value) tuples.
extra : list
Items are lists in the same format as ``fielddata``.
"""
N_values = 0
datasets = []
value_data = []
for field, value in fielddata:
if field == 'value':
value_data.append(value)
N_values = len(value_data)
if len(value_data) == 1:
value_data = value_data[0]
# If the row has some problem, there may not be an actual Value.
if not value_data:
return
attribute_data = self._prepare_data(Attribute, fielddata)
attribute_id = attribute_data.pop('id', None)
# `subject` is a generic relation; an Attribute can describe anything.
subject_id = attribute_data.pop('source_id')
subject_type_id = self._get_subject(subject_id)
attribute_data.update({
'source_content_type_id': subject_type_id,
'source_instance_id': subject_id,
})
# We don't want these in the data for Attribute.
attribute_data.pop('type_qualifier', None)
attribute_data.pop('value', None)
try:
type_controlled = attribute_data.pop('type_controlled')
except KeyError as E:
print(E.__repr__(), attribute_id, attribute_data)
self.errors.append(('attribute', E.__repr__(), attribute_id, attribute_data))
return
if type_controlled == 'BirthToDeathDates' or \
(type(value_data) in [list, tuple] and len(value_data) == 2):
value_model = ISODateRangeValue
elif 'date' in type_controlled.lower():
value_model = ISODateValue
else:
value_model = dict(VALUE_MODELS)[type(value_data)]
try:
value_model_ctype = ContentType.objects.get_for_model(value_model)
attribute_type, _ = AttributeType.objects.update_or_create(
name=type_controlled,
defaults={
'value_content_type_id': value_model_ctype.id
}
)
except Exception as E:
print(E.__repr__(), attribute_id, attribute_data)
self.errors.append(('attribute', E.__repr__(), attribute_id, attribute_data))
attribute_data.update({
'type_controlled_id': attribute_type.id,
})
try:
attribute, created = Attribute.objects.update_or_create(
pk=attribute_id,
defaults=attribute_data
)
except Exception as E:
print(E.__repr__(), attribute_id, attribute_data)
self.errors.append(('attribute', E.__repr__(), attribute_id, attribute_data))
# try:
if not hasattr(attribute, 'value'):
try:
value = value_model.objects.create(
value=value_data,
attribute=attribute
)
except Exception as E:
print(E.__repr__(), attribute_id, attribute_data)
self.errors.append(('value', E.__repr__(), attribute_id, value_data))
else:
child_class = attribute.value.get_child_class()
if type(child_class) != value_model:
attribute.value.delete()
try:
value = value_model.objects.create(
value=value_data,
attribute=attribute
)
except Exception as E:
print(E.__repr__(), attribute_id, attribute_data)
self.errors.append(('value', E.__repr__(), attribute_id, value_data))
else:
try:
self._update_with(attribute.value, {'value': value_data})
except Exception as E:
print(E.__repr__(), attribute_id, attribute_data)
self.errors.append(('value', E.__repr__(), attribute_id, value_data))
value = attribute.value
# except Exception as E:
# print E.__repr__(), attribute_id, attribute_data
# self.errors.append(('value', E.__repr__(), attribute_id, value_data))
if 'value_freeform' not in attribute_data or not attribute.value_freeform:
attribute.value_freeform = attribute.value.__unicode__()
attribute.save()
self._tick('attribute')
def handle_linkeddata(self, fielddata, extra):
"""
Create or update a :class:`.LinkedData` with ``fielddata``.
Parameters
----------
fielddata : list
A list of (fieldname, value) tuples.
extra : list
Items are lists in the same format as ``fielddata``.
"""
linkeddata_data = self._prepare_data(LinkedData, fielddata)
linkeddata_id = linkeddata_data.pop('id')
# `subject` is a generic relation; an Attribute can describe anything.
subject_id = linkeddata_data.pop('subject_id')
subject_type_id = self._get_subject(subject_id)
# Get the LinkedDataType instance for this LinkedData.
type_controlled = linkeddata_data.pop('type_controlled')
ld_type, _ = LinkedDataType.objects.get_or_create(name=type_controlled)
linkeddata_data.update({
'subject_content_type_id': subject_type_id,
'subject_instance_id': subject_id,
'type_controlled_id': ld_type.id,
})
try:
linkeddata, created = LinkedData.objects.update_or_create(
pk=linkeddata_id,
defaults=linkeddata_data
)
except Exception as E:
self.errors.append(('linkeddata', E.__repr__(), linkeddata_id, linkeddata_data))
self._tick('linkeddata')
def handle_tracking(self, fielddata, extra):
"""
Create or update a :class:`.Tracking` with ``fielddata``.
Parameters
----------
fielddata : list
A list of (fieldname, value) tuples.
extra : list
Items are lists in the same format as ``fielddata``.
"""
tracking_data = self._prepare_data(Tracking, fielddata)
tracking_id = tracking_data.pop('id')
subject_id = tracking_data.pop('subject_id')
subject_type_id = self._get_subject(subject_id)
tracking_data.update({
'subject_content_type_id': subject_type_id,
'subject_instance_id': subject_id,
})
try:
tracking, created = Tracking.objects.update_or_create(
pk=tracking_id,
defaults=tracking_data
)
except Exception as E:
self.errors.append(('tracking', E.__repr__(), tracking_id, tracking_data))
self._tick('tracking')
def __del__(self):
import pickle as pickle
try:
with open('/home/ec2-user/ingest_errors.pickle', 'w') as f:
pickle.dump(self.errors, f)
except:
pass
class Command(BaseCommand):
    # Management command: feeds FileMaker FMPDSO XML exports through
    # FMPDSOParser into the database via DatabaseHandler.
    help = 'Update the IsisCB Explore database with FileMaker Pro FMPDSO XML.'
    def __init__(self, *args, **kwargs):
        # Collects tables/files that failed to parse.
        self.failed = []
        return super(Command, self).__init__(*args, **kwargs)
    def _get_subject(self, subject_id):
        """Map a record ID prefix (first 3 chars) to its model's ContentType id."""
        model_name = model_ids[subject_id[:3]]
        subject_ctype = ContentType.objects.get(model=model_name).id
        return subject_ctype
    def add_arguments(self, parser):
        # datapath: root directory of the XML export; table: table name(s).
        parser.add_argument('datapath', nargs=1, type=str)
        parser.add_argument('table', nargs='*', type=str)
    def handle(self, *args, **options):
        parser = FMPDSOParser(DatabaseHandler())
        table = options['table'][0]
        # Some tables have companion tables that must be parsed together.
        if table == 'citation':
            parse_also = ['partdetails']
        elif table == 'authority':
            parse_also = ['person']
        else:
            parse_also = []
        dirpath = os.path.join(options['datapath'][0], table)
        if os.path.exists(dirpath) and os.path.isdir(dirpath):
            # Directory layout: parse every <table>*.xml chunk inside it.
            for fname in os.listdir(dirpath):
                if fname.startswith(table) and fname.endswith('xml'):
                    path = os.path.join(dirpath, fname)
                    print('processing %s' % fname)
                    parser.parse(table, path, parse_also)
        else:
            # Flat layout: a single <table>.xml next to datapath.
            path = os.path.join(options['datapath'][0], '%s.xml' % table)
            parser.parse(table, path, parse_also)
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
# Load the contact list (one contact per line) and show the action menu.
# A context manager fixes the leaked file handle in the original code.
with open("./contatos.txt", 'r', encoding='UTF-8') as _arquivo_contatos:
    contatos = _arquivo_contatos.read().split('\n')
print('\n--- ROBÔ WHATSAPP PARA ENVIO DE MENSAGENS ---')
print('Enviar texto [1]\nEnviar imagem [2]\nEnviar texto e imagem [3]')
opcao = input('\nDigite a sua opção: ')
class WhatsappBot:
    """Automate WhatsApp Web to send text and/or media to every contact in
    the module-level ``contatos`` list.

    Fixes over the original: the corrupted identifiers (``mens<PASSWORD>``)
    are restored to ``mensagem``; the configured ChromeOptions (lang=pt-br)
    are actually passed to the driver (the original created them and then
    launched with a second, empty options object); the duplicated
    search/attach/send sequences are factored into private helpers; message
    files are closed after reading. The original timing (sleeps) and
    selectors are preserved.
    """

    # Selectors for WhatsApp Web's obfuscated, version-dependent DOM.
    _BUSCA = "_13NKt"                  # contact search box (CSS class)
    _CAIXA_TEXTO = 'p3_M1'             # message input box (CSS class)
    _BOTAO_ENVIAR = "//span[@data-icon='send']"
    _ANEXO = '//input[@accept="image/*,video/mp4,video/3gpp,video/quicktime"]'

    def __init__(self):
        # Configure and launch Chrome with the local chromedriver binary.
        options = webdriver.ChromeOptions()
        options.add_argument('lang=pt-br')
        ser = Service("./chromedriver.exe")
        self.driver = webdriver.Chrome(service=ser, options=options)

    def _ler_mensagem(self):
        """Read the outgoing message text from mensagem.txt."""
        with open("./mensagem.txt", 'r', encoding='UTF-8') as arquivo:
            return arquivo.read()

    def _abrir_whatsapp(self):
        """Open WhatsApp Web and wait for the user to scan the QR code."""
        self.driver.get('https://web.whatsapp.com')
        time.sleep(10)

    def _abrir_conversa(self, contato):
        """Search for ``contato`` and open its chat."""
        pesquisa = self.driver.find_element(By.CLASS_NAME, self._BUSCA)
        time.sleep(2)
        pesquisa.click()
        time.sleep(2)
        pesquisa.send_keys(contato)
        time.sleep(2)
        pesquisa.send_keys(Keys.ENTER)

    def _clicar_enviar(self):
        """Click the send button of the currently open chat/preview."""
        botao_enviar = self.driver.find_element(By.XPATH, self._BOTAO_ENVIAR)
        time.sleep(3)
        botao_enviar.click()

    def _enviar_texto(self, mensagem):
        """Type ``mensagem`` into the open chat and send it."""
        chat_box = self.driver.find_element(By.CLASS_NAME, self._CAIXA_TEXTO)
        time.sleep(3)
        chat_box.click()
        time.sleep(3)
        chat_box.send_keys(mensagem)
        self._clicar_enviar()

    def _enviar_imagem(self, midia):
        """Attach the media file at path ``midia`` and send it."""
        self.driver.find_element(By.CSS_SELECTOR, "span[data-icon='clip']").click()
        attach = self.driver.find_element(By.XPATH, self._ANEXO)
        time.sleep(3)
        attach.send_keys(midia)
        time.sleep(5)
        self._clicar_enviar()

    def EnviarMensagens(self):
        """Send the text message to every contact."""
        mensagem = self._ler_mensagem()
        self._abrir_whatsapp()
        for contato in contatos:
            self._abrir_conversa(contato)
            self._enviar_texto(mensagem)
            time.sleep(10)

    def EnviarMidia(self):
        """Send the media file to every contact."""
        midia = "C:\\caminho\\para\\seu\\arquivo\\de\\midia.jpeg"
        self._abrir_whatsapp()
        for contato in contatos:
            self._abrir_conversa(contato)
            self._enviar_imagem(midia)
            time.sleep(10)

    def EnviarAmbos(self):
        """Send the media file followed by the text message to every contact."""
        midia = "C:\\caminho\\para\\seu\\arquivo\\de\\midia.jpeg"
        mensagem = self._ler_mensagem()
        self._abrir_whatsapp()
        for contato in contatos:
            self._abrir_conversa(contato)
            self._enviar_imagem(midia)
            time.sleep(5)
            self._enviar_texto(mensagem)
            time.sleep(10)
# Dispatch the selected menu option. (The original last line was fused with
# dataset-separator junk, which made the file a SyntaxError; the mutually
# exclusive `if`s are now an elif chain.)
bot = WhatsappBot()
if opcao == "1":
    bot.EnviarMensagens()
elif opcao == "2":
    bot.EnviarMidia()
elif opcao == "3":
    bot.EnviarAmbos()
"""feusb\feusb_win32.py -- Fascinating Electronics USB CDC Library
The feusb libary supports USB CDC devices from Fascinating Electronics, with
useful features and error detection for USB device suspend and disconnect.
Feusb does not support legacy RS232 devices or modems.
This file only contains support for Windows. Other files distributed with feusb
provide support for Linux and OS-X.
Do not import this file directly, instead "import feusb". This will load the
correct support file for your operating system automatically.
"""
__author__ = "<NAME> <<EMAIL>"
__copyright__ = "Copyright 2008 <NAME>, <NAME>"
__version__ = "1.1"
import exceptions
import time
import sys
import glob
import os
import select
import struct
import fcntl
import traceback
# Timing/tuning constants shared by the Feusb class below.
TIMEOUTS = (0, 0, 20, 0, 1000) #milliseconds - read timeout - write timeout
COMMAND_INTERVAL = 0.001 #seconds - process command to read reply
RETRY_INTERVAL = 0.001 #seconds
RETRY_LIMIT = 20 #max number of read retries per reply
SUSPEND_INTERVAL = 1.000 #seconds
PORT_OK = 'PORT_OK' #port status conditions
SUSPENDED = 'SUSPENDED'
DISCONNECTED = 'DISCONNECTED'
BEL = '\a' #non-printing bel character
# NOTE(review): the ERRNUM_* and PURGE_* values below are Windows error
# codes / PurgeComm flags kept from the win32 version; they appear unused by
# this POSIX (termios) port -- confirm before removing.
ERRNUM_CANNOT_OPEN = 2 #The system cannot find the file specified.
ERRNUM_ACCESS_DENIED = 5 #Access is denied.
ERRNUM_SUSPENDED = 31 #A device attached to the system is not functioning.
ERRNUM_DISCONNECTED = 1167 #The device is not connected.
PURGE_RXCLEAR = 0x0008 #Windows PurgeComm flag
PURGE_TXCLEAR = 0x0004 #Windows PurgeComm flag
import termios
# ioctl plumbing for "how many bytes are waiting" (FIONREAD) queries.
TIOCM_zero_str = struct.pack('I', 0)
TIOCINQ = hasattr(termios, 'FIONREAD') and termios.FIONREAD
def port_list():
    """Return a list of the available serial ports (as strings).
    Each candidate device node is probed by opening it with Feusb; only
    ports that open successfully are returned.
    """
    ports = []
    # Renamed from `list`, which shadowed the builtin.
    candidates = []
    if sys.platform=='linux2':
        candidates.extend(glob.glob('/dev/ttyACM*'))
        candidates.extend(glob.glob('/dev/fercs*'))
    elif sys.platform=='darwin':
        candidates.extend(glob.glob('/dev/tty.usbmodem*'))
    for port in candidates:
        try:
            p = Feusb(port)
        except OpenError:
            # Port could not be opened: skip it. (The original fell through
            # to `del p` here and raised NameError on the unbound name.)
            continue
        ports.append(port)
        del p  # release the probe object so its fd is closed promptly
    return ports
# Exception hierarchy: every error raised by Feusb derives from FeusbError,
# so callers can catch the whole family with a single except clause.
class FeusbError(Exception):
    """Base class for exceptions raised in the Feusb class."""
    pass
class OpenError(FeusbError):
    """Unsuccessful opening the port."""
    pass
class SuspendError(FeusbError):
    """The device is in a USB suspend state."""
    pass
class DisconnectError(FeusbError):
    """The device has been disconnected."""
    pass
class ReadTimeoutError(FeusbError):
    """The device hasn't returned the requested number of replies in time."""
    pass
class UnexpectedError(FeusbError):
    """An error occurred that was not part of normal operation."""
    pass
class Feusb:
"""Fascinating Electronics USB-CDC device class."""
def __init__(self, port_string, error_on_suspend=False):
"""Open the port and allocate buffers."""
self._handle = -1
self._port_string = port_string
self._error_on_suspend = error_on_suspend
self._string_buffer = ''
self._status = DISCONNECTED
try:
self._handle = os.open(self._port_string, os.O_RDWR | os.O_NONBLOCK)
self.__oldmode=termios.tcgetattr(self._handle)
# setup tcsetattr for setting serial options
self.__params=[]
self.__params.append(termios.IGNPAR) # c_iflag
self.__params.append(0) # c_oflag
self.__params.append(termios.CS8|termios.CLOCAL|termios.CREAD) # c_cflag
self.__params.append(0) # c_lflag
self.__params.append(termios.B115200) # c_ispeed
self.__params.append(termios.B115200) # c_ospeed
if sys.platform=='linux2':
cc=[0]*termios.NCCS
elif sys.platform=='darwin':
cc=[0]*len(self.__oldmode[6])
cc[termios.VMIN]=0 # Non-blocking reading.
cc[termios.VTIME]=0
self.__params.append(cc) # c_cc
termios.tcsetattr(self._handle, termios.TCSANOW, self.__params)
except exceptions.IOError, e:
raise OpenError()
except Exception, e:
raise UnexpectedError('Unexpected error in __init__.\n'
'%s\nDetails: %s'
%(str(type(e)),str(e)))
else:
self._status = PORT_OK
self.purge()
def __del__(self):
"""Close the port."""
self._close()
def _close(self):
try:
os.close(self._handle)
except OSError, e:
if e.errno is not 9:
raise e
def purge(self):
"""Purge input buffer and attempt to purge device responses."""
if len(self._string_buffer) > 0:
# print 'DEBUG: Purging string_buffer of %d characters.'%len(self._string_buffer)
self._string_buffer = ''
if self._status is DISCONNECTED:
raise DisconnectError("Port %s is disconnected."
%self._port_string)
retries = 0
while retries < RETRY_LIMIT:
time.sleep(RETRY_INTERVAL)
count = self.raw_waiting()
print count
self._string_buffer = ''
flags = termios.tcdrain(self._handle)
if count == 0:
retries += 1
if self._status is DISCONNECTED:
raise DisconnectError("Port %s is disconnected."
%self._port_string)
def error_on_suspend(self, new_error_on_suspend=None):
"""Return error_on_suspend status, with optional set parameter."""
if new_error_on_suspend is True:
self._error_on_suspend = True
elif new_error_on_suspend is False:
self._error_on_suspend = False
return self._error_on_suspend
def raw_waiting(self):
"""Update buffer, return the number of characters available."""
if self._status is DISCONNECTED:
raise DisconnectError("Port %s needs to be reconnected."
%self._port_string)
try:
s = fcntl.ioctl(self._handle, TIOCINQ, TIOCM_zero_str)
in_que = struct.unpack('I',s)[0]
except IOError, e:
self._status = DISCONNECTED
raise DisconnectError("Port %s needs to be reconnected."
%self._port_string)
except Exception, e:
raise UnexpectedError('Unexpected error in raw_waiting.\n'
'%s\nDetails: %s'
%(str(type(e)),str(e)))
else:
if self._status is SUSPENDED:
self._status = PORT_OK
if in_que > 0:
try:
buff = os.read(self._handle, in_que)
except Exception, e:
raise UnexpectedError('Unexpected ReadFile error '
'in raw_waiting.\n'
'%s\nDetails: %s'
%(str(type(e)),str(e)))
else:
self._string_buffer += buff
if len(buff) < in_que:
raise UnexpectedError('ReadFile in raw_waiting '
'returned fewer characters '
'than expected.\n'
'Expected: %d Got: %d'%
(in_que, len(buff)))
return len(self._string_buffer)
def waiting(self):
"""Update buffer, return the number of replies available."""
self.raw_waiting() #update _string_buffer
return self._string_buffer.count('\r\n')
def raw_read(self, limit=None):
"Return any characters available (a string), with an optional limit."
char_count = self.raw_waiting() #update _string_buffer
if char_count <= limit or limit is None:
split_location = char_count
else:
split_location = limit
ret_str = self._string_buffer[:split_location]
self._string_buffer = self._string_buffer[split_location:]
return ret_str
def read(self, command=None, count=1):
"""Send command, return replies stripped of text, blocking if necessary.
Replies are stripped of text, leaving just integers or floats.
For a single line reply, either a number or tuple is returned.
For a multi-line reply, a list of numbers and tuples is returned.
When the command count > 1, a list of the above is returned.
"""
if command is not None:
self.write(command)
time.sleep(COMMAND_INTERVAL)
current_replies = self.waiting()
old_replies = current_replies
retries = 0
while current_replies < count:
if self._status is SUSPENDED:
time.sleep(SUSPEND_INTERVAL)
else:
if current_replies == old_replies:
retries += 1
if retries == RETRY_LIMIT:
status = self.status()
if status is DISCONNECTED:
raise DisconnectError('Port %s is disconnected.'%
self._port_string)
elif status is SUSPENDED:
raise UnexpectedError('Unexpected error in read(): '
'Port %s is suspended, but '
"the suspend wasn't caught "
'in waiting() as expected.'%
self._port_string)
else:
raise ReadTimeoutError("Feusb method read() took "
"more than %4.3f seconds "
"per reply."%
(RETRY_INTERVAL*RETRY_LIMIT))
else:
retries = 0
old_replies = current_replies
time.sleep(RETRY_INTERVAL)
current_replies = self.waiting()
all_replies = self._string_buffer.split("\r\n")
return_value = []
for i in range(count):
reply_lines = all_replies.pop(0).splitlines()
command_reply = []
for line in reply_lines:
token_list = line.split()
line_reply = []
for token in token_list:
if token[0].isalpha():
pass
elif '.' in token:
line_reply.append(float(token))
else:
line_reply.append(int(token))
if len(line_reply) > 1:
command_reply.append(tuple(line_reply))
elif len(line_reply) == 1:
command_reply.append(line_reply[0])
if len(command_reply) == 1:
return_value.append(command_reply[0])
else:
return_value.append(command_reply)
self._string_buffer = "\r\n".join(all_replies)
if len(return_value) == 1:
return return_value[0]
else:
return return_value
def raw_write(self, string=''):
"""Write a command string to the port.
The string should end with <return> or <newline> characters ('\r' or
'\n') if you want the module to start processing the command now.
"""
if self._status is DISCONNECTED:
raise DisconnectError("Port %s needs to be reconnected before use."
%self._port_string)
while True:
try:
os.write(self._handle, string)
except OSError, e:
if e.errno == 5:
self._status = DISCONNECTED
raise DisconnectError("Port %s needs to be reconnected before use."
%self._port_string)
else:
self._status = PORT_OK
return
def write(self, command=''):
"""Write commands as UPPERCASE terminated with '\r' to the port."""
if not (command.endswith('\r') or command.endswith('\n')):
command += '\r'
self.raw_write(command.upper())
def raw_status(self):
"""Return the port's recent status, but don't perform a test."""
return self._status
def status(self):
"""Test and return port status without asserting exceptions."""
if self._status is DISCONNECTED:
return self._status
try:
os.write(self._handle, BEL)
except OSError, e:
if e.errno == 5:
self._status = DISCONNECTED
return DISCONNECTED
except Exception, e:
raise UnexpectedError('Unexpected error in status.\n'
'%s\nDetails: %s'
%(str(type(e)),str(e)))
else:
self._status = PORT_OK
return self._status
def reconnect(self):
"""Reconnect a port that had been DISCONNECTED, return status."""
if self._status is not DISCONNECTED:
raise OpenError("Port %s is not disconnected."%self._port_string)
try:
self._close()
self._handle = os.open(self._port_string, os.O_RDWR, 0)
except OSError, e:
if e.errno == 22 or e.errno == 2:
raise OpenError('Unable to reopen port %s.'%self._port_string)
raise e
except Exception, e:
raise UnexpectedError('Unexpected error in reconnect.\n'
'%s\nDetails: %s'
%(str(type(e)),str(e)))
else:
self._status = PORT_OK
self.purge()
return self._status
if __name__=='__main__':
    # Interactive hardware self-test: requires a connected Fascinating
    # Electronics USB device and a human at the console.
    try:
        print 'feusb_win32 - Fascinating Electronics USB comm port class.'
        # OPEN THE PORT
        while True:
            print '\nAvailable Ports\nSEL Comm Port\n--- ---------'
            ports = ['Quit'] + port_list()
            for i, v in enumerate(ports):
                print '%3d %s'%(i, v)
            try:
                sel = abs(int(raw_input('Select a comm port or 0 to Quit -->')))
                ports[sel]
            except Exception:
                print 'Acceptable values are 0 to %d.'%i
            else:
                if sel == 0:
                    exit()
                else:
                    print "Testing: Feusb('%s')"%ports[sel]
                    try:
                        dev = Feusb(ports[sel])
                    except OpenError, e:
                        sys.stderr.write(str(e)+'\n')
                    else:
                        break
        # RAW READ AND WRITE AND WAITING TESTS
        print "Testing: raw_write('u\\r')"
        dev.raw_write('u\r')
        print 'Testing: raw_waiting() and waiting()'
        while True:
            rw = dev.raw_waiting()
            w = dev.waiting()
            print 'raw_waiting() returned: %d'%rw
            print 'waiting() returned: %d'%w
            if w == 1:
                break
            print 'Sleeping for 1 mS.'
            time.sleep(.001)
        print 'Testing: raw_read()\nReply received:\n', dev.raw_read(),
        # NUMERIC READ FORMAT TESTS
        print "Testing: read('m1')"
        print 'Reply received: ', dev.read('m1')
        print "Testing: read('s1')"
        print 'Reply received: ', dev.read('s1')
        print "Testing: read('m')"
        print 'Reply received: ', dev.read('m')
        print "Testing: read('m1s1m', 3)"
        print 'Reply received:\n', dev.read('m1s1m', 3)
        print "Testing: read('s')"
        r = repr(dev.read('s'))
        print 'Reply received:'
        print r[:56]
        print r[56:112]
        print r[112:168]
        print r[168:]
        # SUSPEND/RESUME DURING RAW_READ
        print "Testing: raw_write, raw_waiting, raw_read, error_on_suspend."
        print "Sleep/resume computer to test for read errors."
        print "Disconnect device to end this test."
        NUMCMDS = 240
        # Capture one 's'-command reply to use as the comparison template.
        dev.raw_write('cs\r')
        while dev.waiting() < 1:
            time.sleep(0.001)
        comparison_string = dev.raw_read()
        comparison_length = len(comparison_string)
        print ("Each 'r' represents %d characters read."
               %(NUMCMDS*comparison_length))
        dev.error_on_suspend(True)
        keep_going = True
        while keep_going:
            while True:
                try:
                    dev.raw_write('s'*NUMCMDS+'\r')
                except SuspendError:
                    print ('SuspendError reported during raw_write(). '
                           'Sleeping 1 second.')
                    time.sleep(1.0)
                except DisconnectError:
                    print 'DisconnectError reported during raw_write().'
                    keep_going = False
                    break
                else:
                    print 'w',
                    sys.stdout.flush()
                    break
            read_tries = 0
            responses_read = 0
            while keep_going and responses_read < NUMCMDS:
                try:
                    num_of_characters = dev.raw_waiting()
                except SuspendError:
                    print ('SuspendError reported during raw_waiting(). '
                           'Sleeping 1 second.')
                    time.sleep(1.0)
                except DisconnectError:
                    print 'DisconnectError reported during raw_write().'
                    keep_going = False
                    break
                else:
                    read_tries += 1
                    if num_of_characters >= comparison_length:
                        read_tries = 0
                        try:
                            response = dev.raw_read(comparison_length)
                        except SuspendError:
                            print ('SuspendError during raw_read(). '
                                   'Sleeping 1 second.')
                            time.sleep(1.0)
                        else:
                            responses_read += 1
                            if response != comparison_string:
                                print "\nResponse does not match expected:"
                                print response
                                print "Purging remaining characters."
                                dev.purge()
                                break
                    if read_tries >= RETRY_LIMIT:
                        # Stalled: decide between disconnect, unresponsive
                        # port, or manual unplug/replug recovery.
                        print ('\n%d attempted reads without getting a full '
                               'response.'%RETRY_LIMIT)
                        time.sleep(0.500) #time for a disconnect to be detected
                        current_status = dev.status()
                        print 'dev.status() reports: %s'%current_status
                        if current_status is DISCONNECTED:
                            keep_going = False
                            break
                        else:
                            print ('%d responses read correctly so far.'
                                   %responses_read)
                            print ('Number of waiting characters: %d'
                                   %num_of_characters)
                            if num_of_characters > 0:
                                print 'Response at this time:'
                                print dev.raw_read()
                            print 'Port is probably unresponsive.'
                            ri = raw_input('Hit <enter> to exit, or any key '
                                           '<enter> to disconnect and reconnect ->')
                            if ri == '':
                                exit()
                            else:
                                print '*** Unplug the device ***'
                                old_stat = ''
                                stat = ''
                                while stat is not DISCONNECTED:
                                    stat = dev.status()
                                    if old_stat is not stat:
                                        print 'Device status is:', stat
                                        old_stat = stat
                                    time.sleep(0.050)
                                print '*** Plug in the device ***'
                                while True:
                                    try:
                                        dev.reconnect()
                                    except OpenError:
                                        time.sleep(0.050)
                                    else:
                                        break
                                print 'Device status is:', dev.status()
                                break
                    time.sleep(RETRY_INTERVAL)
            if responses_read == NUMCMDS:
                print 'r',
                sys.stdout.flush()
        dev.error_on_suspend(False)
        if dev.status() is not PORT_OK:
            print '*** Plug in the device ***'
            while True:
                try:
                    dev.reconnect()
                except OpenError:
                    time.sleep(0.100)
                else:
                    break
        # SUSPEND/RESUME DURING READ
        print "Testing: read (and consequently write)."
        print "Sleep/resume computer to test for read errors."
        print "Disconnect device to end this test."
        NUMCMDS = 240
        dev.raw_write('S\r')
        while dev.waiting() < 1:
            time.sleep(RETRY_INTERVAL)
        comp_len = dev.raw_waiting()
        comp = dev.read()
        print ("Each '*' represents %d characters and %d commands read."
               %(comp_len*NUMCMDS, NUMCMDS))
        while True:
            try:
                responses = dev.read('S'*NUMCMDS, NUMCMDS)
            except DisconnectError, e:
                print 'DisconnectError reported during read().'
                print 'Details:\n', e
                break
            except ReadTimeoutError, e:
                print 'ReadTimeoutError reported during read().'
                print 'Details:\n', e
                print '%d characters in input buffer.'%dev.raw_waiting()
                print '%d responses in input buffer.'%dev.waiting()
                print 'Purging port.'
                dev.purge()
                # test port status (could have timed out due to disconnect)
                print 'Testing port status.'
                status = dev.status()
                while status is not PORT_OK:
                    if status == DISCONNECTED:
                        print 'Port status is actually DISCONNECTED.'
                        break
                    elif status == SUSPENDED:
                        print 'Status is SUSPENDED. Sleeping 1 second.'
                        time.sleep(1.000)
                        status = dev.status()
                if status is DISCONNECTED:
                    break
                elif status is PORT_OK:
                    print 'Port status returns PORT_OK.'
                else:
                    print 'Port status error!'
                try: # test for port unresponsive
                    response = dev.read('U')
                except DisconnectError, e:
                    print 'DisconnectError reported testing responsiveness.'
                    print 'Details:\n', e
                    break
                except ReadTimeoutError, e:
                    print 'ReadTimeoutError reported testing responsiveness.'
                    print 'Details:\n', e
                    print 'Port is unresponsive.'
                    print '*** Unplug the device ***'
                    old_stat = ''
                    stat = ''
                    while stat is not DISCONNECTED:
                        stat = dev.status()
                        if old_stat is not stat:
                            print 'Device status is:', stat
                            old_stat = stat
                        time.sleep(0.050)
                    break
                else:
                    print 'Port checks out OK.'
            else:
                # Count how many of the NUMCMDS replies match the template.
                match = 0
                for response in responses:
                    if comp == response:
                        match += 1
                    else:
                        print 'Expected: %s Got: %s'%(repr(comp),repr(response))
                if match == NUMCMDS:
                    print '*',
                    sys.stdout.flush()
                else:
                    print '\n%d of %d match correctly.'%(match, NUMCMDS)
        print 'Reconnecting.'
        while True:
            try:
                dev.reconnect()
            except OpenError:
                time.sleep(0.100)
            else:
                break
        del(dev)
        raw_input("Tests complete. Hit <enter> to exit -->")
    except Exception, e:
        print "Unhandled main program exception!!!"
        print type(e)
        print e
        # NOTE(review): sys.exc_traceback is a deprecated Python 2 global.
        traceback.print_exc(sys.exc_traceback)
        raw_input("Hit enter to exit ->")
from .kb4 import KnowBe4
class Tracker():
    """
    Tracker Class

    Shared (class-level) game state: all counters are class attributes, so
    every part of the game reads and mutates the same totals.

    Attributes
    ----------
    gold : int
        Current gold amount.
    score : int
        Current game score.
    kills : int
        Number of kills recorded this run.
    easter_egg_found : bool
        Whether the one-time easter-egg reward was already granted.

    Class Methods
    -------------
    increment_gold(value) / decrement_gold(value)
        Adjust the gold amount.
    increment_score(value) / decrement_score(value)
        Adjust the game score.
    add_kill()
        Record one kill.
    trigger_easter_egg()
        Grant a one-time 100-gold reward.
    reset_trackers()
        Zero gold, score and kills (keeps the easter-egg flag).
    reset()
        Zero everything, including the easter-egg flag.
    """
    # All state is class-level by design (a single global tracker).
    gold = 0
    score = 0
    kills = 0
    easter_egg_found = False

    @classmethod
    def increment_gold(cls, value):
        cls.gold += value

    @classmethod
    def decrement_gold(cls, value):
        cls.gold -= value

    @classmethod
    def increment_score(cls, value):
        cls.score += value

    @classmethod
    def decrement_score(cls, value):
        cls.score -= value

    @classmethod
    def add_kill(cls):
        cls.kills += 1

    @classmethod
    def reset_trackers(cls):
        cls.gold = 0
        cls.score = 0
        cls.kills = 0

    @classmethod
    def trigger_easter_egg(cls):
        # Grant the reward at most once per game.
        if not cls.easter_egg_found:
            cls.easter_egg_found = True
            cls.increment_gold(100)

    @classmethod
    def reset(cls):
        # Full reset: delegate the counters, then clear the one-shot flag
        # (the original duplicated reset_trackers' body here).
        cls.reset_trackers()
        cls.easter_egg_found = False
# Usage: python export_weights.py foo.net foo.caffemodel out
# Will create layer files out1.h5 out2.h5 ...
import sys
import h5py
import caffe
import numpy as np
# Load the Caffe model in TEST phase and export each parameter blob
# (weights + biases, and their gradients when present) to out<N>.h5.
# NOTE(review): net.params.iteritems() is Python 2 only -- this script
# targets Python 2; use .items() if porting to Python 3.
net=caffe.Net(sys.argv[1], sys.argv[2], caffe.TEST)
print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys()))
out=sys.argv[3]
nout=1
for k,v in net.params.iteritems():
    fname = out + str(nout) + '.h5'
    f = h5py.File(fname, 'w')
    # f.create_dataset('w', data=v[0].data.squeeze().transpose())
    # f.create_dataset('b', data=v[1].data.squeeze(axis=(1,2)))
    # v[0] holds the layer weights, v[1] the biases.
    f.create_dataset('w', data=v[0].data)
    f.create_dataset('b', data=v[1].data)
    if np.any(v[0].diff != 0):
        # f.create_dataset('dw', data=v[0].diff.squeeze().transpose())
        # f.create_dataset('db', data=v[1].diff.squeeze(axis=(1,2)))
        # Gradients are only exported when a backward pass populated them.
        f.create_dataset('dw', data=v[0].diff)
        f.create_dataset('db', data=v[1].diff)
    # Activation-function attributes consumed by the downstream tooling.
    if nout == 1:
        f.attrs['f'] = np.string_('relu') # for julia
        f.attrs['xfunc'] = np.int32(0) # for cuda
        f.attrs['yfunc'] = np.int32(1)
    else:
        f.attrs['xfunc'] = np.int32(0)
        f.attrs['yfunc'] = np.int32(2)
    f.close()
    nout += 1
import pytest
from recipes.tests.share import create_recipes
# Every test in this module needs database access.
pytestmark = [pytest.mark.django_db]

# Endpoint under test.
URL = '/api/recipes/'
# Keys of DRF's paginated response envelope, in order.
PAGINATION_PARAMS = ('count', 'next', 'previous', 'results')
# Fields expected on each serialised recipe.
RECIPE_PARAMS = (
    'id',
    'tags',
    'author',
    'ingredients',
    'is_favorited',
    'is_in_shopping_cart',
    'name',
    'image',
    'text',
    'cooking_time',
)
def test_ok(as_anon, as_user, ingredients, tags):
    """Anonymous recipe listing returns the paginated envelope with fully
    serialised recipe objects."""
    create_recipes(as_user, ingredients, tags)
    got = as_anon.get(URL)
    assert tuple(got.keys()) == PAGINATION_PARAMS
    results = got['results']
    assert len(results) == 2
    first = results[0]
    assert len(first) == len(RECIPE_PARAMS)
    assert set(first) <= set(RECIPE_PARAMS)
| StarcoderdataPython |
1976770 | <gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Estimators."""
from gammapy.utils.registry import Registry
from .core import *
from .map import *
from .points import *
from .profile import *
# Central registry so estimators can be looked up by their class/tag from
# configuration-driven code.
ESTIMATOR_REGISTRY = Registry(
    [
        ExcessMapEstimator,
        TSMapEstimator,
        FluxPointsEstimator,
        ASmoothMapEstimator,
        LightCurveEstimator,
        SensitivityEstimator,
        FluxProfileEstimator,
    ]
)
"""Registry of estimator classes in Gammapy."""
__all__ = [
    "ESTIMATOR_REGISTRY",
    "FluxPoints",
    "FluxMaps",
    "Estimator",
]
# Every registered estimator class is also part of the public API.
__all__.extend(cls.__name__ for cls in ESTIMATOR_REGISTRY)
| StarcoderdataPython |
5062642 | from hexagonal import app, db
from hexagonal.model.user import User
from hexagonal.auth import register_account
import json
# Flask test mode (exceptions propagate) plus a module-wide test client used
# by all helpers below.
app.testing = True
test_client = app.test_client()
def root_login():
    """Authenticate as the built-in root account via the JSON-RPC API."""
    credentials = ['root', 'toor']
    return call('auth.login', credentials)
def get_login_pair():
    """Return a fresh, unique (login, password) pair for test accounts.

    A counter stored on the function itself guarantees uniqueness across
    calls within one process.
    """
    get_login_pair.cnt += 1
    n = get_login_pair.cnt
    return 'testLogin_{}'.format(n), 'testPassword_{}'.format(n)


get_login_pair.cnt = 0
def call(method, params, token=None):
    """POST a JSON-RPC 2.0 request to the API and return its 'result'.

    Sends *token* as the Authorization header when given; raises on any
    JSON-RPC error response.
    """
    headers = {} if token is None else {'Authorization': token}
    payload = json.dumps({
        'jsonrpc': '2.0',
        'method': method,
        'params': params
    })
    raw = test_client.post('/api/v1/rpc', data=payload,
                           content_type='application/json', headers=headers)
    result = json.loads(raw.data.decode('utf-8'))
    if 'error' in result:
        raise Exception(result)
    return result['result']
def reload_db():
    """Drop and recreate all tables, then ensure the root account exists.

    Called once at import time (see the call below) so tests start from a
    clean database containing only the root user.
    """
    # Flush pending changes first so drop_all is not blocked by an open txn.
    db.session.commit()
    db.drop_all()
    db.create_all()
    root = User.query.filter_by(login=app.config['ROOT_LOGIN']).first()
    if root is None:
        # NOTE(review): the config key '<PASSWORD>' looks like a redacted
        # placeholder — confirm the real key (likely ROOT_PASSWORD).
        root = register_account(
            login=app.config['ROOT_LOGIN'],
            password=app.config['<PASSWORD>'],
            role=app.config['ROOT_ROLE'],
            name='Root Root',
            address='Centaurus Constellation, Alpha Star System, Third Planet',
            phone='+1 (800) I-AM-ROOT',
            card_number='-10000'
        )
        db.session.commit()


reload_db()
def create_instance(cls, **fields):
    """Persist a new *cls* row and return a freshly re-queried copy of it."""
    obj = cls(**fields)
    db.session.add(obj)
    db.session.commit()
    # Re-query so the caller gets an instance loaded from the database.
    return cls.query.filter(cls.id == obj.id).first()
def register_test_account(role, **kwargs):
    """Create and return a test account with the given *role*.

    Any register_account field may be overridden via **kwargs; login and
    password default to a fresh unique pair from get_login_pair().
    """
    # Fix: these two assignments were unparseable redaction placeholders
    # ('<PASSWORD>'); the surrounding `if ... is None` logic shows they were
    # meant to start as None and fall back to the generated pair.
    login = kwargs.get('login')
    password = kwargs.get('password')
    # Always consume a pair so the uniqueness counter advances consistently.
    t_login, t_password = get_login_pair()
    if login is None:
        login = t_login
    if password is None:
        password = t_password
    args = {
        'login': login,
        'password': password,
        'name': login,
        'phone': '123',
        'address': '123',
        'card_number': 123,
        'role': role
    }
    # Caller-supplied fields win over the defaults above.
    args.update(kwargs)
    return register_account(**args)
| StarcoderdataPython |
3554970 | <gh_stars>10-100
import os
import toml
from app import CONFIG

# Cross-check: every Poetry dependency must also be pinned in requirements.txt.
poetry_config = toml.load(f'{CONFIG.PROJECT_PATH}{os.path.sep}pyproject.toml')
with open(f'{CONFIG.PROJECT_PATH}{os.path.sep}requirements.txt') as f_stream:
    requirements = f_stream.readlines()

# Merge main and dev dependencies, normalising names to lowercase.
merged = dict(poetry_config['tool']['poetry']['dependencies'])
merged.update(poetry_config['tool']['poetry']['dev-dependencies'])
all_dependencies = {name.lower(): spec for name, spec in merged.items()}

# requirements.txt lines look like "name==version".
pinned_pairs = (line.replace('\n', '').split('==') for line in requirements)
requirements_dict = {name.lower(): ver for name, ver in pinned_pairs}

missing_deps = [
    dependency
    for dependency in all_dependencies if dependency not in requirements_dict
]
# These two are expected to be absent from the pip freeze.
missing_deps = [name for name in missing_deps if name not in {'python', 'toml'}]
if missing_deps:
    raise RuntimeError(f'Missing dependencies in pip freeze {missing_deps}')
| StarcoderdataPython |
3284082 | #<NAME> 11/04/18
#Iris Data Set Project
#Attempts to split up, summarise and plot data set
#Uses less code than previous work on the data set and generates histograms with labelled axes and titles
import pandas as pd  # pandas module imported
import numpy as np  # numpy module imported
import matplotlib.pyplot as plt  # matplotlib module imported

# Standard iris.csv column order: sepal measurements first, then petal.
COLUMN_NAMES = ["sepal length", "sepal width", "petal length", "petal width", "species"]
iris = pd.read_csv("data/iris.csv", names=COLUMN_NAMES)  # import dataset as panda

# Fix: the numeric matrix was referenced everywhere as the undefined name
# "irisdata", which crashed the script with a NameError on first use.
irisdata = iris.iloc[:, 0:4].values

# The file stores 50 rows per species, in this order.
SPECIES = (("Setosa", 0), ("Versicolor", 50), ("Virginica", 100))
# Feature labels matching the CSV column order.  (The original comments,
# prints and plot titles called column 0 "petal length" even though the file
# stores sepal length first — labels now match the data.)
FEATURES = ("Sepal Length", "Sepal Width", "Petal Length", "Petal Width")

blocks = {}   # species -> (50, 4) measurement array
columns = {}  # (species, feature) -> (50, 1) single-feature column

for species, start in SPECIES:
    block = irisdata[start:start + 50][:, 0:4]
    blocks[species] = block
    for idx, feature in enumerate(FEATURES):
        columns[(species, feature)] = block[:, idx:idx + 1]
        print('Mean of {} {} is'.format(species, feature), np.mean(block[:, idx]))

# Fix: this assignment was a syntax error (np.array subscripted with
# unbalanced parentheses); it now builds the intended 4-vector of means.
Setosa_means = np.array([np.mean(columns[("Setosa", f)]) for f in FEATURES])

print('Setosa:\n', blocks["Setosa"])
print('Versicolor:\n', blocks["Versicolor"])
print('Virginica:\n', blocks["Virginica"])
print(iris.describe())  # Overall summary: count, mean, sd, min, %, max

# Line plots: one figure per species, one subplot per feature.
for fig_no, (species, _) in enumerate(SPECIES, start=1):
    fig = plt.figure(fig_no)
    for idx, feature in enumerate(FEATURES):
        ax = fig.add_subplot(2, 2, idx + 1)
        ax.set(title='{} {}'.format(species, feature))
        ax.plot(columns[(species, feature)])
plt.show()

# Histograms with labelled axes and titles, one window per plot.  (This also
# fixes the final histogram, which plotted sepal *length* data under a sepal
# *width* title.)
for species, _ in SPECIES:
    for feature in FEATURES:
        plt.title('{} {}'.format(species, feature))
        plt.xlabel('size (cm)')
        plt.ylabel('count')
        plt.hist(columns[(species, feature)])
        plt.show()
| StarcoderdataPython |
6675780 | from importlib import import_module
from django.contrib.auth import SESSION_KEY, BACKEND_SESSION_KEY, HASH_SESSION_KEY
def force_login(user, driver, base_url):
    """Log *user* into a Selenium session without using the login form.

    Builds a Django session server-side and injects its session cookie into
    the browser, then refreshes so the page loads authenticated.
    """
    from django.conf import settings
    engine = import_module(settings.SESSION_ENGINE)
    # A page on the right domain must be open before a cookie can be set.
    start_page = getattr(settings, 'SELENIUM_LOGIN_START_PAGE', '/page_404/')
    driver.get('{}{}'.format(base_url, start_page))

    session = engine.SessionStore()
    session[SESSION_KEY] = user.id
    session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
    session[HASH_SESSION_KEY] = user.get_session_auth_hash()
    session.save()

    # Strip scheme, path and port to get the bare host for the cookie domain.
    host = base_url.rpartition('://')[2].split('/')[0].split(':')[0]
    driver.add_cookie({
        'name': settings.SESSION_COOKIE_NAME,
        'value': session.session_key,
        'path': '/',
        'domain': host,
    })
    driver.refresh()
| StarcoderdataPython |
1616220 | <filename>black ping flood.py
#!/usr/bin/python
# Simple Network Intrussion Prevention System
# Sample Case: Ping Flooding
#
# Module Requirement: python-pcapy
# testing on Ubuntu
#
# coded by: 5ynL0rd
import pcapy
import re
import binascii
import os
import json
from datetime import datetime
class VoidSniff:
    """Minimal intrusion-prevention sniffer (Python 2).

    Captures packets matching a BPF filter on all interfaces; for the 'icmp'
    filter it logs arrivals to dump.json and, once 50 pings land within one
    second from a not-yet-blocked source, blocks that source via iptables.
    """
    def __init__(self, pcap_filter):
        # Capture on every interface, promiscuous, 100 ms poll timeout.
        self.device = "any"
        self.snaplen = 2048
        self.promisc = 1
        self.to_ms = 100
        self.pcap_filter = pcap_filter
        # -1 means pcapy.loop() runs until interrupted.
        self.max_pkts = -1
        self.p = pcapy.open_live(self.device, self.snaplen, self.promisc, self.to_ms)
    def packethandler(self, hdr, data):
        """Per-packet callback: parse addresses from the hex dump, track ICMP
        arrival times and block flooding sources."""
        byte = len(data)          # frame length (currently unused)
        timestamp = datetime.now()  # local arrival time (currently unused)
        # Hex dump of the raw frame; fields below are sliced out of it.
        contain = binascii.b2a_hex(data)
        # NOTE(review): these fixed hex offsets (56..80) presumably match the
        # Linux "any" cooked-capture header plus the IP header — confirm on
        # the target platform; they are not standard Ethernet offsets.
        src_ip = '%s.%s.%s.%s' %(int('0x'+contain[56:58], 0),int('0x'+contain[58:60], 0),int('0x'+contain[60:62], 0),int('0x'+contain[62:64], 0))
        dst_ip = '%s.%s.%s.%s' %(int('0x'+contain[64:66], 0),int('0x'+contain[66:68], 0),int('0x'+contain[68:70], 0),int('0x'+contain[70:72], 0))
        src_port = str(int('0x'+contain[72:76], 0))
        dst_port = str(int('0x'+contain[76:80], 0))
        # PING Flooding Detection
        if self.pcap_filter == 'icmp':
            # One record per ICMP packet with a sortable UTC timestamp.
            data = [{'ip': src_ip,
                     'timestamp': '%s-%s-%s-%s-%s-%s-%s' % (datetime.utcnow().year,
                                                            datetime.utcnow().month,
                                                            datetime.utcnow().day,
                                                            datetime.utcnow().hour,
                                                            datetime.utcnow().minute,
                                                            datetime.utcnow().second,
                                                            datetime.utcnow().microsecond
                                                            ),
                     }]
            data = json.dumps(data)
            # Append the record to dump.json, creating the file on first use.
            try:
                data_prev = open('dump.json', 'r').read()
            except Exception, err:
                open('dump.json', 'w').write(data)
            else:
                data_prev = json.loads(data_prev)
                data = json.loads(data)
                data = data_prev + data
                open('dump.json', 'w').write(json.dumps(data))
            # Load the list of already-blocked IPs (empty if no file yet).
            try:
                blacklist = open('blacklist.json', 'r').read()
            except Exception, err:
                blacklist = []
            else:
                blacklist = json.loads(blacklist)
            data_from_json = open('dump.json','r').read()
            data_from_json = json.loads(data_from_json)
            # Flood check: 50+ logged pings and the source is not blocked yet.
            if len(data_from_json) >= 50 and {'ip': src_ip} not in blacklist:
                first = data[0]['timestamp']
                # Elapsed time since the oldest logged ping.
                delta = datetime.utcnow() - datetime(int(first.split('-')[0]),
                                                     int(first.split('-')[1]),
                                                     int(first.split('-')[2]),
                                                     int(first.split('-')[3]),
                                                     int(first.split('-')[4]),
                                                     int(first.split('-')[5]),
                                                     int(first.split('-')[6]))
                # All 50 pings within the same second => flood.
                if delta.seconds == 0:
                    print '[!] ALERT! PING FLOODING FROM: %s' % src_ip
                    b_data = json.dumps([{'ip':src_ip}])
                    # Merge with any existing blacklist before rewriting it.
                    try:
                        b_data_prev = open('blacklist.json', 'r').read()
                    except Exception:
                        pass
                    else:
                        b_data = json.loads(b_data_prev) + json.loads(b_data)
                        b_data = json.dumps(b_data)
                    open('blacklist.json', 'w').write(b_data)
                    # Drop further ICMP from this source at the firewall.
                    os.system('iptables -A FORWARD -s %s -p icmp -j DROP' % src_ip)
                    os.system('iptables -A OUTPUT -s %s -p icmp -j DROP' % src_ip)
                    print '[!] IP %s Blocked!' % src_ip
                    # Restart the arrival log for the next detection window.
                    os.remove('dump.json')
    def run(self):
        """Apply the BPF filter and block forever dispatching packets."""
        self.p.setfilter(self.pcap_filter)
        self.p.loop(self.max_pkts, self.packethandler)
if __name__ == '__main__':
    # Capture ICMP only; runs until interrupted.
    icmp_sniff = VoidSniff('icmp')
    icmp_sniff.run()
| StarcoderdataPython |
4931393 | <reponame>xymy/gethash
from typing import Any
__all__ = [
"_check_int",
"_check_int_opt",
"_check_float",
"_check_float_opt",
"_check_str",
"_check_str_opt",
"_check_bytes",
"_check_bytes_opt",
"_check_bytes_w",
"_check_bytes_w_opt",
"_is_writable_memoryview",
]
def _check_int(obj: object, name: str) -> None:
if not isinstance(obj, int):
t = type(obj).__name__
raise TypeError(f"{name} must be int, not {t}")
def _check_int_opt(obj: object, name: str, default: object = None) -> Any:
if obj is None:
return default
if not isinstance(obj, int):
t = type(obj).__name__
raise TypeError(f"{name} must be int or None, not {t}")
return obj
def _check_float(obj: object, name: str) -> None:
if not isinstance(obj, float):
t = type(obj).__name__
raise TypeError(f"{name} must be float, not {t}")
def _check_float_opt(obj: object, name: str, default: object = None) -> Any:
if obj is None:
return default
if not isinstance(obj, float):
t = type(obj).__name__
raise TypeError(f"{name} must be float or None, not {t}")
return obj
def _check_str(obj: object, name: str) -> None:
if not isinstance(obj, str):
t = type(obj).__name__
raise TypeError(f"{name} must be str, not {t}")
def _check_str_opt(obj: object, name: str, default: object = None) -> Any:
if obj is None:
return default
if not isinstance(obj, str):
t = type(obj).__name__
raise TypeError(f"{name} must be str or None, not {t}")
return obj
def _check_bytes(obj: object, name: str) -> None:
if not isinstance(obj, (bytes, bytearray, memoryview)):
t = type(obj).__name__
raise TypeError(f"{name} must be bytes-like, not {t}")
def _check_bytes_opt(obj: object, name: str, default: object = None) -> Any:
if obj is None:
return default
if not isinstance(obj, (bytes, bytearray, memoryview)):
t = type(obj).__name__
raise TypeError(f"{name} must be bytes-like or None, not {t}")
return obj
def _check_bytes_w(obj: object, name: str) -> None:
if not (isinstance(obj, bytearray) or _is_writable_memoryview(obj)):
t = type(obj).__name__
raise TypeError(f"{name} must be writable bytes-like, not {t}")
def _check_bytes_w_opt(obj: object, name: str, default: object = None) -> Any:
if obj is None:
return default
if not (isinstance(obj, bytearray) or _is_writable_memoryview(obj)):
t = type(obj).__name__
raise TypeError(f"{name} must be writable bytes-like or None, not {t}")
return obj
def _is_writable_memoryview(obj):
return isinstance(obj, memoryview) and not obj.readonly
| StarcoderdataPython |
3228608 | <reponame>felipecerinzasick/blog
import datetime
import time
from facebook_business.api import FacebookAdsApi
from facebook_business.adobjects.adaccountuser import AdAccountUser
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.adsinsights import AdsInsights
from facebook_business.exceptions import FacebookRequestError
from collections import Counter
class ApiParser:
    """Thin wrapper around the Facebook Marketing API.

    Lists the token owner's ad accounts and aggregates ad-level insight
    metrics over a date range into ``self.final_data``.
    """

    # Insight fields requested from the API.
    fields = [
        AdsInsights.Field.account_currency,
        AdsInsights.Field.account_id,
        AdsInsights.Field.campaign_id,
        AdsInsights.Field.spend,
        AdsInsights.Field.clicks,
        AdsInsights.Field.impressions,
        AdsInsights.Field.unique_clicks,
    ]
    # Numeric fields merged (summed per campaign) across rows; the remaining
    # fields are copied from the first row, since they are constant.
    required_merged_field_list = [
        AdsInsights.Field.spend,
        AdsInsights.Field.clicks,
        AdsInsights.Field.impressions,
        AdsInsights.Field.unique_clicks,
    ]

    def __init__(self, token, api_version='v5.0'):
        """Initialise the Facebook Ads SDK with the given access token."""
        self.token = token
        self.api_version = api_version
        # Fix: final_data used to be a *class* attribute, so every ApiParser
        # instance shared — and kept accumulating into — a single dict.
        # It is now per-instance state.
        self.final_data = {}
        FacebookAdsApi.init(access_token=self.token, api_version=self.api_version)

    def build_params(self, from_time, to_time):
        """Build the insights query params for a [from_time, to_time] range.

        Accepts either two 'YYYY-MM-DD' strings or two datetimes; returns an
        empty dict for any other combination.
        """
        if isinstance(from_time, str) and isinstance(to_time, str):
            from_time_str, to_time_str = from_time, to_time
        elif isinstance(from_time, datetime.datetime) and isinstance(to_time, datetime.datetime):
            time_format = '%Y-%m-%d'
            from_time_str, to_time_str = from_time.strftime(time_format), to_time.strftime(time_format)
        else:
            return {}
        return {
            'time_range': {
                'since': from_time_str,
                'until': to_time_str,
            },
            'breakdowns': [],
            'level': 'ad',
            'time_increment': 1
        }

    def get_all_ad_accounts(self):
        """Return [{'id': 'act_...', 'account_id': '...'}] for the token's
        user, or [] when the API call fails."""
        try:
            me = AdAccountUser(fbid='me')
            ad_acc = me.get_ad_accounts()
            ad_acc_list = []
            for item_ad_acc in ad_acc._queue:
                ad_acc_list.append({
                    "id": item_ad_acc.get("id"),
                    "account_id": item_ad_acc.get("account_id"),
                })
            return ad_acc_list
        except FacebookRequestError as er:
            print(er)
            return []

    def get_ads_insight(self, account_id, from_time, to_time):
        """Fetch ad-level insights for *account_id* over the date range and
        return the merged totals dict (also stored on self.final_data).

        Summable fields are aggregated via add_value_by_key; the remaining
        fields are taken from the first returned row, as they hold the same
        value on every row of one account.
        """
        params = self.build_params(from_time, to_time)
        the_ad_account = AdAccount(account_id)
        try:
            # Run the report asynchronously and poll until it completes.
            async_job = the_ad_account.get_insights_async(fields=self.fields, params=params)
            status = async_job.remote_read()
            while status['async_percent_completion'] < 100:
                time.sleep(1)
                status = async_job.remote_read()
            result = async_job.get_result()
            insight_data_list = []
            for item_ad_insight in result._queue:
                temp_dict = {}
                for _key in self.fields:
                    try:
                        temp_dict.update({_key: item_ad_insight.__getitem__(_key), })
                    except KeyError:
                        # Field missing on this row: keep an empty placeholder.
                        temp_dict.update({_key: '', })
                if temp_dict:
                    insight_data_list.append(temp_dict)
            for k in self.required_merged_field_list:
                self.final_data.update({k: self.add_value_by_key(k, insight_data_list)})
            if len(insight_data_list) > 0:
                remaining_fields_set = set(self.fields) ^ set(self.required_merged_field_list)
                for f in remaining_fields_set:
                    # remaining fields have the same value on every row, so
                    # only the first row is consulted
                    self.final_data.update({f: insight_data_list[0].get(f, '')})
        except FacebookRequestError:
            # Best-effort: on API failure return whatever has been merged so far.
            pass
        return self.final_data

    @staticmethod
    def add_value_by_key(_key, _data):
        """Sum float(row[_key]) per campaign_id and return the largest
        per-campaign total, as int when it is a whole number; 0 if no rows."""
        _sum = sum((Counter({el['campaign_id']: float(el[_key])}) for el in _data), Counter())
        for x in _sum.most_common():
            try:
                # x is a (campaign_id, total) tuple; most_common() sorts by
                # total descending, so the first hit is the maximum.
                # NOTE(review): tuple indexing raises IndexError, never
                # KeyError, so the except below is effectively dead code.
                val = x[1]
                return int(val) if val.is_integer() else val
            except KeyError:
                pass
        return 0
| StarcoderdataPython |
12835592 | <reponame>emilwareus/smaland
import os
import pyotp
def totp(secret):
    """Return the current time-based one-time password for *secret*."""
    return pyotp.TOTP(secret).now()
| StarcoderdataPython |
11305881 | from django.db import models
from django.utils.translation import gettext_lazy as _
from trade_system.users.models import User
from trade_system.items.models import Item
from trade_system.offers.choises import OrderType
class Offer(models.Model):
    """Request to buy or sell specific stocks"""
    # Owner of the offer; kept (as NULL) when the user account is deleted.
    user = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)
    # Traded item; nullable for the same reason as user.
    item = models.ForeignKey(Item, blank=True, null=True, on_delete=models.SET_NULL)
    entry_quantity = models.IntegerField(_("Requested quantity"))
    quantity = models.IntegerField(_("Current quantity"))
    # NOTE(review): Django expects `choices` to be an iterable of
    # (value, label) pairs — confirm OrderType provides that shape.
    order_type = models.PositiveSmallIntegerField(choices=OrderType)
    price = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
    is_active = models.BooleanField(default=True)
    def __str__(self):
        # NOTE(review): `self.item.currency` raises AttributeError when item
        # is NULL (the FK allows it) — confirm offers are never rendered
        # after their item is removed.
        return 'Offer {} {} {} {} {} {}'.format(self.user,
                                                self.item,
                                                self.quantity,
                                                self.price,
                                                self.item.currency,
                                                self.order_type)
1864520 | """Contains a Reader class that can read values from Modbus TCP servers.
Uses the following settings from the main settings.py file:
LOGGER_ID: Used to create the final sensor_id for each value read from the Modbus server.
MODBUS_TARGETS: Lists the Modbus Servers, Devices, and Registers that will be read.
See further documentation of these settings in the system_files/settings_template.py file.
Note: Each Modbus "sensor" will generate one Modbus read, so consider this when setting the
READ_INTERVAL setting in the settings file.
"""
import time
import struct
import logging
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
from . import base_reader
class ModbusTCPreader(base_reader.Reader):
    """Reader that polls sensor values from Modbus TCP servers.

    Walks settings.MODBUS_TARGETS — a list of (device_info, sensors) pairs —
    issuing one Modbus read per configured sensor and returning
    (timestamp, sensor_id, value, reading_type) tuples.
    """

    def read(self):
        """Read every configured sensor; any per-device or per-sensor error
        is logged and skipped so one failure never aborts the whole pass."""
        # list to hold final readings
        readings = []
        for device_info, sensors in self._settings.MODBUS_TARGETS:
            # use the same timestamp for all of the sensors on this device
            ts = time.time()
            try:
                # device_info is (host, port) or (host, port, kwargs).
                try:
                    host, port, kwargs = device_info
                except:
                    host, port = device_info
                    kwargs = {}
                device_addr = kwargs.get('device_addr', 1)
                endian = kwargs.get('endian', 'big')
                if endian not in ('big', 'little'):
                    raise ValueError(f'Improper endian value for Modbus device {device_info}')
                with ModbusClient(host=host, port=port) as client:
                    for sensor_info in sensors:
                        try:
                            # sensor_info is (register, name) or (register, name, kwargs).
                            try:
                                register, sensor_name, kwargs = sensor_info
                            except:
                                register, sensor_name = sensor_info
                                kwargs = {}
                            datatype = kwargs.get('datatype', 'uint16')
                            transform = kwargs.get('transform', None)
                            register_type = kwargs.get('register_type', 'holding')
                            reading_type = kwargs.get('reading_type', 'value')
                            # determine number of registers to read and the correct struct
                            # unpacking code based upon the data type for this sensor.
                            try:
                                reg_count, unpack_fmt = {
                                    'uint16': (1, 'H'),
                                    'int16': (1, 'h'),
                                    'uint32': (2, 'I'),
                                    'int32': (2, 'i'),
                                    'float': (2, 'f'),
                                    'float32': (2, 'f'),
                                    'double': (4, 'd'),
                                    'float64': (4, 'd'),
                                }[datatype]
                            except:
                                logging.exception(f'Invalid Modbus Datatype: {datatype} for Sensor {sensor_info}')
                                continue
                            # Determine the correct function to use for reading the values
                            try:
                                read_func = {
                                    'holding': client.read_holding_registers,
                                    'input': client.read_input_registers,
                                    'coil': client.read_coils,
                                    'discrete': client.read_discrete_inputs
                                }[register_type]
                            except:
                                logging.exception(f'Invalid Modbus register type for Sensor {sensor_info}')
                                continue
                            try:
                                reading_type_code = {
                                    'value': base_reader.VALUE,
                                    'state': base_reader.STATE,
                                    'counter': base_reader.COUNTER
                                }[reading_type]
                            except:
                                logging.exception(f'Invalid Reading Type for Sensor {sensor_info}')
                                continue
                            result = read_func(register, reg_count, unit=device_addr)
                            # NOTE(review): pymodbus coil/discrete responses
                            # expose .bits rather than .registers — confirm
                            # those register types work, or they always raise
                            # here.
                            if not hasattr(result, 'registers'):
                                raise ValueError(f'An error occurred while reading Sensor {sensor_info} from Modbus Device {device_info}')
                            # make an array of register values with least-significant value first
                            registers = result.registers
                            # calculate the integer equivalent of the registers read
                            if endian == 'big':
                                registers = reversed(registers)
                            val = 0
                            mult = 1
                            for reg in registers:
                                val += reg * mult
                                mult *= 2**16
                            # Use the struct module to convert this number into the appropriate data type.
                            # First, create a byte array that encodes this unsigned number according to
                            # how many words it contains.
                            reg_count_to_pack_fmt = {
                                1: 'H',
                                2: 'I',
                                4: 'Q'
                            }
                            pack_fmt = reg_count_to_pack_fmt[reg_count]
                            packed_bytes = struct.pack(pack_fmt, val)
                            # unpack bytes to convert to correct datatype
                            val = struct.unpack(unpack_fmt, packed_bytes)[0]
                            if transform:
                                # 'transform' is a Python expression evaluated
                                # with the raw value bound to 'val' (trusted
                                # configuration only — this is eval()).
                                val = eval(transform)
                            sensor_id = f'{self._settings.LOGGER_ID}_{sensor_name}'
                            readings.append( (ts, sensor_id, val, reading_type_code) )
                        except Exception as err:
                            logging.exception(str(err))
                            continue  # on to next sensor
            except Exception as err:
                logging.exception(str(err))
                continue  # on to next device
        return readings
| StarcoderdataPython |
47031 | <gh_stars>0
import datetime as dt
import json
from typing import Dict, Optional, cast
import dateutil.parser
import pytest
from packaging import version
from great_expectations.data_context.util import file_relative_path
@pytest.fixture
def release_file() -> str:
    """Absolute path to the release-schedule JSON checked into .github."""
    return file_relative_path(__file__, "../.github/release_schedule.json")
@pytest.fixture
def release_schedule(release_file: str) -> Dict[dt.datetime, version.Version]:
    """Parse the schedule file into datetime -> Version, preserving file order."""
    with open(release_file) as f:
        raw: Dict[str, str] = json.loads(f.read())
    return {
        dateutil.parser.parse(date): cast(version.Version, version.parse(release_version))
        for date, release_version in raw.items()
    }
def test_release_schedule_adheres_to_schema(
    release_schedule: Dict[dt.datetime, version.Version]
) -> None:
    """Dates strictly increase and fall on Thursdays, version numbers bump
    sanely, and at least one scheduled release lies in the future."""
    today: dt.datetime = dt.datetime.today()

    prev_date: Optional[dt.datetime] = None
    prev_version: Optional[version.Version] = None
    for date, release_version in release_schedule.items():
        if prev_date is not None and prev_version is not None:
            # Strictly chronological schedule.
            assert date > prev_date
            # Each release occurs on a Thursday (weekday index 3).
            assert date.weekday() == 3

            minor_step = release_version.minor - prev_version.minor
            if minor_step:
                # Minor bump resets the patch number (ex: 0.15.7 -> 0.16.0).
                assert minor_step == 1 and release_version.micro == 0
            else:
                # Otherwise the patch advances by exactly one (ex: 0.15.7 -> 0.15.8).
                assert release_version.micro - prev_version.micro == 1

        prev_date = date
        prev_version = release_version

    # For release safety, there must always be future items in the scheduler.
    assert sum(1 for date in release_schedule if date > today) > 0
| StarcoderdataPython |
9708121 | import numpy as np
from topic_model_diversity.rbo import rbo
from scipy.spatial import distance
from itertools import combinations
from topic_model_diversity.word_embeddings_rbo import word_embeddings_rbo
def proportion_unique_words(topics, topk=10):
    """
    Compute the proportion of unique words across the top-k words of all topics.

    Parameters
    ----------
    topics: a list of lists of words
    topk: top k words on which the topic diversity will be computed

    Returns
    -------
    puw : float in (0, 1]; 1.0 means no word is shared between any two topics.

    Raises
    ------
    ValueError : if the first topic has fewer than *topk* words.
    """
    if topk > len(topics[0]):
        # ValueError (a subclass of the bare Exception raised before) so
        # callers can catch the failure precisely.
        raise ValueError('Words in topics are less than ' + str(topk))
    unique_words = set()
    for topic in topics:
        # In-place update avoids rebuilding the set on every iteration.
        unique_words.update(topic[:topk])
    return len(unique_words) / (topk * len(topics))
def irbo(topics, weight=0.9, topk=10):
    """
    Compute the inverted rank-biased overlap across all topic pairs.

    Parameters
    ----------
    topics: a list of lists of words
    weight: p (float), default 1.0: Weight of each agreement at depth d:
        p**(d-1). When set to 1.0, there is no weight and rbo reduces to
        average overlap.
    topk: top k words on which the topic diversity will be computed

    Returns
    -------
    irbo : 1 minus the mean rank-biased overlap over all topic pairs
        (higher means more diverse topics).

    Raises
    ------
    ValueError : if the first topic has fewer than *topk* words.
    """
    if topk > len(topics[0]):
        # ValueError instead of bare Exception; backward compatible for
        # callers catching Exception.
        raise ValueError('Words in topics are less than topk')
    collect = []
    for list1, list2 in combinations(topics, 2):
        # rbo operates on integer ranks, so map words to shared indices.
        word2index = get_word2index(list1, list2)
        indexed_list1 = [word2index[word] for word in list1]
        indexed_list2 = [word2index[word] for word in list2]
        rbo_val = rbo(indexed_list1[:topk], indexed_list2[:topk], p=weight)[2]
        collect.append(rbo_val)
    return 1 - np.mean(collect)
def word_embedding_irbo(topics, word_embedding_model, weight=0.9, topk=10):
    """
    Compute the word embedding-based inverted rank-biased overlap.

    Parameters
    ----------
    topics: a list of lists of words
    word_embedding_model: word2vec-style model used for word similarities
    weight: p (float), default 1.0: Weight of each agreement at depth d:
        p**(d-1). When set to 1.0, there is no weight and rbo reduces to
        average overlap.
    topk: top k words on which the topic diversity will be computed

    Returns
    -------
    weirbo : 1 minus the mean word-embedding rank-biased overlap over all
        topic pairs.

    Raises
    ------
    ValueError : if the first topic has fewer than *topk* words.
    """
    if topk > len(topics[0]):
        # ValueError instead of bare Exception; backward compatible for
        # callers catching Exception.
        raise ValueError('Words in topics are less than topk')
    collect = []
    for list1, list2 in combinations(topics, 2):
        word2index = get_word2index(list1, list2)
        # The embedding-aware rbo needs the reverse mapping to fetch vectors.
        index2word = {v: k for k, v in word2index.items()}
        indexed_list1 = [word2index[word] for word in list1]
        indexed_list2 = [word2index[word] for word in list2]
        rbo_val = word_embeddings_rbo(indexed_list1[:topk], indexed_list2[:topk], p=weight,
                                      index2word=index2word, word2vec=word_embedding_model)[2]
        collect.append(rbo_val)
    return 1 - np.mean(collect)
def pairwise_jaccard_diversity(topics, topk=10):
    '''
    compute the average pairwise jaccard distance between the topics

    Parameters
    ----------
    topics: a list of lists of words
    topk: top k words on which the topic diversity
          will be computed

    Returns
    -------
    pjd: average pairwise jaccard distance
    '''
    dist = 0
    count = 0
    for list1, list2 in combinations(topics, 2):
        # Bug fix: topk was documented and accepted but never applied; only
        # the top-k words of each topic now enter the Jaccard computation,
        # matching the other metrics in this module.
        set1, set2 = set(list1[:topk]), set(list2[:topk])
        js = 1 - len(set1.intersection(set2)) / len(set1.union(set2))
        dist = dist + js
        count = count + 1
    return dist / count
def pairwise_word_embedding_distance(topics, word_embedding_model, topk=10):
    """
    Average cosine distance between the top-k words of every topic pair.

    :param topics: a list of lists of words
    :param word_embedding_model: model exposing vectors via ``.wv[word]``
    :param topk: how many most likely words to consider in the evaluation
    :return: mean over topic pairs of the mean pairwise word distance
    """
    if topk > len(topics[0]):
        raise Exception('Words in topics are less than topk')
    pair_count = 0
    total = 0
    for list1, list2 in combinations(topics, 2):
        pair_count = pair_count + 1
        pair_sum = 0
        comparisons = 0
        for word1 in list1[:topk]:
            for word2 in list2[:topk]:
                pair_sum = pair_sum + distance.cosine(word_embedding_model.wv[word1],
                                                      word_embedding_model.wv[word2])
                comparisons = comparisons + 1
        # Average over all word pairs of this topic pair before accumulating.
        total = total + pair_sum / comparisons
    return total / pair_count
def centroid_distance(topics, word_embedding_model, topk=10):
    """
    Average cosine distance between the centroid embeddings of each topic pair.

    :param topics: a list of lists of words
    :param word_embedding_model: model with ``vector_size`` and ``model[word]``
        vector lookup
    :param topk: how many most likely words to consider in the evaluation
    :return: mean over all topic pairs of the distance between their centroids

    Bug fix: the original returned from inside the pair loop, so only the
    first topic pair was ever compared; all pairs are now averaged.
    """
    if topk > len(topics[0]):
        raise Exception('Words in topics are less than topk')
    total = 0.0
    count = 0
    for list1, list2 in combinations(topics, 2):
        count = count + 1
        centroid1 = np.zeros(word_embedding_model.vector_size)
        centroid2 = np.zeros(word_embedding_model.vector_size)
        for word1 in list1[:topk]:
            centroid1 = centroid1 + word_embedding_model[word1]
        for word2 in list2[:topk]:
            centroid2 = centroid2 + word_embedding_model[word2]
        centroid1 = centroid1 / len(list1[:topk])
        centroid2 = centroid2 / len(list2[:topk])
        total = total + distance.cosine(centroid1, centroid2)
    return total / count
def get_word2index(list1, list2):
    """Map every word appearing in either list to a unique integer index."""
    vocabulary = set(list1) | set(list2)
    return {word: index for index, word in enumerate(vocabulary)}
return word2index
| StarcoderdataPython |
1636470 | <filename>testproject/tests/test_add_mfa.py
import pytest
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from tests.utils import get_token_from_response, header_template, login
from trench.utils import create_otp_code, create_secret
User = get_user_model()
@pytest.mark.django_db
def test_add_user_mfa(active_user):
    """Activating the e-mail MFA method for an authenticated user returns HTTP 200."""
    client = APIClient()
    login_response = login(active_user)
    token = get_token_from_response(login_response)
    client.credentials(HTTP_AUTHORIZATION=header_template.format(token))
    secret = create_secret()
    payload = {
        'secret': secret,
        'code': create_otp_code(secret),
        'user': getattr(active_user, active_user.USERNAME_FIELD),
    }
    response = client.post(
        path='/auth/email/activate/',
        data=payload,
        format='json',
    )
    assert response.status_code == 200
@pytest.mark.django_db
def test_user_with_many_methods(active_user_with_many_otp_methods):
    """Log in via the primary MFA method and verify all active methods are listed."""
    client = APIClient()
    user = active_user_with_many_otp_methods
    initial_active_methods_count = user.mfa_methods.filter(is_active=True).count()
    first_step = login(user)
    # As user has several methods get first and get sure only 1 is primary
    primary_method = user.mfa_methods.filter(
        is_primary=True,
    )
    assert len(primary_method) == 1
    secret = primary_method[0].secret
    second_step_response = client.post(
        path='/auth/login/code/',
        data={
            'token': first_step.data.get('ephemeral_token'),
            'code': create_otp_code(secret),
        },
        format='json',
    )
    # Log in the user in the second step and make sure it is correct
    assert second_step_response.status_code == 200
    token = get_token_from_response(second_step_response)
    client.credentials(HTTP_AUTHORIZATION=header_template.format(token))
    active_methods_response = client.get(
        path='/auth/mfa/user-active-methods/',
    )
    # Every initially active method should be reported back by the endpoint.
    assert len(active_methods_response.data) == initial_active_methods_count
| StarcoderdataPython |
5176989 | # Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import atexit
import functools
import os
import shutil
from io import open
if False: # NOSONAR
from typing import Dict, Optional, Any, Union
import copy
import json
import pprint
import subprocess
from distutils.spawn import find_executable
from scalyr_agent.__scalyr__ import PACKAGE_INSTALL, DEV_INSTALL, get_package_root
from scalyr_agent import compat
from scalyr_agent.platform_controller import PlatformController
from scalyr_agent.configuration import Configuration
from tests.utils.compat import Path
from tests.utils.common import get_env
import six
_AGENT_MAIN_PATH = Path(get_package_root(), "agent_main.py")
_CONFIG_MAIN_PATH = Path(get_package_root(), "config_main.py")
def _make_or_clear_directory(path):  # type: (Path) -> None
    """
    Ensure that ``path`` exists as a directory, wiping any previous contents.
    """
    if path.exists():
        shutil.rmtree(six.text_type(path), ignore_errors=True)
    path.mkdir(parents=True, exist_ok=True)
def _path_or_text(fn):
    """
    Decorator that normalizes the first positional argument to a ``Path``.

    Lets the wrapped method be called with either a ``Path`` instance or a
    plain text path.
    """
    # functools.wraps preserves the wrapped function's name/docstring, which
    # the original decorator dropped.
    @functools.wraps(fn)
    def wrapper(self, path, *args, **kwargs):
        if isinstance(path, six.text_type):
            path = Path(path)
        return fn(self, path, *args, **kwargs)
    return wrapper
class AgentRunner(object):
    """
    Agent runner provides ability to launch Scalyr agent with needed configuration settings.
    """
    def __init__(
        self,
        installation_type=DEV_INSTALL,
        enable_coverage=False,
        enable_debug_log=False,
        send_to_server=True,
        workers_type="thread",
        workers_session_count=1,
    ): # type: (int, bool, bool, bool, six.text_type, int) -> None
        if enable_coverage and installation_type != DEV_INSTALL:
            raise ValueError("Coverage is only supported for dev installs")
        # agent data directory path.
        self._agent_data_dir_path = None # type: Optional[Path]
        # agent logs directory path.
        self.agent_logs_dir_path = None # type: Optional[Path]
        # path to the agent config.
        self._agent_config_path = None # type: Optional[Path]
        # path to the agent.log file.
        self.agent_log_file_path = None # type: Optional[Path]
        # all files processed by the agent
        self._files = dict() # type: Dict[six.text_type, Path]
        # all files considered as a log files.
        self._log_files = dict() # type: Dict[six.text_type, Dict[six.text_type, Any]]
        # The agent runner uses this variable as a hint where to search agent essential paths.
        # This is useful when agent was installed from package,
        # and agent runner needs to know it where files are located.
        self._installation_type = installation_type
        self._stopped = False
        self._enable_coverage = enable_coverage
        self._enable_debug_log = enable_debug_log
        # if set, the configuration option - 'disable_send_requests' is set to True
        self._send_to_server = send_to_server
        self._init_agent_paths()
        self._agent_process = None
        self._workers_type = workers_type
        self._worker_sessions_count = workers_session_count
    def get_file_path_text(self, path): # type: (Path) -> str
        # Return the registered file's path as plain text.
        return str(self._files[six.text_type(path)])
    @_path_or_text
    def add_file(self, path): # type: (Path) -> Path
        # Register a file so it is (re)created before the agent starts.
        self._files[six.text_type(path)] = path
        return path
    @_path_or_text
    def add_log_file(self, path, attributes=None):
        # type: (Path, Optional[Dict[six.text_type, Any]]) -> Path
        # Register a file and also include it in the agent config's "logs" list.
        path = self.add_file(path)
        if attributes is None:
            attributes = {"parser": "json"}
        path_text = six.text_type(path)
        self._log_files[path_text] = {"path": path_text, "attributes": attributes}
        return path
    def _get_default_paths(self): # type: () -> Dict[six.text_type, Path]
        """
        Get default path for essential directories and files of the agent. Those paths are fetched from 'PlatformController'.
        """
        # create new 'PlatformController' instance. Since this code is executed on the same machine with agent,
        # platform setting and paths should match.
        platform = PlatformController.new_platform()
        # change install type of the controller to needed one.
        platform._install_type = self._installation_type
        default_types = platform.default_paths
        result = dict()
        for k, v in default_types.__dict__.items():
            result[k] = Path(v)
        return result
    def _init_agent_paths(self):
        """
        Set paths for the essential files and directories.
        """
        default_paths = self._get_default_paths()
        self._agent_data_dir_path = default_paths["agent_data_path"]
        self.agent_logs_dir_path = default_paths["agent_log_path"]
        self._agent_config_path = self.add_file(default_paths["config_file_path"])
        self.agent_log_file_path = self.add_file(self.agent_logs_dir_path / "agent.log")
        self._default_paths = default_paths
    def _create_agent_files(self):
        """
        Create all essential files and directories and dynamically added files.
        """
        _make_or_clear_directory(self._agent_data_dir_path)
        _make_or_clear_directory(self.agent_logs_dir_path)
        for file_path in self._files.values():
            self._create_file(file_path)
        # Materialize the generated configuration so the agent picks it up on start.
        self.write_to_file(
            self._agent_config_path, json.dumps(self._agent_config, indent=4)
        )
    def start(self, executable="python"):
        # Launch the agent in the foreground (--no-fork) so we own the process handle.
        self.clear_agent_logs()
        # important to call this function before agent was started.
        self._create_agent_files()
        if self._installation_type == PACKAGE_INSTALL:
            # use service command to start agent, because stop command hangs on some of the RHEL based distributions
            # if agent is started differently.
            service_executable = find_executable("service")
            if service_executable:
                cmd = "%s scalyr-agent-2 --no-fork --no-change-user start" % (
                    service_executable
                )
            else:
                # Special case for CentOS 6 where we need to use absolute path to service command
                cmd = "/sbin/service scalyr-agent-2 --no-fork --no-change-user start"
            self._agent_process = subprocess.Popen(
                cmd, shell=True, env=compat.os_environ_unicode.copy()
            )
        else:
            base_args = [
                str(_AGENT_MAIN_PATH),
                "--no-fork",
                "--no-change-user",
                "start",
            ]
            if self._enable_coverage:
                # NOTE: We need to pass in command string as a single argument to coverage run
                args = [
                    "coverage",
                    "run",
                    "--branch",
                    "--concurrency=thread",
                    "--parallel-mode",
                    " ".join(base_args),
                ]
            else:
                args = [executable] + base_args
            # NOTE: Using list would be safer since args are then auto escaped
            cmd = " ".join(args)
            self._agent_process = subprocess.Popen(
                cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True,
                close_fds=True,
            )
        print("Agent started.")
        # NOTE: We register atexit handler to ensure agent process is always stopped. This means
        # even if a test failure occurs and we don't get a chance to manually call stop() method.
        atexit.register(self.stop)
    def status(self):
        # Return the human-readable agent status output as text.
        if self._installation_type == PACKAGE_INSTALL:
            cmd = "/usr/sbin/scalyr-agent-2 status -v"
        else:
            cmd = "python {0} status -v".format(_AGENT_MAIN_PATH)
        output = compat.subprocess_check_output(cmd=cmd, shell=True)
        output = six.ensure_text(output)
        return output
    def status_json(self, parse_json=False):
        # type: (bool) -> Union[six.text_type, dict]
        """
        :param parse_json: True to parse result as json and return a dict.
        """
        if self._installation_type == PACKAGE_INSTALL:
            cmd = "/usr/sbin/scalyr-agent-2 status -v --format=json"
        else:
            cmd = "python {0} status -v --format=json".format(_AGENT_MAIN_PATH)
        output = compat.subprocess_check_output(cmd=cmd, shell=True)
        output = six.ensure_text(output)
        if parse_json:
            return json.loads(output)
        return output
    def switch_version(self, version, env=None):
        # type: (six.text_type, Optional[dict]) -> None
        """
        :param version: Python version to switch the agent to.
        :param env: Environment to use with this command.
        """
        if env:
            kwargs = {"env": env}
        else:
            kwargs = {}
        if self._installation_type == PACKAGE_INSTALL:
            subprocess.check_call(
                "/usr/sbin/scalyr-agent-2-config --set-python {0}".format(version),
                shell=True,
                **kwargs # type: ignore
            )
        else:
            subprocess.check_call(
                "python {0} --set-python {1}".format(_CONFIG_MAIN_PATH, version),
                shell=True,
                **kwargs # type: ignore
            )
    def stop(self, executable="python"):
        # Idempotent: subsequent calls after a successful stop are no-ops.
        if six.PY3:
            # atexit.unregister only exists on Python 3; avoids double-stop at interpreter exit.
            atexit.unregister(self.stop)
        if self._stopped:
            return
        print("Stopping agent process...")
        if self._installation_type == PACKAGE_INSTALL:
            service_executable = find_executable("service")
            if service_executable:
                cmd = "%s scalyr-agent-2 stop" % (service_executable)
            else:
                # Special case for CentOS 6 where we need to use absolute path to service command
                cmd = "/sbin/service scalyr-agent-2 stop"
            result = subprocess.check_call(cmd, shell=True)
            return result
        else:
            process = subprocess.Popen(
                "{0} {1} stop".format(executable, _AGENT_MAIN_PATH), shell=True
            )
            process.wait()
            self._agent_process.wait()
            # Print any output produced by the agent before forking which may not end up in the logs
            if self._agent_process.stdout and self._agent_process.stderr:
                stdout = self._agent_process.stdout.read().decode("utf-8")
                stderr = self._agent_process.stderr.read().decode("utf-8")
                if stdout:
                    print("Agent process stdout: %s" % (stdout))
                if stderr:
                    print("Agent process stderr: %s" % (stderr))
            if self._enable_coverage:
                # Combine all the coverage files for this process and threads into a single file so
                # we can copy it over.
                print("Combining coverage data...")
                os.system("coverage combine")
            print("Agent stopped.")
        self._stopped = True
    def restart(self, executable="python"):
        # Restart the running agent in place (config files are not recreated).
        print("Restarting agent process...")
        if self._installation_type == PACKAGE_INSTALL:
            service_executable = find_executable("service")
            if service_executable:
                cmd = "%s scalyr-agent-2 restart" % (service_executable)
            else:
                # Special case for CentOS 6 where we need to use absolute path to service command
                cmd = "/sbin/service scalyr-agent-2 restart"
            result = subprocess.check_call(cmd, shell=True)
            return result
        else:
            process = subprocess.Popen(
                "{0} {1} restart".format(executable, _AGENT_MAIN_PATH), shell=True
            )
            process.wait()
            self._agent_process.wait()
        print("Agent process restarted.")
    @property
    def agent_pid(self):
        # PID of the running agent, read from its pid file.
        path = self.agent_logs_dir_path / "agent.pid"
        with open(six.text_type(path), "r") as f:
            return int(f.read())
    def __del__(self):
        # Best-effort cleanup if the runner is garbage collected while the agent runs.
        self.stop()
    @property
    def _server_host(self): # type: () -> six.text_type
        return get_env("AGENT_HOST_NAME")
    @property
    def _agent_config(self):
        # type: () -> Dict[six.text_type, Any]
        """
        Build and return agent configuration.
        :return: dict with configuration.
        """
        # do not include default log files.
        files_to_exclude_from_config = [
            str(Path(self.agent_logs_dir_path, name)) # type:ignore
            for name in [
                "linux_process_metrics.log",
                "linux_system_metrics.log",
                "agent.log",
            ]
        ]
        config_log_files = list()
        for log_file in self._log_files.values():
            if log_file["path"] not in files_to_exclude_from_config:
                config_log_files.append(log_file)
        config = {
            "api_key": compat.os_environ_unicode["SCALYR_API_KEY"],
            "verify_server_certificate": "false",
            "server_attributes": {"serverHost": self._server_host},
            "logs": config_log_files,
            "default_sessions_per_worker": self._worker_sessions_count,
            "monitors": [],
            "use_multiprocess_workers": self._workers_type == "process",
            # NOTE: We disable this functionality so tests finish faster and we can use lower
            # timeout
            "global_monitor_sample_interval_enable_jitter": False,
        }
        if self._enable_debug_log:
            # NOTE: We also enable copy_from_start if debug_level is enabled so we ship whole debug
            # log to scalyr
            config["debug_level"] = 5
            config["logs"].append({"path": "agent_debug.log"}) # type: ignore
        if not self._send_to_server:
            # do not send requests to server.
            config["disable_send_requests"] = True
        # Print out the agent config (masking the secrets) to make troubleshooting easier
        config_sanitized = copy.copy(config)
        config_sanitized.pop("api_key", None)
        print("Using agent config: %s" % (pprint.pformat(config_sanitized)))
        return config
    @property
    def config_object(self): # type: () -> Configuration
        """
        Get config object from the config file.
        """
        platform = PlatformController.new_platform()
        platform._install_type = self._installation_type
        default_types = platform.default_paths
        config = Configuration(
            six.text_type(self._agent_config_path), default_types, None
        )
        config.parse()
        return config
    @staticmethod
    def _create_file(path, content=None):
        # type: (Path, Optional[Any[six.text_type, six.binary_type]]) -> None
        """
        Add new file to runner's data directory.
        :param path: path to new file, it is relative to runner's data directory path.
        :param content: if set, write its data to file.
        :return:
        """
        if path.exists():
            os.remove(six.text_type(path))
        if not path.parent.exists():
            path.parent.mkdir(parents=True, exist_ok=True)
        if not path.exists():
            path.touch()
        if content:
            if isinstance(content, six.text_type):
                path.write_text(content)
            else:
                path.write_bytes(content)
    @staticmethod
    def read_file_content(path): # type: (Path) -> six.text_type
        return path.read_text()
    def write_to_file(self, path, data):
        # type: (Path, six.text_type) -> None
        """
        Write data to the file located in 'path'
        """
        data = six.ensure_text(data)
        with path.open("a") as f:
            f.write(data)
            f.flush()
    def write_line(self, path, data):
        # type: (Path, six.text_type) -> None
        # Append data plus a trailing newline.
        data = six.ensure_text(data)
        data = "{0}\n".format(data)
        self.write_to_file(path, data)
    def clear_agent_logs(self):
        """Clear agent logs directory."""
        if self.agent_logs_dir_path.exists():
            for child in self.agent_logs_dir_path.iterdir():
                if child.is_file():
                    child.unlink()
    @property
    def config(self):
        # type: () -> Dict
        """
        Read config file and return as dict
        """
        return json.loads(self._agent_config_path.read_text()) # type: ignore
    def write_config(self, config):
        # type: (Dict) -> None
        """
        Write new data to the config.
        """
        self._agent_config_path.write_text(six.text_type(json.dumps(config))) # type: ignore
    @property
    def worker_type(self):
        # "thread" or "process" — see the use_multiprocess_workers config option.
        return self._workers_type
    @property
    def worker_session_ids(self):
        """
        Return ids of all running worker sessions.
        """
        status = json.loads(self.status_json()) # type: ignore
        ids = []
        for worker in status["copying_manager_status"]["workers"]:
            for worker_session in worker["sessions"]:
                ids.append(worker_session["session_id"])
        return ids
    @property
    def worker_sessions_log_paths(self):
        """Get list of log file path for all worker sessions."""
        result = []
        for worker_session_id in self.config_object.get_session_ids_from_all_workers():
            log_file_path = self.config_object.get_worker_session_agent_log_path(
                worker_session_id
            )
            result.append(Path(log_file_path))
        return result
| StarcoderdataPython |
229333 | #Importing Needed Stuff
from pygame import *
# Frame-rate control: cap the main loop at FPS iterations per second.
clock = time.Clock()
FPS = 60
# Game state and paddle dimensions (pixels).
running = True
paddle_width = 20
paddle_height = 80
# Game window set up: create the display surface the sprites draw onto.
win_width = 700
win_height = 500
window = display.set_mode((win_width, win_height))
#Classes
class GameSprite(sprite.Sprite):
    """Base sprite: loads an image, scales it, and tracks position and speed."""
    def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):
        super().__init__()
        loaded_surface = image.load(player_image)
        self.image = transform.scale(loaded_surface, (size_x, size_y))
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = player_x, player_y
        self.speed = player_speed
    def reset(self):
        """Draw the sprite onto the window at its current position."""
        window.blit(self.image, (self.rect.x, self.rect.y))
class LeftPlayer(GameSprite):
    """Left paddle, controlled with the W/S keys."""
    def update(self):
        pressed = key.get_pressed()
        at_top = self.rect.y <= 0
        at_bottom = self.rect.y >= win_height - paddle_height
        if pressed[K_w] and not at_top:
            self.rect.y -= self.speed
        if pressed[K_s] and not at_bottom:
            self.rect.y += self.speed
class RightPlayer(GameSprite):
    """Right paddle, controlled with the Up/Down arrow keys."""
    def update(self):
        # The original body started with a stray dead `pass` statement; removed.
        keys = key.get_pressed()
        if keys[K_UP] and self.rect.y > 0:
            self.rect.y -= self.speed
        if keys[K_DOWN] and self.rect.y < win_height - paddle_height:
            self.rect.y += self.speed
# Create both paddles: left at x=10, right mirrored at the far edge.
l_player = LeftPlayer("Paddle.png", 10, 0, paddle_width, paddle_height, 5)
r_player = RightPlayer("Paddle.png", win_width - paddle_width - 10, 0, paddle_width, paddle_height, 5)
# Main game loop: handle events, move paddles, redraw, then wait for the next frame.
while running:
    for e in event.get():
        if e.type == QUIT:
            running = False
    # Repaint the background before drawing the sprites for this frame.
    window.fill((37, 150, 190))
    l_player.update()
    l_player.reset()
    r_player.update()
    r_player.reset()
    display.update()
    clock.tick(FPS)
| StarcoderdataPython |
6613866 | <gh_stars>0
import sqlite3

# Create the story-chain database and seed it with a few example rows.
fn = 'storychain.db'
cnct = sqlite3.connect(fn)
# One row per sentence ("link") of a story; `ct` is the link's position.
cnct.execute('''CREATE TABLE chains
        (title char(20) NOT NULL,
        ct INTEGER,
        main char(400) NOT NULL,
        userid text,
        datetime text)
        ''')
# Seed data. A single parameterized executemany replaces the three
# copy-pasted literal INSERT statements of the original.
seed_rows = [
    ("wolf", 0, "There was once a boy-wolf.",
     "wwshen", "2015-11-15 15:00:00"),
    ("wolf", 1, "He liked walking and dancing in the rain.",
     "wwshen", "2015-11-15 15:00:00"),
    ("my dad", 0, "My dad is awesome!",
     "wwshen", "2015-11-15 15:00:00"),
]
cnct.executemany("INSERT INTO chains VALUES (?, ?, ?, ?, ?)", seed_rows)
cnct.commit() | StarcoderdataPython |
1872661 | <reponame>MaoXianXin/oneflow
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Union
import oneflow as flow
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.nn.module import Module
@oneflow_export("nn.ConstantPad2d")
@experimental_api
class ConstantPad2d(Module):
    r"""The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.ConstantPad2d.html?highlight=constantpad2d#torch.nn.ConstantPad2d
    This operator pads the input with constant value that user specifies. User can set the amount of padding by setting the parameter `paddings`.
    Args:
        padding (Union[int, tuple, list]):  the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\mathrm{padding_{left}}`, :math:`\mathrm{padding_{right}}`, :math:`\mathrm{padding_{top}}`, :math:`\mathrm{padding_{bottom}}`)
        value (Union[int, float]): The constant value used for padding. Defaults to 0.
    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
            :math:`H_{out} = H_{in} + \mathrm{padding_{top}} + \mathrm{padding_{bottom}}`
            :math:`W_{out} = W_{in} + \mathrm{padding_{left}} + \mathrm{padding_{right}}`
    For example:
    .. code-block:: python
        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> constantpad_layer_0 = flow.nn.ConstantPad2d((2, 2, 1, 1), 1)
        >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32))
        >>> input_int = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.int32))
        >>> output = constantpad_layer_0(input)
        >>> output.shape
        flow.Size([1, 2, 5, 7])
        >>> output
        tensor([[[[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  0.,  1.,  2.,  1.,  1.],
                  [ 1.,  1.,  3.,  4.,  5.,  1.,  1.],
                  [ 1.,  1.,  6.,  7.,  8.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]],
        <BLANKLINE>
                 [[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  9., 10., 11.,  1.,  1.],
                  [ 1.,  1., 12., 13., 14.,  1.,  1.],
                  [ 1.,  1., 15., 16., 17.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]]]], dtype=oneflow.float32)
        >>> output_int = constantpad_layer_0(input_int)
        >>> output_int
        tensor([[[[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  0.,  1.,  2.,  1.,  1.],
                  [ 1.,  1.,  3.,  4.,  5.,  1.,  1.],
                  [ 1.,  1.,  6.,  7.,  8.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]],
        <BLANKLINE>
                 [[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  9., 10., 11.,  1.,  1.],
                  [ 1.,  1., 12., 13., 14.,  1.,  1.],
                  [ 1.,  1., 15., 16., 17.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]]]], dtype=oneflow.float32)
    """
    def __init__(self, padding: Union[int, tuple, list], value: Union[int, float] = 0):
        super().__init__()
        # Normalize the padding spec to the [left, right, top, bottom] list
        # expected by the underlying "constant_pad2d" op.
        if isinstance(padding, (tuple, list)):
            assert len(padding) == 4, ValueError("Length of padding must be 4")
            boundary = [padding[0], padding[1], padding[2], padding[3]]
        elif isinstance(padding, int):
            boundary = [padding, padding, padding, padding]
        else:
            raise ValueError("padding must be int or list or tuple!")
        self.padding = boundary
        self.value = value
    def forward(self, x):
        _, _, h, w = x.shape
        # The op takes both a floating and an integral fill value; only the one
        # matching the input dtype is meaningful, the other is zeroed.
        if x.dtype in [flow.float32, flow.float16, flow.float64]:
            floating_value = float(self.value)
            integral_value = int(0)
        else:
            floating_value = float(0)
            integral_value = int(self.value)
        # NOTE(review): the op is rebuilt on every forward call because its
        # attributes depend on the runtime dtype of `x` — confirm this is cheap
        # enough, or cache per dtype.
        self._op = (
            flow.builtin_op("constant_pad2d")
            .Input("x")
            .Output("y")
            .Attr("padding", self.padding)
            .Attr("floating_value", floating_value)
            .Attr("integral_value", integral_value)
            .Build()
        )
        res = self._op(x)[0]
        return res
if __name__ == "__main__":
    import doctest
    # Run the doctest examples embedded in the docstrings above.
    doctest.testmod(raise_on_error=True)
| StarcoderdataPython |
11396131 | <gh_stars>0
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class Defector(Player):
    """A player who only ever defects.
    Names:
    - Defector: [Axelrod1984]_
    - ALLD: [Press2012]_
    - Always defect: [Mittal2009]_
    """
    name = "Defector"
    # Strategy metadata consumed by the axelrod framework's classifiers.
    classifier = {
        "memory_depth": 0,
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }
    @staticmethod
    def strategy(opponent: Player) -> Action:
        # Unconditionally defect, regardless of the opponent's history.
        return D
class TrickyDefector(Player):
    """A defector that is trying to be tricky.
    Names:
    - Tricky Defector: Original name by <NAME>
    """
    name = "Tricky Defector"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }
    def strategy(self, opponent: Player) -> Action:
        """Almost always defects, but will try to trick the opponent into
        cooperating.
        Defect if opponent has cooperated at least once in the past and has
        defected for the last 3 turns in a row.
        """
        history = opponent.history
        opponent_has_cooperated = C in history
        three_defections_in_a_row = history[-3:] == [D, D, D]
        if opponent_has_cooperated and three_defections_in_a_row:
            return C
        return D
| StarcoderdataPython |
8187631 | # _*_coding : UTF_8_*_
# Author :<NAME>
# CreatTime :2022/1/25 11:07
| StarcoderdataPython |
8175431 | <reponame>TheIdesofMay/lightweight-hangman
import random
def pick_random_word(words):
    """Choose one entry from ``words`` at random and return it upper-cased."""
    return random.choice(words).upper()
def get_input():
    """Prompt until the user types exactly one alphabetic character; return it upper-cased."""
    while True:
        attempt = input("Enter a letter: \n").upper()
        if attempt.isalpha() and len(attempt) == 1:
            return attempt
        # Reject multi-character or non-alphabetic input and re-prompt.
        print("{} is not a valid input. Please enter a single letter: \n".format(attempt))
def is_good_guess(attempt, current_word, n):
    """Return the updated miss count: unchanged on a hit, incremented (with a warning) on a miss."""
    if attempt not in current_word:
        # Miss: warn the player with the remaining tries, then count it.
        print("WRONG! You have {} tries left. \n".format(4 - n))
        return n + 1
    return n
def display_progress(current_word, attempt, progress_string):
    """Reveal every occurrence of ``attempt`` in the board and print it."""
    for position, char in enumerate(current_word):
        if char == attempt:
            progress_string[position] = attempt
    print(" ".join(progress_string))
# VARIABLE DEFS
words = []
progress_string = []
# n counts wrong guesses; the game ends after 5 misses.
n=0
# import the list of words and store each as element
with open("words.txt", "r") as x:
    words_list = x.readlines()
    for word in words_list:
        words.append(word.rstrip("\n"))
# picks random word and make each letter an underscore in seperate list
current_word = pick_random_word(words)
for letter in current_word:
    progress_string.append("_")
# GAME SETUP
start_game = input("Press Enter to start \n")
print("_ "*len(current_word))
# GAME LOOP - checks for win and lose condition
while "_" in progress_string and n<5:
    # gets attempt, changes n appropriately and then displays curent game state
    attempt = get_input()
    n = is_good_guess(attempt, current_word, n)
    display_progress(current_word, attempt, progress_string)
# win message
if "_" not in progress_string:
    print("Congratulations! You guessed the word '{}' correctly\n".format(current_word))
# loss message
if n==5:
    print("That's your last guess! Better luck next time. The word was '{}'\n".format(current_word))
| StarcoderdataPython |
317235 | <reponame>pysalt/freddie<gh_stars>10-100
from typing import Type
from factory import Factory
from peewee import fn
from psycopg2 import connect as pg_connect
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from pydantic import BaseConfig
from pytest import fixture
from freddie import Schema
def create_schema_from_config(config: dict) -> Type[Schema]:
    """Build a throwaway ``Schema`` subclass whose ``Config`` inner class carries ``config``."""
    return type('Schema', (Schema,), {'Config': type('Config', (BaseConfig,), config)})
class WithClient:
    """Mixin exposing the pytest ``client`` fixture as ``self.client`` on test classes."""
    @fixture(autouse=True)
    def _setup_app_client(self, client):
        # autouse: runs before every test of the subclass and stashes the fixture.
        self.client = client
class BaseFactory(Factory):
    """factory_boy base factory adapted for peewee models."""
    @classmethod
    def _setup_next_sequence(cls, *args, **kwargs):
        # Continue the sequence after the highest primary key already stored,
        # so generated rows never collide with existing ones.
        model = cls._meta.model
        pk = getattr(model, model._meta.primary_key.name)
        max_pk = model.select(fn.Max(pk)).scalar() or 1
        return max_pk + 1
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Persist via peewee's create() instead of factory_boy's default call.
        model = target_class.create(**kwargs)
        return model
def run_sql(query, database='postgres'):
    """
    Execute ``query`` against a local PostgreSQL ``database`` in autocommit mode.

    :param query: SQL statement to execute.
    :param database: database name to connect to (default "postgres").
    """
    conn = pg_connect(database=database)
    try:
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = conn.cursor()
        try:
            cur.execute(query)
        finally:
            cur.close()
    finally:
        # The original leaked the connection when execute() raised.
        conn.close()
| StarcoderdataPython |
11234067 | # Generated by Django 2.2.10 on 2020-04-07 18:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.10: adds a unique constraint to Game.room.
    dependencies = [
        ('core', '0002_game_player_point_word'),
    ]
    operations = [
        migrations.AlterField(
            model_name='game',
            name='room',
            field=models.CharField(max_length=500, unique=True, verbose_name='room'),
        ),
    ]
| StarcoderdataPython |
3572988 | import sys
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage.filter import sobel
from sklearn.cluster import KMeans
def binarize_img(img):
    """
    Binarize a grayscale image by clustering its pixel intensities with
    k-means (k=2).

    :param img: 2D grayscale image as a numpy array
    :return: array of the same shape with the two cluster labels swapped
        (label 0 becomes 1 and vice versa)
    """
    # binarize image with k means (k=2)
    k = KMeans(n_clusters=2)
    # Generalized from the hard-coded (64*64, 1) reshape so any image size works.
    pixels = img.reshape((-1, 1))
    k.fit(pixels)
    binarized_img = k.predict(pixels)
    binarized_img = binarized_img.reshape(img.shape)
    # NOTE: the original also computed row/column sums (v, h, n) here, but the
    # results were never used — dead code removed.
    # Swap the labels: {0, 1} -> {-1, 0} -> abs -> {1, 0}.
    binarized_img -= 1
    binarized_img = abs(binarized_img)
    return binarized_img
def eyes_detection(binarized_img):
    # Locate the two eyes of a binarized 64x64 face image.
    # Returns (left_eye, right_eye) as integer (row, col) tuples.
    #as the parts of hair in the image can move significantly the centroid
    #the use of sobel filter to get edges on binary images could mitigate this issue
    sob = sobel(binarized_img) > 0
    #split the image in 4 parts
    top_left = sob[:32,:32]
    top_right = sob[:32,32:]
    # NOTE(review): bot_left/bot_right are computed but never used.
    bot_left = sob[32:,:32]
    bot_right = sob[32:,32:]
    #get the top_left cluster center
    x, y = np.where(top_left == 1)
    x, y = x.tolist(), y.tolist()
    k = KMeans(n_clusters=1)
    # NOTE(review): fit(zip(x, y)) relies on zip returning a list — Python 2
    # behavior (this script uses print statements elsewhere); confirm before porting.
    k.fit(zip(x, y))
    top_left_center = k.cluster_centers_[0,:]
    #get the top_right cluster center
    x, y = np.where(top_right == 1)
    x, y = x.tolist(), y.tolist()
    k = KMeans(n_clusters=1)
    k.fit(zip(x, y))
    top_right_center = k.cluster_centers_[0,:]
    #convert the relatives cluster centers to absolute cluster center
    #the eyes should be here... or something close...
    top_left_center = np.rint(top_left_center).tolist()
    top_right_center = np.rint(top_right_center).tolist()
    # Shift the right-quadrant column back into full-image coordinates.
    top_right_center[1] += 32
    top_left_center = int(top_left_center[0]), int(top_left_center[1])
    top_right_center = int(top_right_center[0]), int(top_right_center[1])
    return top_left_center, top_right_center
def angle(p1, p2):
    """Direction from ``p1`` to ``p2`` in degrees, with rows treated as the y axis."""
    dy = p2[0] - p1[0]
    dx = p2[1] - p1[1]
    return np.rad2deg(np.arctan2(dy, dx))
def get_face_rotation_angle(img):
    """Estimate the in-plane face rotation from the detected eye positions."""
    binary = binarize_img(img)
    left_eye, right_eye = eyes_detection(binary)
    return angle(left_eye, right_eye)
if __name__ == '__main__':
    # Demo pipeline (Python 2): binarize, find the eyes, estimate the rotation
    # angle and save a de-rotated copy of the input image.
    img = cv2.imread(sys.argv[1], 0)
    #binarization of the image & eyes detection
    print "binarization of the image..."
    bi = binarize_img(img)
    print "eyes detection..."
    top_left_center, top_right_center = eyes_detection(bi)
    # Mark the detected eye positions so they stand out in the plot.
    bi[top_left_center[0], top_left_center[1]] = 5
    bi[top_right_center[0], top_right_center[1]] = 5
    plt.imshow(bi)
    plt.show()
    #get face rotation angle
    print "getting the face rotation angle..."
    theta = angle(top_left_center, top_right_center)
    print top_left_center
    print top_right_center
    print "rotation angle : {}".format(theta)
    #rotate the image to align face... seems to be not really performant... :'(
    M = cv2.getRotationMatrix2D((32, 32), theta, 1)
    dst = cv2.warpAffine(img, M, (64, 64), borderValue=img.mean())
    #show the rotated image
    plt.imshow(dst)
    plt.show()
    #save the rotated image
    cv2.imwrite(sys.argv[1] + "_rotated.jpg", dst)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.