index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
34,155
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/migrations/0003_data_data_title.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 06:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add the nullable ``Data_title`` CharField to the ``data`` model.

    Reverses migration ``0002_remove_data_data_title`` in the ``WV`` app.
    """

    dependencies = [
        ('WV', '0002_remove_data_data_title'),
    ]

    operations = [
        migrations.AddField(
            model_name='data',
            name='Data_title',
            # blank/null/default=None: field is optional at both form and DB level.
            field=models.CharField(blank=True, default=None, max_length=100, null=True),
        ),
    ]
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,156
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/api/views.py
|
from django.shortcuts import render
from django.shortcuts import render_to_response,redirect
from django.template.context_processors import csrf
import json
import os.path
from uuid import uuid4
import os
import time
from django.http.response import HttpResponse
from wsgiref.util import FileWrapper
from Word2Vec.settings import BASE_DIR,MEDIA_ROOT
import pandas
import requests
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import pprint
import re
from pandas import ExcelWriter
from pandas import ExcelFile
import copy
from datetime import datetime, timedelta
import pytz
from Word2Vec.settings import stas_api,DB
def get_all_dimensions():
    """Return every dimension code supported for the visits/hits table.

    The order of codes matches the original flat list.
    """
    # TODO: extend this list (carried over from the original note).
    visit_dims = [
        'visitorId', 'idVisit', 'deviceModel', 'serverDate', 'referrerTypeName',
        'referrerUrl', 'deviceType', 'languageCode', 'language', 'deviceBrand',
        'operatingSystemName', 'visitorType', 'country',
    ]
    ad_dims = [
        'AdCampaignId', 'AdKeywordId', 'AdBannerId', 'AdGroupId',
        'AdPositionType', 'AdPosition', 'AdRegionId', 'AdRetargetindId',
        'AdTargetId', 'DRF', 'AdPlacement', 'AdDeviceType',
    ]
    browser_dims = [
        'browserName', 'browserFamily', 'operatingSystemVersion', 'browserVersion',
    ]
    time_dims = [
        'visitServerHour', 'city', 'day_of_week', 'day_of_week_code', 'month',
        'week', 'quarter', 'month_code', 'year', 'minute', 'second',
    ]
    campaign_dims = [
        'campaignSource', 'campaignMedium', 'campaignKeyword', 'campaignContent',
        'visitLocalHour', 'campaignName', 'provider', 'resolution', 'hour',
        'week_period', 'quarter_period', 'month_period',
    ]
    return visit_dims + ad_dims + browser_dims + time_dims + campaign_dims
def get_adstat_dimensions():
    """Return every dimension/column code available in the ``adstat`` table.

    The order of codes matches the original flat list.
    """
    # TODO: extend this list (carried over from the original note).
    ad_fields = [
        'idSite', 'AdCampaignId', 'AdBannerId', 'AdChannelId', 'AdDeviceType',
        'AdGroupId', 'AdKeywordId', 'AdPosition', 'AdPositionType',
        'AdRegionId', 'AdRetargetindId', 'AdPlacement', 'AdTargetId',
        'AdvertisingSystem', 'DRF',
    ]
    campaign_fields = [
        'campaignContent', 'campaignKeyword', 'campaignMedium',
        'campaignName', 'campaignSource',
    ]
    stat_fields = [
        'StatDate', 'StatDateTime', 'Impressions', 'Clicks', 'Cost',
    ]
    time_fields = [
        'month', 'month_period', 'quarter', 'quarter_period', 'week',
        'week_period', 'second', 'minute', 'hour', 'year',
        'day_of_week_code', 'date', 'day_of_week',
    ]
    return ad_fields + campaign_fields + stat_fields + time_fields
def negative_condition(condition):
    """Return the negated form of a segment comparison operator.

    :param condition: operator token such as ``'=='``, ``'=@'``, ``'<='``.
    :returns: the opposite operator, or ``condition`` unchanged when no
        negation is known for it.

    NOTE(review): the table maps ``'>'`` to ``'<'`` (not ``'<='``) and vice
    versa, which is not a strict logical negation; the original mapping is
    preserved here.
    """
    negatives = {
        '==': '!=',
        '=@': '!@',
        '>': '<',
        '>=': '<=',
        '=^': '!^',
        '=$': '!$',
        '=~': '!~',
        '!=': '==',
        '!@': '=@',
        '<': '>',
        '<=': '>=',
        '!~': '=~',
        '!^': '=^',
        '!$': '=$',
    }
    # dict.get with a default replaces the membership-test-then-index pattern.
    return negatives.get(condition, condition)
def get_query_for_metric_name(metric):
    """Return the ClickHouse aggregate expression (with ``as <code>`` alias)
    for a predefined metric code.

    Raises ``KeyError`` for unknown codes; callers rely on that to fall
    through to special-case metric handling.
    """
    return {
        'conversion_rate':"if(uniq(idVisit)=0,0,floor(countIf(Type='goal')*100/uniq(idVisit),2)) as conversion_rate",
        'nb_new_visitors_per_all_visitors':"if(uniq(visitorId)=0,0,floor(uniqIf(visitorId,visitorType='new')*100/uniq(visitorId),2)) as nb_new_visitors_per_all_visitors",
        'nb_return_visitors_per_all_visitors':"if(uniq(visitorId)=0,0,floor(CAST(uniq(visitorId)-uniqIf(visitorId,visitorType='new'),'Int')*100/uniq(visitorId),2)) as nb_return_visitors_per_all_visitors",
        'nb_visits_with_searches':"CAST(countIf(searches>0),'Int') as nb_visits_with_searches",
        'nb_searches_visits_per_all_visits':"if(uniq(idVisit)=0,0,floor(countIf(searches>0)*100/uniq(idVisit),2)) as nb_searches_visits_per_all_visits",
        'nb_searches':"CAST(sum(searches),'Int') as nb_searches",
        'nb_downloas_per_visit':"if(uniq(idVisit)=0,0,floor(sum(Type='download')/uniq(idVisit),2)) as nb_downloas_per_visit",
        'avg_visit_length':"if(uniq(idVisit)=0,0,floor(sum(visitDuration)/uniq(idVisit),2)) as avg_visit_length",
        'nb_return_visitors':"CAST(uniq(visitorId)-uniqIf(visitorId,visitorType='new'),'Int') as nb_return_visitors",
        'nb_new_visits_per_all_visits':"if(uniq(idVisit)=0,0,floor(uniqIf(idVisit,visitorType='new')*100/uniq(idVisit),2)) as nb_new_visits_per_all_visits",
        'nb_new_visits':"CAST(uniqIf(idVisit,visitorType='new'),'Int') as nb_new_visits",
        'nb_new_visitors':"CAST(uniqIf(visitorId,visitorType='new'),'Int') as nb_new_visitors",
        'nb_actions_per_visit':"if(uniq(idVisit)=0,0,floor(count(*)/uniq(idVisit),2)) as nb_actions_per_visit",
        'nb_pageviews_per_visit':"floor(sum(Type='action')/uniq(idVisit),2) as nb_pageviews_per_visit",
        'avg_time_generation':"floor(avg(generationTimeMilliseconds)/1000,2) as avg_time_generation",
        'nb_downloads':"CAST(sum(Type='download'),'Int') as nb_downloads",
        'nb_conversions':"CAST(sum(Type='goal'),'Int') as nb_conversions",
        'nb_pageviews':"CAST(sum(Type='action'),'Int') as nb_pageviews",
        'bounce_rate':"if(uniq(idVisit)=0,0,floor((uniqIf(idVisit,visitDuration=0)*100/uniq(idVisit)),2)) as bounce_rate",
        'bounce_count':"CAST(uniqIf(idVisit,visitDuration=0),'Int') as bounce_count",
        'nb_actions':"CAST(count(*),'Int') as nb_actions",
        'nb_visitors':"CAST(uniq(visitorId),'Int') as nb_visitors",
        'nb_visits':"CAST(uniq(idVisit),'Int') as nb_visits",
    }[metric]
def get_time_dimensions_names():
    """Return the codes of all supported time-derived dimensions."""
    return [
        'month_period', 'month_code', 'month',
        'quarter_period', 'quarter',
        'week_period', 'day_of_week_code', 'day_of_week', 'week',
        'visitLocalSecond', 'second',
        'visitLocalMinute', 'minute',
        'visitLocalHour', 'hour',
        'year', 'date',
    ]
def get_time_dimensions(key):
    """Return the ClickHouse expression template for time dimension ``key``.

    Templates contain a ``{date_field}`` placeholder (and ``{lang}`` for
    localized names) to be filled in by the caller.  Raises ``KeyError``
    for unknown keys.
    """
    return {
        'month': "dictGetString('month','{lang}',toUInt64(toMonth({date_field})))",
        'month_period': "concat(toString(toYear({date_field})),concat('-',toString(toMonth({date_field}))))",
        'month_code': "toMonth({date_field})",
        'quarter': "toQuarter({date_field})",
        'quarter_period': "concat(toString(toYear({date_field})),concat('-',toString(toQuarter({date_field}))))",
        'week': "toRelativeWeekNum({date_field}) - toRelativeWeekNum(toStartOfYear({date_field}))",
        'week_period': "concat(toString(toMonday({date_field})),concat(' ',toString(toMonday({date_field}) +6)))",
        'visitLocalSecond': "toSecond(toDateTime(concat('2018-02-02 ',visitLocalTime)))",
        'second': "toSecond(toDateTime({date_field}))",
        'visitLocalMinute': "toMinute(toDateTime(concat('2018-02-02 ',visitLocalTime)))",
        'minute': "toMinute(toDateTime({date_field}))",
        'visitLocalHour': "toHour(toDateTime(concat('2018-02-02 ',visitLocalTime)))",
        'hour': "toHour(toDateTime({date_field}))",
        'year': "toYear({date_field})",
        'day_of_week_code': "toDayOfWeek({date_field})",
        'date': "toDate({date_field})",
        'day_of_week': "dictGetString('week','{lang}',toUInt64(toDayOfWeek({date_field})))",
    }[key]
def MetricCounts(metrics, headers,dimensionslist,is_all_segments,attribution_model):
    """Build ClickHouse aggregate expressions for the requested metric codes.

    :param metrics: list of metric code strings from the API request
        (predefined codes, ``calculated_metricN``, ``goalN[_conversion|_cost]``,
        ``goalgroupN...``, or the ad metrics clicks/cost/impressions/ctr).
    :param headers: auth headers forwarded to the ``stas_api`` reference API.
    :param dimensionslist: requested dimension codes; used to decide whether
        ad metrics can be grouped in the adstat table.
    :param is_all_segments: True when every requested dimension is a segment.
    :param attribution_model: not referenced in this function -- TODO confirm
        it is still needed in the signature.
    :returns: tuple ``(metric_counts, ad_metric_counts, metrics_string)``:
        expressions for the visits/hits table, expressions for the adstat
        table, and the comma-joined outer projection (rewritten in place for
        metrics that combine values from both tables).
    """
    metric_counts = []
    ad_metric_counts = []
    metrics_string=','.join(metrics)
    for i in metrics:
        # Predefined metric: direct lookup.  Unknown codes raise KeyError and
        # fall through to the special-case handling below (deliberate EAFP).
        try:
            metric_counts.append(get_query_for_metric_name(i))
        except:
            pass
        if 'all_conversion_cost' in i:
            # Cost per conversion combines both tables: cost comes from adstat,
            # conversions from the visits table; the outer projection divides.
            metrics_string = metrics_string.replace('all_conversion_cost',"if(nb_conversions==0,0,floor(cost/nb_conversions,2)) as all_conversion_cost")
            ad_metric_counts.append("CAST(sum(Cost),'Int') as cost")
            metric_counts.append("CAST(sum(Type='goal'),'Int') as nb_conversions")
            continue
        if 'calculated_metric' in i:
            # Fetch the user-defined formula; i[17:] is the numeric id suffix
            # after the 'calculated_metric' prefix.
            calc_metr = json.loads(requests.get(
                stas_api+'reference/calculated_metrics/{num}/?all=1'.format(
                    num=int(i[17:])),
                headers=headers).content.decode('utf-8'))['definition']
            # Check whether calc_metr references advertising-statistics fields;
            # if so the data will be taken from a different table.
            # NOTE(review): these textual replaces are order-sensitive (longer
            # names must be replaced before their substrings, e.g.
            # nb_actions_per_visit before nb_actions).
            calc_metr = calc_metr.replace('impressions', 'sum(Impressions)').replace('nb_actions_per_visit',"if(uniq(idVisit)=0,0,count(*)/uniq(idVisit))")\
                .replace('nb_downloas_per_visit',"if(uniq(idVisit)=0,0,sum(Type='download')/uniq(idVisit))").replace('cost', 'sum(Cost)')\
                .replace('clicks', "sum(Clicks)").replace('nb_visits_with_searches',"countIf(searches>0)").replace('nb_visits', "uniq(idVisit)").replace('nb_actions',"count(*)")\
                .replace('nb_visitors', "uniq(visitorId)").replace('bounce_count',"uniqIf(idVisit,visitDuration=0)").replace('bounce_rate','if(uniq(idVisit)=0,0,(uniqIf(idVisit,visitDuration=0)*100/uniq(idVisit)))')\
                .replace('nb_pageviews',"sum(Type='action')").replace('nb_conversions',"sum(Type='goal')").replace('nb_downloads',"sum(Type='download')")\
                .replace('avg_time_generation',"avg(generationTimeMilliseconds)/1000").replace('ctr',"if(sum(impressions)=0,0,(sum(clicks)/sum(impressions))*100)")\
                .replace('nb_pageviews_per_visit',"sum(Type='action')/uniq(idVisit)").replace('nb_new_visitors_per_all_visitors',"if(uniq(visitorId)=0,0,uniqIf(visitorId,visitorType='new')*100/uniq(visitorId))")\
                .replace('nb_new_visitors',"uniqIf(visitorId,visitorType='new')").replace('nb_new_visits_per_all_visits',"if(uniq(idVisit)=0,0,uniqIf(idVisit,visitorType='new')*100/uniq(idVisit))")\
                .replace('nb_new_visits',"uniqIf(idVisit,visitorType='new')").replace('nb_return_visitors_per_all_visitors',"if(uniq(visitorId)=0,0,uniqIf(visitorId,visitorType='returning')*100/uniq(visitorId))")\
                .replace('nb_return_visitors',"uniq(visitorId)-uniqIf(visitorId,visitorType='new')")\
                .replace('avg_visit_length',"if(uniq(idVisit)=0,0,sum(visitDuration)/uniq(idVisit))").replace('nb_searches_visits_per_all_visits',"if(uniq(idVisit)=0,0,countIf(searches>0)*100/uniq(idVisit))")\
                .replace('nb_searches',"sum(searches)").replace('conversion_rate',"if(uniq(idVisit)=0,0,sum(Type='goal')*100/uniq(idVisit))")
            # Expand goalN_conversion references inside the formula.
            goal_conversions = re.findall(r'goal\d{1,3}_conversion', calc_metr)
            for goal_conversion in goal_conversions:
                calc_metr = calc_metr.replace(goal_conversion,
                    "floor((sum(Type='goal' and goalId={N})/uniq(idVisit))*100,2)".format(
                        N=goal_conversion.partition("_conversion")[0][4:]))
            # NOTE(review): '_{0}' in this regex matches zero underscores, so the
            # pattern is effectively 'goal\d{1,3}'. Looks like a leftover
            # str.format placeholder -- confirm the intended pattern.
            goals = re.findall(r'goal\d{1,3}_{0}', calc_metr)
            for goal in goals:
                calc_metr = calc_metr.replace(goal, "sum(Type='goal' AND goalId={N})".format(N=goal[4:]))
            # Route to adstat when the formula touches ad columns.
            if 'Cost' in calc_metr or 'Impressions' in calc_metr or 'Clicks' in calc_metr:
                ad_metric_counts.append('if('+calc_metr+'==inf,0,floor(' + calc_metr + ',2))' + ' as calculated_metric{N}'.format(N=int(i[17:])))
            else:
                metric_counts.append('if('+calc_metr+'==inf,0,floor(' + calc_metr + ',2))' + ' as calculated_metric{N}'.format(N=int(i[17:])))
        # Goal-related metrics.
        if 'goal' in i:
            if 'goalgroup' in i:
                try:
                    # Goal groups: sum the per-goal expressions of every goal
                    # in the group.
                    query=[]  # list of "sum(Type='goal' and goalId={N})" for every goal returned by the API
                    print(json.loads(requests.get(
                        stas_api+'reference/goal_groups/{id}?all=1'.format(id=re.findall(r'\d{1,3}',i)[0]),
                        headers=headers).content.decode('utf-8'))['goals_code'])
                    for goal in json.loads(requests.get(
                            stas_api+'reference/goal_groups/{id}?all=1'.format(id=re.findall(r'\d{1,3}',i)[0]),
                            headers=headers).content.decode('utf-8'))['goals_code']:
                        if goal:
                            query.append("CAST(sum(Type='goal' AND goalId={N}),'Int')".format(N=goal[4:]))
                    query='+'.join(query)  # query string with the sum of the goalN expressions
                    # If the metric has _conversion, compute a relative value
                    # (divide by the number of visits).
                    if '_conversion' in i:
                        metric_counts.append('if(uniq(idVisit)=0,0,floor(('+query+')*100/uniq(idVisit),2)) as goalgroup{N}_conversion'.format(
                            N=i.partition("_conversion")[0][9:]))
                    elif '_cost' in i :
                        metrics_string = metrics_string.replace(i,'if(goalgroup{N}==0,cost,floor(cost/goalgroup{N},2)) as goalgroup{N}_cost'.format(N=re.findall(r'\d{1,3}',i)[0]))
                        ad_metric_counts.append("CAST(sum(Cost),'Int') as cost")
                        metric_counts.append(query + ' as goalgroup{N}'.format(
                            N=re.findall(r'\d{1,3}', i)[0]))
                        continue
                    else:
                        print(query)
                        metric_counts.append(query + ' as goalgroup{N}'.format(N=i[9:]))
                        continue
                except:
                    # Best-effort: a failed reference-API call skips the metric.
                    continue
            elif '_conversion' in i:
                metric_counts.append(
                    " if(uniq(idVisit)=0,0,floor((sum(Type='goal' and goalId={N})/uniq(idVisit))*100,2)) as goal{N}_conversion".format(
                        N=i.partition("_conversion")[0][4:]))
            else:
                if 'cost' in i:
                    # goalN_cost also needs cost from adstat.
                    metrics_string=metrics_string.replace(i,"if(goal{N}==0,cost,floor(cost/goal{N},2)) as goal{N}_cost".format(N=re.findall(r'\d{1,3}',i)[0]))
                    ad_metric_counts.append("CAST(sum(Cost),'Int') as cost")
                    metric_counts.append("CAST(sum(Type='goal' AND goalId={N}),'Int') as goal{N}".format(N=re.findall(r'\d{1,3}',i)[0]))
                else:
                    metric_counts.append("CAST(sum(Type='goal' AND goalId={N}),'Int') as goal{N}".format(N=i[4:]))
            continue
        if i in ['clicks', 'cost', 'impressions','ctr']:
            metr="CAST(sum({Metric}),'Int') as {metric}".format(metric=i,Metric=i.capitalize())
            if i=='ctr':
                metr="if(sum(Impressions)=0, 0, floor((sum(Clicks) / sum(Impressions)) * 100, 2)) as {metric}".format(metric=i)
            # If dimensions contains only segments, or parameters that are not
            # present in adstat, switch tables and write zeros into the ad
            # metrics instead.
            flag=1
            if is_all_segments == False:
                for dim in dimensionslist:
                    if dim not in get_adstat_dimensions():
                        metric_counts.append("0 as {metric}".format(metric=i))
                        flag=0
                        break
            if flag==1:
                ad_metric_counts.append(metr)
    return metric_counts,ad_metric_counts,metrics_string
@csrf_exempt
def CHapi(request):
def ChangeName(filename):
    """Generate a unique file name that keeps the original extension.

    The base name is replaced with a random ``uuid4`` hex string, so
    repeated uploads of the same file never collide on disk.
    """
    extension = filename.split('.')[-1]
    unique_base = uuid4().hex
    return '{}.{}'.format(unique_base, extension)
def ToExcel(stats):
    """Write ``stats`` rows into a uniquely named .xlsx file under MEDIA_ROOT.

    Column headers are human-readable names fetched from the reference API
    for the first requested dimension and for each metric; the first data
    row holds totals.  On any failure (empty stats, API error, missing key)
    a sheet with just the headers and one blank row is written instead.

    :param stats: list of row dicts keyed by dimension/metric codes.
    :returns: the generated file name (relative to MEDIA_ROOT).
    """
    xls_stats=[]
    try:
        # Determine column order (the dimension column goes first).
        col_names=[]
        col_names.append(json.loads(requests.get(
            stas_api+'reference/dimensions/?code={dimension}'.format(dimension=dimensionslist[0]),
            headers=headers).content.decode('utf-8'))['results'][0]['name'])
        for metr in metrics:
            col_names.append(json.loads(requests.get(
                stas_api+'reference/metrics/?code={metric}'.format(metric=metr),
                headers=headers).content.decode('utf-8'))['results'][0]['name'])
        # Prepend the totals row ("ИТОГО" = grand total; "всего" = overall).
        totals={}
        totals[col_names[0]]='\tИТОГО'
        for metr in metrics:
            totals[json.loads(requests.get(
                stas_api+'reference/metrics/?code={metric}'.format(metric=metr),
                headers=headers).content.decode('utf-8'))['results'][0]['name']]=str(total_filtered[metr])+'(всего '+str(total[metr])+')'
        xls_stats.append(totals)
        # Re-key every stats row from metric codes to human-readable names.
        for dic in stats:
            xls_dic={}
            xls_dic[json.loads(requests.get(
                stas_api+'reference/dimensions/?code={dimension}'.format(dimension=dimensionslist[0]),
                headers=headers).content.decode('utf-8'))['results'][0]['name']]=dic[dimensionslist[0]]
            for metr in metrics:
                xls_dic[json.loads(requests.get(
                    stas_api+'reference/metrics/?code={metric}'.format(metric=metr),
                    headers=headers).content.decode('utf-8'))['results'][0]['name']]=dic[metr]
            xls_stats.append(xls_dic)
        df = pandas.DataFrame(xls_stats)
        df = df[col_names]
    except:
        # Fallback: a single empty row so the spreadsheet still has headers.
        xls_stats=[]
        xls_dict={json.loads(requests.get(
            stas_api+'reference/dimensions/?code={dimension}'.format(dimension=dimensionslist[0]),
            headers=headers).content.decode('utf-8'))['results'][0]['name']:''}
        for metr in metrics:
            xls_dict[json.loads(requests.get(
                stas_api+'reference/metrics/?code={metric}'.format(metric=metr),
                headers=headers).content.decode('utf-8'))['results'][0]['name']]=''
        xls_stats.append(xls_dict)
        df = pandas.DataFrame(xls_stats)
    excel_name=ChangeName('stat.xlsx')
    writer = ExcelWriter(os.path.join(MEDIA_ROOT, excel_name))
    df.to_excel(writer, 'Sheet1', index=False)
    writer.save()
    return excel_name
def datesdicts(array_dates, dim,dim_with_alias,ad_dim_with_alias,table,date_filt,updm,group_by):
    """Index each per-period result list by dimension value.

    Fetches the full set of distinct ``dim`` values for the whole filter,
    appends any value missing from the first period's rows (so every label
    is represented there), then converts each period's row list into a
    ``{dimension_value: row}`` dict.

    :param array_dates: list (one element per period) of lists of row dicts.
    :param dim: dimension code used as the lookup key.
    :returns: list of dicts, one per period, keyed by the dimension value.
    """
    q_all = '''SELECT {dimension_with_alias} FROM {table}
        WHERE 1 {filt} AND {site_filt} AND {date_filt} AND {updm}
        GROUP BY {group_by}
        '''
    if is_two_tables:
        q_all="SELECT {dimension} FROM ("+q_all+""") ALL FULL OUTER JOIN (SELECT {ad_dimension_with_alias} FROM {DB}.adstat
        WHERE 1 {filt} AND {site_filt} AND {ad_date_filt} AND {updm} GROUP BY {group_by}) USING {dimension}"""
    q_all=q_all.format(dimension_with_alias=dim_with_alias,ad_dimension_with_alias=ad_dim_with_alias,dimension=dim,updm=updm,date_filt=date_filt,
                       site_filt=site_filt, filt=filt,group_by=group_by,ad_date_filt=ad_date_filt,table=table,DB=DB)
    all_labeldicts = json.loads(get_clickhouse_data(q_all+' {limit} FORMAT JSON'.format(limit=limit), 'http://46.4.81.36:8123'))['data']
    all_label = []
    # NOTE(review): sorted_array aliases array_dates[0]; the appends below
    # therefore mutate the caller's first-period list in place -- this looks
    # intentional (pads the first period with missing labels), confirm.
    sorted_array = array_dates[0]
    for i in all_labeldicts:
        all_label.append(i[dim])
    for label in all_label:
        k = 0
        for sub_array in array_dates[0]:
            if sub_array[dim] == label:
                k = 1
        if k == 0:
            sorted_array.append({dim: label})
    # Build one {value: row} dict per period.
    array_dates_dicts = []
    for i in array_dates:
        sub_dict = {}
        for j in i:
            sub_dict[j[dim]] = j
        array_dates_dicts.append(sub_dict)
    return array_dates_dicts
def MaxLenNum(array_dates):
    """Return the index of the longest element of ``array_dates``.

    Ties resolve to the earliest index (the original only updated on a
    strictly greater length); an empty input yields 0, mirroring the
    original accumulator default.
    """
    if not array_dates:
        return 0
    # Idiomatic replacement for the manual index-tracking loop.
    return max(range(len(array_dates)), key=lambda idx: len(array_dates[idx]))
def RecStats(n,i,updimensions,table,up_dim_info):
    """Recursively build the nested ``stats`` tree, one dimension level per call.

    :param n: index of the *parent* level in ``dimensionslist_with_segments``;
        this call produces the rows for level ``n + 1``.
    :param i: the parent row (dict) whose value becomes a WHERE filter here.
    :param updimensions: accumulated WHERE-clause fragments from upper levels
        (mutated in place; trimmed with ``[:n+1]`` on every sibling iteration).
    :param table: fully qualified ClickHouse table to query.
    :param up_dim_info: dict with the parent row's info (prepended to segment
        groups as the 'Все данные' row), or ``1`` to suppress that row.
    :returns: list of row dicts, each optionally carrying a nested 'sub' list.
    """
    # Drop the filter a previous sibling iteration added for this slot.
    try:
        print(updimensions)
        updimensions.pop(n)
    except:
        pass
    # Append the parent's value as a filter for this level.
    # If the previous level is a segment the lookup fails and no filter is
    # added (the bare except is relied on for that).
    try:
        if '_path' in dimensionslist_with_segments[n]:
            updimensions.append('visitorId IN {list_of_id}'.format(list_of_id=i['visitorId']))
        else:
            if dimensionslist_with_segments[n] in time_dimensions_dict.keys():
                # Time dimensions: quote string values, leave ints bare.
                if type(i[dimensionslist_with_segments[n]]) is int:
                    updimensions.append(
                        "{updimension}={updimension_val}".format(updimension=time_dimensions_dict[dimensionslist_with_segments[n]],
                                                                 updimension_val=i[dimensionslist_with_segments[n]]))
                else:
                    updimensions.append(
                        "{updimension}='{updimension_val}'".format(updimension=time_dimensions_dict[dimensionslist_with_segments[n]],
                                                                   updimension_val=i[dimensionslist_with_segments[n]]))
            else:
                updimensions.append(
                    "{updimension}='{updimension_val}'".format(updimension_val=i[dimensionslist_with_segments[n]],
                                                               updimension=dimensionslist_with_segments[n]))
    except:
        pass
    sub=[]
    # A list at level n+1 means a group of segments.
    if type(dimensionslist_with_segments[n+1]) == list:
        num_seg = len(dimensionslist_with_segments[n+1])
    else:
        num_seg = 0
    # Prepend the parent-level info row ('Все данные' = "All data") to a
    # segment group, unless suppressed via up_dim_info == 1.
    if (num_seg!=0 or 'segment' in dimensionslist_with_segments[n+1]) and up_dim_info != 1:
        up_dim_info['label']='Все данные'
        sub.append(up_dim_info)
    # --- Case 1: a group of segments -------------------------------------
    for num in range(num_seg):
        array_dates = []
        # dimensionslist_with_segments[n+1][num][7:] strips the 'segment' prefix.
        seg = json.loads(requests.get(
            stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dimensionslist_with_segments[n+1][num][7:])),
            headers=headers).content.decode('utf-8'))['real_definition']
        seg_filt = seg.partition("==")[0] + "=='" + seg.partition("==")[2] + "'"
        seg_label = json.loads(requests.get(
            stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dimensionslist_with_segments[n+1][num][7:])),
            headers=headers).content.decode('utf-8'))['name']
        updm = ' AND '.join(updimensions)
        counter=0
        for date in relative_period:
            q = '''SELECT '{label_val}' as label,'{segment_val}' as segment,{metric_counts} FROM {table}
                WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt} AND {updm}
                '''
            if is_two_tables:
                q = "SELECT label,segment,{metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT '{label_val}' as label,'{segment_val}' as segment,{ad_metric_counts} FROM {DB}.adstat
                WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt} AND {updm}) USING label LIMIT 1"""
            q = q.format(label_val=seg_label,
                         site_filt=site_filt,
                         segment_val=seg, updm=updm,
                         seg_filt=FilterParse(seg_filt.replace("'", '')),
                         metric_counts=metric_counts,
                         ad_date_field=ad_date_field,
                         ad_metric_counts=ad_metric_counts,
                         date1=date['date1'], metrics_string=metrics_string,
                         date2=date['date2'], filt=filt,
                         table=table, date_field=date_field,DB=DB)
            array_dates.append(json.loads(get_clickhouse_data(q+' FORMAT JSON', 'http://46.4.81.36:8123'))['data'])
            # If the segment is empty, add zero values for this period.
            if array_dates[counter] == []:
                empty_dict = {'label': seg_label,
                              'segment': seg}
                for metric in metrics:
                    empty_dict[metric] = 0
                array_dates[counter].append(empty_dict)
            counter+=1
        counter=0
        for i in array_dates[0]:
            updimensions = updimensions[:n + 1]
            stat_dict = {'label': i['label'],
                         'segment': i['segment']}
            dates = []
            for m in range(len(array_dates)):
                metrics_dict = dict.fromkeys(metrics)
                for j in metrics_dict:
                    try:
                        metrics_dict[j] = array_dates[m][counter][j]
                    except:
                        metrics_dict[j]=0
                dates.append({'date1': period[m]['date1'], 'date2': period[m]['date2'], 'metrics': metrics_dict})
            stat_dict['dates'] = dates
            if len(dimensionslist_with_segments) > n+2:
                # Fill in the sub-level.
                # NOTE(review): seg_filt is appended twice; the recursive call
                # pops one entry at index n -- confirm the duplication is intended.
                updimensions.append(seg_filt)
                updimensions.append(seg_filt)
                stat_dict['sub'] = RecStats(n+1, i, updimensions, table,1)
            sub.append(stat_dict)
            counter+=1
    # --- Case 2: a single segment (not a group of segments) ---------------
    if 'segment' in dimensionslist_with_segments[n+1]:
        array_dates = []
        seg = json.loads(requests.get(
            stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dimensionslist_with_segments[n+1][7:])),
            headers=headers).content.decode('utf-8'))['real_definition']
        seg_filt = seg.partition("==")[0] + "=='" + seg.partition("==")[2] + "'"
        seg_label = json.loads(requests.get(
            stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dimensionslist_with_segments[n+1][7:])),
            headers=headers).content.decode('utf-8'))['name']
        updm = ' AND '.join(updimensions)
        for date in relative_period:
            q = '''SELECT '{label_val}' as label,'{segment_val}' as segment,{metric_counts} FROM {table}
                WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt} AND {updm}
                '''
            if is_two_tables:
                q = "SELECT label,segment,{metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT '{label_val}' as label,'{segment_val}' as segment,{ad_metric_counts} FROM {DB}.adstat
                WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt} AND {updm}) USING label LIMIT 1"""
            q = q.format(label_val=seg_label,
                         site_filt=site_filt,
                         segment_val=seg,updm=updm,
                         seg_filt=FilterParse(seg_filt.replace("'", '')),
                         metric_counts=metric_counts,
                         ad_date_field=ad_date_field,
                         ad_metric_counts=ad_metric_counts,
                         date1=date['date1'], metrics_string=metrics_string,
                         date2=date['date2'], filt=filt,
                         table=table, date_field=date_field,DB=DB)
            print(q)
            array_dates.append(json.loads(get_clickhouse_data(q+" FORMAT JSON", 'http://46.4.81.36:8123'))['data'])
        # If the segment is empty, add zero values.
        if array_dates[0]==[]:
            empty_dict={'label':seg_label,
                        'segment':seg}
            for metric in metrics:
                empty_dict[metric]=0
            array_dates[0].append(empty_dict)
        counter=0
        for i in array_dates[0]:
            updimensions = updimensions[:n + 1]
            stat_dict = {'label': i['label'],
                         'segment': i['segment'], }
            dates = []
            for m in range(len(array_dates)):
                metrics_dict = dict.fromkeys(metrics)
                for j in metrics_dict:
                    try:
                        metrics_dict[j] = array_dates[m][counter][j]
                    except:
                        metrics_dict[j]=0
                dates.append({'date1': period[m]['date1'], 'date2': period[m]['date2'], 'metrics': metrics_dict})
            stat_dict['dates'] = dates
            # If more dimensions remain, fill in the sub-level.
            if len(dimensionslist_with_segments) > n+2:
                # NOTE(review): the parsed segment filter is appended twice --
                # see the note in the segment-group branch above.
                updimensions.append(FilterParse(seg_filt.replace("'", '')))
                updimensions.append(FilterParse(seg_filt.replace("'", '')))
                up_dim = stat_dict.copy()
                stat_dict['sub'] = RecStats(n+1, i, updimensions, table, up_dim)
            sub.append(stat_dict)
            counter+=1
    # --- Case 3: a plain dimension ----------------------------------------
    elif num_seg==0:
        array_dates = []
        updm=' AND '.join(updimensions)
        dimension = dimensionslist_with_segments[n + 1]
        if sort_column == "":
            sort_column_in_query = dimension
        else:
            sort_column_in_query = sort_column
        group_by = dimensionslist_with_segments[n+1]
        # Path dimensions are aggregated per visitor.
        if '_path' in dimensionslist_with_segments[n+1]:
            group_by = 'visitorId'
        if attribution_model == 'first_interaction':
            for date in relative_period:
                # Look back attribution_lookup_period days before the period start.
                date0 = (datetime.strptime(date['date1'], time_format) - timedelta(days=int(attribution_lookup_period))).strftime(time_format)
                q = '''SELECT alias as {dimension_without_aliases},{sum_metric_string} FROM (SELECT visitorId,any({dimension_without_aliases}) as alias FROM {table}
                    WHERE visitorId IN (SELECT visitorId FROM {DB}.hits_with_visits WHERE 1 {filt} AND {site_filt} AND {updimensions} AND {date_field} BETWEEN '{date1}' AND '{date2}')
                    AND {date_field} BETWEEN '{date0}' AND '{date2}' {filt} AND {updimensions} GROUP BY visitorId)
                    ALL INNER JOIN
                    (SELECT {metric_counts},visitorId FROM {table} WHERE 1 {filt} AND {updimensions} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY visitorId)
                    USING visitorId GROUP BY {group_by}'''
                if is_two_tables:
                    q = "SELECT {dimension},{metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT {ad_dimension_with_alias},{ad_metric_counts} FROM {DB}.adstat
                    WHERE 1 {filt} AND {site_filt} AND {updimensions} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {group_by}) USING {dimension}"""
                q = q.format(table=table, dimension_with_alias=dimensionslist_with_segments_and_aliases[n+1],ad_dimension_with_alias=ad_dimensionslist_with_segments_and_aliases[n+1], dimension=dimension,
                             sum_metric_string=sum_metric_string, date0=str(date0),updimensions=updm,
                             metric_counts=metric_counts, date1=date['date1'], ad_metric_counts=ad_metric_counts,
                             site_filt=site_filt, date2=date['date2'], filt=filt, group_by=group_by,
                             dimension_without_aliases=list_with_time_dimensions_without_aliases[n+1],
                             date_field=date_field, ad_date_field=ad_date_field, metrics_string=metrics_string,DB=DB)
                array_dates.append(json.loads(get_clickhouse_data(q + ' ORDER BY {sort_column} {sort_order} FORMAT JSON'.format(
                    sort_column=sort_column_in_query, sort_order=sort_order, limit=limit),'http://46.4.81.36:8123'))['data'])
        elif attribution_model == 'last_non-direct_interaction':
            for date in relative_period:
                q = '''SELECT alias as {dimension_without_aliases},{sum_metric_string}
                    FROM (SELECT visitorId,any({dimension_without_aliases}) as alias FROM {table}
                    WHERE visitorId IN (SELECT visitorId FROM {table} WHERE 1 {filt} AND {site_filt} AND {updimensions} AND {date_field} BETWEEN '{date1}' AND '{date2}')
                    AND {date_field} < '{date2}' AND referrerType!='direct' {filt} AND {updimensions} GROUP BY visitorId)
                    ALL INNER JOIN
                    (SELECT {metric_counts},visitorId,{dimension_with_alias} FROM {table} WHERE 1 {filt} AND {updimensions} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {dimension},visitorId)
                    USING visitorId GROUP BY {group_by}'''
                if is_two_tables:
                    q = "SELECT {dimension},{metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT {ad_dimension_with_alias},{ad_metric_counts} FROM {DB}.adstat
                    WHERE 1 {filt} AND {site_filt} AND {updimensions} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {group_by}) USING {dimension}"""
                q = q.format(table=table, dimension_with_alias=dimensionslist_with_segments_and_aliases[n + 1],
                             dimension=dimension,ad_dimension_with_alias=ad_dimensionslist_with_segments_and_aliases[n+1],
                             sum_metric_string=sum_metric_string, updimensions=updm,
                             metric_counts=metric_counts, date1=date['date1'], ad_metric_counts=ad_metric_counts,
                             site_filt=site_filt, date2=date['date2'], filt=filt, group_by=group_by,
                             dimension_without_aliases=list_with_time_dimensions_without_aliases[n + 1],
                             date_field=date_field, ad_date_field=ad_date_field, metrics_string=metrics_string,DB=DB)
                array_dates.append(json.loads(
                    get_clickhouse_data(q + ' ORDER BY {sort_column} {sort_order} FORMAT JSON'.format(
                        sort_column=sort_column_in_query, sort_order=sort_order, limit=limit),
                        'http://46.4.81.36:8123'))['data'])
        else:
            # Default (last interaction) attribution.
            for date in relative_period:
                q = '''SELECT {dimension_with_alias},{metric_counts} FROM {table}
                    WHERE 1 {filt} AND {site_filt} AND {updimensions} AND {date_field} BETWEEN '{date1}' AND '{date2}'
                    GROUP BY {group_by}'''
                # If ad metrics (or fields present only in adstat) were
                # requested, join the hits/visits table with adstat.
                if is_two_tables:
                    q = "SELECT {dimension},{metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT {ad_dimension_with_alias},{ad_metric_counts} FROM {DB}.adstat
                    WHERE 1 {filt} AND {site_filt} AND {updimensions} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {group_by}) USING {dimension}"""
                q = q.format(table=table, dimension_with_alias=dimensionslist_with_segments_and_aliases[n+1], dimension=dimension,
                             metric_counts=metric_counts, date1=date['date1'], ad_metric_counts=ad_metric_counts,updimensions=updm,
                             site_filt=site_filt, date2=date['date2'], filt=filt, group_by=group_by,ad_dimension_with_alias=ad_dimensionslist_with_segments_and_aliases[n+1],
                             date_field=date_field, ad_date_field=ad_date_field, metrics_string=metrics_string,DB=DB)
                print(q)
                array_dates.append(json.loads(get_clickhouse_data(q+' ORDER BY {sort_column} {sort_order} FORMAT JSON'
                    .format(sort_column=sort_column_in_query,sort_order=sort_order), 'http://46.4.81.36:8123'))['data'])
        dates_dicts=datesdicts(array_dates,dimensionslist_with_segments[n+1],dimensionslist_with_segments_and_aliases[n+1],ad_dimensionslist_with_segments_and_aliases[n+1],table,date_filt,updm,group_by)
        # Return an empty sub when every metric on this level is zero.
        empties = []
        for i in array_dates:
            empty_d = True
            for j in i:
                if len(list(j.keys())) != 1 and list(j.values()).count(0) != len(list(j.keys())) - 1:
                    empty_d = False
                    break
            empties.append(empty_d)
        if empties.count(True) == len(empties):
            return sub
        # Iterate the longest period's rows (capped at `lim`).
        for i2 in array_dates[MaxLenNum(array_dates)][:lim]:
            updimensions=updimensions[:n+1]
            stat_dict = {'label': i2[dimensionslist_with_segments[n + 1]],
                         'segment':'{label}=={value}'.format(label=dimensionslist_with_segments[n + 1]
                                                             ,value=i2[dimensionslist_with_segments[n + 1]])}
            dates = []
            is_all_nulls = True
            for m in range(len(array_dates)):
                metrics_dict = dict.fromkeys(metrics)
                for j in metrics_dict:
                    try:
                        metrics_dict[j] = dates_dicts[m][i2[dimensionslist_with_segments[n+1]]][j]
                        is_all_nulls=False
                    except:
                        metrics_dict[j]=0
                dates.append({'date1': period[m]['date1'], 'date2': period[m]['date2'], 'metrics': metrics_dict})
            # Skip rows that have no data in any period.
            if is_all_nulls:
                continue
            stat_dict['dates'] = dates
            if n != len(dimensionslist_with_segments) - 2:
                if '_path' in dimensionslist_with_segments[n+1]:
                    updimensions.append('visitorId IN {list_of_id}'.format(list_of_id=i2['visitorId']))
                else:
                    try:
                        if dimensionslist_with_segments[n+1] in time_dimensions_dict.keys():
                            if type(i2[dimensionslist_with_segments[n+1]]) is int:
                                updimensions.append("{updimension}={updimension_val}".format(updimension=time_dimensions_dict[dimensionslist_with_segments[n+1]],updimension_val=i2[dimensionslist_with_segments[n+1]]))
                            else:
                                updimensions.append(
                                    "{updimension}='{updimension_val}'".format(updimension=time_dimensions_dict[dimensionslist_with_segments[n+1]],
                                                                               updimension_val=i2[dimensionslist_with_segments[n+1]]))
                        else:
                            updimensions.append("{updimension}='{updimension_val}'".format(updimension_val=i2[dimensionslist_with_segments[n+1]],
                                                                                           updimension=dimensionslist_with_segments[n+1]))
                    except:
                        pass
                up_dim=stat_dict.copy()  # pass the parent-level row info ("All data") down
                stat_dict['sub'] = RecStats(n + 1, i2, updimensions, table,up_dim)
            sub.append(stat_dict)
    return sub
def get_clickhouse_data(query, host, connection_timeout=1500):
    """Run a query against the ClickHouse HTTP interface.

    :param query: full SQL text, passed via the ``query`` URL parameter.
    :param host: ClickHouse HTTP endpoint, e.g. ``http://host:8123``.
    :param connection_timeout: requests timeout in seconds.
    :returns: the raw response body as text.
    """
    response = requests.post(host, params={'query': query}, timeout=connection_timeout)
    return response.text
def AddCounts(period,dimension_counts,ad_dimension_counts,filt,sort_order,table,date_filt,):
    """Build the "counts" key of the response: distinct-value counts per dimension.

    For every entry of the closure list ``dimensionslist`` a ClickHouse query is
    composed (uniq(...) with the current filter/date range) and the results are
    merged into one dict {dimension_name: count}; failed queries yield 0.

    NOTE(review): this is a closure — it reads ``dimensionslist``, ``site_filt``,
    ``attribution_model``, ``relative_period``, ``is_two_tables``, ``DB`` etc.
    from the enclosing request handler and is not usable standalone.
    NOTE(review): the ``period`` parameter is never used in the body.
    """
    # Accumulator for raw query results; keys come back as 'h<dimension>'.
    a={}
    for dim_num in range(len(dimensionslist)):
        # "*_path" dimensions are per-visitor aggregates: uniqueness is counted
        # over the grouped arrays (one row per visitorId), not over raw rows.
        if '_path' in dimensionslist[dim_num]:
            q = """SELECT CAST(uniq(*),'Int') as h{dim} FROM (SELECT toString(groupUniqArray({dim_path}))
                FROM {table}
                WHERE 1 {filt} AND {site_filt} AND {date_filt}
                GROUP BY visitorId
               FORMAT JSON""".format(dim=dimensionslist[dim_num], dim_path=dimensionslist[dim_num][:-5],
                                     filt=filt, site_filt=site_filt,
                                   sort_order=sort_order,
                                   table=table,
                                   date_filt=date_filt)
            print(q)
        # First-interaction attribution: each visitor contributes the first value
        # of the dimension observed inside a lookback window before the period.
        elif attribution_model=='first_interaction':
            date0 = (datetime.strptime(relative_period[0]['date1'], time_format) - timedelta(days=int(attribution_lookup_period))).strftime(time_format)
            q = '''SELECT CAST(uniq(alias),'Int') as h{dimension}
            FROM (SELECT visitorId,any({dimension_without_aliases}) as alias FROM {table}
            WHERE visitorId IN (SELECT visitorId FROM {table} WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}')
            AND {date_field} BETWEEN '{date0}' AND '{date2}' {filt} GROUP BY visitorId)
            ALL INNER JOIN
            (SELECT visitorId FROM {table} WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY visitorId)
            USING visitorId
             '''.format(dimension_with_alias=dimensionslist_with_segments_and_aliases[dim_num],dimension_without_aliases=list_with_time_dimensions_without_aliases[dim_num],
                    date0=str(date0),dimension_counts=dimension_counts[dim_num],site_filt=site_filt,
                        date1=relative_period[0]['date1'],dimension=dimensionslist[dim_num],
                        date2=relative_period[0]['date2'], filt=filt, sort_order=sort_order,
                        limit=limit,
                        table=table,
                        date_field=date_field)
        # Last non-direct attribution: last value before date2 excluding 'direct'.
        elif attribution_model=='last_non-direct_interaction':
            q = '''SELECT CAST(uniq(alias),'Int') as h{dimension}
            FROM (SELECT visitorId,any({dimension_without_aliases}) as alias FROM {table}
            WHERE visitorId IN (SELECT visitorId FROM {table} WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}')
            AND {date_field} < '{date2}' AND referrerType!='direct' {filt} GROUP BY visitorId)
            ALL INNER JOIN
            (SELECT visitorId,{dimension_with_alias} FROM {table} WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {dimension},visitorId)
            USING visitorId
             '''.format(
                dimension_with_alias=dimensionslist_with_segments_and_aliases[dim_num],dimension_without_aliases=list_with_time_dimensions_without_aliases[dim_num],
                dimension_counts=dimension_counts[dim_num],site_filt=site_filt,
                        date1=relative_period[0]['date1'],dimension=dimensionslist[dim_num],
                        date2=relative_period[0]['date2'], filt=filt, sort_order=sort_order,
                        limit=limit,
                        table=table,
                        date_field=date_field)
            print(q)
        # Default: plain uniq() over the visits table; when both visit metrics
        # and ad metrics are requested, FULL OUTER JOIN with {DB}.adstat.
        else:
            q = ''' SELECT {dimension_counts}
                                    FROM {table}
                                    WHERE 1 {filt} AND {site_filt} AND {date_filt}
                                    '''
            if is_two_tables:
                q="""SELECT CAST(uniq({dimension}),'Int') as h{dimension} FROM (SELECT DISTINCT {dimension_with_alias} FROM {DB}.hits_with_visits
                WHERE 1 {filt} AND {site_filt} AND {date_filt}) ALL FULL OUTER JOIN (SELECT DISTINCT {ad_dimension_with_alias} FROM {DB}.adstat
                WHERE 1 {filt} AND {site_filt} AND {ad_date_filt}) USING {dimension}"""
            q=q.format(dimension_counts=dimension_counts[dim_num],dimension=dimensionslist[dim_num],date_filt=date_filt,
                       site_filt=site_filt,dimension_with_alias=dimensionslist_with_segments_and_aliases[dim_num],
                       ad_dimension_with_alias=ad_dimensionslist_with_segments_and_aliases[dim_num], filt=filt,ad_date_filt=ad_date_filt,table=table,DB=DB)
            print(q)
        # Any failure (HTTP, JSON, empty result) degrades to a 0 count.
        try:
            a.update(json.loads(get_clickhouse_data(q+' FORMAT JSON', 'http://46.4.81.36:8123'))['data'][0])
        except:
            a.update({'h'+dimensionslist[dim_num]:0})
    b = {}
    try:
        # Strip the leading 'h' alias prefix from each result key so the output
        # dict is keyed by the plain dimension name.
        for key in a.keys():
            b[key[1:]]=a[key]
    except:
        b=dict.fromkeys(dimensionslist,0)
    return b
def AddMetricSums(period,metric_counts_list,filt,metrics,sort_order,table):
    """Build the "metric_sums" key of the response.

    For every reporting interval, runs two ClickHouse queries: one without the
    user filter (``total``) and one with it (``total_filtered``), then produces a
    per-interval dict {metric: {"total_sum": ..., "sum": ...}} merged with the
    absolute dates from ``period``.

    Returns (total, total_filtered, dates) — the raw last-interval result dicts
    plus the accumulated per-interval list.

    NOTE(review): the ``metric_counts_list`` parameter is unused; the closure
    variable ``metric_counts`` from the enclosing handler is used instead.
    """
    dates = []
    total={}
    total_filtered={}
    # relative_period carries the timezone-shifted dates used in SQL;
    # period carries the caller-facing absolute dates echoed back in the output.
    for (date,abs_date) in zip(relative_period,period):
        # Query for metric sums WITHOUT the user filter.
        q_total = ''' SELECT 1 as l,{metric_counts}
                            FROM {table}
                            WHERE {date_field} BETWEEN '{date1}' AND '{date2}' AND {site_filt}
                            '''
        # Same query WITH the user filter.
        q = ''' SELECT 1 as l,{metric_counts} FROM {table}
            WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}'
            '''
        # If two tables were requested, join the hits_with_visits metrics with
        # the adstat metrics via the constant column l.
        if is_two_tables:
            q_total = "SELECT {metrics_string} FROM (" + q_total + """) ALL FULL OUTER JOIN (SELECT 1 as l,{ad_metric_counts}
            FROM {DB}.adstat
            WHERE {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {site_filt}) USING l """
            q = "SELECT {metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT 1 as l,{ad_metric_counts}
             FROM {DB}.adstat
             WHERE 1 {filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {site_filt}) USING l"""
        q=q.format(date1=date['date1'], date2=date['date2'], metric_counts=metric_counts,site_filt=site_filt,metrics_string=metrics_string,
                  filt=filt, sort_order=sort_order,table=table,ad_date_field=ad_date_field,ad_metric_counts=ad_metric_counts,date_field=date_field,DB=DB)
        q_total=q_total.format(date1=date['date1'], date2=date['date2'], metric_counts=metric_counts,site_filt=site_filt,metrics_string=metrics_string,
                  filt=filt, sort_order=sort_order,table=table,ad_date_field=ad_date_field,ad_metric_counts=ad_metric_counts,date_field=date_field,DB=DB)
        print(q)
        # Best-effort: on any error the previous total/total_filtered are kept
        # (empty dicts on the first iteration).
        try:
            total = json.loads(get_clickhouse_data(q_total + ' FORMAT JSON', 'http://46.4.81.36:8123'))['data'][0]
            total_filtered = json.loads(get_clickhouse_data(q + ' FORMAT JSON', 'http://46.4.81.36:8123'))['data'][0]
        except:
            pass
        # Build the per-metric dict for this interval.
        metric_dict = dict.fromkeys(metrics)
        # Fill sums per metric; missing keys degrade to zeros.
        for key in list(metric_dict.keys()):
            sub_metr_dict={}
            try:
                sub_metr_dict['total_sum']=total[key]
            except:
                metric_dict[key] = {"total_sum": 0, "sum": 0}
                continue
            try:
                sub_metr_dict['sum'] = total_filtered[key]
            except:
                sub_metr_dict['sum']=0
            metric_dict[key]=sub_metr_dict
        # Merge in the absolute date1/date2 for this interval.
        metric_dict.update(abs_date)
        dates.append(metric_dict)
    return total,total_filtered,dates
def AddMetricSumsWithFilt(relative_period,metric_counts,ad_metric_counts,filt,metrics,sort_order,table):
    """Build the 'Все данные' ("All data") summary row for a segment report.

    Runs a filtered metric-sum query per interval and packs the results into one
    stats entry {label, segment, dates}, where dates holds per-interval metric
    dicts (missing metrics become 0).

    NOTE(review): reads the closure variable ``filter`` (which shadows the
    builtin) for the segment string; if it is undefined the segment is ''.
    """
    ar_d=[]
    for date in relative_period:
        t = '''SELECT 1 as l, {metric_counts} FROM {table}
        WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}'
        '''
        if is_two_tables:
            t = "SELECT {metrics_string} FROM ("+t+""") ALL FULL OUTER JOIN ( SELECT 1 as l,{ad_metric_counts}
            FROM {DB}.adstat WHERE 1 {filt} AND {site_filt}
            AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' ) USING l"""
        t=t.format(metric_counts=metric_counts,
                   date1=date['date1'],site_filt=site_filt,date2=date['date2'], filt=filt,metrics_string=metrics_string,
                   ad_date_field=ad_date_field,table=table, date_field=date_field,ad_metric_counts=ad_metric_counts,DB=DB)
        ar_d.append(json.loads(get_clickhouse_data(t+ ' FORMAT JSON', 'http://46.4.81.36:8123'))['data'])
    # Merge the per-table data when selecting from more than one table.
    counter=0
    # NOTE(review): the loop iterates the first interval's rows but only the
    # last-built st_d is returned — presumably one row per query is expected.
    for i in ar_d[0]:
        try:
            st_d = {'label': 'Все данные',
                    'segment': filter.replace(',',' OR ').replace(';',' AND ') }
        except:
            st_d = {'label': 'Все данные',
                    'segment': ''}
        dates = []
        for m in range(len(ar_d)):
            metrics_dict = dict.fromkeys(metrics)
            for j in metrics_dict:
                try:
                    metrics_dict[j] = ar_d[m][counter][j]
                except:
                    metrics_dict[j] = 0
            dates.append({'date1': period[m]['date1'], 'date2': period[m]['date2'], 'metrics': metrics_dict})
        st_d['dates'] = dates
        counter+=1
    return st_d
def AddStats2(dim,dim_with_alias,ad_dim_with_alias,metric_counts, filt, limit, period, metrics, table,date_filt):
    """Build the top-level "stats" key of the response.

    Handles four shapes of the first dimension:
      * a list            -> a group of segments ("segmentNN" ids);
      * a 'segment...' id -> a single saved segment;
      * a '*_path' name   -> per-visitor path aggregation;
      * anything else     -> a plain column, optionally under an attribution model.
    Each value of the first dimension becomes a {label, segment, dates[, sub]}
    entry; sub-levels are filled recursively via RecStats.

    NOTE(review): heavily closure-coupled (relative_period, site_filt, headers,
    sort_column, search_pattern, export, is_two_tables, metrics_string, DB, ...).
    """
    stats = []
    # Determine whether dimensions starts with a group of segments.
    if type(dim[0])==list:
        stats.append(AddMetricSumsWithFilt(relative_period, metric_counts,ad_metric_counts, filt, metrics, sort_order, table))
        seg_label_list={}
        # Fetch each segment's display name from the reference API, keyed by id.
        # (Original comment: "sort by segment name" — sorted() below actually
        # iterates/sorts the integer id keys.)
        for i in dim[0]:
            seg_label_list[int(i[7:])]=(json.loads(requests.get(
                stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(i[7:])),
                headers=headers).content.decode('utf-8'))['name'])
        if sort_order=='desc':
            seg_label_list=sorted(seg_label_list,reverse=True)
        else:
            seg_label_list = sorted(seg_label_list)
    else:
        seg_label_list=[]
    # If the dimension is a single saved segment (not a group):
    if 'segment' in dim[0]:
        stats.append(AddMetricSumsWithFilt(relative_period, metric_counts,ad_metric_counts, filt, metrics, sort_order, table))
        array_dates = []
        # Resolve the segment definition and name via the reference API;
        # dim[0][7:] is the numeric id after the "segment" prefix.
        seg=json.loads(requests.get(stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dim[0][7:])),
                headers=headers).content.decode('utf-8'))['real_definition']
        seg_filt=seg.partition("==")[0]+"=='"+seg.partition("==")[2]+"'"
        seg_label=json.loads(requests.get(stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dim[0][7:])),
                headers=headers).content.decode('utf-8'))['name']
        for date in relative_period:
            q = '''SELECT '{label_val}' as label,'{segment_val}' as segment,{metric_counts} FROM {table}
            WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt}
             '''
            if is_two_tables:
                q="SELECT label,segment,{metrics_string} FROM ("+q+""") ALL FULL OUTER JOIN (SELECT '{label_val}' as label,'{segment_val}' as segment,{ad_metric_counts} FROM {DB}.adstat
                WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt}) USING label LIMIT 1"""
            q=q.format(label_val=seg_label,
                       site_filt=site_filt,
                       segment_val=seg,
                       seg_filt=FilterParse(seg_filt.replace("'",'')),
                       metric_counts=metric_counts,
                       ad_date_field=ad_date_field,
                       ad_metric_counts=ad_metric_counts,
                       date1=date['date1'],metrics_string=metrics_string,
                       date2=date['date2'], filt=filt,
                       table=table, date_field=date_field,DB=DB)
            print(q)
            array_dates.append(json.loads(get_clickhouse_data(q+ " FORMAT JSON", 'http://46.4.81.36:8123'))['data'])
        counter=0
        for i in array_dates[0]:
            updimensions = []
            # Apply the case-insensitive search filter on labels.
            if search_pattern.lower() not in str(i['label']).lower():
                continue
            stat_dict = {'label': i['label'],
                         'segment': i['segment'],}
            dates = []
            for m in range(len(array_dates)):
                metrics_dict = dict.fromkeys(metrics)
                for j in metrics_dict:
                    try:
                        metrics_dict[j] = array_dates[m][counter][j]
                    except:
                        metrics_dict[j]=0
                dates.append({'date1': period[m]['date1'], 'date2': period[m]['date2'], 'metrics': metrics_dict})
            stat_dict['dates'] = dates
            # If dimensions has more than one entry, fill in the sub-level.
            if len(dim) > 1:
                # Add the sub-level filter.
                # NOTE(review): the same parsed filter is appended twice — verify
                # whether RecStats actually needs the duplicate.
                updimensions.append(FilterParse(seg_filt.replace("'",'')))
                updimensions.append(FilterParse(seg_filt.replace("'",'')))
                up_dim = stat_dict.copy()
                stat_dict['sub'] = RecStats(0, i, updimensions, table,up_dim)
            stats.append(stat_dict)
            counter+=1
    # Plain-column dimension (no segment group collected above).
    elif seg_label_list==[]:
        array_dates = []
        if sort_column=="":
            sort_column_in_query=dim[0]
        else:
            sort_column_in_query=sort_column
        # With an attribution model, metrics are attributed to the chosen visit.
        group_by=dim[0]
        if '_path' in dim[0]:
            group_by='visitorId'
        if attribution_model=='first_interaction':
            for date in relative_period:
                # Lookback window start for first-interaction attribution.
                date0=(datetime.strptime(date['date1'], time_format) - timedelta(days=int(attribution_lookup_period))).strftime(time_format)
                q = '''SELECT alias as {dimension},{sum_metric_string} FROM (SELECT visitorId,any({dimension_without_aliases}) as alias FROM {table}
                WHERE visitorId IN (SELECT visitorId FROM {DB}.hits_with_visits WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}')
                AND {date_field} BETWEEN '{date0}' AND '{date2}' {filt} GROUP BY visitorId)
                ALL INNER JOIN
                (SELECT {metric_counts},visitorId FROM {table} WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY visitorId)
                USING visitorId GROUP BY {group_by}'''
                if is_two_tables:
                    q = "SELECT {dimension},{metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT {ad_dimension_with_alias},{ad_metric_counts} FROM {DB}.adstat
                    WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {group_by}) USING {dimension}"""
                q = q.format(table=table, dimension_with_alias=dim_with_alias[0],ad_dimension_with_alias=ad_dim_with_alias[0], dimension=dim[0],
                             sum_metric_string=sum_metric_string,date0=str(date0),
                             metric_counts=metric_counts, date1=date['date1'], ad_metric_counts=ad_metric_counts,
                             site_filt=site_filt, date2=date['date2'], filt=filt, group_by=group_by,
                             dimension_without_aliases=list_with_time_dimensions_without_aliases[0],
                             date_field=date_field, ad_date_field=ad_date_field, metrics_string=metrics_string,DB=DB)
                array_dates.append(json.loads(get_clickhouse_data(q + ' ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON'.format(
                    sort_column=sort_column_in_query, sort_order=sort_order, limit=limit),'http://46.4.81.36:8123'))['data'])
        elif attribution_model=='last_non-direct_interaction':
            for date in relative_period:
                q = '''SELECT alias as {dimension},{sum_metric_string}
                FROM (SELECT visitorId,any({dimension_without_aliases}) as alias FROM {table}
                WHERE visitorId IN (SELECT visitorId FROM {table} WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}')
                AND {date_field} < '{date2}' AND referrerType!='direct' {filt} GROUP BY visitorId)
                ALL INNER JOIN
                (SELECT {metric_counts},visitorId, {dimension_with_alias} FROM {table} WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY visitorId,{group_by})
                USING visitorId
                GROUP BY {group_by}'''
                if is_two_tables:
                    q="SELECT {dimension},{metrics_string} FROM ("+q+""") ALL FULL OUTER JOIN (SELECT {ad_dimension_with_alias},{ad_metric_counts} FROM {DB}.adstat
                    WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {group_by}) USING {dimension}"""
                q = q.format(table=table, dimension_with_alias=dim_with_alias[0],ad_dimension_with_alias=ad_dim_with_alias[0], dimension=dim[0],sum_metric_string=sum_metric_string,
                             metric_counts=metric_counts, date1=date['date1'], ad_metric_counts=ad_metric_counts,
                             site_filt=site_filt, date2=date['date2'], filt=filt, group_by=group_by,dimension_without_aliases=list_with_time_dimensions_without_aliases[0],
                             date_field=date_field, ad_date_field=ad_date_field, metrics_string=metrics_string,DB=DB)
                print(q+' ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON'.format(sort_column=sort_column_in_query,sort_order=sort_order,limit=limit))
                array_dates.append(json.loads(get_clickhouse_data(q+' ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON'.format(sort_column=sort_column_in_query,sort_order=sort_order,limit=limit), 'http://46.4.81.36:8123'))['data'])
        else:
            for date in relative_period:
                q = '''SELECT {dimension_with_alias},{metric_counts} FROM {table}
                WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}'
                GROUP BY {group_by}'''
                # If ad metrics or adstat-only fields were requested in the API
                # call, join the hits-and-visits table with adstat.
                if is_two_tables:
                    q="SELECT {dimension},{metrics_string} FROM ("+q+""") ALL FULL OUTER JOIN (SELECT {ad_dimension_with_alias},{ad_metric_counts} FROM {DB}.adstat
                    WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {group_by}) USING {dimension}"""
                q=q.format(table=table,dimension_with_alias=dim_with_alias[0],ad_dimension_with_alias=ad_dim_with_alias[0],dimension=dim[0], metric_counts=metric_counts,date1=date['date1'],ad_metric_counts=ad_metric_counts,
                           site_filt=site_filt,date2=date['date2'], filt=filt,group_by=group_by,date_field=date_field,ad_date_field=ad_date_field,metrics_string=metrics_string,DB=DB)
                # If the current dimension contains "_path", rewrite the query to
                # collect the visitorId arrays alongside the aggregated metrics.
                if '_path' in dim[0]:
                    q="SELECT {dimension},replaceAll(replaceAll(toString(groupUniqArray(visitorId)),'[','('),']',')') as visitorId,{sum_metric_string} FROM ({q}) GROUP BY {dimension}".format(dimension=dim[0],q=q.replace('SELECT','SELECT visitorId,'),sum_metric_string=sum_metric_string)
                print(q+' ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON'
                      .format(sort_column=sort_column_in_query,sort_order=sort_order,limit=limit))
                array_dates.append(json.loads(get_clickhouse_data(q+' ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON'
                                                                  .format(sort_column=sort_column_in_query,sort_order=sort_order,limit=limit), 'http://46.4.81.36:8123'))['data'])
        # Export statistics to xlsx if requested.
        if export=='xlsx':
            return ToExcel(array_dates[0])
        dates_dicts=datesdicts(array_dates,dim[0],dim_with_alias[0],ad_dim_with_alias[0],table,date_filt,1,group_by)
        # All-zero check: if every interval produced only zero metrics, return
        # the (possibly empty) stats collected so far.
        empties=[]
        for i in array_dates:
            empty_d=True
            for j in i:
                if len(list(j.keys()))!=1 and list(j.values()).count(0)!=len(list(j.keys()))-1:
                    empty_d=False
                    break
            empties.append(empty_d)
        if empties.count(True)==len(empties):
            return stats
        # Iterate the longest interval's rows (limited to lim entries).
        for i in array_dates[MaxLenNum(array_dates)][:lim]:
            updimensions = []
            if search_pattern.lower() not in str(i[dim[0]]).lower():
                continue
            stat_dict = {'label': i[dim[0]],
                         'segment': '{label}=={value}'.format(label=dim[0]
                                                              , value=i[dim[0]])
                         }
            dates = []
            is_all_nulls = True
            for m in range(len(array_dates)):
                metrics_dict = dict.fromkeys(metrics)
                for j in metrics_dict:
                    try:
                        metrics_dict[j] = dates_dicts[m][i[dim[0]]][j]
                        is_all_nulls=False
                    except:
                        metrics_dict[j]=0
                dates.append({'date1': period[m]['date1'], 'date2': period[m]['date2'], 'metrics': metrics_dict})
            # If every metric value in every interval is zero for this dimension
            # value, skip it.
            if is_all_nulls:
                continue
            stat_dict['dates'] = dates
            if len(dim) > 1:
                # Add the sub-level. Computed (time) dimensions are substituted
                # with their SQL expression from time_dimensions_dict.
                if '_path' in dim[0]:
                    updimensions.append('visitorId IN {list_of_id}'.format(list_of_id=i['visitorId']))
                else:
                    try:
                        if dim[0] in time_dimensions_dict.keys():
                            if type(i[dim[0]]) is int:
                                updimensions.append("{updimension}={updimension_val}".format(updimension=time_dimensions_dict[dim[0]],updimension_val=i[dim[0]]))
                            else:
                                updimensions.append(
                                    "{updimension}='{updimension_val}'".format(updimension=time_dimensions_dict[dim[0]],
                                                                              updimension_val=i[dim[0]]))
                        else:
                            updimensions.append("{updimension}='{updimension_val}'".format(updimension_val=i[dim[0]],
                                                                                          updimension=dim[0]))
                    except:
                        pass
                up_dim=stat_dict.copy()# Pass a copy describing the upper level ("All files") down the recursion
                stat_dict['sub'] = RecStats(0, i, updimensions, table,up_dim)
            stats.append(stat_dict)
    # Segment groups: one stats entry per segment id collected above.
    for num in seg_label_list:
        array_dates = []
        seg = json.loads(requests.get(
            stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(num)),
            headers=headers).content.decode('utf-8'))['real_definition']
        seg_filt = seg.partition("==")[0] + "=='" + seg.partition("==")[2] + "'"
        seg_label = json.loads(requests.get(
            stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(num)),
            headers=headers).content.decode('utf-8'))['name']
        counter=0
        for date in relative_period:
            q = '''SELECT '{label_val}' as label,'{segment_val}' as segment,{metric_counts} FROM {table}
            WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt}
             '''
            if is_two_tables:
                q = "SELECT label,segment,{metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT '{label_val}' as label,'{segment_val}' as segment,{ad_metric_counts} FROM {DB}.adstat
                WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {seg_filt}) USING label LIMIT 1"""
            q = q.format(label_val=seg_label,
                         site_filt=site_filt,
                         segment_val=seg,
                         seg_filt=FilterParse(seg_filt.replace("'", '')),
                         metric_counts=metric_counts,
                         ad_date_field=ad_date_field,
                         ad_metric_counts=ad_metric_counts,
                         date1=date['date1'], metrics_string=metrics_string,
                         date2=date['date2'], filt=filt,
                         table=table, date_field=date_field,DB=DB)
            print(q)
            array_dates.append(json.loads(get_clickhouse_data(q +" FORMAT JSON", 'http://46.4.81.36:8123'))['data'])
            # Empty result: synthesize a zero-metric row so indexing stays valid.
            if array_dates[counter] == []:
                empty_dict = {'label': seg_label,
                              'segment': seg}
                for metric in metrics:
                    empty_dict[metric] = 0
                array_dates[counter].append(empty_dict)
            counter+=1
        counter = 0
        for i in array_dates[0]:
            updimensions = []
            if search_pattern.lower() not in str(i['label']).lower():
                continue
            stat_dict = {'label': i['label'],
                         'segment': i['segment'],}
            dates = []
            for m in range(len(array_dates)):
                metrics_dict = dict.fromkeys(metrics)
                for j in metrics_dict:
                    try:
                        metrics_dict[j] = array_dates[m][counter][j]
                    except:
                        metrics_dict[j]=0
                dates.append({'date1': period[m]['date1'], 'date2': period[m]['date2'], 'metrics': metrics_dict})
            stat_dict['dates'] = dates
            # If dimensions is longer than the group, fill in the sub-level.
            if len(dim) > 1:
                # Add the sub-level filter.
                # NOTE(review): seg_filt appended twice, and RecStats receives 1
                # as up_dim here (other call sites pass stat_dict.copy()) —
                # looks inconsistent; verify against RecStats.
                updimensions.append(seg_filt)
                updimensions.append(seg_filt)
                stat_dict['sub'] = RecStats(0, i, updimensions, table,1)
            stats.append(stat_dict)
            counter+=1
    return stats
def FilterParse(filt_string):
    """Translate an API ``global_filter`` expression into a SQL WHERE fragment.

    Supported operators per condition:
      * simple:  == != >= <= > <   (kept as-is; value quoted unless numeric);
      * LIKE-style: =@ !@ =^ =$ !^ !&  -> SQL LIKE / NOT LIKE patterns;
      * regex: =~ !~ -> ClickHouse match() / NOT match().
    Conditions are separated by ',' (OR) and ';' (AND).

    Implementation note: commas that belong to substituted time-dimension SQL
    expressions are temporarily replaced with '?' so they are not mistaken for
    OR separators, and restored at the very end.

    NOTE(review): closure-coupled — uses ``lang``, ``date_field``, ``table`` and
    the helpers get_time_dimensions*/negative_condition from the enclosing scope.
    """
    #filt_string=filt_string.replace(',',' OR ')
    #filt_string = filt_string.replace(';', ' AND ')
    #print(filt_string.partition('=@'))
    simple_operators=['==','!=','>=','<=','>','<']
    like_operators=['=@','!@','=^','=$','!^','!&']
    like_str={'=@':" LIKE '%{val}%'",'!@':" NOT LIKE '%{val}%'",'=^':" LIKE '{val}%'",'=$':" LIKE '%{val}'",'!^':" NOT LIKE '{val}%'",'!&':" NOT LIKE '%{val}'"}
    match_operators=['=~','!~']
    match_str={'=~':" match({par}?'{val}')",'!~':" NOT match({par}?'{val}')"}
    # Indices of the top-level OR/AND separators in the raw filter string.
    separator_indices=[]
    for i in range(len(filt_string)):
        if filt_string[i]==',' or filt_string[i]==';':
            separator_indices.append(i)
    separator_indices.append(len(filt_string))
    end_filt=""
    # Process each condition between two separators independently.
    for i in range(len(separator_indices)):
        if i==0:
            sub_str = filt_string[0:separator_indices[i]]
        else:
            sub_str=filt_string[separator_indices[i-1]+1:separator_indices[i]]
        # Replace time-dimension names with their SQL expressions; commas inside
        # those expressions become '?' placeholders (restored at the end).
        for time_dim in get_time_dimensions_names():
            if time_dim in sub_str:
                sub_str=sub_str.replace(time_dim,get_time_dimensions(time_dim).format(lang=lang,date_field=date_field).replace(',','?'))
                break
        for j in simple_operators:
            if sub_str.partition(j)[2]=='':
                pass
            else:
                first_arg=sub_str.partition(j)[0]
                operator = j
                # Treat the value as numeric only if int() succeeds AND a probe
                # query against ClickHouse accepts the unquoted comparison;
                # otherwise quote it as a string literal.
                try:
                    int(sub_str.partition(j)[2])
                    json.loads(get_clickhouse_data(
                        'SELECT {par}=={val} FROM {table} LIMIT 1 FORMAT JSON'.format(table=table,
                        par=sub_str.partition(j)[0].replace('?',','), val=sub_str.partition(j)[2]), 'http://46.4.81.36:8123'))
                    second_arg=sub_str.partition(j)[2]
                except:
                    second_arg= "'" + sub_str.partition(j)[2] + "'"
                # A leading '!' on the field negates the operator (via the
                # negative_condition helper defined elsewhere).
                if sub_str.partition(j)[0][0]=='!':
                    operator=negative_condition(j)
                    first_arg=sub_str.partition(j)[0][1:]
                sub_str=first_arg+operator+second_arg
                break
        for j in like_operators:
            if sub_str.partition(j)[2]=='':
                pass
            else:
                operator = j
                first_arg=sub_str.partition(j)[0]
                second_arg=sub_str.partition(j)[2]
                if sub_str.partition(j)[0][0]=='!':
                    operator = negative_condition(j)
                    first_arg = sub_str.partition(j)[0][1:]
                sub_str = first_arg +like_str[operator].format(val=second_arg)
                break
        for j in match_operators:
            if sub_str.partition(j)[2]=='':
                pass
            else:
                operator = j
                first_arg = sub_str.partition(j)[0]
                second_arg = sub_str.partition(j)[2]
                if sub_str.partition(j)[0][0]=='!':
                    operator = negative_condition(j)
                    first_arg = sub_str.partition(j)[0][1:]
                sub_str = match_str[operator].format(val=second_arg,par=first_arg)
                break
        # Re-attach the original separator character after the rewritten
        # condition; the last condition has no trailing separator (IndexError).
        try:
            end_filt=end_filt+sub_str+filt_string[separator_indices[i]]
        except:
            end_filt = end_filt + sub_str
    # Turn separators into SQL connectives and restore protected commas.
    end_filt=end_filt.replace(',',' OR ')
    end_filt=end_filt.replace(';',' AND ')
    end_filt = end_filt.replace('?', ',')
    return end_filt
if request.method=='POST':
#Заголовки для запроса сегментов
# Если переданы только сегменты
resp = {} # Выходной словарь
headers = {
'Authorization': 'JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjoxOSwiZW1haWwiOiIiLCJ1c2VybmFtZSI6ImFydHVyIiwiZXhwIjoxNTE4MTIxNDIyfQ._V0PYXMrE2pJlHlkMtZ_c-_p0y0MIKsv8o5jzR5llpY',
'Content-Type': 'application/json'}
# Парсинг json
try:
export = json.loads(request.body.decode('utf-8'))['export']
except:
export = ""
try:
sort_order=json.loads(request.body.decode('utf-8'))['sort_order']
except:
sort_order=""
try:
lang = json.loads(request.body.decode('utf-8'))['lang']
if lang == "":
lang='ru'
if lang == "en":
lang='eng'
except:
lang = "ru"
dimensionslist_with_segments=json.loads(request.body.decode('utf-8'))['dimensions']
dimensionslist = []
#Создание списка параметров без сегментов
dimensionslist_with_segments_and_aliases=[]
time_dimensions_dict={}
list_with_time_dimensions_without_aliases=[]#список с параметрами,в которых временные параматры, типа year,month и тд будут представлены без алиасов
ad_dimensionslist_with_segments_and_aliases = []
ad_time_dimensions_dict = {}
ad_list_with_time_dimensions_without_aliases = []
for d in dimensionslist_with_segments:
if 'segment' not in d and type(d)!=list:
dimensionslist.append(d)
try:
time_dimensions_dict[d]=get_time_dimensions(d).format(lang=lang,date_field='toDateTime(serverTimestamp)')
dimensionslist_with_segments_and_aliases.append(time_dimensions_dict[d]+' as '+d)
list_with_time_dimensions_without_aliases.append(time_dimensions_dict[d])
ad_time_dimensions_dict[d] = get_time_dimensions(d).format(lang=lang, date_field='StatDateTime')
ad_dimensionslist_with_segments_and_aliases.append(ad_time_dimensions_dict[d] + ' as ' + d)
ad_list_with_time_dimensions_without_aliases.append(ad_time_dimensions_dict[d])
continue
except:
list_with_time_dimensions_without_aliases.append(d)
ad_list_with_time_dimensions_without_aliases.append(d)
dimensionslist_with_segments_and_aliases.append(d)
ad_dimensionslist_with_segments_and_aliases.append(d)
metrics = json.loads(request.body.decode('utf-8'))['metrics']
# сортировка по переданному показателю
try:
sort_column = json.loads(request.body.decode('utf-8'))['sort_column']
except:
sort_column = ""
#если для сортировки передан показатель, которого нет вmetrics, добавляем его в metrics
if sort_column not in metrics and sort_column!="" and sort_column not in get_all_dimensions() and '_path' not in sort_column:
metrics.append(sort_column)
# строка "sum(metric1),avg(metric2)...". если показатель относительный используется avg, если нет - sum
sum_metric_string=[]
for i in metrics:
if i in ['clicks','ctr','cost','impressions']:
continue
if 'conversion' in i or i in ['conversion_rate','bounce_rate','nb_new_visits_per_all_visits','nb_new_visitors_per_all_visitors','avg_time_generation','nb_return_visitors_per_all_visitors','avg_visit_length','nb_pageviews_per_visit','nb_actions_per_visit','nb_downloas_per_visit'] or i.find(r'goal\d{1,3}_conversion') != -1:
sum_metric_string.append("floor(avg("+i+"),2) as "+i)
else:
if "goalgroup" in i and "cost" in i:
sum_metric_string.append("CAST(sum(goalgroup{N}),'Int') as goalgroup{N}".format(N=re.findall(r'\d{1,3}',i)[0]))
continue
if "goal" in i and "cost" in i:
sum_metric_string.append(
"CAST(sum(goal{N}),'Int') as goal{N}".format(N=re.findall(r'\d{1,3}', i)[0]))
continue
else:
sum_metric_string.append("CAST(sum(" + i + "),'Int') as " + i)
sum_metric_string=','.join(sum_metric_string)
#если в запросе не указан сдвиг, зададим его равным нулю
try:
offset = json.loads(request.body.decode('utf-8'))['offset']
except:
offset='0'
#если в запросе не указан лимит, зададим его путой строкой, если указан, составим строку LIMIT...
try:
lim=int(json.loads(request.body.decode('utf-8'))['limit'])
limit = 'LIMIT '+str(offset)+','+str(json.loads(request.body.decode('utf-8'))['limit'])
except:
limit=''
lim=None
period = json.loads(request.body.decode('utf-8'))['periods']
try:
search_pattern=json.loads(request.body.decode('utf-8'))['search_pattern']
except:
search_pattern=""
try:
attribution_model=json.loads(request.body.decode('utf-8'))['attribution_model']
except:
attribution_model=""
try:
attribution_lookup_period=json.loads(request.body.decode('utf-8'))['attribution_lookup_period']
except:
attribution_lookup_period=""
site_filt=' 1' # по умолчанию фильтра по idSite нет
time_format = '%Y-%m-%d'
ad_date_field='StatDateTime'
filt=''
try:
profile_id=json.loads(request.body.decode('utf-8'))['profile_id']
try:
timezone = json.loads(requests.get(
stas_api+'profiles/{profile_id}/?all=1'.format(profile_id=profile_id),
headers=headers).content.decode('utf-8'))['timezone']
date1 = datetime.strptime(period[0]['date1'] + '-00', '%Y-%m-%d-%H')
timezone = pytz.timezone(timezone)
date1 = timezone.localize(date1)
time_offset = str(date1)[19:]
relative_period = []
for date in period:
# вторую дату увеличиваем на день
date2=(datetime.strptime(date['date2'], '%Y-%m-%d') - timedelta(
days=-1)).strftime('%Y-%m-%d')
if time_offset[0] == '+':
relative_period.append({'date1': str(
datetime.strptime(date['date1'] + '-00', '%Y-%m-%d-%H') - timedelta(
hours=int(time_offset[2]) - 3)), 'date2': str(
datetime.strptime(date2 + '-00', '%Y-%m-%d-%H') - timedelta(
hours=int(time_offset[2]) - 3))})
else:
relative_period.append({'date1': str(
datetime.strptime(date['date1'] + '-00', '%Y-%m-%d-%H') - timedelta(
hours=-int(time_offset[2]) - 3)),
'date2': str(datetime.strptime(date2 + '-00', '%Y-%m-%d-%H') - timedelta(
hours=-int(time_offset[2]) - 3))})
date_field='toDateTime(serverTimestamp)'
ad_date_field='StatDateTime'
time_format='%Y-%m-%d %H:%M:%S'
except:
relative_period=period
date_field = 'serverDate'
try:
if json.loads(get_clickhouse_data(
'SELECT idSite FROM {DB}.hits_with_visits WHERE idSite={idSite} LIMIT 1 FORMAT JSON'.format(DB=DB,
idSite=json.loads(requests.get(
stas_api+'profiles/{profile_id}/?all=1'.format(
profile_id=profile_id), headers=headers).content.decode('utf-8'))[
'site_db_id']), 'http://46.4.81.36:8123'))['data'] == []:
filt = ' AND 0'
site_filt = " 0"
else:
site_filt = ' idSite==' + str(json.loads(requests.get(
stas_api+'profiles/{profile_id}/?all=1'.format(profile_id=profile_id),
headers=headers).content.decode('utf-8'))['site_db_id'])
except:
filt = ' AND 0'
site_filt = " 0"
except:
relative_period=period
date_field = 'serverDate'
table = DB+'.hits_with_visits'
#Формируем массив с count() для каждого параметра
dimension_counts=[]
ad_dimension_counts = []
for i in dimensionslist:
if i in time_dimensions_dict.keys():
dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension_alias}".format(dimension=time_dimensions_dict[i],dimension_alias=i))
ad_dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension_alias}".format(dimension=ad_time_dimensions_dict[i],dimension_alias=i))
else:
dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension}".format(dimension=i))
ad_dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension}".format(dimension=i))
is_two_tables=False
# Фильтр по всем датам
date_filt = []
ad_date_filt = []
for dates in relative_period:
date_filt.append(
"({date_field} BETWEEN '".format(date_field=date_field) + str(dates['date1']) + "' AND '" + str(
dates['date2']) + "')")
ad_date_filt.append(
"({ad_date_field} BETWEEN '".format(ad_date_field=ad_date_field) + str(
dates['date1']) + "' AND '" + str(
dates['date2']) + "')")
date_filt = ' OR '.join(date_filt)
ad_date_filt = ' OR '.join(ad_date_filt)
# Проверка, состоит ли dimensions только из сегментов
is_all_segments = True
for dim in dimensionslist:
if 'segment' not in dim:
is_all_segments=False
# ФОрмируем массив с запросом каждого показателя в SQL
metric_counts_list,ad_metric_counts_list,metrics_string=MetricCounts(metrics,headers,dimensionslist,is_all_segments,attribution_model)
#Если список нерекламных показателей не пуст, делаем строку с показателями из списка
metric_counts = ','.join(metric_counts_list)
ad_metric_counts = ','.join(ad_metric_counts_list)
# если в metrics присутствуют оба вида показателей, то устанавливаем флаг двух таблиц в true
if metric_counts_list!=[] and ad_metric_counts_list!=[] :
is_two_tables=True
for dim in dimensionslist:
if dim not in get_adstat_dimensions():
is_two_tables=False
break
if is_all_segments==True and metric_counts_list!=[] and ad_metric_counts_list!=[]:
is_two_tables = True
if metric_counts_list==[]:
dimension_counts=ad_dimension_counts
dimensionslist_with_segments_and_aliases=ad_dimensionslist_with_segments_and_aliases
list_with_time_dimensions_without_aliases=ad_list_with_time_dimensions_without_aliases
table=DB+'.adstat'
metric_counts=','.join(ad_metric_counts_list)
date_field=ad_date_field
date_filt=ad_date_filt
#Если фильтр не нулевой(idSite для данного profile_id существует),добавляем его
if filt!=' AND 0':
try:
filter = json.loads(request.body.decode('utf-8'))['filter']
except:
filt = " "
else:
if filter != "":
filt = "AND" + "(" + FilterParse(filter) + ")"
try:
show_filter_label = json.loads(request.body.decode('utf-8'))['show_filter_label']
except:
show_filter_label = ''
# Если в show_filter_label передали true, то добавляем в результирующий словарь массив filter_label
if show_filter_label in ['True', 'true', 'TRUE', True]:
filter = filter.replace(',', ';')
filter = filter.split(';')
filter_label = []
for sub_filt in filter:
filter_label.append(re.split(r'={1,2}', sub_filt)[1].capitalize())
resp['filter_label'] = filter_label
else:
filt = "AND 1"
# Заполнение таблицы с рекламной статистикой
"""load_query = "INSERT INTO CHdatabase_test.adstat VALUES "
next=stas_api+'ad_stat/?all=1'
while json.loads(requests.get(next, headers=headers).content.decode('utf-8'))['next']!='null':
for part in json.loads(requests.get(next, headers=headers).content.decode('utf-8'))['results']:
query = load_query + "(" + str(list(part.values())[1:])[1:len(str(list(part.values())[1:])) - 1] + ",1)"
query = query.replace("None", "'none'")
get_clickhouse_data(query, 'http://46.4.81.36:8123')
next=json.loads(requests.get(next, headers=headers).content.decode('utf-8'))['next']"""
#Добавляем в выходной словарь параметр counts
resp['counts'] = {}
try:
resp['counts'] = AddCounts(period, dimension_counts,ad_dimension_counts, filt, sort_order, table, date_filt)
except:
pass
# Добавляем в выходной словарь параметр metric_sums
resp['metric_sums']={}
total,total_filtered,resp['metric_sums']['dates'] = AddMetricSums(period,metric_counts_list,filt,metrics,sort_order,table)
if site_filt==' 0':
stats=[]
else:
stats=AddStats2(dimensionslist_with_segments,dimensionslist_with_segments_and_aliases,ad_dimensionslist_with_segments_and_aliases,metric_counts,filt,limit,period,metrics,table,date_filt)
# Добавляем stats
resp['stats']=stats
pprint.pprint(resp)
if export=='xlsx':
filename =stats
content_type = 'application/vnd.ms-excel'
file_path = os.path.join(MEDIA_ROOT, filename)
response = HttpResponse(FileWrapper(open(file_path, 'rb')), content_type=content_type)
response['Content-Disposition'] = 'attachment; filename=%s' % (filename)
response['Content-Length'] = os.path.getsize(file_path)
return response
response=JsonResponse(resp,safe=False,)
response['Access-Control-Allow-Origin']='*'
return response
else:
args={}
args.update(csrf(request))
return render_to_response('mainAPI.html',args)
def segment_stat(request):
    """GET endpoint: visitor/visit totals for a date range, with and without a segment filter.

    Reads ``date1``/``date2``, an optional ``filter`` expression and an optional
    ``profile_id`` from the query string, queries ClickHouse twice (unfiltered and
    filtered) and returns both sums under the ``segment_stat`` key as JSON.
    """
    def get_clickhouse_data(query, host, connection_timeout=1500):
        """Run a raw SQL query against the ClickHouse HTTP interface and return the body text."""
        r = requests.post(host, params={'query': query}, timeout=connection_timeout)
        return r.text
    def FilterParse(filt_string):
        """Translate a global_filter expression into an SQL condition string."""
        #filt_string=filt_string.replace(',',' OR ')
        #filt_string = filt_string.replace(';', ' AND ')
        #print(filt_string.partition('=@'))
        # Supported operator families: plain comparisons, LIKE-style and regex-style.
        simple_operators=['==','!=','>=','<=','>','<']
        like_operators=['=@','!@','=^','=$','!^','!&']
        like_str={'=@':" LIKE '%{val}%'",'!@':" NOT LIKE '%{val}%'",'=^':" LIKE '{val}%'",'=$':" LIKE '%{val}'",'!^':" NOT LIKE '{val}%'",'!&':" NOT LIKE '%{val}'"}
        match_operators=['=~','!~']
        match_str={'=~':" match({par}?'{val}')",'!~':" NOT match({par}?'{val}')"}
        # Positions of ','/';' separators: ',' later becomes OR, ';' becomes AND.
        separator_indices=[]
        for i in range(len(filt_string)):
            if filt_string[i]==',' or filt_string[i]==';':
                separator_indices.append(i)
        separator_indices.append(len(filt_string))
        end_filt=""
        for i in range(len(separator_indices)):
            if i==0:
                sub_str = filt_string[0:separator_indices[i]]
            else:
                sub_str=filt_string[separator_indices[i-1]+1:separator_indices[i]]
            # Replace time-dimension names with the corresponding ClickHouse expressions
            # (',' temporarily encoded as '?' so the OR/AND expansion below is not confused).
            for time_dim in get_time_dimensions_names():
                if time_dim in sub_str:
                    sub_str=sub_str.replace(time_dim,get_time_dimensions(time_dim).format(lang='ru',date_field='toDateTime(serverTimestamp)').replace(',','?'))
                    break
            for j in simple_operators:
                if sub_str.partition(j)[2]=='':
                    pass
                else:
                    first_arg=sub_str.partition(j)[0]
                    operator = j
                    try:
                        # Numeric right-hand side: probe ClickHouse to verify the column
                        # compares numerically; on any failure the value is quoted instead.
                        int(sub_str.partition(j)[2])
                        print('SELECT {par}=={val} FROM {DB}.hits_with_visits LIMIT 1 FORMAT JSON'.format(DB=DB,
                            par=sub_str.partition(j)[0].replace('?',','), val=sub_str.partition(j)[2]))
                        json.loads(get_clickhouse_data(
                            'SELECT {par}=={val} FROM {DB}.hits_with_visits LIMIT 1 FORMAT JSON'.format(DB=DB,
                            par=sub_str.partition(j)[0].replace('?',','), val=sub_str.partition(j)[2]), 'http://46.4.81.36:8123'))
                        second_arg=sub_str.partition(j)[2]
                    except:
                        second_arg= "'" + sub_str.partition(j)[2] + "'"
                    if sub_str.partition(j)[0][0]=='!':
                        operator=negative_condition(j)
                        first_arg=sub_str.partition(j)[0][1:]
                    sub_str=first_arg+operator+second_arg
                    break
            for j in like_operators:
                if sub_str.partition(j)[2]=='':
                    pass
                else:
                    operator = j
                    first_arg=sub_str.partition(j)[0]
                    second_arg=sub_str.partition(j)[2]
                    if sub_str.partition(j)[0][0]=='!':
                        operator = negative_condition(j)
                        first_arg = sub_str.partition(j)[0][1:]
                    sub_str = first_arg +like_str[operator].format(val=second_arg)
                    break
            for j in match_operators:
                if sub_str.partition(j)[2]=='':
                    pass
                else:
                    operator = j
                    first_arg = sub_str.partition(j)[0]
                    second_arg = sub_str.partition(j)[2]
                    if sub_str.partition(j)[0][0]=='!':
                        operator = negative_condition(j)
                        first_arg = sub_str.partition(j)[0][1:]
                    sub_str = match_str[operator].format(val=second_arg,par=first_arg)
                    break
            try:
                # Re-append the original separator; IndexError on the final chunk.
                end_filt=end_filt+sub_str+filt_string[separator_indices[i]]
            except:
                end_filt = end_filt + sub_str
        end_filt=end_filt.replace(',',' OR ')
        end_filt=end_filt.replace(';',' AND ')
        end_filt = end_filt.replace('?', ',')
        return end_filt
    if request.method=='GET':
        # NOTE(review): hard-coded JWT credential — should live in settings/secret storage.
        headers = {
            'Authorization': 'JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjoxOSwiZW1haWwiOiIiLCJ1c2VybmFtZSI6ImFydHVyIiwiZXhwIjoxNTE4MTIxNDIyfQ._V0PYXMrE2pJlHlkMtZ_c-_p0y0MIKsv8o5jzR5llpY',
            'Content-Type': 'application/json'}
        # Flatten single-item query-string lists into scalars.
        response=dict(request.GET)
        for key in response.keys():
            response[key]=response[key][0]
        try:
            if response['filter']=='':
                print(response['filter'])
                filter=1
            else:
                filter=FilterParse(response['filter'])
        except:
            filter = 1
        try:
            profile_id=response['profile_id']
            try:
                # Compute the relative time window (shifted by the profile's timezone)
                timezone = json.loads(requests.get(
                    stas_api+'profiles/{profile_id}/?all=1'.format(profile_id=profile_id),
                    headers=headers).content.decode('utf-8'))['timezone']
                d1 = datetime.strptime(response['date1'] + '-00', '%Y-%m-%d-%H')
                timezone = pytz.timezone(timezone)
                d1 = timezone.localize(d1)
                # str(aware_dt)[19:] yields the UTC offset suffix, e.g. '+03:00'.
                time_offset = str(d1)[19:]
                # Extend the second date by one day
                d2 = (datetime.strptime(response['date2'], '%Y-%m-%d') - timedelta(
                    days=-1)).strftime('%Y-%m-%d')
                # NOTE(review): single-digit offset hour parsing (time_offset[2]) and the
                # fixed '-3' server offset look fragile — confirm for offsets >= 10h.
                if time_offset[0] == '+':
                    relative_date1=str(datetime.strptime(response['date1'] + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=int(time_offset[2]) - 3))
                    relative_date2=str(datetime.strptime(d2 + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=int(time_offset[2]) - 3))
                else:
                    relative_date1 = str(datetime.strptime(response['date1'] + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=-int(time_offset[2]) - 3))
                    relative_date2 = str(datetime.strptime(d2 + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=-int(time_offset[2]) - 3))
                date_field='toDateTime(serverTimestamp)'
            except:
                relative_date1=response['date1']
                relative_date2 = response['date2']
                date_field = 'serverDate'
            site_filter = ' 1'
            try:
                # Restrict to the profile's idSite when data for that site exists.
                if json.loads(get_clickhouse_data(
                    'SELECT idSite FROM '+DB+'.hits_with_visits WHERE idSite={idSite} LIMIT 1 FORMAT JSON'.format(
                        idSite=json.loads(requests.get(
                            stas_api+'profiles/{profile_id}/?all=1'.format(
                                profile_id=profile_id), headers=headers).content.decode('utf-8'))[
                            'site_db_id']), 'http://46.4.81.36:8123'))['data'] == []:
                    site_filter = ' 0'
                else:
                    site_filter = ' idSite==' + str(json.loads(requests.get(
                        stas_api+'profiles/{profile_id}/?all=1'.format(profile_id=profile_id),
                        headers=headers).content.decode('utf-8'))['site_db_id'])
            except:
                site_filter = ' 0'
        except:
            site_filter =' 1'
            relative_date1=response['date1']
            relative_date2= response['date2']
            date_field = 'serverDate'
        # Totals without the segment filter.
        q_total="""
        SELECT CAST(uniq(visitorId),'Int') as visitors,CAST(uniq(idVisit),'Int') as visits FROM {DB}.hits_with_visits WHERE {date_field} BETWEEN '{date1}' AND '{date2}' AND {site_filter} FORMAT JSON
        """.format(date1=relative_date1,date2=relative_date2,date_field=date_field,site_filter=site_filter,DB=DB)
        print(q_total)
        try:
            total=json.loads(get_clickhouse_data(q_total, 'http://46.4.81.36:8123'))['data'][0]
        except:
            total={'visitors':0,'visits':0}
        # Same totals with the parsed segment filter applied.
        q = """
        SELECT CAST(uniq(visitorId),'Int') as visitors,CAST(uniq(idVisit),'Int') as visits FROM {DB}.hits_with_visits WHERE {date_field} BETWEEN '{date1}' AND '{date2}' AND {filter} AND {site_filter} FORMAT JSON
        """.format(date1=relative_date1, date2=relative_date2,filter=filter,date_field=date_field,site_filter=site_filter,DB=DB)
        print(q)
        try:
            with_filter = json.loads(get_clickhouse_data(q, 'http://46.4.81.36:8123'))['data'][0]
        except:
            with_filter = {'visitors': 0, 'visits': 0}
        visitors={'total_sum':total['visitors'],'sum':with_filter['visitors']}
        visits={'total_sum':total['visits'],'sum':with_filter['visits']}
        response['segment_stat']={'visitors':visitors,'visits':visits}
        return JsonResponse(response, safe=False, )
@csrf_exempt
def diagram_stat(request):
    """POST endpoint: aggregated diagram statistics.

    Accepts a JSON body with ``dimensions``, ``metrics``, ``date1``/``date2`` and
    optional ``filter``/``limit``/``offset``/``sort_*``/``profile_id`` keys, builds
    ClickHouse queries (joining ``hits_with_visits`` with ``adstat`` when both
    kinds of metrics are requested) and returns ``counts``, ``metric_sums`` and
    ``stats`` as JSON.
    """
    def get_clickhouse_data(query,host,connection_timeout=1500):
        """Run a raw SQL query against the ClickHouse HTTP interface and return the body text."""
        r=requests.post(host,params={'query':query},timeout=connection_timeout)
        return r.text
    def AddCounts(date1,date2,dimension_counts,filt):
        """Build the ``counts`` key of the response: distinct values per dimension."""
        a={}
        for dim_num in range(len(dimensionslist)):
            q = ''' SELECT {dimension_counts}
                    FROM {table}
                    WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}'
                     '''
            if is_two_tables:
                q="""SELECT CAST(uniq({dimension}),'Int') as h{dimension} FROM (SELECT DISTINCT {dimensions_with_alias} FROM {DB}.hits_with_visits
                WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}') ALL FULL OUTER JOIN (SELECT DISTINCT {ad_dimensions_with_alias} FROM {DB}.adstat
                WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}') USING {dimension}"""
            q=q.format(dimension_counts=dimension_counts[dim_num],dimension=dimensionslist[dim_num],date_field=date_field,
                       ad_dimensions_with_alias=ad_dimensionslist_with_aliases[dim_num],dimensions_with_alias=dimensionslist_with_aliases[dim_num],
                       site_filt=site_filt, filt=filt,ad_date_field=ad_date_field,table=table,date1=date1,date2=date2,DB=DB)
            print(q)
            try:
                a.update(json.loads(get_clickhouse_data(q+' FORMAT JSON', 'http://46.4.81.36:8123'))['data'][0])
            except:
                a.update({'h'+dimensionslist[dim_num]:0})
        b = {}
        try:
            # Merge the returned values of every requested dimension, stripping the
            # leading 'h' alias prefix from each key.
            for key in a.keys():
                b[key[1:]]=a[key]
        except:
            b=dict.fromkeys(dimensionslist,0)
        return b
    def OldAddCounts(date1,date2,dimension_counts,filt,sort_order,table):
        """Legacy single-table variant of AddCounts (kept for reference; unused join logic)."""
        a={}
        for dim_num in range(len(dimensionslist)):
            q = ''' SELECT {dimension_counts}
                    FROM {table}
                    WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}'
                    ORDER BY NULL {sort_order}
                    FORMAT JSON
                     '''.format(site_filt=site_filt,date1=date1, date2=date2, dimension_counts=dimension_counts[dim_num], filt=filt,
                                sort_order=sort_order,table=table,date_field=date_field)
            try:
                a.update(json.loads(get_clickhouse_data(q, 'http://46.4.81.36:8123'))['data'][0])
            except:
                a.update({'h'+dimensionslist[dim_num]:0})
        b = {}
        try:
            # Merge the returned values of every requested dimension, stripping the
            # leading 'h' alias prefix from each key.
            for key in a.keys():
                b[key[1:]]=a[key]
        except:
            b=dict.fromkeys(dimensionslist,0)
        return b
    def AddMetricSums(rel_date1,rel_date2,metric_counts_list,filt,metrics,sort_order,table):
        """Build the ``metric_sums`` key of the response: total and filtered sums per metric."""
        dates = []
        a = {}
        b = {}
        # Query for metric sums without the filter
        q_total = ''' SELECT 1 as l,{metric_counts}
                    FROM {table}
                    WHERE {date_field} BETWEEN '{date1}' AND '{date2}' AND {site_filt}
                     '''
        # With the filter
        q = ''' SELECT 1 as l,{metric_counts} FROM {table}
                WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}'
                 '''
        # If both tables are involved, join the hits_with_visits metrics with the adstat metrics
        if is_two_tables:
            q_total = "SELECT {metrics_string} FROM (" + q_total + """) ALL FULL OUTER JOIN (SELECT 1 as l,{ad_metric_counts}
                    FROM {DB}.adstat
                    WHERE {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {site_filt}) USING l """
            q = "SELECT {metrics_string} FROM (" + q + """) ALL FULL OUTER JOIN (SELECT 1 as l,{ad_metric_counts}
                    FROM {DB}.adstat
                    WHERE 1 {filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND {site_filt}) USING l"""
        q = q.format(date1=rel_date1, date2=rel_date2, metric_counts=metric_counts, site_filt=site_filt,
                     metrics_string=metrics_string,
                     filt=filt, sort_order=sort_order, table=table, ad_date_field=ad_date_field,
                     ad_metric_counts=ad_metric_counts, date_field=date_field,DB=DB)
        q_total = q_total.format(date1=rel_date1, date2=rel_date2, metric_counts=metric_counts,
                                 site_filt=site_filt, metrics_string=metrics_string,
                                 filt=filt, sort_order=sort_order, table=table, ad_date_field=ad_date_field,
                                 ad_metric_counts=ad_metric_counts, date_field=date_field,DB=DB)
        print(q)
        try:
            a = json.loads(get_clickhouse_data(q_total + ' FORMAT JSON', 'http://46.4.81.36:8123'))['data'][0]
            b = json.loads(get_clickhouse_data(q + ' FORMAT JSON', 'http://46.4.81.36:8123'))['data'][0]
        except:
            pass
        # If no rows exist, zeros are filled in below
        metric_dict = dict.fromkeys(metrics)
        # Fill in the total/filtered sums for every metric
        for key in list(metric_dict.keys()):
            sub_metr_dict = {}
            try:
                sub_metr_dict['total_sum'] = a[key]
            except:
                metric_dict[key] = {"total_sum": 0, "sum": 0}
                continue
            try:
                sub_metr_dict['sum'] = b[key]
            except:
                sub_metr_dict['sum'] = 0
            metric_dict[key] = sub_metr_dict
        # Attach the requested dates (note: date1/date2 from the outer scope, not rel_*)
        metric_dict.update({'date1':date1,'date2':date2})
        dates.append(metric_dict)
        return dates
    def AddStats2(dimensionslist,dim_with_aliases, ad_dim_with_aliases,metric_counts, filt, limit, having, date1,date2, metrics, table):
        """Build the ``stats`` key of the response: per-dimension rows, optionally per segment."""
        invis_metric_counts=''  # When data comes from a single table, do not request all metrics an extra time (used for goalN_cost / goalgroupN_cost)
        # Determine whether dimensions starts with a group of segments
        q = '''SELECT {dimensions_with_alias},{invis_metric_counts}({metric_counts}) as metrics FROM {table}
        WHERE 1 {filt} AND {site_filt} AND {date_field} BETWEEN '{date1}' AND '{date2}'
        GROUP BY {dimensions}'''
        # If ad metrics or adstat-only fields were requested, join hits_with_visits with adstat
        if is_two_tables:
            invis_metric_counts=metric_counts+","
            q = "SELECT {dimensions},({metrics_string}) as metrics FROM (" + q + """) ALL FULL OUTER JOIN (SELECT {ad_dimensions_with_alias},{ad_metric_counts} FROM {DB}.adstat
            WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' GROUP BY {dimensions}) USING {dimensions}"""
        q = q.format(table=table, dimensions_with_alias=','.join(dim_with_aliases), dimensions=','.join(dimensionslist), metric_counts=metric_counts,
                     date1=date1, ad_metric_counts=ad_metric_counts,ad_dimensions_with_alias=','.join(ad_dim_with_aliases),
                     site_filt=site_filt, date2=date2, filt=filt, date_field=date_field,invis_metric_counts=invis_metric_counts,
                     ad_date_field=ad_date_field, metrics_string=metrics_string,DB=DB)
        print(q+" ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON".format(limit=limit,sort_column=sort_column,sort_order=sort_order))
        stats=json.loads(get_clickhouse_data(q+" ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON".format(limit=limit,sort_column=sort_column,sort_order=sort_order), 'http://46.4.81.36:8123'))['data']
        # A single metric comes back as a scalar, not a list — handled via the except path
        try:
            for stat in stats:
                metr = {}
                for metric_num in range(len(stat['metrics'])):
                    metr[metrics[metric_num]]=stat['metrics'][metric_num]
                stat['metrics']=metr
                if dimensionslist!=dimensionslist_with_segments:
                    stat['segment']='Все данные'
        except:
            for stat in stats:
                stat['metrics']={metrics[0]:stat['metrics']}
        # Re-run the query once per requested segment, labeling the rows
        for dim in dimensionslist_with_segments:
            if 'segment' in dim:
                seg = json.loads(requests.get(
                    stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dim[7:])),
                    headers=headers).content.decode('utf-8'))['real_definition']
                seg_filt = seg.partition("==")[0] + "=='" + seg.partition("==")[2] + "'"
                seg_label = json.loads(requests.get(
                    stas_api+'reference/segments/{num_seg}/?all=1'.format(num_seg=int(dim[7:])),
                    headers=headers).content.decode('utf-8'))['name']
                q = '''SELECT {dimensions_with_alias},[{metric_counts}] as metrics FROM {table}
                WHERE 1 {filt} AND {site_filt} AND ({date_field} BETWEEN '{date1}' AND '{date2}') AND ({seg_filt})
                GROUP BY {dimensions}'''
                # If ad metrics or adstat-only fields were requested, join hits_with_visits with adstat
                if is_two_tables:
                    q = "SELECT {dimensions},arrayConcat(metrics,ad_metrics) as metrics FROM (" + q + """) ALL FULL OUTER JOIN (SELECT {ad_dimensions_with_alias},[{ad_metric_counts}] as ad_metrics FROM {DB}.adstat
                WHERE 1 {filt} AND {site_filt} AND {ad_date_field} BETWEEN '{date1}' AND '{date2}' AND ({seg_filt}) GROUP BY {dimensions}) USING {dimensions}"""
                q = q.format(table=table,dimensions_with_alias=','.join(dim_with_aliases),ad_dimensions_with_alias=','.join(ad_dim_with_aliases),
                             dimensions=','.join(dimensionslist), metric_counts=metric_counts,
                             date1=date1, ad_metric_counts=ad_metric_counts,seg_filt=FilterParse(seg_filt.replace("'",'')),
                             site_filt=site_filt, date2=date2, filt=filt, date_field=date_field,
                             ad_date_field=ad_date_field,DB=DB)
                print(q)
                stat_with_segment = json.loads(get_clickhouse_data(q+" ORDER BY {sort_column} {sort_order} {limit} FORMAT JSON".format(limit=limit,sort_column=sort_column,sort_order=sort_order), 'http://46.4.81.36:8123'))['data']
                for stat in stat_with_segment:
                    metr = {}
                    for metric_num in range(len(stat['metrics'])):
                        metr[metrics[metric_num]] = stat['metrics'][metric_num]
                    stat['metrics'] = metr
                    stat['segment']=seg_label
                    stats.append(stat)
        if dimensionslist!=dimensionslist_with_segments:
            # Drop 'all data' rows that have no matching segment-specific counterpart
            for stat_num in range(len(stats)-1,-1,-1):
                if stats[stat_num]['segment']=='Все данные':
                    k=0
                    without_seg=stats[stat_num].copy()
                    without_seg.pop('metrics')
                    without_seg.pop('segment')
                    without_seg=without_seg
                    for i in stats:
                        compare_seg = i.copy()
                        compare_seg.pop('metrics')
                        compare_seg.pop('segment')
                        if without_seg==compare_seg:
                            k+=1
                    if k==1:
                        stats.pop(stat_num)
        return stats
    def FilterParse(filt_string):
        """Translate a global_filter expression into an SQL condition string."""
        #filt_string=filt_string.replace(',',' OR ')
        #filt_string = filt_string.replace(';', ' AND ')
        #print(filt_string.partition('=@'))
        # Supported operator families: plain comparisons, LIKE-style and regex-style.
        simple_operators=['==','!=','>=','<=','>','<']
        like_operators=['=@','!@','=^','=$','!^','!&']
        like_str={'=@':" LIKE '%{val}%'",'!@':" NOT LIKE '%{val}%'",'=^':" LIKE '{val}%'",'=$':" LIKE '%{val}'",'!^':" NOT LIKE '{val}%'",'!&':" NOT LIKE '%{val}'"}
        match_operators=['=~','!~']
        match_str={'=~':" match({par}?'{val}')",'!~':" NOT match({par}?'{val}')"}
        separator_indices=[]
        for i in range(len(filt_string)):
            if filt_string[i]==',' or filt_string[i]==';':
                separator_indices.append(i)
        separator_indices.append(len(filt_string))
        end_filt=""
        for i in range(len(separator_indices)):
            if i==0:
                sub_str = filt_string[0:separator_indices[i]]
            else:
                sub_str=filt_string[separator_indices[i-1]+1:separator_indices[i]]
            # Replace time-dimension names with the corresponding ClickHouse expressions
            for time_dim in get_time_dimensions_names():
                if time_dim in sub_str:
                    sub_str=sub_str.replace(time_dim,get_time_dimensions(time_dim).format(lang=lang,date_field=date_field).replace(',','?'))
                    break
            for j in simple_operators:
                if sub_str.partition(j)[2]=='':
                    pass
                else:
                    first_arg=sub_str.partition(j)[0]
                    operator = j
                    try:
                        # Numeric right-hand side: probe ClickHouse to verify the column
                        # compares numerically; on any failure the value is quoted instead.
                        int(sub_str.partition(j)[2])
                        json.loads(get_clickhouse_data(
                            'SELECT {par}=={val} FROM {table} LIMIT 1 FORMAT JSON'.format(table=table,
                            par=sub_str.partition(j)[0].replace('?',','), val=sub_str.partition(j)[2]), 'http://46.4.81.36:8123'))
                        second_arg=sub_str.partition(j)[2]
                    except:
                        second_arg= "'" + sub_str.partition(j)[2] + "'"
                    if sub_str.partition(j)[0][0]=='!':
                        operator=negative_condition(j)
                        first_arg=sub_str.partition(j)[0][1:]
                    sub_str=first_arg+operator+second_arg
                    break
            for j in like_operators:
                if sub_str.partition(j)[2]=='':
                    pass
                else:
                    operator = j
                    first_arg=sub_str.partition(j)[0]
                    second_arg=sub_str.partition(j)[2]
                    if sub_str.partition(j)[0][0]=='!':
                        operator = negative_condition(j)
                        first_arg = sub_str.partition(j)[0][1:]
                    sub_str = first_arg +like_str[operator].format(val=second_arg)
                    break
            for j in match_operators:
                if sub_str.partition(j)[2]=='':
                    pass
                else:
                    operator = j
                    first_arg = sub_str.partition(j)[0]
                    second_arg = sub_str.partition(j)[2]
                    if sub_str.partition(j)[0][0]=='!':
                        operator = negative_condition(j)
                        first_arg = sub_str.partition(j)[0][1:]
                    sub_str = match_str[operator].format(val=second_arg,par=first_arg)
                    break
            try:
                # Re-append the original separator; IndexError on the final chunk.
                end_filt=end_filt+sub_str+filt_string[separator_indices[i]]
            except:
                end_filt = end_filt + sub_str
        end_filt=end_filt.replace(',',' OR ')
        end_filt=end_filt.replace(';',' AND ')
        end_filt = end_filt.replace('?', ',')
        return end_filt
    if request.method=='POST':
        resp = {} # Output dictionary
        # Headers for segment lookups against the stats API
        # NOTE(review): hard-coded JWT credential — should live in settings/secret storage.
        headers = {
            'Authorization': 'JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjoxOSwiZW1haWwiOiIiLCJ1c2VybmFtZSI6ImFydHVyIiwiZXhwIjoxNTE4MTIxNDIyfQ._V0PYXMrE2pJlHlkMtZ_c-_p0y0MIKsv8o5jzR5llpY',
            'Content-Type': 'application/json'}
        # Parse the JSON request body
        try:
            sort_order=json.loads(request.body.decode('utf-8'))['sort_order']
        except:
            sort_order=""
        # Sorting by the metric passed in
        dimensionslist_with_segments=json.loads(request.body.decode('utf-8'))['dimensions']
        try:
            sort_column = json.loads(request.body.decode('utf-8'))['sort_column']
        except:
            sort_column=dimensionslist_with_segments[0]
        try:
            lang = json.loads(request.body.decode('utf-8'))['lang']
            if lang == "":
                lang='ru'
            if lang == "en":
                lang='eng'
        except:
            lang = "ru"
        dimensionslist = []
        dimensionslist_with_aliases=[]
        ad_dimensionslist_with_aliases=[]  # for advertising dimensions
        # Build the dimension list without segments
        time_dimensions_dict = {}
        ad_time_dimensions_dict = {}  # for advertising dimensions
        # Detect composite (time-based) dimensions
        for d in dimensionslist_with_segments:
            if 'segment' not in d and type(d) != list:
                dimensionslist.append(d)
                try:
                    time_dimensions_dict[d]=get_time_dimensions(d).format(lang=lang,date_field='toDateTime(serverTimestamp)')
                    dimensionslist_with_aliases.append(time_dimensions_dict[d]+' as '+d)
                    ad_time_dimensions_dict[d] = get_time_dimensions(d).format(lang=lang, date_field='StatDateTime')
                    ad_dimensionslist_with_aliases.append(ad_time_dimensions_dict[d] + ' as ' + d)
                    continue
                except:
                    dimensionslist_with_aliases.append(d)
                    ad_dimensionslist_with_aliases.append(d)
        metrics = json.loads(request.body.decode('utf-8'))['metrics']
        # If no offset was passed, default it to zero
        try:
            offset = json.loads(request.body.decode('utf-8'))['offset']
        except:
            offset='0'
        # If no limit was passed leave it empty, otherwise build the LIMIT clause
        try:
            limit = 'LIMIT '+str(offset)+','+str(json.loads(request.body.decode('utf-8'))['limit'])
        except:
            limit=''
        date1 = json.loads(request.body.decode('utf-8'))['date1']
        date2= json.loads(request.body.decode('utf-8'))['date2']
        try:
            filter_metric = json.loads(request.body.decode('utf-8'))['filter_metric']
        except:
            having=" "
        else:
            having = 'HAVING'+' '+FilterParse(filter_metric)
        # If dimensionslist is empty, only segments were passed
        site_filt=' 1'  # idSite filter (none by default)
        ad_date_field="StatDateTime"
        filt=''
        try:
            profile_id=json.loads(request.body.decode('utf-8'))['profile_id']
            try:
                # Compute the relative time window (shifted by the profile's timezone)
                timezone = json.loads(requests.get(
                    stas_api+'profiles/{profile_id}/?all=1'.format(profile_id=profile_id),
                    headers=headers).content.decode('utf-8'))['timezone']
                d1 = datetime.strptime(date1 + '-00', '%Y-%m-%d-%H')
                timezone = pytz.timezone(timezone)
                d1 = timezone.localize(d1)
                time_offset = str(d1)[19:]
                # Extend the second date by one day
                d2 = (datetime.strptime(date2, '%Y-%m-%d') - timedelta(
                    days=-1)).strftime('%Y-%m-%d')
                if time_offset[0] == '+':
                    relative_date1=str(datetime.strptime(date1 + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=int(time_offset[2]) - 3))
                    relative_date2=str(datetime.strptime(d2 + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=int(time_offset[2]) - 3))
                else:
                    relative_date1 = str(datetime.strptime(date1 + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=-int(time_offset[2]) - 3))
                    relative_date2 = str(datetime.strptime(d2 + '-00', '%Y-%m-%d-%H') - timedelta(
                        hours=-int(time_offset[2]) - 3))
                date_field='toDateTime(serverTimestamp)'
                ad_date_field='StatDateTime'
            except:
                relative_date1=date1
                relative_date2 = date2
                date_field = 'serverDate'
            try:
                # Restrict to the profile's idSite when data for that site exists.
                if json.loads(get_clickhouse_data(
                    'SELECT idSite FROM {DB}.hits_with_visits WHERE idSite={idSite} LIMIT 1 FORMAT JSON'.format(DB=DB,
                        idSite=json.loads(requests.get(
                            stas_api+'profiles/{profile_id}/?all=1'.format(
                                profile_id=profile_id), headers=headers).content.decode('utf-8'))[
                            'site_db_id']), 'http://46.4.81.36:8123'))['data'] == []:
                    filt = ' AND 0'
                    site_filt = " 0"
                else:
                    site_filt = ' idSite==' + str(json.loads(requests.get(
                        stas_api+'profiles/{profile_id}/?all=1'.format(profile_id=profile_id),
                        headers=headers).content.decode('utf-8'))['site_db_id'])
            except:
                filt = ' AND 0'
                site_filt = " 0"
        except:
            relative_date1=date1
            relative_date2= date2
            date_field = 'serverDate'
        table = DB+'.hits_with_visits'
        # NOTE(review): list_of_adstat_par is never used below — candidate for removal.
        list_of_adstat_par=['Clicks','Impressions','Cost','StatDate','idSite', 'AdCampaignId', 'AdBannerId', 'AdChannelId', 'AdDeviceType', 'AdGroupId', 'AdKeywordId',
                            'AdPosition', 'AdPositionType', 'AdRegionId', 'AdRetargetindId', 'AdPlacement', 'AdTargetId', 'AdvertisingSystem', 'DRF', 'campaignContent',
                            'campaignKeyword', 'campaignMedium', 'campaignName', 'campaignSource']
        # Build the array of count() expressions for every dimension
        dimension_counts=[]
        ad_dimension_counts=[]
        for i in dimensionslist:
            if i in time_dimensions_dict.keys():
                ad_dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension_alias}".format(dimension=ad_time_dimensions_dict[i],dimension_alias=i))
                dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension_alias}".format(dimension=time_dimensions_dict[i],dimension_alias=i))
            else:
                dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension}".format(dimension=i))
                ad_dimension_counts.append("CAST(uniq({dimension}),'Int') as h{dimension}".format(dimension=i))
        # Check whether dimensions consist of segments only
        is_all_segments = True
        is_two_tables=False
        for dim in dimensionslist:
            if 'segment' not in dim:
                is_all_segments = False
        # Build the array with the SQL expression for every metric
        metric_counts_list, ad_metric_counts_list,metrics_string = MetricCounts(metrics, headers, dimensionslist, is_all_segments,'')
        # If the list of non-ad metrics is not empty, build the metric string from it
        metric_counts = ','.join(metric_counts_list)
        ad_metric_counts = ','.join(ad_metric_counts_list)
        # If metrics contains both kinds of metrics, raise the two-tables flag
        if metric_counts_list != [] and ad_metric_counts_list != []:
            is_two_tables = True
            for dim in dimensionslist:
                if dim not in get_adstat_dimensions():
                    is_two_tables = False
                    break
        if is_all_segments == True and metric_counts_list != [] and ad_metric_counts_list != []:
            is_two_tables = True
        # Ad-only metrics: switch everything over to the adstat table
        if metric_counts_list == []:
            dimensionslist_with_aliases=ad_dimensionslist_with_aliases
            dimension_counts=ad_dimension_counts
            table = DB+'.adstat'
            metric_counts = ','.join(ad_metric_counts_list)
            date_field = ad_date_field
        # If the filter is not forced to zero (an idSite exists for this profile_id), add it
        if filt!=' AND 0':
            try:
                filter = json.loads(request.body.decode('utf-8'))['filter']
                if filter != "":
                    filt = "AND"+"("+FilterParse(filter)+")"
                    try:
                        show_filter_label = json.loads(request.body.decode('utf-8'))['show_filter_label']
                    except:
                        show_filter_label = ''
                    # If show_filter_label is true, add the filter_label array to the result
                    if show_filter_label in ['True', 'true', 'TRUE', True]:
                        filter = filter.replace(',', ';')
                        filter = filter.split(';')
                        filter_label = []
                        for sub_filt in filter:
                            filter_label.append(re.split(r'={1,2}', sub_filt)[1].capitalize())
                        resp['filter_label'] = filter_label[len(filter_label)-1]
                        if len(filter_label)>1:
                            resp['filter_parent_label'] = filter_label[len(filter_label) - 2]
                else:
                    filt=""
            except:
                filt=""
        print(filt)
        # Add the counts key to the output dictionary
        resp['counts'] = {}
        if dimensionslist != []:
            resp['counts']=AddCounts(relative_date1,relative_date2,dimension_counts,filt)
        # Add the metric_sums key to the output dictionary
        resp['metric_sums']={}
        resp['metric_sums']['dates'] = AddMetricSums(relative_date1,relative_date2,metric_counts_list,filt,metrics,sort_order,table)
        if site_filt==' 0':
            stats=[]
        else:
            stats=AddStats2(dimensionslist,dimensionslist_with_aliases,ad_dimensionslist_with_aliases,metric_counts,filt,limit,having,relative_date1,relative_date2,metrics,table)
        # Add stats
        resp['stats']=stats
        pprint.pprint(resp)
        response=JsonResponse(resp,safe=False,)
        response['Access-Control-Allow-Origin']='*'
        return response
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,157
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/models.py
|
from django.db import models
from django.db import models
from django.core.urlresolvers import reverse
from uuid import uuid4
import os.path
from django.db.models.signals import post_delete
from django.core.files.storage import FileSystemStorage
from Word2Vec.settings import STATIC_ROOT
# Generated files (previews/plots) are stored on a fixed shared path, not MEDIA_ROOT.
fs = FileSystemStorage(location='/opt/static/')
# Create your models here.
class Data(models.Model):
    """An uploaded dataset: a title plus the source spreadsheet and a trained model file."""
    class Meta():
        db_table='Data'
    Data_title=models.CharField(default=None,blank=True, null=True,max_length=100, )
    Data_xls = models.FileField(blank=True, null=True,default=None)
    Data_model=models.FileField(blank=True,null=True,default=None)
    def get_text(self):
        # NOTE(review): accesses the default reverse name `tags_set`, but Tags declares
        # related_name='TAGS' — confirm this accessor actually exists at runtime.
        return self.tags_set.all()
    def __unicode__(self):
        # NOTE(review): __unicode__ only takes effect on Python 2; Python 3 uses __str__.
        return self.Data_title
class Tags(models.Model):
    """A single tag attached to a Data record."""
    class Meta():
        db_table='tags'
    tg=models.CharField(max_length=100,default=None,blank=True, null=True)
    # Reverse accessor on Data is `TAGS` (via related_name).
    text=models.ForeignKey(Data,null=True, blank=True,related_name='TAGS',on_delete=models.SET_NULL)
class Options(models.Model):
    """Training options for one Data text (presumably word2vec hyperparameters — confirm)."""
    class Meta():
        db_table = 'Options'
    size = models.IntegerField()   # presumably embedding vector size — TODO confirm
    win = models.IntegerField()    # presumably context window — TODO confirm
    minc = models.IntegerField()   # presumably minimum word count — TODO confirm
    cbow=models.BooleanField(blank=True,default=False)
    skipgr = models.BooleanField(blank=True, default=True)
    alg=models.IntegerField(blank=True, null=True, default=0)
    text=models.ForeignKey(Data,null=True, blank=True, on_delete=models.SET_NULL)
    # Stored on the fixed '/opt/static/' storage backend, not MEDIA_ROOT.
    preview=models.ImageField(blank=True, null=True, default=None, storage=fs)
class ImageOptions(models.Model):
    """A rendered visualization generated for one Options training run."""
    class Meta():
        db_table = 'Images'
    num_clusters = models.IntegerField(default=20)
    # Rendered image on the shared storage backend.
    img = models.ImageField(blank=True, null=True, default=None, storage=fs)
    # script/div pair — presumably embeddable plot fragments (Bokeh-style);
    # TODO confirm against the producing view.
    script = models.TextField(blank=True, null=True, default=None)
    div = models.TextField(blank=True, null=True, default=None)
    num_neighbors = models.IntegerField(default=30, null=True, blank=True)
    opt = models.ForeignKey(Options, null=True, blank=True, related_name='image', on_delete=models.SET_NULL)
class Templates(models.Model):
    """Preset default hyper-parameter values offered to the user."""
    class Meta():
        db_table = 'Temp'
    size = models.IntegerField(default=300)
    win = models.IntegerField(default=5)
    minc = models.IntegerField(default=30)
def delete_Data_Data_xls(sender, **kwargs):
    """post_delete handler: remove a deleted Data row's files from disk.

    Deletes the uploaded spreadsheet and then the trained model file;
    save=False avoids re-saving the already-deleted model instance.
    """
    instance = kwargs.get("instance")
    for field_file in (instance.Data_xls, instance.Data_model):
        field_file.delete(save=False)
def delete_ImageOptions_img(sender, **kwargs):
    """post_delete handler: remove a deleted ImageOptions row's image file.

    Goes through the field's storage backend so the custom /opt/static/
    storage is honoured.
    """
    instance = kwargs.get("instance")
    backend = instance.img.storage
    backend.delete(instance.img)
# Register the post_delete cleanup handlers so orphaned files are removed
# whenever a Data or ImageOptions row is deleted.
post_delete.connect(delete_Data_Data_xls, Data)
post_delete.connect(delete_ImageOptions_img, ImageOptions)
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,158
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/migrations/0004_auto_20170724_1021.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 07:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: split per-run training settings out of Data into a new
    Options model and drop the Tags model.

    NOTE: auto-generated migration — avoid hand-editing once applied.
    """
    dependencies = [
        ('WV', '0003_data_data_title'),
    ]
    operations = [
        migrations.CreateModel(
            name='Options',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.IntegerField()),
                ('win', models.IntegerField()),
                ('minc', models.IntegerField()),
                ('xls', models.FileField(blank=True, default=None, null=True, upload_to='')),
                ('img', models.ImageField(blank=True, default=None, null=True, upload_to='')),
            ],
            options={
                'db_table': 'Options',
            },
        ),
        migrations.RemoveField(
            model_name='data',
            name='Data_minc',
        ),
        migrations.RemoveField(
            model_name='data',
            name='Data_size',
        ),
        migrations.RemoveField(
            model_name='data',
            name='Data_tags',
        ),
        migrations.RemoveField(
            model_name='data',
            name='Data_win',
        ),
        migrations.RemoveField(
            model_name='data',
            name='Data_xls',
        ),
        migrations.DeleteModel(
            name='Tags',
        ),
        migrations.AddField(
            model_name='options',
            name='text',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='WV.Data'),
        ),
    ]
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,159
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/migrations/0005_auto_20170724_1034.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 07:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: move the uploaded spreadsheet back onto Data
    (Data_xls) and drop Options.xls.

    NOTE: auto-generated migration — avoid hand-editing once applied.
    """
    dependencies = [
        ('WV', '0004_auto_20170724_1021'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='options',
            name='xls',
        ),
        migrations.AddField(
            model_name='data',
            name='Data_xls',
            field=models.FileField(blank=True, default=None, null=True, upload_to=''),
        ),
    ]
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,160
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/api/apps.py
|
from django.apps import AppConfig
class ChConfig(AppConfig):
    """Django application configuration.

    NOTE(review): this file lives in the `api` package but declares
    name = 'CH'. Django requires AppConfig.name to match the app's module
    path, so either the app was renamed at some point or this config is
    unused — verify against INSTALLED_APPS before relying on it.
    """
    name = 'CH'
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,161
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/spyrecorder/CHmodels.py
|
from infi.clickhouse_orm import models as md
from infi.clickhouse_orm import fields as fd
from infi.clickhouse_orm import engines as en
import datetime
class Actions(md.Model):
    """ClickHouse table of per-user desktop activity events (spy recorder)."""
    # describes datatypes and fields
    user_id = fd.UInt64Field()
    user_name = fd.StringField()
    # Event timestamp, stored as a plain string (not a DateTime column).
    time = fd.StringField()
    event_type = fd.StringField()
    screen_name = fd.StringField()
    # Metadata of the foreground application, as reported by the client.
    app_name = fd.StringField()
    app_productname = fd.StringField()
    app_version = fd.StringField()
    app_publisher = fd.StringField()
    app_file = fd.StringField()
    app_copyright = fd.StringField()
    app_language = fd.StringField()
    file_versioninfo = fd.StringField()
    file_description = fd.StringField()
    file_internalname = fd.StringField()
    file_originalname = fd.StringField()
    # NOTE(review): datetime.date.today() is evaluated once at import time,
    # so rows inserted without an explicit Date all get the process start
    # date rather than the insertion date — confirm this is intended.
    Date = fd.DateField(default=datetime.date.today())
    # MergeTree engine keyed on Date, with the full column list as sort key.
    engine = en.MergeTree('Date', ('user_id','user_name','time','event_type','screen_name','app_name',
                                   'app_productname','app_version','app_publisher','app_file',
                                   'app_copyright','app_language','file_versioninfo','file_description',
                                   'file_internalname','file_originalname'))
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,162
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/urls.py
|
from django.conf.urls import url
from WV import views
# WV app routes. Order matters: the final url(r'^', ...) pattern is
# unanchored, so it matches every path and must stay last as the
# catch-all fallback to the main page.
urlpatterns = [
    url(r'^downloadedtexts/(?P<page_number>\d+)/$', views.DownloadedTexts, name='DownloadedTexts'),
    url(r'^filteredtexts/(?P<page_number>\d+)/(?P<tags>\S+)/$', views.FilteredTexts, name='FilteredTexts'),
    url(r'^showmap/(?P<Data_id>\d+)/(?P<Opt_id>\d+)/(?P<Img_id>\d+)/$', views.Showmap, name='showmap'),
    url(r'^maps/(?P<Data_id>\d+)/(?P<page_number>\d+)/$', views.Maps, name='maps'),
    url(r'^images/(?P<Data_id>\d+)/(?P<Opt_id>\d+)/(?P<page_number>\d+)/$', views.Images, name='images'),
    url(r'^deleteopt/(?P<Opt_id>\d+)/(?P<Data_id>\d+)/$', views.DeleteOpt, name='deleteopt'),
    url(r'^deleteimg/(?P<Data_id>\d+)/(?P<Opt_id>\d+)/(?P<Img_id>\d+)/$', views.DeleteImageOpt, name='deleteimg'),
    url(r'^template/(?P<size>\d+)/(?P<win>\d+)/(?P<minc>\d+)/(?P<Data_id>\d+)/$', views.Template, name='template'),
    url(r'^options/(?P<Data_id>\d+)/$', views.Enteroptions, name='options'),
    url(r'^imageoptions/(?P<Data_id>\d+)/(?P<Opt_id>\d+)/$', views.EnterImageOptions, name='imageoptions'),
    url(r'^setpreview/(?P<Data_id>\d+)/(?P<Opt_id>\d+)/(?P<img>\S+)/$', views.SetPreview, name='setpreview'),
    url(r'^centroids/(?P<Data_id>\d+)/(?P<Opt_id>\d+)/(?P<Img_id>\d+)/$', views.Centroids, name='centroids'),
    url(r'^minfreq/(?P<Data_id>\d+)/(?P<Opt_id>\d+)/(?P<Img_id>\d+)/$', views.MinFrequencyWord, name='minfreq'),
    url(r'^similarwords/$', views.SimilarWords, name='similarwords'),
    url(r'^downloadtext/(?P<Data_id>\d+)/$', views.DownloadText, name='downloadtext'),
    url(r'^', views.MainPage, name='EnterData'),
]
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,163
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/admin.py
|
from django.contrib import admin
from WV.models import Templates,Data,Tags,Options
class DataAdmin(admin.ModelAdmin):
    """Admin form for Data: title and uploaded spreadsheet only."""
    fields = ['Data_title', 'Data_xls']
class TemplatesAdmin(admin.ModelAdmin):
    """Admin form for hyper-parameter presets."""
    fields = ['size', 'minc', 'win']
class DataInLine(admin.StackedInline):
    # NOTE(review): defined but never attached to any ModelAdmin.inlines —
    # dead code or work in progress?
    model = Tags
    extra = 2
class DataInLine2(admin.StackedInline):
    # NOTE(review): also unreferenced (see DataInLine).
    model = Options
    extra = 2
admin.site.register(Data, DataAdmin)
admin.site.register(Templates, TemplatesAdmin)
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,164
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 06:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the WV app: Data, Tags and Templates tables.

    NOTE: auto-generated migration — avoid hand-editing once applied.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Data_title', models.CharField(blank=True, default=None, max_length=100, null=True)),
                ('Data_size', models.IntegerField()),
                ('Data_win', models.IntegerField()),
                ('Data_minc', models.IntegerField()),
                ('Data_xls', models.FileField(blank=True, default=None, null=True, upload_to='')),
            ],
            options={
                'db_table': 'Data',
            },
        ),
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Tags_text', models.CharField(blank=True, max_length=100, null=True)),
            ],
            options={
                'db_table': 'tags',
                'ordering': ('Tags_text',),
            },
        ),
        migrations.CreateModel(
            name='Templates',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.IntegerField(default=300)),
                ('win', models.IntegerField(default=5)),
                ('minc', models.IntegerField(default=30)),
            ],
            options={
                'db_table': 'Temp',
            },
        ),
        migrations.AddField(
            model_name='data',
            name='Data_tags',
            field=models.ManyToManyField(to='WV.Tags'),
        ),
    ]
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,165
|
arturkaa231/clickhouse_api
|
refs/heads/master
|
/WV/migrations/0007_auto_20170724_1624.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 13:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: replace Options.html with Options.components (text).

    NOTE: auto-generated migration — avoid hand-editing once applied.
    """
    dependencies = [
        ('WV', '0006_auto_20170724_1617'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='options',
            name='html',
        ),
        migrations.AddField(
            model_name='options',
            name='components',
            field=models.TextField(blank=True, default=None, null=True),
        ),
    ]
|
{"/spyrecorder/views.py": ["/spyrecorder/CHmodels.py"], "/WV/views.py": ["/WV/models.py", "/WV/forms.py", "/Word2Vec/settings.py"], "/WV/forms.py": ["/WV/models.py"], "/api/views.py": ["/Word2Vec/settings.py"], "/WV/models.py": ["/Word2Vec/settings.py"], "/WV/admin.py": ["/WV/models.py"]}
|
34,166
|
SpokenBanana/PicturePalet
|
refs/heads/master
|
/picture_pallet.py
|
from PIL import Image
import random
import numba
@numba.jit(nopython=True)
def color_diff(color1, color2):
    """Perceptual squared distance between two RGB triples.

    Uses the low-cost "redmean" approximation: red and blue weights are
    scaled by the mean red level so the metric tracks human colour
    perception better than a plain Euclidean distance.
    """
    red_mean = (color1[0] + color2[0]) // 2
    dr = color1[0] - color2[0]
    dg = color1[1] - color2[1]
    db = color1[2] - color2[2]
    weighted_r = ((512 + red_mean) * dr * dr) >> 8
    weighted_b = ((767 - red_mean) * db * db) >> 8
    return weighted_r + 4 * dg * dg + weighted_b
class Palette:
    """Rearranges the pixels of one image (the palette) to approximate another.

    The destination image is overwritten with the palette's pixels, then
    random pixel pairs are swapped whenever a swap lowers the total
    perceptual distance to the original destination image.
    """
    def __init__(self, first, second):
        # Image supplying the pixel inventory.
        self.palette = first
        # Image whose appearance we try to reproduce.
        self.destination = second
        # need the original image for reference
        self.original = Image.new('RGB', second.size, 'white')
        self.original.paste(second, (0, 0))
    def generate_picture(self, file_name="/tmp/image.png", iterations=2000000):
        """Run the stochastic pixel-swap optimization and save the result.

        Returns the path the image was written to (file_name).
        """
        size = list(self.destination.size)
        # Downscale wide images to keep the pixel count manageable.
        # NOTE(review): the threshold is 700 but the target width is 600 —
        # looks deliberate but worth confirming.
        if size[0] > 700:
            aspect = size[1] / float(size[0])
            size[0] = 600
            size[1] = int(600 * aspect)
        self.destination = self.destination.resize(
            size, Image.BILINEAR).convert('RGB')
        self.original = self.original.resize(
            size, Image.BILINEAR).convert('RGB')
        # fit the pallet to the destination image
        self.palette = self.palette.resize(size, Image.BILINEAR).convert('RGB')
        # Seed the working image with the palette's pixels; from here on we
        # only swap pixels around, never change their values.
        self.destination.paste(self.palette, (0, 0))
        original = self.original.load()
        destination = self.destination.load()
        for _ in range(iterations):
            # Pick two random pixel positions...
            fx, fy = (random.randrange(self.destination.size[0]),
                      random.randrange(self.destination.size[1]))
            sx, sy = (random.randrange(self.destination.size[0]),
                      random.randrange(self.destination.size[1]))
            original_first = original[fx, fy]
            original_second = original[sx, sy]
            destination_first = destination[fx, fy]
            destination_second = destination[sx, sy]
            # ...and swap them iff that lowers the summed color_diff against
            # the reference image (greedy hill-climbing).
            if color_diff(original_first, destination_first) + \
                    color_diff(original_second, destination_second) > \
                    color_diff(original_first, destination_second) + \
                    color_diff(original_second, destination_first):
                destination[fx, fy] = destination_second
                destination[sx, sy] = destination_first
        self.destination.save(file_name)
        return file_name
|
{"/bot.py": ["/picture_pallet.py"]}
|
34,167
|
SpokenBanana/PicturePalet
|
refs/heads/master
|
/bot.py
|
import twitter
from PIL import Image
import urllib
import io
import os
from picture_pallet import Palette
from firebase import firebase
from apscheduler.schedulers.blocking import BlockingScheduler
# Twitter client, Firebase state store, and the polling scheduler are all
# configured from environment variables at import time.
api = twitter.Api(consumer_key=os.environ['TWITTER_KEY'],
                  consumer_secret=os.environ['TWITTER_SECRET'],
                  access_token_key=os.environ['TWITTER_TOKEN'],
                  access_token_secret=os.environ['TWITTER_TOKEN_SECRET'])
cursor = firebase.FirebaseApplication(os.environ['FIREBASE_URL'])
sched = BlockingScheduler()
def get_last():
    """Return the id of the last mention we replied to, or None if unset."""
    stored = cursor.get('', 'last', connection=None)
    return stored or None
def get_image(mention_media):
    """Download a tweet's attached image and return it as an RGB PIL image.

    `mention_media` is one entry of a status dict's 'media' list.
    """
    # BUG FIX: the module only does `import urllib`, which in Python 3 does
    # not guarantee the `urllib.request` submodule is loaded; import it
    # explicitly so this works regardless of what other libraries imported.
    from urllib.request import urlopen
    fd = urlopen(mention_media['media_url_https'])
    image_file = io.BytesIO(fd.read())
    return Image.open(image_file).convert('RGB')
def reply_to(mention):
    """Reply to one mention with either the generated image or a usage hint.

    `mention` is a status dict (twitter.Status.AsDict()).
    """
    # ROBUSTNESS FIX: statuses without attachments have no 'media' key at
    # all, so mention['media'] raised KeyError; treat that as "no pictures".
    media = mention.get('media', [])
    if len(media) < 2:
        api.PostUpdate('Sorry @{}, I need two pictures!'.format(
            mention['user']['screen_name']),
            in_reply_to_status_id=mention['id'])
        return
    im = get_image(media[0])
    im2 = get_image(media[1])
    palette = Palette(im, im2).generate_picture()
    api.PostUpdate('@{}'.format(mention['user']['screen_name']),
                   in_reply_to_status_id=mention['id'], media=palette)
@sched.scheduled_job('interval', minutes=10)
def start():
    """Poll Twitter for new mentions and reply to each one.

    Runs every 10 minutes; persists the newest mention id so the next
    poll only fetches mentions we have not handled yet.
    """
    since = get_last()
    mentions = api.GetMentions(since_id=since)
    print("{0:d} mentions.".format(len(mentions)))
    for status in mentions:
        data = status.AsDict()
        screen_name = data['user']['screen_name']
        print('replying to {}'.format(screen_name))
        reply_to(data)
        print('replied to {}'.format(screen_name))
    # keep track of the last mention replied to
    if mentions:
        cursor.put('', 'last', str(mentions[0].id), connection=None)
if __name__ == '__main__':
    # Blocks forever, firing `start` every 10 minutes.
    sched.start()
|
{"/bot.py": ["/picture_pallet.py"]}
|
34,173
|
buzutilucas/CNNs-mnist
|
refs/heads/master
|
/Th_nN/check_train.py
|
import lasagne
import cPickle
import os
def saved_params(neural_network, file):
path = os.path.abspath('.')
files = path[:]
params = lasagne.layers.get_all_param_values(neural_network['out'])
cPickle.dump(params, open(files+'/'+file, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
print 'Saved file'
def predict(neural_network, valid_fn, x_test, y_test, file):
path = os.path.abspath('.')
files = path[:]
loaded_params = cPickle.load(open(files+'/'+file, 'rb'))
lasagne.layers.set_all_param_values(neural_network['out'], loaded_params)
acc = valid_fn(x_test, y_test)[1]
print 'Test accuracy rate: %.2f%%' % (acc*100)
|
{"/cnn_mnist.py": ["/Th_nN/train.py", "/Th_nN/check_train.py", "/Th_nN/mnist_data.py"]}
|
34,174
|
buzutilucas/CNNs-mnist
|
refs/heads/master
|
/Th_nN/train.py
|
#coding: utf-8
from matplotlib import pyplot as plt
import numpy as np
import sys
import lasagne
import theano
from theano import tensor as T
from lasagne.regularization import regularize_layer_params, l2
def plot_train(train_curves):
plt.figure()
cost_history, acc_history, val_cost_history, val_acc_history = train_curves
plt.plot(cost_history, 'b--', label='Tranining')
plt.plot(val_cost_history, 'r-', label='Valid')
plt.xlabel('Epochs', fontsize=15)
plt.ylabel('Error rate', fontsize=15)
plt.legend()
print 'The best performance valid: %0.2f%%' % (np.max(val_acc_history)*100)
plt.show()
def compile_train_function(neural_network, lr, w_dacy):
    """Build compiled theano train/validation functions for the network.

    Args:
        neural_network: dict of lasagne layers with 'input' and 'out' keys.
        lr: SGD learning rate.
        w_dacy: L2 weight-decay coefficient (applied to the train loss only).

    Returns:
        (train_fn, valid_fn): each maps (x, y) -> [mean loss, mean accuracy];
        only train_fn applies parameter updates.
    """
    input_var = neural_network['input'].input_var
    output_var = T.lvector()  # Variable symbolic: integer class labels
    predicted = lasagne.layers.get_output(neural_network['out'], inputs=input_var)  # Answer of output
    loss = lasagne.objectives.categorical_crossentropy(predicted, output_var)  # Function of error
    loss = loss.mean()
    """
    Regularize L2 (avoid over-fitting)
    Only to function of train
    Lreg = L + λ*∑(w^2)
    where: L --> loss
           λ --> weight decay
           w --> weight
    """
    loss += w_dacy * regularize_layer_params(neural_network['out'], l2)  # Regularize L2
    # Accuracy rate
    y_pred = T.argmax(predicted, axis=1)
    acc = T.eq(y_pred, output_var)
    acc = acc.mean()
    # NOTE(review): the validation graph is built identically to the training
    # graph (no deterministic=True flag); this only matters if dropout or
    # batch-norm layers are added later — confirm if that happens.
    valid_predicted = lasagne.layers.get_output(neural_network['out'], inputs=input_var)  # Validation answer of output
    valid_loss = lasagne.objectives.categorical_crossentropy(valid_predicted, output_var)  # Validation function of error
    valid_loss = valid_loss.mean()
    # Validation accuracy rate
    valid_y_pred = T.argmax(valid_predicted, axis=1)
    valid_acc = T.eq(valid_y_pred, output_var)
    valid_acc = valid_acc.mean()
    # Parameters updating (plain SGD over all trainable parameters)
    params = lasagne.layers.get_all_params(neural_network['out'])
    updates = lasagne.updates.sgd(loss, params, lr)
    # Compile function
    train_fn = theano.function([input_var, output_var], [loss, acc], updates=updates)
    valid_fn = theano.function([input_var, output_var], [valid_loss, valid_acc])
    return train_fn, valid_fn
def _iterate_minibatches(x, y, batch_size):
    """Yield successive (x, y) slice pairs of at most `batch_size` rows."""
    for start in xrange(0, len(x), batch_size):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
def fit(train_fn, valid_fn, train_set, valid_set, epochs, batch_size):
    """Run the training loop and collect per-epoch loss/accuracy curves.

    Args:
        train_fn, valid_fn: compiled functions from compile_train_function.
        train_set, valid_set: (x, y) array pairs.
        epochs: number of passes over the training data.
        batch_size: minibatch size for both training and validation.

    Returns:
        (cost_history, acc_history, val_cost_history, val_acc_history),
        each a list with one entry per epoch (batch-mean values).
    """
    x_train, y_train = train_set
    x_valid, y_valid = valid_set
    cost_history = []
    acc_history = []
    val_cost_history = []
    val_acc_history = []
    print('epoch\ttrain_err\tval_err')
    for i in range(epochs):
        # One pass over the training set, accumulating batch means.
        epoch_cost = 0
        epoch_acc = 0
        train_batches = 0
        for x_batch, y_batch in _iterate_minibatches(x_train, y_train, batch_size):
            cost, acc = train_fn(x_batch, y_batch)
            epoch_cost += cost
            epoch_acc += acc
            train_batches += 1
        # One validation pass (no parameter updates).
        val_epoch_cost = 0
        val_epoch_acc = 0
        val_batches = 0
        for x_batch, y_batch in _iterate_minibatches(x_valid, y_valid, batch_size):
            val_cost, val_acc = valid_fn(x_batch, y_batch)
            val_epoch_cost += val_cost
            val_epoch_acc += val_acc
            val_batches += 1
        epoch_cost = epoch_cost / train_batches
        cost_history.append(epoch_cost)
        acc_history.append(epoch_acc / train_batches)
        val_epoch_cost = val_epoch_cost / val_batches
        val_cost_history.append(val_epoch_cost)
        val_acc_history.append(val_epoch_acc / val_batches)
        # In-place console progress bar: clear line, print epoch stats, then
        # a '#' bar proportional to overall completion.
        num_epochs = int(((i+1.)/epochs) * 100)
        sys.stdout.write('\033[2K' + '\r' + '%d\t%.4f\t\t%.4f' % (i + 1, epoch_cost, val_epoch_cost))
        sys.stdout.write('\n')
        sys.stdout.write('Epochs ' + str(i + 1) + '/' + str(epochs) +
                         ' | Progress ' + '#' * num_epochs + ' ' + str(num_epochs) + '%')
        sys.stdout.flush()
    print '\n\nValidation accuracy rate: %.2f%%' % (val_acc_history[-1] * 100)
    return cost_history, acc_history, val_cost_history, val_acc_history
|
{"/cnn_mnist.py": ["/Th_nN/train.py", "/Th_nN/check_train.py", "/Th_nN/mnist_data.py"]}
|
34,175
|
buzutilucas/CNNs-mnist
|
refs/heads/master
|
/cnn_mnist.py
|
"""
Created on Mon Jan 22 2018
Python 2.7
Convolutional Neural Network
Frameworks: Theano and Lasagne
Data base: mnist
@author: Lucas Buzuti
"""
from lasagne.layers import InputLayer, DenseLayer, Conv2DLayer, MaxPool2DLayer
from lasagne.nonlinearities import softmax
from Th_nN.train import plot_train, compile_train_function, fit
from Th_nN.check_train import saved_params, predict
from Th_nN.mnist_data import loading_dataset
from sklearn.model_selection import train_test_split
# Load MNIST and hold out 30% of the training set for validation.
x_train, y_train, x_test, y_test = loading_dataset()
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.3, random_state=15000)
# Model Neural Network
def build_neural_network():
    """Assemble the CNN: two conv/pool stages, three dense hidden layers,
    and a 10-way softmax output.

    Returns a dict mapping layer names to lasagne layers; 'input' and
    'out' are the entry points used by the training helpers.
    """
    layers = {}
    layers['input'] = InputLayer((None, 1, 28, 28))
    layers['conv1'] = Conv2DLayer(layers['input'], num_filters=8, filter_size=5)
    layers['pool1'] = MaxPool2DLayer(layers['conv1'], pool_size=2)
    layers['conv2'] = Conv2DLayer(layers['pool1'], num_filters=16, filter_size=5)
    layers['pool2'] = MaxPool2DLayer(layers['conv2'], pool_size=3)
    layers['hid1'] = DenseLayer(layers['pool2'], num_units=100)
    layers['hid2'] = DenseLayer(layers['hid1'], num_units=100)
    layers['hid3'] = DenseLayer(layers['hid2'], num_units=100)
    layers['out'] = DenseLayer(layers['hid3'], num_units=10, nonlinearity=softmax)
    return layers
# Build, train for 20 epochs, checkpoint the parameters, plot the curves,
# then evaluate the restored parameters on the held-out test set.
net = build_neural_network()
train_fn, valid_fn = compile_train_function(net, lr=0.0001, w_dacy=1e-5)
train_curves = fit(train_fn, valid_fn,
                   train_set=(x_train, y_train), valid_set=(x_valid, y_valid),
                   epochs=20, batch_size=8000)
saved_params(net, 'params.pkl')
plot_train(train_curves)
predict(net, valid_fn, x_test, y_test, 'params.pkl')
|
{"/cnn_mnist.py": ["/Th_nN/train.py", "/Th_nN/check_train.py", "/Th_nN/mnist_data.py"]}
|
34,176
|
buzutilucas/CNNs-mnist
|
refs/heads/master
|
/Th_nN/mnist_data.py
|
import numpy as np
import struct
import urllib
import os
import sys
import gzip
import StringIO
def _open_file(file_img, file_lable):
    """Read a pair of MNIST idx files and return (images, labels).

    Images come back as uint8 arrays of shape (N, 1, rows, cols);
    labels as an int8 array of length N. The 8/16-byte big-endian idx
    headers are consumed before the raw payload is loaded.
    """
    with open(file_lable, 'rb') as label_stream:
        magic, num = struct.unpack('>II', label_stream.read(8))
        labels = np.fromfile(label_stream, dtype=np.int8)
    with open(file_img, 'rb') as image_stream:
        magic, num, rows, cols = struct.unpack(">IIII", image_stream.read(16))
        images = np.fromfile(image_stream, dtype=np.uint8).reshape(len(labels), 1, rows, cols)
    return images, labels
def loading_dataset():
    """Download (if missing) and load the four MNIST idx files.

    Python 2 code: uses urllib.urlopen and StringIO for the in-memory
    gunzip step. Decompressed files are cached under ./Datasets/.

    Returns:
        (train_img, train_lbl, test_img, test_lbl) numpy arrays as
        produced by _open_file.
    """
    baseURL = 'http://yann.lecun.com/exdb/mnist/'
    lst_data_url = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
                    't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
    lst_dataset = ['train-images.idx3-ubyte', 'train-labels.idx1-ubyte',
                   't10k-images.idx3-ubyte', 't10k-labels.idx1-ubyte']
    path = os.path.abspath('.')
    # Download dataset if not yet done:
    for i, data in enumerate(lst_data_url):
        if not os.path.isfile(path+'/Datasets/'+lst_dataset[i]):
            sys.stdout.write('\r' + 'Downloading')
            resp = urllib.urlopen(baseURL + data)
            # Gunzip entirely in memory before writing the decompressed file.
            compressed_file = StringIO.StringIO()
            compressed_file.write(resp.read())
            compressed_file.seek(0)
            decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode='rb')
            with open(path+'/Datasets/'+lst_dataset[i], 'wb') as out_file:
                out_file.write(decompressed_file.read())
            sys.stdout.write('\r')
    # Load the dataset
    train_file_img = path+'/Datasets/'+lst_dataset[0]
    train_file_lbl = path+'/Datasets/'+lst_dataset[1]
    test_file_img = path+'/Datasets/'+lst_dataset[2]
    test_file_lbl = path+'/Datasets/'+lst_dataset[3]
    train_img, train_lbl = _open_file(train_file_img, train_file_lbl)
    test_img, test_lbl = _open_file(test_file_img, test_file_lbl)
    return train_img, train_lbl, test_img, test_lbl
|
{"/cnn_mnist.py": ["/Th_nN/train.py", "/Th_nN/check_train.py", "/Th_nN/mnist_data.py"]}
|
34,180
|
Ekko84H/WhatsApp-Spammer
|
refs/heads/main
|
/functions.py
|
import time
from selenium import webdriver
def spammer(name, message, count):
    """Send `message` to WhatsApp contact `name` `count` times via WhatsApp Web.

    Opens a fresh Firefox session; the user must scan the QR code within
    the 15-second implicit wait. Returns a short status string.
    """
    # BUG FIX: the GUI caller passes `count` from a text input as a str, so
    # `count > 0` raised TypeError on Python 3; coerce to int up front.
    # (Accepting ints unchanged keeps the call backward-compatible.)
    count = int(count)
    driver = webdriver.Firefox()
    driver.get('https://web.whatsapp.com')
    driver.implicitly_wait(15)
    driver.find_element_by_css_selector("span[title='" + str(name) + "']").click()
    while count > 0:
        driver.find_element_by_xpath('//*[@id="main"]/footer/div[1]/div[2]/div/div[2]').send_keys(message)
        driver.find_element_by_xpath('//*[@id="main"]/footer/div[1]/div[3]/button').click()
        time.sleep(1)  # let WhatsApp Web register each send
        count -= 1
    S = 'Spammed' + name
    return S
|
{"/main.py": ["/functions.py"]}
|
34,181
|
Ekko84H/WhatsApp-Spammer
|
refs/heads/main
|
/main.py
|
import PySimpleGUI as sg
from functions import spammer
# Set the theme
sg.theme('DarkBlue')
# Define the window's contents
layout = [[sg.Text('Name of Contact :')],
          [sg.Input(key='-CONTACT-')],
          [sg.Text('Enter Text Message :')],
          [sg.Input(key='-MESSAGE-')],
          [sg.Text('Enter of number times to Send :')],
          [sg.Input(key='-COUNT-')],
          [sg.Text(size=(40,1), key='-OUTPUT-')],
          [sg.Button('SPAM !!!'), sg.Button('Quit')]]
# Create the window
window = sg.Window('WhatsApp Spammer', layout, alpha_channel=0.90)
# Display and interact with the Window using an Event Loop
while True:
    event, values = window.read()
    # See if user wants to quit or window was closed
    if event == sg.WINDOW_CLOSED or event == 'Quit':
        break
    # Setting Variables
    # NOTE(review): sg.Input values are strings, so `count` reaches
    # spammer() as a str — spammer must coerce before numeric use.
    name = values['-CONTACT-']
    message = values['-MESSAGE-']
    count = values['-COUNT-']
    # calling spammer from functions.py (blocks until all messages are sent)
    S = spammer(name, message, count)
    # Output a message to the window
    window['-OUTPUT-'].update(S)
# Finish up by removing from the screen
window.close()
|
{"/main.py": ["/functions.py"]}
|
34,185
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/passage_views.py
|
from rest_framework.generics import CreateAPIView, GenericAPIView, ListAPIView
from rest_framework.response import Response
from project.api.models.passage_model import Passage
from project.api.serializers.passage_serializer import PassageSerializer
# Get List Passages
class ListPassageView(ListAPIView):
    """List passages whose self-referential `passage` FK is null —
    presumably the roots of a passage hierarchy; confirm in the model."""
    serializer_class = PassageSerializer
    queryset = Passage.objects.filter(passage__isnull=True)
# create a Passage
class CreatePassageView(CreateAPIView):
    """Create a new Passage via POST."""
    serializer_class = PassageSerializer
    queryset = Passage.objects.all()
# Get Update Delete a Passage by ID.
class GetUpdateDeletePassageView(GenericAPIView):
    """Retrieve, update (via POST), or delete a single Passage by pk."""
    serializer_class = PassageSerializer
    queryset = Passage.objects.all()

    def get(self, request, **kwargs):
        passage = self.get_object()
        return Response(self.get_serializer(passage).data)

    def post(self, request, **kwargs):
        # Full update of the passage from the request body.
        passage = self.get_object()
        serializer = self.get_serializer(passage, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        self.get_object().delete()
        return Response('Passage deleted')
# Get Passage by CellType ID
class PassageByCellTypeId(ListAPIView):
    """List all passages for the cell type identified by the URL's `pk`."""
    serializer_class = PassageSerializer

    def get_queryset(self):
        return Passage.objects.filter(cell_type=self.kwargs['pk'])
# Get Passage by Biopsy ID
class PassageByBiopsyId(ListAPIView):
    """List passages under a biopsy, traversing
    cell_type -> skin_layer -> sub_biopsy -> biopsy."""
    serializer_class = PassageSerializer

    def get_queryset(self):
        return Passage.objects.filter(
            cell_type__skin_layer__sub_biopsy__biopsy__id=self.kwargs['pk'])
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,186
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/enzyme_serializer.py
|
from rest_framework import serializers
from project.api.models.enzyme_model import Enzyme
class EnzymeSerializer(serializers.ModelSerializer):
    """Serializes an Enzyme to its id, name and lot number."""
    class Meta:
        model = Enzyme
        fields = ['id', 'name', 'lot_number']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,187
|
etsyketsy/finalProject
|
refs/heads/master
|
/nginx/render_template.py
|
import os
import jinja2
# Render the nginx config from its template, substituting settings taken
# from the environment, and install it where nginx expects it.
env = jinja2.Environment(loader=jinja2.FileSystemLoader('/templates'))
template = env.get_template('template.conf')
rendered = template.render(NGINX_SSL=os.environ['NGINX_SSL'],
                           DOMAIN=os.environ['DOMAIN'])
with open('/etc/nginx/conf.d/default.conf', 'w') as output:
    output.write(rendered)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,188
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/container_type_serializer.py
|
from rest_framework import serializers
from project.api.models.container_type_model import ContainerType
class ContainerTypeSerializer(serializers.ModelSerializer):
    """Serializes a ContainerType to its id and name."""
    class Meta:
        model = ContainerType
        fields = ['id', 'name']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,189
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/cell_category_serailzer.py
|
from rest_framework import serializers
from project.api.models.cell_category_model import CellCategory
class CellCategorySerializer(serializers.ModelSerializer):
    """Serializes a CellCategory to its id and name."""
    class Meta:
        model = CellCategory
        fields = ['id', 'name']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,190
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/morphology_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class Morphology(models.Model):
    """Observed cell morphology, chosen from a fixed descriptive vocabulary."""
    BIPOLAR = "Bipolar"
    SPINDLE_LIKE = "Spindle-like"
    STAR_LIKE = "Star-like"
    DENDRITE_LIKE = "Dendrite-like"
    OTHER = "Other"
    # NOTE(review): max_length is not enforced at the DB level on TextField;
    # a CharField would match the intent, but swapping the field class needs
    # a migration, so it is deliberately left unchanged here.
    type = models.TextField(
        verbose_name='Morphology',
        max_length=255,
        choices=(
            (BIPOLAR, BIPOLAR),
            (SPINDLE_LIKE, SPINDLE_LIKE),
            (STAR_LIKE, STAR_LIKE),
            (DENDRITE_LIKE, DENDRITE_LIKE),
            (OTHER, OTHER)
        ),
        blank=True,
        null=True,
    )
    history = HistoricalRecords()  # django-simple-history audit trail
    class Meta:
        verbose_name_plural = 'Morphologies'
        app_label = 'api'
    def __str__(self):
        # BUG FIX: `type` is nullable and __str__ must return a str;
        # returning None crashed admin/list display for blank rows.
        return self.type or ''
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,191
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/subbiopsy_model.py
|
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from simple_history.models import HistoricalRecords
from project.api.models.biopsy_model import Biopsy
from project.api.models.comment_model import Comment
from project.api.models.cut_method_model import CutMethod
from project.api.models.numbering_model import Numbering
class SubBiopsy(models.Model):
    """A fragment cut from a Biopsy, tracked by its own Numbering."""
    # NOTE(review): related_name='subbiopies' looks like a typo of
    # 'subbiopsies', but renaming would break existing reverse lookups.
    numbering = models.ForeignKey(
        verbose_name='Numbering',
        related_name='subbiopies',
        to=Numbering,
        on_delete=models.CASCADE,
    )
    biopsy = models.ForeignKey(
        verbose_name='Biopsy',
        related_name='sub_biopsies',
        to=Biopsy,
        on_delete=models.CASCADE,
        blank=False,
        null=False,
    )
    sub_biopsy_area = models.IntegerField(
        verbose_name='Sub Biopsy Area',
        blank=True,
        null=True,
    )
    cut_method = models.ForeignKey(
        verbose_name='Cut Method',
        related_name='sub_biopsies',
        to=CutMethod,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    comments = GenericRelation(Comment)
    history = HistoricalRecords()  # django-simple-history audit trail
    class Meta:
        app_label = 'api'
        verbose_name_plural = 'Sub-biopsies'
    def __str__(self):
        return self.numbering.numbering
    def save(self, **kwargs):
        # Auto-allocate a Numbering. Operator precedence makes the test
        #   (self.pk and not self.numbering) or (not self.pk)
        # i.e. always for new rows, and for saved rows missing a numbering.
        # NOTE(review): for a brand-new instance this runs even if the
        # caller pre-assigned a numbering, replacing it — confirm intended.
        if self.pk and not self.numbering or not self.pk:
            self.numbering = Numbering.objects.create()
        super().save(**kwargs)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,192
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/cell_distribution_serializer.py
|
from rest_framework import serializers
from project.api.models.cell_distribution_model import CellDistribution
class CellDistributionSerializer(serializers.ModelSerializer):
    """Serializes a CellDistribution to its `type` label and id."""
    class Meta:
        model = CellDistribution
        fields = ['type', 'id']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,193
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/skin_layer_model.py
|
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from simple_history.models import HistoricalRecords
from project.api.models.comment_model import Comment
from project.api.models.enzyme_model import Enzyme
from project.api.models.layer_type_model import LayerType
from project.api.models.numbering_model import Numbering
from project.api.models.subbiopsy_model import SubBiopsy
class SkinLayer(models.Model):
    """A layer separated from a :class:`SubBiopsy` (e.g. by enzymatic digestion).

    A fresh ``Numbering`` row is created and attached automatically the
    first time the instance is saved (see ``save``).
    """

    numbering = models.ForeignKey(
        verbose_name='Numbering',
        related_name='skinlayers',
        to=Numbering,
        on_delete=models.CASCADE,
    )
    layer_type = models.ForeignKey(
        verbose_name='Skin Layer Type',
        related_name='skin_layers',
        to=LayerType,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    enzyme = models.ForeignKey(
        verbose_name='Enzyme',
        related_name='skin_layers',
        to=Enzyme,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    separation_time = models.DateTimeField(
        verbose_name='Separation Time',
        blank=True,
        null=True,
    )
    temperature = models.FloatField(
        verbose_name='Temperature',
        blank=True,
        null=True,
    )
    sub_biopsy = models.ForeignKey(
        verbose_name='Sub Biopsy',
        related_name='skin_layers',
        to=SubBiopsy,
        on_delete=models.CASCADE,
        blank=False,
        null=False,
    )
    # Generic comments attached via the contenttypes framework.
    comments = GenericRelation(Comment)
    # Full change history (django-simple-history).
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        # Display the human-readable numbering code.
        return self.numbering.numbering

    def save(self, **kwargs):
        # Create a Numbering on first save, or when an existing row has no
        # numbering attached.
        if self.pk and not self.numbering or not self.pk:
            self.numbering = Numbering.objects.create()
        super().save(**kwargs)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,194
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/comment_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
class Comment(models.Model):
    """Free-text comment attachable to any model via the contenttypes framework."""

    text = models.TextField(
        verbose_name='Comment',
        blank=True,
        null=True,
    )
    # Generic FK target: (content_type, object_id) resolved by content_object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField(
        blank=True,
        null=True
    )
    content_object = GenericForeignKey('content_type', 'object_id')
    # Full change history (django-simple-history).
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        return str(self.id)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,195
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/users_views.py
|
from rest_framework.generics import ListAPIView, GenericAPIView
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from project.api.serializers.users_serializer import UserSerializer
# Get current User
class GetCurrentUserView(GenericAPIView):
    """Return the authenticated user's record, serialized as a list."""

    serializer_class = UserSerializer
    queryset = User.objects.all()

    def get(self, request, **kwargs):
        # Look the requester up by username; the result is a 0/1-element list.
        matching = User.objects.filter(username=request.user)
        serializer = UserSerializer(matching, many=True)
        return Response(serializer.data)
# list all users
class ListUsersView(ListAPIView):
    """List every user; requires an authenticated requester."""

    serializer_class = UserSerializer
    permission_classes = [
        IsAuthenticated,
    ]
    queryset = User.objects.all()
# Get Update Delete Users by ID
class GetUpdateDeleteUserView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single user by pk."""

    serializer_class = UserSerializer
    queryset = User.objects.all()
    permission_classes = [
        IsAuthenticated,
    ]

    def get(self, request, **kwargs):
        user_obj = self.get_object()
        return Response(self.get_serializer(user_obj).data)

    def post(self, request, **kwargs):
        # POST acts as a full update here (instead of PUT/PATCH).
        user_obj = self.get_object()
        serializer = self.get_serializer(user_obj, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        self.get_object().delete()
        return Response('User deleted')
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,196
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/urls.py
|
from django.urls import path
from project.api.views.biopsys_views import BiopsyView, CreateNewBiopsyView, GetUpdateDeleteBiopsyView
from project.api.views.cell_types_views import GetUpdateDeleteCellTypeView, CreateCellTypeView, CellTypeBySkinLayerId, \
ListCellTypeView
from project.api.views.comments_views import CommentsView, GetUpdateDeleteCommentView
from project.api.views.donor_views import DonorView, CreateNewDonorView, GetUpdateDeleteDonorView
from project.api.views.enzyme_views import CreateEnzymeView, GetUpdateDeleteEnzymeView, EnzymeBySkinLayerId
from project.api.views.exports import BiopsyExportXlsView, BiopsyExportCsvView, CellTypeCsvView, CellTypeXlsView
from project.api.views.passage_views import CreatePassageView, GetUpdateDeletePassageView, PassageByCellTypeId, \
ListPassageView, PassageByBiopsyId
from project.api.views.skin_layer_views import SkinLayerView, GetUpdateDeleteSkinLayerView, SkinLayerBySubBioId, \
ListSkinLayerView
from project.api.views.sub_biopsys_views import SubBiopsiesView, CreateNewSubBiopsyView, GetUpdateDeleteSubBiopsyView, \
ListSubBiopsyByBioIDView
from project.api.views.users_views import ListUsersView, GetUpdateDeleteUserView, GetCurrentUserView
app_name = "api"

# API routes, grouped per resource. Each name must be unique within the
# 'api' namespace for reverse() to resolve unambiguously.
urlpatterns = [
    # Donors
    path('donor/', DonorView.as_view(), name='list-all-donors'),
    path('donor/new/', CreateNewDonorView.as_view(), name='create-new-donor'),
    path('donor/<int:pk>/', GetUpdateDeleteDonorView.as_view(), name='get-update-delete-donor-by-id'),
    # path('donors/<str:string>/', GetDonorByNumberingView.as_view(), name='get-donor-by-numbering'),
    # Biopsys
    path('biopsy/', BiopsyView.as_view(), name='list-all-biopsies'),
    path('biopsy/new/', CreateNewBiopsyView.as_view(), name='create-new-biopsy'),
    path('biopsy/<int:pk>/', GetUpdateDeleteBiopsyView.as_view(), name='get-update-delete-biopsy-by-id'),
    # SubBiopsies
    path('subbiopsy/', SubBiopsiesView.as_view(), name='list-all-sub-biopsies'),
    path('subbiopsy/new/', CreateNewSubBiopsyView.as_view(), name='create-new-sub-biopsy'),
    path('subbiopsy/<int:pk>/', GetUpdateDeleteSubBiopsyView.as_view(), name='get-update-delete-sub-biopsy-by-id'),
    path('subbiopsy/biopsy/<int:pk>/', ListSubBiopsyByBioIDView.as_view(), name='list-all-sub-biopsies-by-biopsy-id'),
    # skinLayers
    path('skinlayer/new/', SkinLayerView.as_view(), name='create-new-skinlayer'),
    path('skinlayer/<int:pk>/', GetUpdateDeleteSkinLayerView.as_view(), name='get-update-delete-skinlayer-by-id'),
    path('skinlayer/subbiopsy/<int:pk>/', SkinLayerBySubBioId.as_view(), name='list-all-skinlayer-by-sub-bio-id'),
    path('skinlayers/', ListSkinLayerView.as_view(), name='list-all-skinlayer'),
    # CellTypes
    path('celltypes/new/', CreateCellTypeView.as_view(), name='create-new-celltype'),
    path('celltypes/<int:pk>/', GetUpdateDeleteCellTypeView.as_view(), name='get-update-delete-celltypes-by-id'),
    path('celltypes/skinlayer/<int:pk>/', CellTypeBySkinLayerId.as_view(), name='list-all-cellTypes-by-skinlayer-id'),
    path('celltypes/', ListCellTypeView.as_view(), name='list-all-celltypes'),
    # Enzyme
    path('enzyme/new/', CreateEnzymeView.as_view(), name='create-new-enzyme'),
    path('enzyme/<int:pk>/', GetUpdateDeleteEnzymeView.as_view(), name='get-update-delete-enzyme-by-id'),
    path('enzyme/skinlayer/<int:pk>/', EnzymeBySkinLayerId.as_view(), name='list-all-enzyme-by-skinlayer-id'),
    # Passage
    path('passage/new/', CreatePassageView.as_view(), name='create-new-passage'),
    path('passage/<int:pk>/', GetUpdateDeletePassageView.as_view(), name='get-update-delete-passage-by-id'),
    path('passage/celltypes/<int:pk>/', PassageByCellTypeId.as_view(), name='list-all-passage-by-cellTypes-id'),
    # Fixed: this route was named 'list-all-celltypes', duplicating the
    # CellType list route above and making reverse() ambiguous.
    path('passages/', ListPassageView.as_view(), name='list-all-passages'),
    path('passage/biopsy/<int:pk>/', PassageByBiopsyId.as_view(), name='list-all-passage-by-biopsy-id'),
    # Comments
    path('comments/', CommentsView.as_view(), name='list-all-comments'),
    path('comments/<int:pk>/', GetUpdateDeleteCommentView.as_view(), name='get-update-delete-comment-by-id'),
    # users
    path('me/', GetCurrentUserView.as_view(), name='get-current-user'),
    path('users/list/', ListUsersView.as_view(), name='list-all-users'),
    # path('users/', SearchUserView.as_view(), name='users-search'),
    path('me/<int:pk>/', GetUpdateDeleteUserView.as_view(), name='get-update-delete-users-by-id'),
    # exports
    path('exports/biopsy/xls/', BiopsyExportXlsView.as_view(), name='print-all-biopsies-xls-format'),
    path('exports/biopsy/csv/', BiopsyExportCsvView.as_view(), name='print-all-biopsies-csv-format'),
    path('exports/celltypes/xls/', CellTypeXlsView.as_view(), name='print-all-celltypes-xls-format'),
    path('exports/celltypes/csv/', CellTypeCsvView.as_view(), name='print-all-celltypes-csv-format'),
]
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,197
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/skin_layer_serializer.py
|
from rest_framework import serializers
from project.api.models.skin_layer_model import SkinLayer
class SkinLayerSerializer(serializers.ModelSerializer):
    """Serializer for SkinLayer rows, exposing readable numbering codes."""

    numbering = serializers.SerializerMethodField(read_only=True)
    sub_biopsy_numbering = serializers.SerializerMethodField(read_only=True)

    def get_numbering(self, instance):
        # This layer's own human-readable code.
        code_row = instance.numbering
        return code_row.numbering

    def get_sub_biopsy_numbering(self, instance):
        # Code of the parent sub-biopsy.
        parent = instance.sub_biopsy
        return parent.numbering.numbering

    class Meta:
        model = SkinLayer
        fields = ['numbering', 'sub_biopsy', 'enzyme', 'separation_time', 'temperature', 'id', 'sub_biopsy_numbering']
        read_only_fields = ['id', 'numbering']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,198
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/cell_types_serializer.py
|
from rest_framework import serializers
from project.api.models.cell_type_model import CellType
class CellTypeSerializer(serializers.ModelSerializer):
    """Serializer for CellType rows, exposing readable numbering codes."""

    numbering = serializers.SerializerMethodField(read_only=True)
    skin_layer_numbering = serializers.SerializerMethodField(read_only=True)

    def get_numbering(self, instance):
        # This cell type's own human-readable code.
        code_row = instance.numbering
        return code_row.numbering

    def get_skin_layer_numbering(self, instance):
        # Code of the parent skin layer.
        layer = instance.skin_layer
        return layer.numbering.numbering

    class Meta:
        model = CellType
        fields = [
            'numbering', 'skin_layer', 'type', 'enzyme', 'temperature', 'digestion_time', 'inhibition',
            'filter_size', 'filter_rinsing', 'centrifugation_speed', 'centrifugation_time', 'resuspended_volume',
            'concentration', 'viability', 'diameter', 'total_viable_isolated_cells', 'isolation_yield', 'id',
            'skin_layer_numbering',
        ]
        read_only_fields = ['numbering']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,199
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/cut_method_serializer.py
|
from rest_framework import serializers
from project.api.models.cut_method_model import CutMethod
class CutMethodSerializer(serializers.ModelSerializer):
    """Plain serializer for the CutMethod lookup table."""

    class Meta:
        model = CutMethod
        fields = ['id', 'method']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,200
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/cell_types_views.py
|
from rest_framework.generics import GenericAPIView, ListAPIView, CreateAPIView
from rest_framework.response import Response
from project.api.models.cell_type_model import CellType
from project.api.serializers.cell_types_serializer import CellTypeSerializer
# Get List of CellTypes.
class ListCellTypeView(ListAPIView):
    """List every CellType."""

    serializer_class = CellTypeSerializer
    queryset = CellType.objects.all()
# Create a new CellType.
class CreateCellTypeView(CreateAPIView):
    """Create a new CellType."""

    serializer_class = CellTypeSerializer
    queryset = CellType.objects.all()
# Get Update Delete a CellType by ID.
class GetUpdateDeleteCellTypeView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single CellType by pk."""

    serializer_class = CellTypeSerializer
    queryset = CellType.objects.all()

    def get(self, request, **kwargs):
        cell_type = self.get_object()
        return Response(self.get_serializer(cell_type).data)

    def post(self, request, **kwargs):
        # POST acts as a full update here (instead of PUT/PATCH).
        cell_type = self.get_object()
        serializer = self.get_serializer(cell_type, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        self.get_object().delete()
        return Response('Cell deleted')
# Get celltype by SkinLayer ID
class CellTypeBySkinLayerId(ListAPIView):
    """List every CellType belonging to the skin layer given by the pk kwarg."""

    serializer_class = CellTypeSerializer

    def get_queryset(self):
        return CellType.objects.filter(skin_layer=self.kwargs['pk'])
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,201
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/helpers.py
|
import random
def generate_numbering(length=4):
    """Return a random code of *length* characters drawn from 0-9 and A-Z."""
    alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # random.choices draws `length` characters with replacement in one call.
    return ''.join(random.choices(alphabet, k=length))
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,202
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/cell_counting_serializer.py
|
from rest_framework import serializers
from project.api.models.cell_counting_model import CellCounting
class CellCountingSerializer(serializers.ModelSerializer):
    """Plain serializer for CellCounting rows (image + count per passage)."""

    class Meta:
        model = CellCounting
        fields = ['id', 'image', 'cells_counted', 'passage']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,203
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/passage_serializer.py
|
from rest_framework import serializers
from project.api.models.passage_model import Passage
from project.api.serializers.cell_counting_serializer import CellCountingSerializer
from rest_framework_recursive.fields import RecursiveField
class PassageSerializer(serializers.ModelSerializer):
    """Serializer for Passage rows, with nested child passages and cell counts."""

    # Child passages serialized recursively.
    passages = RecursiveField(required=False, allow_null=True, many=True)
    numbering = serializers.SerializerMethodField(read_only=True)
    passage_numbering = serializers.SerializerMethodField(read_only=True)
    cell_counting = CellCountingSerializer(many=True, read_only=True)
    cell_type_numbering = serializers.SerializerMethodField(read_only=True)

    def get_numbering(self, instance):
        # This passage's own human-readable code.
        return instance.numbering.numbering

    def get_cell_type_numbering(self, instance):
        # Code of the originating cell type, if any.
        cell_type = instance.cell_type
        return cell_type.numbering.numbering if cell_type else None

    def get_passage_numbering(self, instance):
        # Code of the parent passage, if any.
        parent = instance.passage
        return parent.numbering.numbering if parent else None

    def get_cell_counting(self, instance):
        # NOTE: DRF never calls this hook because 'cell_counting' is declared
        # above as a nested serializer, not a SerializerMethodField.
        return instance.cell_counting.cells_counted if instance.cell_counting else None

    class Meta:
        model = Passage
        fields = [
            'numbering', 'cell_type', 'passage', 'passages', 'cell_distribution', 'pigmentation', 'morphology',
            'morphology_image', 'passaging_date', 'enzyme', 'digestion_time', 'digestion_temperature',
            'resuspended_volume', 'concentration', 'viability', 'diameter', 'container_type', 'container_area',
            'coating', 'coating_concentration_per_cm2', 'seeding_density', 'magnification', 'cell_counting',
            'not_continued', 'total_viable_isolated_cells', 'id', 'cell_type_numbering', 'passage_numbering',
        ]
        read_only_fields = ['id', 'numbering', 'cell_counting']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,204
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/numbering_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
from ..helpers import generate_numbering
class Numbering(models.Model):
    """Unique short alphanumeric identifier shared by the domain models."""

    numbering = models.CharField(
        verbose_name='Numbering',
        max_length=6,
        unique=True,
    )
    # Full change history (django-simple-history).
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def save(self, *args, **kwargs):
        # Assign a fresh unique code on first save, or when an existing row
        # has no code set.
        if not self.pk or not self.numbering:
            candidate = generate_numbering(4)
            # Re-roll on (rare) collisions until the code is unused.
            while Numbering.objects.filter(numbering=candidate).exists():
                candidate = generate_numbering(4)
            self.numbering = candidate
        super().save(*args, **kwargs)

    def __str__(self):
        return self.numbering
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,205
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/biopsy_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
from django.contrib.contenttypes.fields import GenericRelation
from project.api.models.anatomical_part_model import AnatomicalPart
from project.api.models.comment_model import Comment
from project.api.models.donor_model import Donor
from project.api.models.numbering_model import Numbering
class Biopsy(models.Model):
    """A skin biopsy taken from a :class:`Donor`.

    A fresh ``Numbering`` row is created and attached automatically the
    first time the instance is saved (see ``save``).
    """

    numbering = models.ForeignKey(
        verbose_name='Numbering',
        related_name='biopsies',
        to=Numbering,
        on_delete=models.CASCADE,
    )
    surgery_date = models.DateField(
        verbose_name='Surgery Date',
        blank=True,
        null=True,
    )
    anatomical_part = models.ForeignKey(
        verbose_name='Anatomical Part',
        related_name='biopsies',
        to=AnatomicalPart,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    skin_thickness = models.FloatField(
        verbose_name='Skin Thickness',
        blank=True,
        null=True,
    )
    skin_area = models.FloatField(
        verbose_name='Skin Area',
        blank=True,
        null=True,
    )
    donor = models.ForeignKey(
        verbose_name='Donor',
        related_name='biopsies',
        to=Donor,
        on_delete=models.CASCADE,
        blank=False,
        null=False,
    )
    # Generic comments attached via the contenttypes framework.
    comments = GenericRelation(Comment)
    # Full change history (django-simple-history).
    history = HistoricalRecords()

    class Meta:
        # Newest surgeries first.
        ordering = ['-surgery_date']
        app_label = 'api'
        verbose_name_plural = 'Biopsies'

    def __str__(self):
        # Display the human-readable numbering code.
        return self.numbering.numbering

    def save(self, **kwargs):
        # Create a Numbering on first save, or when an existing row has no
        # numbering attached.
        if self.pk and not self.numbering or not self.pk:
            self.numbering = Numbering.objects.create()
        super().save(**kwargs)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,206
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/urls.py
|
from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
from rest_framework_simplejwt import views as jwt_views
# All application routes, mounted under the /backend/ prefix below.
mypatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('project.api.urls')),
    path('docs/', include_docs_urls(title='eSkin Rest API')),
    # JWT token endpoints
    path('api/token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
    # Fixed: this route previously reused the name 'token_refresh', making
    # reverse('token_refresh') ambiguous between refresh and verify.
    path('api/token/verify/', jwt_views.TokenVerifyView.as_view(), name='token_verify'),
]

urlpatterns = [
    path('backend/', include(mypatterns)),
]

# Change admin site title
admin.site.site_header = "eSkin Admin Dashboard"
admin.site.site_title = "eSkin Admin Dashboard"
admin.site.index_title = ""
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,207
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/sub_biopsy_serializer.py
|
from rest_framework import serializers
from project.api.models.subbiopsy_model import SubBiopsy
class SubbiopsySerializer(serializers.ModelSerializer):
    """Serializer for SubBiopsy rows, exposing readable numbering codes."""

    numbering = serializers.SerializerMethodField(read_only=True)
    biopsy_numbering = serializers.SerializerMethodField(read_only=True)

    def get_numbering(self, instance):
        # This sub-biopsy's own human-readable code.
        code_row = instance.numbering
        return code_row.numbering

    def get_biopsy_numbering(self, instance):
        # Code of the parent biopsy.
        parent = instance.biopsy
        return parent.numbering.numbering

    class Meta:
        model = SubBiopsy
        fields = ['numbering', 'biopsy', 'sub_biopsy_area', 'cut_method', 'id', 'biopsy_numbering']
        read_only_fields = ['id', 'numbering']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,208
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/pigmentation_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class Pigmentation(models.Model):
    """Two-value lookup: whether cells are pigmented or not."""

    PIGMENTED = "Pigmented"
    NON_PIGMENTED = "Non-pigmented"

    # NOTE(review): max_length is not enforced at the DB level on TextField;
    # a CharField would be conventional, but changing it needs a migration.
    type = models.TextField(
        verbose_name='Pigmentation',
        max_length=255,
        choices=(
            (PIGMENTED, PIGMENTED),
            (NON_PIGMENTED, NON_PIGMENTED),
        ),
        blank=True,
        null=True,
    )
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        # Fixed: `type` may be NULL (blank=True/null=True); returning None
        # from __str__ raises TypeError, e.g. when rendered in the admin.
        return self.type or ''
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,209
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/exports.py
|
from django.http import HttpResponse
from rest_framework.generics import GenericAPIView
from project.api.resources.biopsy_resource import BiopsyResource
from project.api.resources.cell_types_resource import CellTypeResource
# printing xls format
class BiopsyExportXlsView(GenericAPIView):
    """Download all biopsies as an Excel (.xls) attachment."""

    def get(self, request):
        dataset = BiopsyResource().export()
        response = HttpResponse(dataset.xls, content_type='application/vnd.ms-excel')
        response['Content-Disposition'] = 'attachment; filename="biopsies.xls"'
        return response
class CellTypeXlsView(GenericAPIView):
    """Download all cell types as an Excel (.xls) attachment."""

    def get(self, request):
        # Local renamed: it is a CellTypeResource, not a biopsy resource.
        dataset = CellTypeResource().export()
        response = HttpResponse(dataset.xls, content_type='application/vnd.ms-excel')
        response['Content-Disposition'] = 'attachment; filename="celltypes.xls"'
        return response
# printing csv format
class BiopsyExportCsvView(GenericAPIView):
    """Download all biopsies as a CSV attachment."""

    def get(self, request):
        dataset = BiopsyResource().export()
        response = HttpResponse(dataset.csv, content_type='text/csv')
        # Fixed: filename extension was misspelled '.cvs'.
        response['Content-Disposition'] = 'attachment; filename="biopsies.csv"'
        return response
class CellTypeCsvView(GenericAPIView):
    """Download all cell types as a CSV attachment."""

    def get(self, request):
        dataset = CellTypeResource().export()
        response = HttpResponse(dataset.csv, content_type='text/csv')
        # Fixed: filename was 'biopsies.cvs' -- wrong entity (this exports
        # cell types, not biopsies) and a misspelled '.cvs' extension.
        response['Content-Disposition'] = 'attachment; filename="celltypes.csv"'
        return response
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,210
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/anatomical_part_serializer.py
|
from rest_framework import serializers
from project.api.models.anatomical_part_model import AnatomicalPart
class AnatomicalPartSerializer(serializers.ModelSerializer):
    """Plain serializer for the AnatomicalPart lookup table."""

    class Meta:
        model = AnatomicalPart
        fields = ['type', 'id']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,211
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/migrations/0002_auto_20190423_1407.py
|
# Generated by Django 2.0.3 on 2019-04-23 14:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax Passage.cell_type and Passage.passage to allow blank/NULL.

    Auto-generated migration -- do not edit the operations by hand.
    """

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        # Make the FK to CellType optional.
        migrations.AlterField(
            model_name='passage',
            name='cell_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.CellType', verbose_name='Cell Type'),
        ),
        # Make the self-referential parent-passage FK optional.
        migrations.AlterField(
            model_name='passage',
            name='passage',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.Passage'),
        ),
    ]
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,212
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/comments_serializer.py
|
from rest_framework import serializers
from project.api.models.comment_model import Comment
class CommentsSerializer(serializers.ModelSerializer):
    """Serializer for generic Comment rows (text + contenttypes target)."""

    class Meta:
        model = Comment
        fields = ['id', 'text', 'content_type', 'object_id']
        read_only_fields = ['id', 'object_id']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,213
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/cell_category_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class CellCategory(models.Model):
    """Closed set of cell categories (keratinocytes / melanocytes / fibroblast)."""

    KERATINOCYTES = "Keratinocytes"
    MELANOCYTES = "Melanocytes"
    FIBROBLAST = "Fibroblast"

    name = models.CharField(
        verbose_name='Type',
        max_length=100,
        choices=(
            (KERATINOCYTES, KERATINOCYTES),
            (MELANOCYTES, MELANOCYTES),
            (FIBROBLAST, FIBROBLAST)
        ),
        blank=True,
        null=True,
    )
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'
        verbose_name_plural = 'Cell categories'

    def __str__(self):
        # Fixed: `name` may be NULL (blank=True/null=True); returning None
        # from __str__ raises TypeError, e.g. when rendered in the admin.
        return self.name or ''
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,214
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/skin_layer_views.py
|
from rest_framework.generics import CreateAPIView, GenericAPIView, ListAPIView
from rest_framework.response import Response
from project.api.models.skin_layer_model import SkinLayer
from project.api.serializers.skin_layer_serializer import SkinLayerSerializer
# Get List SkinLayers.
class ListSkinLayerView(ListAPIView):
    """List every SkinLayer."""

    serializer_class = SkinLayerSerializer
    queryset = SkinLayer.objects.all()
# create a new skinlayer.
class SkinLayerView(CreateAPIView):
    """Create a new SkinLayer."""

    serializer_class = SkinLayerSerializer
    queryset = SkinLayer.objects.all()
# Get Update Delete a skinlayer by ID.
class GetUpdateDeleteSkinLayerView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single SkinLayer by pk."""

    serializer_class = SkinLayerSerializer
    queryset = SkinLayer.objects.all()

    def get(self, request, **kwargs):
        skin_layer = self.get_object()
        return Response(self.get_serializer(skin_layer).data)

    def post(self, request, **kwargs):
        # POST acts as a full update here (instead of PUT/PATCH).
        skin_layer = self.get_object()
        serializer = self.get_serializer(skin_layer, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        self.get_object().delete()
        return Response('Skin-Layer deleted')
# Get SkinLayer by Subbiopsy ID
class SkinLayerBySubBioId(ListAPIView):
    """List every SkinLayer belonging to the sub-biopsy given by the pk kwarg."""

    serializer_class = SkinLayerSerializer

    def get_queryset(self):
        return SkinLayer.objects.filter(sub_biopsy=self.kwargs['pk'])
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,215
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/coating_serializer.py
|
from rest_framework import serializers
from project.api.models.coating_model import Coating
class CoatingSerializer(serializers.ModelSerializer):
    """Plain serializer for the Coating lookup table."""

    class Meta:
        model = Coating
        fields = ['id', 'name']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,216
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/admin.py
|
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from django.contrib.contenttypes.admin import GenericTabularInline
from simple_history.admin import SimpleHistoryAdmin
from project.api.models.anatomical_part_model import AnatomicalPart
from project.api.models.biopsy_model import Biopsy
from project.api.models.cell_category_model import CellCategory
from project.api.models.cell_counting_model import CellCounting
from project.api.models.cell_type_model import CellType
from project.api.models.coating_model import Coating
from project.api.models.comment_model import Comment
from project.api.models.container_type_model import ContainerType
from project.api.models.cut_method_model import CutMethod
from project.api.models.donor_model import Donor
from project.api.models.enzyme_model import Enzyme
from project.api.models.layer_type_model import LayerType
from project.api.models.morphology_model import Morphology
from project.api.models.numbering_model import Numbering
from project.api.models.passage_model import Passage, CellDistribution
from project.api.models.pigmentation_model import Pigmentation
from project.api.models.skin_layer_model import SkinLayer
from project.api.models.subbiopsy_model import SubBiopsy
class CommentInline(GenericTabularInline):
    # Inline editor for the generic Comment relation on admin change pages.
    model = Comment
# Register models that need comments below
@admin.register(Donor)
class DonorAdmin(SimpleHistoryAdmin, ImportExportModelAdmin):
    """Donor admin with history, import/export and inline comments."""
    inlines = [CommentInline]
    list_display = ['numbering', "age", "gender", "donor_reference"]
    # Hidden from the form: numbering is assigned automatically on save.
    exclude = ['numbering']
@admin.register(Biopsy)
class BiopsyAdmin(SimpleHistoryAdmin, ImportExportModelAdmin):
    """Biopsy admin with history, import/export and inline comments."""
    inlines = [CommentInline]
    list_display = ["numbering", "surgery_date", "anatomical_part", "skin_thickness", "skin_area",
                    "donor"]
    # Hidden from the form: numbering is assigned automatically on save.
    exclude = ['numbering']
@admin.register(SubBiopsy)
class SubBiopsyAdmin(SimpleHistoryAdmin, ImportExportModelAdmin):
    """SubBiopsy admin with history, import/export and inline comments."""
    inlines = [CommentInline]
    list_display = ["numbering", "biopsy", "sub_biopsy_area", "cut_method"]
    # Hidden from the form: numbering is assigned automatically on save.
    exclude = ['numbering']
@admin.register(SkinLayer)
class SkinLayerAdmin(SimpleHistoryAdmin, ImportExportModelAdmin):
    """SkinLayer admin with history, import/export and inline comments.

    Renamed from ``SkinLayer``: the old class name shadowed the imported
    SkinLayer model for the remainder of this module.
    """
    inlines = [CommentInline]
    list_display = ["numbering", "enzyme", "separation_time", "temperature", "sub_biopsy"]
    # Hidden from the form: numbering is assigned automatically on save.
    exclude = ['numbering']
@admin.register(CellType)
class CellTypeAdmin(SimpleHistoryAdmin, ImportExportModelAdmin):
    """CellType admin with history, import/export and inline comments."""
    inlines = [CommentInline]
    list_display = ["numbering", "type", "enzyme", "temperature", "digestion_time", "inhibition",
                    "filter_size", "filter_rinsing", "centrifugation_speed",
                    "centrifugation_time", "resuspended_volume", "concentration",
                    "viability", "diameter", "skin_layer", "total_viable_isolated_cells", "isolation_yield"]
    # Hidden from the form: numbering is assigned automatically on save.
    exclude = ['numbering']
@admin.register(Passage)
class PassageAdmin(SimpleHistoryAdmin, ImportExportModelAdmin):
    """Passage admin with history, import/export and inline comments."""
    inlines = [CommentInline]
    list_display = ["numbering", "cell_type", "passage", "cell_distribution", "pigmentation",
                    "morphology", "morphology_image", "passaging_date", "enzyme", "digestion_time",
                    "digestion_temperature", "resuspended_volume", "concentration", "viability",
                    "diameter", "container_type", "container_area", "coating",
                    "coating_concentration_per_cm2", "seeding_density", "magnification", "not_continued"]
    # Hidden from the form: numbering is assigned automatically on save.
    exclude = ['numbering']
# Models without comments
# Simple lookup/choice tables: registered with the history-aware admin only,
# no comment inline.
admin.site.register(Numbering, SimpleHistoryAdmin)
admin.site.register(Comment, SimpleHistoryAdmin)
admin.site.register(AnatomicalPart, SimpleHistoryAdmin)
admin.site.register(CutMethod, SimpleHistoryAdmin)
admin.site.register(Enzyme, SimpleHistoryAdmin)
admin.site.register(LayerType, SimpleHistoryAdmin)
admin.site.register(CellCategory, SimpleHistoryAdmin)
admin.site.register(ContainerType, SimpleHistoryAdmin)
admin.site.register(Coating, SimpleHistoryAdmin)
admin.site.register(CellDistribution, SimpleHistoryAdmin)
admin.site.register(Pigmentation, SimpleHistoryAdmin)
admin.site.register(Morphology, SimpleHistoryAdmin)
admin.site.register(CellCounting, SimpleHistoryAdmin)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,217
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/anatomical_part_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class AnatomicalPart(models.Model):
    """Closed set of body locations a biopsy can come from."""

    ABDOMEN = 'Abdomen'
    LEG = 'Leg'
    BREAST = 'Breast'
    SCALP = 'Scalp'
    OTHER = 'Other'
    ANATOMICAL_PART_CHOICES = (
        (ABDOMEN, ABDOMEN),
        (LEG, LEG),
        (BREAST, BREAST),
        (SCALP, SCALP),
        (OTHER, OTHER),
    )

    type = models.CharField(
        verbose_name='Anatomical Part',
        max_length=150,
        choices=ANATOMICAL_PART_CHOICES,
        blank=True,
        null=True,
    )
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        # Fixed: `type` may be NULL (blank=True/null=True); returning None
        # from __str__ raises TypeError, e.g. when rendered in the admin.
        return self.type or ''
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,218
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/cell_type_model.py
|
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from simple_history.models import HistoricalRecords
from project.api.models.cell_category_model import CellCategory
from project.api.models.comment_model import Comment
from project.api.models.enzyme_model import Enzyme
from project.api.models.numbering_model import Numbering
from project.api.models.skin_layer_model import SkinLayer
class CellType(models.Model):
    """A cell population isolated from one skin layer.

    Records the enzymatic digestion parameters, the filtering and
    centrifugation steps, and the counting results (concentration,
    viability, diameter).
    """

    numbering = models.ForeignKey(
        verbose_name='Numbering',
        # NOTE(review): related_name 'celltypes' is inconsistent with the
        # 'cell_types' used by the other FKs of this model.
        related_name='celltypes',
        to=Numbering,
        on_delete=models.CASCADE,
    )
    type = models.ForeignKey(
        verbose_name='Cell Category',
        related_name='cell_types',
        to=CellCategory,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    enzyme = models.ForeignKey(
        verbose_name='Enzyme',
        related_name='cell_types',
        to=Enzyme,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    # --- digestion parameters ---
    temperature = models.FloatField(
        verbose_name='Temperature',
        blank=True,
        null=True,
    )
    digestion_time = models.DateTimeField(
        verbose_name='Digestion Time',
        blank=True,
        null=True,
    )
    inhibition = models.IntegerField(
        verbose_name='Inhibition',
        blank=True,
        null=True,
    )
    # --- filtering / centrifugation parameters ---
    filter_size = models.IntegerField(
        verbose_name='Filter Size',
        blank=True,
        null=True,
    )
    filter_rinsing = models.IntegerField(
        verbose_name='Filter Rinsing',
        blank=True,
        null=True,
    )
    centrifugation_speed = models.IntegerField(
        verbose_name='Centrifugation Speed',
        blank=True,
        null=True,
    )
    centrifugation_time = models.DateTimeField(
        verbose_name='Centrifugation Time',
        blank=True,
        null=True,
    )
    # --- counting results ---
    resuspended_volume = models.IntegerField(
        verbose_name='Resuspended Volume',
        blank=True,
        null=True,
    )
    concentration = models.FloatField(
        verbose_name='Concentration',
        blank=True,
        null=True,
    )
    viability = models.FloatField(
        verbose_name='Viability',
        blank=True,
        null=True,
    )
    diameter = models.FloatField(
        verbose_name='Diameter',
        blank=True,
        null=True,
    )
    skin_layer = models.ForeignKey(
        verbose_name='Skin Layer',
        related_name='cell_types',
        to=SkinLayer,
        on_delete=models.CASCADE,
        blank=False,
        null=False,
    )
    comments = GenericRelation(Comment)
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        # NOTE(review): assumes Numbering.numbering is a string — confirm.
        return self.numbering.numbering

    @property
    def total_viable_isolated_cells(self):
        """volume * concentration * viability%, or the string 'Null' on error.

        The divisor is the literal 100, so ZeroDivisionError cannot occur
        here; in practice the generic handler swallows the TypeError raised
        when any factor is None. NOTE(review): mixing float and 'Null' (str)
        return values pushes type checks onto callers.
        """
        try:
            result = self.resuspended_volume * self.concentration * (self.viability/100)
        except ZeroDivisionError as error:
            print(error)
            result = 'Null'
        except Exception as exception:
            print(exception)
            result = 'Null'
        return result

    @property
    def isolation_yield(self):
        """Viable cells per unit biopsy skin area, or the string 'Null' on error."""
        try:
            result = self.total_viable_isolated_cells / self.skin_layer.sub_biopsy.biopsy.skin_area
        except ZeroDivisionError as error:
            print(error)
            result = 'Null'
        except Exception as exception:
            print(exception)
            result = 'Null'
        return result

    def save(self, **kwargs):
        """Assign a fresh Numbering before saving when none is set yet."""
        # Precedence: (self.pk and not self.numbering) or (not self.pk) —
        # i.e. always on first save, or later if numbering was cleared.
        if self.pk and not self.numbering or not self.pk:
            self.numbering = Numbering.objects.create()
        super().save(**kwargs)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,219
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/enzyme_views.py
|
from rest_framework.generics import CreateAPIView, GenericAPIView, ListAPIView
from rest_framework.response import Response
from project.api.models.enzyme_model import Enzyme
from project.api.serializers.enzyme_serializer import EnzymeSerializer
# Create a new Enzyme
class CreateEnzymeView(CreateAPIView):
    """POST endpoint that registers a new Enzyme."""

    queryset = Enzyme.objects.all()
    serializer_class = EnzymeSerializer
# Get Update Delete a Enzyme by ID.
class GetUpdateDeleteEnzymeView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single Enzyme by ID."""

    queryset = Enzyme.objects.all()
    serializer_class = EnzymeSerializer

    def get(self, request, **kwargs):
        """Return the serialized enzyme."""
        enzyme = self.get_object()
        return Response(self.get_serializer(enzyme).data)

    def post(self, request, **kwargs):
        """Overwrite the enzyme with the posted payload."""
        enzyme = self.get_object()
        serializer = self.get_serializer(enzyme, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        """Remove the enzyme."""
        self.get_object().delete()
        return Response('Enzyme deleted')
# Get Enzyme by SkinLayer ID
class EnzymeBySkinLayerId(ListAPIView):
    """List the enzymes referenced by the skin layer with the given ID."""

    serializer_class = EnzymeSerializer

    def get_queryset(self):
        skin_layer_id = self.kwargs['pk']
        # BUG FIX: SkinLayer.enzyme declares related_name='skin_layers', so
        # the reverse lookup from Enzyme must use 'skin_layers';
        # filter(skin_layer=...) raises FieldError at query time.
        return Enzyme.objects.filter(skin_layers=skin_layer_id)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,220
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/container_type_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class ContainerType(models.Model):
    """Lookup table for the kind of vessel a passage is cultured in."""

    FLASK = "Flask"
    VIAL = "Vial"
    WELL_PLATE = "Well plate"
    QUANTUM = "Quantum"
    DENOVO_SKIN = "DenovoSkin"
    OTHER = "Other"
    name = models.CharField(
        verbose_name='type',
        max_length=100,
        choices=(
            (FLASK, FLASK),
            (VIAL, VIAL),
            (WELL_PLATE, WELL_PLATE),
            (QUANTUM, QUANTUM),
            (DENOVO_SKIN, DENOVO_SKIN),
            (OTHER, OTHER)
        ),
        blank=True,
        null=True,
    )
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        # BUG FIX: `name` is nullable; __str__ must return a str, so fall
        # back to '' instead of returning None (which raises TypeError).
        return self.name or ''
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,221
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/resources/cell_types_resource.py
|
from import_export import resources
from project.api.models.cell_type_model import CellType
class CellTypeResource(resources.ModelResource):
    """django-import-export resource for bulk import/export of CellType rows."""
    class Meta:
        model = CellType
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,222
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/passage_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
from django.contrib.contenttypes.fields import GenericRelation
from project.api.models.cell_type_model import CellType
from project.api.models.coating_model import Coating
from project.api.models.comment_model import Comment
from project.api.models.container_type_model import ContainerType
from project.api.models.enzyme_model import Enzyme
from project.api.models.morphology_model import Morphology
from project.api.models.numbering_model import Numbering
from project.api.models.pigmentation_model import Pigmentation
from project.api.models.cell_distribution_model import CellDistribution
class Passage(models.Model):
    """One culture passage of a cell population.

    Tracks where the cells came from (``cell_type`` for an initial passage,
    ``passage`` as a self-reference for follow-on passages), the enzymatic
    detachment step, the counting results, and the culture vessel used.
    """

    numbering = models.ForeignKey(
        verbose_name='Numbering',
        related_name='passages',
        to=Numbering,
        on_delete=models.CASCADE,
    )
    # Self-reference: the earlier passage this one continues from.
    passage = models.ForeignKey(
        'self',
        on_delete=models.SET_NULL,
        related_name='passages',
        blank=True,
        null=True,
    )
    cell_type = models.ForeignKey(
        verbose_name='Cell Type',
        related_name='passages',
        to=CellType,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    # --- observed culture characteristics ---
    cell_distribution = models.ForeignKey(
        verbose_name='Cell Distribution',
        related_name='passages',
        to=CellDistribution,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    pigmentation = models.ForeignKey(
        verbose_name='Pigmentation',
        related_name='passages',
        to=Pigmentation,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    morphology = models.ForeignKey(
        verbose_name='Morphology',
        related_name='passages',
        to=Morphology,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    morphology_image = models.ImageField(
        verbose_name='Morphology Image',
        upload_to='Media Files',
        blank=True,
        null=True,
    )
    passaging_date = models.DateField(
        verbose_name='Passaging Date',
        blank=True,
        null=True,
    )
    # --- enzymatic detachment parameters ---
    enzyme = models.ForeignKey(
        verbose_name='Enzyme',
        related_name='passages',
        to=Enzyme,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    digestion_time = models.DateTimeField(
        verbose_name='Digestion Time',
        blank=True,
        null=True,
    )
    digestion_temperature = models.FloatField(
        verbose_name='Digestion Temperature',
        blank=True,
        null=True,
    )
    # --- counting results ---
    resuspended_volume = models.IntegerField(
        verbose_name='Resuspended Volume',
        blank=True,
        null=True,
    )
    concentration = models.FloatField(
        verbose_name='Concentration',
        blank=True,
        null=True,
    )
    viability = models.FloatField(
        verbose_name='Viability',
        blank=True,
        null=True,
    )
    diameter = models.FloatField(
        verbose_name='Diameter',
        blank=True,
        null=True,
    )
    # --- culture vessel ---
    container_type = models.ForeignKey(
        verbose_name='Container Type',
        related_name='passages',
        to=ContainerType,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    container_area = models.FloatField(
        verbose_name='Container Area',
        blank=True,
        null=True,
    )
    coating = models.ForeignKey(
        verbose_name='Coating',
        related_name='passages',
        to=Coating,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    coating_concentration_per_cm2 = models.FloatField(
        verbose_name='Coating Concentration per Cm2',
        blank=True,
        null=True,
    )
    seeding_density = models.IntegerField(
        verbose_name='Seeding Density',
        blank=True,
        null=True,
    )
    magnification = models.IntegerField(
        verbose_name='Magnification',
        blank=True,
        null=True,
    )
    # True when this passage line was intentionally stopped.
    not_continued = models.BooleanField(
        verbose_name='Not Continued',
        default=False,
    )
    comments = GenericRelation(Comment)
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    @property
    def total_viable_isolated_cells(self):
        """volume * concentration * viability%, or the string 'Null' on error.

        The divisor is the literal 100, so ZeroDivisionError cannot occur
        here; in practice the generic handler swallows the TypeError raised
        when any factor is None. NOTE(review): mixing float and 'Null' (str)
        return values pushes type checks onto callers.
        """
        try:
            result = self.resuspended_volume * self.concentration * (self.viability / 100)
        except ZeroDivisionError as error:
            print(error)
            result = 'Null'
        except Exception as exception:
            print(exception)
            result = 'Null'
        return result

    def save(self, **kwargs):
        """Assign a fresh Numbering before saving when none is set yet."""
        # Precedence: (self.pk and not self.numbering) or (not self.pk).
        if self.pk and not self.numbering or not self.pk:
            self.numbering = Numbering.objects.create()
        super().save(**kwargs)

    def __str__(self):
        # NOTE(review): assumes Numbering.numbering is a string — confirm.
        return self.numbering.numbering
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,223
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/biopsys_views.py
|
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from project.api.serializers.biopsy_serializer import BiopsySerializer
from project.api.models.biopsy_model import Biopsy
# Get the list of all biopsies. ***
class BiopsyView(ListAPIView):
    """Read-only list of all biopsies."""

    queryset = Biopsy.objects.all()
    serializer_class = BiopsySerializer
# Create a new Biopsy
class CreateNewBiopsyView(CreateAPIView):
    """POST endpoint that registers a new Biopsy."""

    queryset = Biopsy.objects.all()
    serializer_class = BiopsySerializer
# Get Update Delete a biopsy by ID
class GetUpdateDeleteBiopsyView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single Biopsy by ID.

    Requires an authenticated user.
    """

    queryset = Biopsy.objects.all()
    serializer_class = BiopsySerializer
    permission_classes = [
        IsAuthenticated,
    ]

    def get(self, request, **kwargs):
        """Return the serialized biopsy."""
        biopsy = self.get_object()
        return Response(self.get_serializer(biopsy).data)

    def post(self, request, **kwargs):
        """Overwrite the biopsy with the posted payload."""
        biopsy = self.get_object()
        serializer = self.get_serializer(biopsy, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        """Remove the biopsy."""
        self.get_object().delete()
        return Response('Biopsy deleted')
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,224
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/comments_views.py
|
from rest_framework.generics import ListAPIView, GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from project.api.models.comment_model import Comment
from project.api.serializers.comments_serializer import CommentsSerializer
class CommentsView(ListAPIView):
    """Read-only list of every Comment."""
    serializer_class = CommentsSerializer
    def get_queryset(self):
        # No filtering: the full comment queryset.
        return Comment.objects.all()
# Get Update Delete the list of comments by ID
class GetUpdateDeleteCommentView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single Comment by ID.

    Requires an authenticated user.
    """

    queryset = Comment.objects.all()
    serializer_class = CommentsSerializer
    permission_classes = [
        IsAuthenticated,
    ]

    def get(self, request, **kwargs):
        post = self.get_object()
        serializer = self.get_serializer(post)
        return Response(serializer.data)

    def post(self, request, **kwargs):
        post = self.get_object()
        serializer = self.get_serializer(post, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        # FIX: parameter was misspelled `okrequest`; handlers take `request`,
        # consistent with every other view in this app (DRF passes it
        # positionally, so behavior is unchanged).
        post = self.get_object()
        post.delete()
        return Response('Comment deleted')
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,225
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/sub_biopsys_views.py
|
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from project.api.serializers.sub_biopsy_serializer import SubbiopsySerializer
from project.api.models.subbiopsy_model import SubBiopsy
# List all sub-biopsies
class SubBiopsiesView(ListAPIView):
    """Read-only list of all sub-biopsies."""

    queryset = SubBiopsy.objects.all()
    serializer_class = SubbiopsySerializer
# create a new Sub-biopsy
class CreateNewSubBiopsyView(CreateAPIView):
    """POST endpoint that registers a new SubBiopsy."""

    queryset = SubBiopsy.objects.all()
    serializer_class = SubbiopsySerializer
# Get Update Delete a subbiopsy by ID.
class GetUpdateDeleteSubBiopsyView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single SubBiopsy by ID.

    Requires an authenticated user.
    """

    queryset = SubBiopsy.objects.all()
    serializer_class = SubbiopsySerializer
    permission_classes = [
        IsAuthenticated,
    ]

    def get(self, request, **kwargs):
        """Return the serialized sub-biopsy."""
        sub_biopsy = self.get_object()
        return Response(self.get_serializer(sub_biopsy).data)

    def post(self, request, **kwargs):
        """Overwrite the sub-biopsy with the posted payload."""
        sub_biopsy = self.get_object()
        serializer = self.get_serializer(sub_biopsy, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        """Remove the sub-biopsy."""
        self.get_object().delete()
        return Response('Sub-biopsy deleted')
# Get the list of subbiopsies by Biopsy ID.
class ListSubBiopsyByBioIDView(ListAPIView):
    """List all sub-biopsies belonging to the biopsy with the given ID."""

    serializer_class = SubbiopsySerializer

    def get_queryset(self):
        """Filter sub-biopsies by the parent biopsy's primary key."""
        return SubBiopsy.objects.filter(biopsy=self.kwargs['pk'])
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,226
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/views/donor_views.py
|
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView
from rest_framework.response import Response
from project.api.serializers.donor_serializer import DonorSerializer
from project.api.models.donor_model import Donor
# Get a list of all donors
class DonorView(ListAPIView):
    """Read-only list of all donors."""

    queryset = Donor.objects.all()
    serializer_class = DonorSerializer
# register a new donor.
class CreateNewDonorView(CreateAPIView):
    """POST endpoint that registers a new Donor."""

    queryset = Donor.objects.all()
    serializer_class = DonorSerializer
# Get Update Delete a donor by ID.
class GetUpdateDeleteDonorView(GenericAPIView):
    """Retrieve, update (via POST) or delete a single Donor by ID.

    NOTE(review): unlike the biopsy/comment/sub-biopsy detail views, this
    one declares no permission_classes — confirm unauthenticated access is
    intended.
    """

    queryset = Donor.objects.all()
    serializer_class = DonorSerializer

    def get(self, request, **kwargs):
        """Return the serialized donor."""
        donor = self.get_object()
        return Response(self.get_serializer(donor).data)

    def post(self, request, **kwargs):
        """Overwrite the donor with the posted payload."""
        donor = self.get_object()
        serializer = self.get_serializer(donor, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, **kwargs):
        """Remove the donor."""
        self.get_object().delete()
        return Response('Donor deleted')
# Get a donor by Numbering:
# class GetDonorByNumberingView(GenericAPIView):
# serializer_class = DonorSerializer
# queryset = Donor.objects.all()
#
# def get(self, *args, **kwargs):
# search_string = self.request.query_params.get('code')
# return Response(DonorSerializer(instance=Donor.objects.all(), many=True).data)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,227
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/layer_type_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class LayerType(models.Model):
    """Lookup table for the skin layer kind (dermis or epidermis)."""

    DERMIS = "Dermis"
    EPIDERMIS = "Epidermis"
    type = models.CharField(
        verbose_name='Type',
        max_length=100,
        choices=(
            (DERMIS, DERMIS),
            (EPIDERMIS, EPIDERMIS)
        ),
        blank=True,
        null=True,
    )
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        # BUG FIX: `type` is nullable; __str__ must return a str, so fall
        # back to '' instead of returning None (which raises TypeError).
        return self.type or ''
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,228
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/biopsy_serializer.py
|
from rest_framework import serializers
from project.api.models.biopsy_model import Biopsy
class BiopsySerializer(serializers.ModelSerializer):
    """Serializes a Biopsy, flattening its own and its donor's numbering codes."""
    numbering = serializers.SerializerMethodField(read_only=True)
    donor_numbering = serializers.SerializerMethodField(read_only=True)
    def get_numbering(self, instance):
        # Expose the human-readable numbering code instead of the FK id.
        return instance.numbering.numbering
    def get_donor_numbering(self, instance):
        # NOTE(review): raises AttributeError if `donor` is null — confirm
        # donor is always set on serialized biopsies.
        return instance.donor.numbering.numbering
    class Meta:
        model = Biopsy
        fields = ['numbering', 'surgery_date', 'donor', 'anatomical_part', 'skin_thickness', 'skin_area', 'id',
                  'donor_numbering']
        read_only_fields = ['numbering']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,229
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/migrations/0003_auto_20190425_0907.py
|
# Generated by Django 2.0.3 on 2019-04-25 09:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: every operation below relaxes a ForeignKey
    to on_delete=SET_NULL (with blank/null allowed), so deleting a lookup
    row no longer cascades into the records that reference it.
    Do not hand-edit beyond comments.
    """

    dependencies = [
        ('api', '0002_auto_20190423_1407'),
    ]
    operations = [
        migrations.AlterField(
            model_name='biopsy',
            name='anatomical_part',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='biopsies', to='api.AnatomicalPart', verbose_name='Anatomical Part'),
        ),
        migrations.AlterField(
            model_name='celltype',
            name='enzyme',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='cell_types', to='api.Enzyme', verbose_name='Enzyme'),
        ),
        migrations.AlterField(
            model_name='celltype',
            name='type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='cell_types', to='api.CellCategory', verbose_name='Cell Category'),
        ),
        migrations.AlterField(
            model_name='passage',
            name='cell_distribution',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passages', to='api.CellDistribution', verbose_name='Cell Distribution'),
        ),
        migrations.AlterField(
            model_name='passage',
            name='coating',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passages', to='api.Coating', verbose_name='Coating'),
        ),
        migrations.AlterField(
            model_name='passage',
            name='container_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passages', to='api.ContainerType', verbose_name='Container Type'),
        ),
        migrations.AlterField(
            model_name='passage',
            name='enzyme',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passages', to='api.Enzyme', verbose_name='Enzyme'),
        ),
        migrations.AlterField(
            model_name='passage',
            name='morphology',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passages', to='api.Morphology', verbose_name='Morphology'),
        ),
        migrations.AlterField(
            model_name='passage',
            name='passage',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passages', to='api.Passage'),
        ),
        migrations.AlterField(
            model_name='passage',
            name='pigmentation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passages', to='api.Pigmentation', verbose_name='Pigmentation'),
        ),
        migrations.AlterField(
            model_name='skinlayer',
            name='enzyme',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='skin_layers', to='api.Enzyme', verbose_name='Enzyme'),
        ),
        migrations.AlterField(
            model_name='skinlayer',
            name='layer_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='skin_layers', to='api.LayerType', verbose_name='Skin Layer Type'),
        ),
        migrations.AlterField(
            model_name='subbiopsy',
            name='cut_method',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sub_biopsies', to='api.CutMethod', verbose_name='Cut Method'),
        ),
    ]
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,230
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/Serializers/donor_serializer.py
|
from rest_framework import serializers
from project.api.models.donor_model import Donor
class DonorSerializer(serializers.ModelSerializer):
    """Serializes a Donor, flattening the Numbering FK to its code string."""
    numbering = serializers.SerializerMethodField()
    def get_numbering(self, instance):
        # Expose the human-readable numbering code instead of the FK id.
        return instance.numbering.numbering
    class Meta:
        model = Donor
        fields = ['numbering', 'gender', 'age', 'donor_reference', 'id']
        read_only_fields = ['id', 'numbering']
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,231
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/migrations/0001_initial.py
|
# Generated by Django 2.0.3 on 2019-04-22 07:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='AnatomicalPart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(blank=True, choices=[('Abdomen', 'Abdomen'), ('Leg', 'Leg'), ('Breast', 'Breast'), ('Scalp', 'Scalp'), ('Other', 'Other')], max_length=150, null=True, verbose_name='Anatomical Part')),
],
),
migrations.CreateModel(
name='Biopsy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('surgery_date', models.DateField(blank=True, null=True, verbose_name='Surgery Date')),
('skin_thickness', models.FloatField(blank=True, null=True, verbose_name='Skin Thickness')),
('skin_area', models.FloatField(blank=True, null=True, verbose_name='Skin Area')),
('anatomical_part', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='biopsies', to='api.AnatomicalPart', verbose_name='Anatomical Part')),
],
options={
'verbose_name_plural': 'Biopsies',
'ordering': ['-surgery_date'],
},
),
migrations.CreateModel(
name='CellCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, choices=[('Keratinocytes', 'Keratinocytes'), ('Melanocytes', 'Melanocytes'), ('Fibroblast', 'Fibroblast')], max_length=100, null=True, verbose_name='Type')),
],
options={
'verbose_name_plural': 'Cell categories',
},
),
migrations.CreateModel(
name='CellCounting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='Media Files', verbose_name='Cell Counting Image')),
('cells_counted', models.IntegerField(verbose_name='Cells Counted')),
],
),
migrations.CreateModel(
name='CellDistribution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(blank=True, choices=[('Equally dispersed', 'Equally dispersed'), ('Colony-like growth', 'Colony-like growth'), ('Other', 'Other')], max_length=100, null=True, verbose_name='type')),
],
),
migrations.CreateModel(
name='CellType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('temperature', models.FloatField(blank=True, null=True, verbose_name='Temperature')),
('digestion_time', models.DateTimeField(blank=True, null=True, verbose_name='Digestion Time')),
('inhibition', models.IntegerField(blank=True, null=True, verbose_name='Inhibition')),
('filter_size', models.IntegerField(blank=True, null=True, verbose_name='Filter Size')),
('filter_rinsing', models.IntegerField(blank=True, null=True, verbose_name='Filter Rinsing')),
('centrifugation_speed', models.IntegerField(blank=True, null=True, verbose_name='Centrifugation Speed')),
('centrifugation_time', models.DateTimeField(blank=True, null=True, verbose_name='Centrifugation Time')),
('resuspended_volume', models.IntegerField(blank=True, null=True, verbose_name='Resuspended Volume')),
('concentration', models.FloatField(blank=True, null=True, verbose_name='Concentration')),
('viability', models.FloatField(blank=True, null=True, verbose_name='Viability')),
('diameter', models.FloatField(blank=True, null=True, verbose_name='Diameter')),
],
),
migrations.CreateModel(
name='Coating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=150, null=True, verbose_name='Name')),
('lot_number', models.CharField(blank=True, max_length=150, null=True, verbose_name='Coating Lot Number')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True, null=True, verbose_name='Comment')),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='ContainerType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, choices=[('Flask', 'Flask'), ('Vial', 'Vial'), ('Well plate', 'Well plate'), ('Quantum', 'Quantum'), ('DenovoSkin', 'DenovoSkin'), ('Other', 'Other')], max_length=100, null=True, verbose_name='type')),
],
),
migrations.CreateModel(
name='CutMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('method', models.CharField(blank=True, max_length=150, null=True, verbose_name='Cut Method')),
],
),
migrations.CreateModel(
name='Donor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gender', models.CharField(choices=[('M', 'M'), ('F', 'F')], max_length=150, verbose_name='Donor')),
('age', models.IntegerField(verbose_name='Age')),
('donor_reference', models.CharField(blank=True, max_length=150, null=True, verbose_name='Donor Reference')),
],
),
migrations.CreateModel(
name='Enzyme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=150, null=True, verbose_name='Name')),
('lot_number', models.CharField(blank=True, max_length=150, null=True, verbose_name='Lot Number')),
],
),
migrations.CreateModel(
name='HistoricalAnatomicalPart',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('type', models.CharField(blank=True, choices=[('Abdomen', 'Abdomen'), ('Leg', 'Leg'), ('Breast', 'Breast'), ('Scalp', 'Scalp'), ('Other', 'Other')], max_length=150, null=True, verbose_name='Anatomical Part')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical anatomical part',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalBiopsy',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('surgery_date', models.DateField(blank=True, null=True, verbose_name='Surgery Date')),
('skin_thickness', models.FloatField(blank=True, null=True, verbose_name='Skin Thickness')),
('skin_area', models.FloatField(blank=True, null=True, verbose_name='Skin Area')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('anatomical_part', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.AnatomicalPart', verbose_name='Anatomical Part')),
('donor', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Donor', verbose_name='Donor')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical biopsy',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCellCategory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('name', models.CharField(blank=True, choices=[('Keratinocytes', 'Keratinocytes'), ('Melanocytes', 'Melanocytes'), ('Fibroblast', 'Fibroblast')], max_length=100, null=True, verbose_name='Type')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical cell category',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCellCounting',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('image', models.TextField(blank=True, max_length=100, null=True, verbose_name='Cell Counting Image')),
('cells_counted', models.IntegerField(verbose_name='Cells Counted')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical cell counting',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCellDistribution',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('type', models.CharField(blank=True, choices=[('Equally dispersed', 'Equally dispersed'), ('Colony-like growth', 'Colony-like growth'), ('Other', 'Other')], max_length=100, null=True, verbose_name='type')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical cell distribution',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCellType',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('temperature', models.FloatField(blank=True, null=True, verbose_name='Temperature')),
('digestion_time', models.DateTimeField(blank=True, null=True, verbose_name='Digestion Time')),
('inhibition', models.IntegerField(blank=True, null=True, verbose_name='Inhibition')),
('filter_size', models.IntegerField(blank=True, null=True, verbose_name='Filter Size')),
('filter_rinsing', models.IntegerField(blank=True, null=True, verbose_name='Filter Rinsing')),
('centrifugation_speed', models.IntegerField(blank=True, null=True, verbose_name='Centrifugation Speed')),
('centrifugation_time', models.DateTimeField(blank=True, null=True, verbose_name='Centrifugation Time')),
('resuspended_volume', models.IntegerField(blank=True, null=True, verbose_name='Resuspended Volume')),
('concentration', models.FloatField(blank=True, null=True, verbose_name='Concentration')),
('viability', models.FloatField(blank=True, null=True, verbose_name='Viability')),
('diameter', models.FloatField(blank=True, null=True, verbose_name='Diameter')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('enzyme', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Enzyme', verbose_name='Enzyme')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical cell type',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCoating',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=150, null=True, verbose_name='Name')),
('lot_number', models.CharField(blank=True, max_length=150, null=True, verbose_name='Coating Lot Number')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical coating',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalComment',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('text', models.TextField(blank=True, null=True, verbose_name='Comment')),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('content_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='contenttypes.ContentType')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical comment',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalContainerType',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('name', models.CharField(blank=True, choices=[('Flask', 'Flask'), ('Vial', 'Vial'), ('Well plate', 'Well plate'), ('Quantum', 'Quantum'), ('DenovoSkin', 'DenovoSkin'), ('Other', 'Other')], max_length=100, null=True, verbose_name='type')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical container type',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalCutMethod',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('method', models.CharField(blank=True, max_length=150, null=True, verbose_name='Cut Method')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical cut method',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalDonor',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('gender', models.CharField(choices=[('M', 'M'), ('F', 'F')], max_length=150, verbose_name='Donor')),
('age', models.IntegerField(verbose_name='Age')),
('donor_reference', models.CharField(blank=True, max_length=150, null=True, verbose_name='Donor Reference')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical donor',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalEnzyme',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=150, null=True, verbose_name='Name')),
('lot_number', models.CharField(blank=True, max_length=150, null=True, verbose_name='Lot Number')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical enzyme',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalLayerType',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('type', models.CharField(blank=True, choices=[('Dermis', 'Dermis'), ('Epidermis', 'Epidermis')], max_length=100, null=True, verbose_name='Type')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical layer type',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalMorphology',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('type', models.TextField(blank=True, choices=[('Bipolar', 'Bipolar'), ('Spindle-like', 'Spindle-like'), ('Star-like', 'Star-like'), ('Dendrite-like', 'Dendrite-like'), ('Other', 'Other')], max_length=255, null=True, verbose_name='Morphology')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical morphology',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalNumbering',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('numbering', models.CharField(db_index=True, max_length=6, verbose_name='Numbering')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical numbering',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalPassage',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('morphology_image', models.TextField(blank=True, max_length=100, null=True, verbose_name='Morphology Image')),
('passaging_date', models.DateField(blank=True, null=True, verbose_name='Passaging Date')),
('digestion_time', models.DateTimeField(blank=True, null=True, verbose_name='Digestion Time')),
('digestion_temperature', models.FloatField(blank=True, null=True, verbose_name='Digestion Temperature')),
('resuspended_volume', models.IntegerField(blank=True, null=True, verbose_name='Resuspended Volume')),
('concentration', models.FloatField(blank=True, null=True, verbose_name='Concentration')),
('viability', models.FloatField(blank=True, null=True, verbose_name='Viability')),
('diameter', models.FloatField(blank=True, null=True, verbose_name='Diameter')),
('container_area', models.FloatField(blank=True, null=True, verbose_name='Container Area')),
('coating_concentration_per_cm2', models.FloatField(blank=True, null=True, verbose_name='Coating Concentration per Cm2')),
('seeding_density', models.IntegerField(blank=True, null=True, verbose_name='Seeding Density')),
('magnification', models.IntegerField(blank=True, null=True, verbose_name='Magnification')),
('not_continued', models.BooleanField(default=False, verbose_name='Not Continued')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('cell_distribution', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.CellDistribution', verbose_name='Cell Distribution')),
('cell_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.CellType', verbose_name='Cell Type')),
('coating', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Coating', verbose_name='Coating')),
('container_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.ContainerType', verbose_name='Container Type')),
('enzyme', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Enzyme', verbose_name='Enzyme')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical passage',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalPigmentation',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('type', models.TextField(blank=True, choices=[('Pigmented', 'Pigmented'), ('Non-pigmented', 'Non-pigmented')], max_length=255, null=True, verbose_name='Pigmentation')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical pigmentation',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSkinLayer',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('separation_time', models.DateTimeField(blank=True, null=True, verbose_name='Separation Time')),
('temperature', models.FloatField(blank=True, null=True, verbose_name='Temperature')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('enzyme', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Enzyme', verbose_name='Enzyme')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical skin layer',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSubBiopsy',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('sub_biopsy_area', models.IntegerField(blank=True, null=True, verbose_name='Sub Biopsy Area')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('biopsy', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Biopsy', verbose_name='Biopsy')),
('cut_method', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.CutMethod', verbose_name='Cut Method')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical sub biopsy',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='LayerType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(blank=True, choices=[('Dermis', 'Dermis'), ('Epidermis', 'Epidermis')], max_length=100, null=True, verbose_name='Type')),
],
),
migrations.CreateModel(
name='Morphology',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.TextField(blank=True, choices=[('Bipolar', 'Bipolar'), ('Spindle-like', 'Spindle-like'), ('Star-like', 'Star-like'), ('Dendrite-like', 'Dendrite-like'), ('Other', 'Other')], max_length=255, null=True, verbose_name='Morphology')),
],
options={
'verbose_name_plural': 'Morphologies',
},
),
migrations.CreateModel(
name='Numbering',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('numbering', models.CharField(max_length=6, unique=True, verbose_name='Numbering')),
],
),
migrations.CreateModel(
name='Passage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('morphology_image', models.ImageField(blank=True, null=True, upload_to='Media Files', verbose_name='Morphology Image')),
('passaging_date', models.DateField(blank=True, null=True, verbose_name='Passaging Date')),
('digestion_time', models.DateTimeField(blank=True, null=True, verbose_name='Digestion Time')),
('digestion_temperature', models.FloatField(blank=True, null=True, verbose_name='Digestion Temperature')),
('resuspended_volume', models.IntegerField(blank=True, null=True, verbose_name='Resuspended Volume')),
('concentration', models.FloatField(blank=True, null=True, verbose_name='Concentration')),
('viability', models.FloatField(blank=True, null=True, verbose_name='Viability')),
('diameter', models.FloatField(blank=True, null=True, verbose_name='Diameter')),
('container_area', models.FloatField(blank=True, null=True, verbose_name='Container Area')),
('coating_concentration_per_cm2', models.FloatField(blank=True, null=True, verbose_name='Coating Concentration per Cm2')),
('seeding_density', models.IntegerField(blank=True, null=True, verbose_name='Seeding Density')),
('magnification', models.IntegerField(blank=True, null=True, verbose_name='Magnification')),
('not_continued', models.BooleanField(default=False, verbose_name='Not Continued')),
('cell_distribution', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.CellDistribution', verbose_name='Cell Distribution')),
('cell_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.CellType', verbose_name='Cell Type')),
('coating', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.Coating', verbose_name='Coating')),
('container_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.ContainerType', verbose_name='Container Type')),
('enzyme', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.Enzyme', verbose_name='Enzyme')),
('morphology', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.Morphology', verbose_name='Morphology')),
('numbering', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.Numbering', verbose_name='Numbering')),
('passage', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Related_Passages', to='api.Passage')),
],
),
migrations.CreateModel(
name='Pigmentation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.TextField(blank=True, choices=[('Pigmented', 'Pigmented'), ('Non-pigmented', 'Non-pigmented')], max_length=255, null=True, verbose_name='Pigmentation')),
],
),
migrations.CreateModel(
name='SkinLayer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('separation_time', models.DateTimeField(blank=True, null=True, verbose_name='Separation Time')),
('temperature', models.FloatField(blank=True, null=True, verbose_name='Temperature')),
('enzyme', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='skin_layers', to='api.Enzyme', verbose_name='Enzyme')),
('layer_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='skin_layers', to='api.LayerType', verbose_name='Skin Layer Type')),
('numbering', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='skinlayers', to='api.Numbering', verbose_name='Numbering')),
],
),
migrations.CreateModel(
name='SubBiopsy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_biopsy_area', models.IntegerField(blank=True, null=True, verbose_name='Sub Biopsy Area')),
('biopsy', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_biopsies', to='api.Biopsy', verbose_name='Biopsy')),
('cut_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sub_biopsies', to='api.CutMethod', verbose_name='Cut Method')),
('numbering', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subbiopies', to='api.Numbering', verbose_name='Numbering')),
],
options={
'verbose_name_plural': 'Sub-biopsies',
},
),
migrations.AddField(
model_name='skinlayer',
name='sub_biopsy',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='skin_layers', to='api.SubBiopsy', verbose_name='Sub Biopsy'),
),
migrations.AddField(
model_name='passage',
name='pigmentation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passages', to='api.Pigmentation', verbose_name='Pigmentation'),
),
migrations.AddField(
model_name='historicalsubbiopsy',
name='numbering',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='historicalskinlayer',
name='layer_type',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.LayerType', verbose_name='Skin Layer Type'),
),
migrations.AddField(
model_name='historicalskinlayer',
name='numbering',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='historicalskinlayer',
name='sub_biopsy',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.SubBiopsy', verbose_name='Sub Biopsy'),
),
migrations.AddField(
model_name='historicalpassage',
name='morphology',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Morphology', verbose_name='Morphology'),
),
migrations.AddField(
model_name='historicalpassage',
name='numbering',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='historicalpassage',
name='passage',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Passage'),
),
migrations.AddField(
model_name='historicalpassage',
name='pigmentation',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Pigmentation', verbose_name='Pigmentation'),
),
migrations.AddField(
model_name='historicaldonor',
name='numbering',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='historicalcelltype',
name='numbering',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='historicalcelltype',
name='skin_layer',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.SkinLayer', verbose_name='Skin Layer'),
),
migrations.AddField(
model_name='historicalcelltype',
name='type',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.CellCategory', verbose_name='Cell Category'),
),
migrations.AddField(
model_name='historicalcellcounting',
name='passage',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Passage', verbose_name='Passage'),
),
migrations.AddField(
model_name='historicalbiopsy',
name='numbering',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='donor',
name='numbering',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='donors', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='celltype',
name='enzyme',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='cell_types', to='api.Enzyme', verbose_name='Enzyme'),
),
migrations.AddField(
model_name='celltype',
name='numbering',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='celltypes', to='api.Numbering', verbose_name='Numbering'),
),
migrations.AddField(
model_name='celltype',
name='skin_layer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cell_types', to='api.SkinLayer', verbose_name='Skin Layer'),
),
migrations.AddField(
model_name='celltype',
name='type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='cell_types', to='api.CellCategory', verbose_name='Cell Category'),
),
migrations.AddField(
model_name='cellcounting',
name='passage',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='cell_counting', to='api.Passage', verbose_name='Passage'),
),
migrations.AddField(
model_name='biopsy',
name='donor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='biopsies', to='api.Donor', verbose_name='Donor'),
),
migrations.AddField(
model_name='biopsy',
name='numbering',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='biopsies', to='api.Numbering', verbose_name='Numbering'),
),
]
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,232
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/donor_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
from django.contrib.contenttypes.fields import GenericRelation
from project.api.models.comment_model import Comment
from project.api.models.numbering_model import Numbering
class Donor(models.Model):
    """A tissue donor, identified by an auto-generated Numbering record."""

    # Gender choice constants (stored value == display value).
    MALE = "M"
    FEMALE = "F"

    numbering = models.ForeignKey(
        to=Numbering,
        verbose_name='Numbering',
        related_name='donors',
        on_delete=models.CASCADE,
    )
    gender = models.CharField(
        verbose_name='Donor',
        max_length=150,
        choices=((MALE, MALE), (FEMALE, FEMALE)),
        blank=False,
        null=False,
    )
    age = models.IntegerField(
        verbose_name='Age',
        blank=False,
        null=False,
    )
    donor_reference = models.CharField(
        verbose_name='Donor Reference',
        max_length=150,
        blank=True,
        null=True,
    )
    # Free-form comments attached via the generic Comment model.
    comments = GenericRelation(Comment)
    # Full change history via django-simple-history.
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def save(self, **kwargs):
        # A brand-new record, or an existing one that lost its numbering,
        # gets a freshly created Numbering before being persisted.
        # (Equivalent to the original `pk and not numbering or not pk`
        # by De Morgan; `self.numbering` is only touched when pk is set.)
        if not self.pk or not self.numbering:
            self.numbering = Numbering.objects.create()
        super().save(**kwargs)

    def __str__(self):
        return self.numbering.numbering
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,233
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/coating_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class Coating(models.Model):
    """A coating product used in cell culture, tracked by name and lot number."""

    name = models.CharField(
        max_length=150,
        verbose_name='Name',
        blank=True,
        null=True,
    )
    lot_number = models.CharField(
        max_length=150,
        verbose_name='Coating Lot Number',
        blank=True,
        null=True,
    )
    # Full change history via django-simple-history.
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        return self.name
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,234
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/cell_counting_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
from project.api.models.passage_model import Passage
class CellCounting(models.Model):
    """A cell-count measurement optionally linked to a passage, with an optional image."""

    passage = models.ForeignKey(
        to=Passage,
        verbose_name='Passage',
        related_name='cell_counting',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    image = models.ImageField(
        upload_to='Media Files',
        verbose_name='Cell Counting Image',
        blank=True,
        null=True,
    )
    cells_counted = models.IntegerField(
        verbose_name='Cells Counted',
    )
    # Full change history via django-simple-history.
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        return str(self.cells_counted)
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,235
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/models/cell_distribution_model.py
|
from django.db import models
from simple_history.models import HistoricalRecords
class CellDistribution(models.Model):
    """How cells are distributed in culture (dispersed, colony-like, or other)."""

    EQUALLY_DISPERSED = "Equally dispersed"
    COLONY_LIKE_GROWTH = "Colony-like growth"
    OTHER = "Other"

    type = models.CharField(
        verbose_name='type',
        max_length=100,
        # Stored value and display label are identical for every choice.
        choices=tuple(
            (label, label)
            for label in (EQUALLY_DISPERSED, COLONY_LIKE_GROWTH, OTHER)
        ),
        blank=True,
        null=True,
    )
    # Full change history via django-simple-history.
    history = HistoricalRecords()

    class Meta:
        app_label = 'api'

    def __str__(self):
        return self.type
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,236
|
etsyketsy/finalProject
|
refs/heads/master
|
/backend/project/project/api/resources/biopsy_resource.py
|
from import_export import resources
from project.api.models.biopsy_model import Biopsy
class BiopsyResource(resources.ModelResource):
    """django-import-export resource exposing all Biopsy fields for import/export."""
    class Meta:
        model = Biopsy
|
{"/backend/project/project/api/models/numbering_model.py": ["/backend/project/project/api/helpers.py"]}
|
34,237
|
esnet/flowd
|
refs/heads/main
|
/scitag/stun/services.py
|
import socket
import requests
import ipaddress
import logging
import scitag.settings
import scitag.stun
log = logging.getLogger('scitag')
def get_ip4():
    """Return the host's primary IPv4 address.

    Opens a UDP socket "towards" IP4_DISCOVERY — no packet is actually sent
    for SOCK_DGRAM connect — and reads the source address the kernel chose.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(scitag.settings.IP4_DISCOVERY)
        log.debug(' IPv4:{}'.format(s.getsockname()[0]))
        return s.getsockname()[0]
    finally:
        # The original leaked the socket; always close it.
        s.close()
def get_ip6():
    """Return the host's primary IPv6 address (same trick as get_ip4)."""
    s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    try:
        s.connect(scitag.settings.IP6_DISCOVERY)
        return s.getsockname()[0]
    finally:
        # The original leaked the socket; always close it.
        s.close()
def get_stun_ip():
    """Return the external IP reported by the first STUN server that answers.

    Tries each configured (host, port) pair in order; returns None when no
    server yields an external address.
    """
    for host, port in scitag.settings.STUN_SERVERS:
        nat_type, ext_ip, ext_port = scitag.stun.get_ip_info(stun_host=host,
                                                             stun_port=port)
        log.debug(' STUN {}/{}: {} {} {}'.format(host, port, nat_type,
                                                 ext_ip, ext_port))
        if ext_ip:
            return ext_ip
    return None
def get_my_ip():
    """Return the external IP as reported by api.my-ip.io, or None.

    A bounded timeout prevents service start-up from hanging indefinitely
    when the external API is unreachable (the original request had none),
    and network errors degrade to None instead of propagating — matching
    this function's documented "or None" contract.
    """
    try:
        ip = requests.get('https://api.my-ip.io/ip', timeout=10)
    except requests.RequestException:
        return None
    if ip.status_code == 200 and ip.text:
        log.debug(' MY-IP: {}'.format(ip.text))
        return ip.text
    else:
        return None
def get_ext_ip():
    """Return (local_ip4, external_ip), or (None, None) when discovery fails.

    Behind NAT (private local address) the external address comes from STUN;
    otherwise from the my-ip web API.
    """
    # todo: ip6 + error handling
    ip4_local = get_ip4()
    if ipaddress.ip_address(ip4_local).is_private:
        ip_ext = get_stun_ip()
    else:
        ip_ext = get_my_ip()
    if ip_ext:
        return ip4_local, ip_ext
    # Bug fix: the original fell off the end (implicit bare None) when the
    # host was behind NAT and STUN failed, breaking `a, b = get_ext_ip()`
    # in callers. Always return a two-tuple.
    return None, None
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,238
|
esnet/flowd
|
refs/heads/main
|
/scitag/settings.py
|
# flowd runtime defaults; values in the file at CONFIG_PATH (loaded by
# scitag.config) may override behaviour at run time.
CONFIG_PATH = '/etc/flowd/flowd.cfg'
PID_FILE = '/var/run/flowd.pid'
WORK_DIR = '/var/lib/flowd'
DEFAULT_BACKEND = 'udp_firefly'
# Named pipe used by the np_api plugin to receive flow identifiers.
NP_API_FILE = '/var/run/flowd'
UDP_FIREFLY_PORT = 5000
# Targets used only so the kernel picks a source address for a UDP
# "connect" — no packets are sent to them.
IP4_DISCOVERY = ('10.255.255.255', 1)
IP6_DISCOVERY = ('fc00::', 1)
# (host, port) pairs tried in order for external-IP discovery behind NAT.
STUN_SERVERS = [('stun.l.google.com', 19305), ('stun.services.mozilla.org', 3478)]
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,239
|
esnet/flowd
|
refs/heads/main
|
/scitag/service.py
|
import datetime
import logging
import fcntl
import importlib
import os
import pkgutil
import sys
import multiprocessing as mp
import queue
import signal
import scitag
import scitag.settings
import scitag.plugins
import scitag.backends
import scitag.stun.services
from scitag.config import config
log = logging.getLogger('scitag')
def unlock_file(f):
    """Release any advisory lock held on *f*; no-op for read-only handles."""
    if not f.writable():
        return
    fcntl.lockf(f, fcntl.LOCK_UN)
class FlowService(object):
    """Main flowd service: wires one plugin (flow-id producer) to one
    backend (flow-id consumer) over a multiprocessing queue."""

    def __init__(self, args, pid_file):
        self.pid_file = pid_file
        self.backend = config.get('BACKEND')
        self.backend_mod = None
        self.backend_proc = None
        self.plugin = config.get('PLUGIN')
        self.plugin_mod = None
        self.plugin_proc = None
        # --debug and --fg both enable debug mode (installs signal handlers
        # in main()).
        if args.debug or args.fg:
            self.debug = True
        else:
            self.debug = False
        # Queue carrying FlowID tuples from the plugin to the backend.
        self.flow_id_queue = mp.Queue()
        self.term_event = mp.Event()
        # Startup banner in the log.
        header = list()
        header.append("flowd v.{}: {}".format(scitag.__version__, datetime.datetime.now()))
        header.append("config: {}".format(scitag.settings.CONFIG_PATH))
        l_max = len(max(header, key=lambda x: len(x)))
        log.info('*' * (l_max + 4))
        for line in header:
            log.info('* {0:<{1}s} *'.format(line, l_max))
        log.info('*' * (l_max + 4))
        if 'IP_DISCOVERY_ENABLED' in config.keys() and config['IP_DISCOVERY_ENABLED']:
            try:
                # NOTE(review): get_ext_ip() returns (local, external) but is
                # unpacked here as (eip, iip), so the log line prints
                # external/local — confirm the intended order.
                eip, iip = scitag.stun.services.get_ext_ip()
                log.info('network info: {}/{}'.format(iip, eip))
            except Exception as e:
                log.exception(e)
                sys.exit(1)

    def init_plugins(self):
        """Import and initialise the configured plugin and backend modules.

        Exits the process on import/initialisation failure. Returns False
        when a configured name is not found; NOTE(review): the success path
        returns None, so callers must check `is False`, not truthiness.
        """
        log.debug(" Loading plugin {}".format(self.plugin))
        try:
            default_pkg = os.path.dirname(scitag.plugins.__file__)
            if self.plugin in [name for _, name, _ in pkgutil.iter_modules([default_pkg])]:
                self.plugin_mod = importlib.import_module("scitag.plugins.{}".format(self.plugin))
            else:
                log.error("Configured plugin not found")
                return False
        except ImportError as e:
            log.error("Exception caught {} while loading plugin {}".format(e, self.plugin))
            sys.exit(1)
        try:
            log.debug(" Calling plugin init: {}".format(self.plugin))
            self.plugin_mod.init()
        except Exception as e:
            log.error("Exception was thrown while initialing plugin {} ({})".format(self.plugin, e))
            sys.exit(1)
        # NOTE(review): `backend` gets a default here but only self.backend
        # (possibly None) is used below — the DEFAULT_BACKEND fallback is
        # effectively only applied to the log line.
        backend = config.get('BACKEND', scitag.settings.DEFAULT_BACKEND)
        log.debug(" Loading backend {}".format(backend))
        try:
            default_pkg = os.path.dirname(scitag.backends.__file__)
            if self.backend in [name for _, name, _ in pkgutil.iter_modules([default_pkg])]:
                self.backend_mod = importlib.import_module("scitag.backends.{}".format(self.backend))
            else:
                log.error("Configured backend not found")
                return False
        except ImportError as e:
            log.error("Exception caught {} while loading backend {}".format(e, self.backend))
            sys.exit(1)

    def cleanup(self, sig, frame):
        """Signal handler: drain the queue, stop children, release pid lock."""
        log.debug('caught signal {}'.format(sig))
        self.term_event.set()
        # Drain the queue so its feeder thread can exit cleanly.
        while True:
            try:
                self.flow_id_queue.get(block=False)
            except queue.Empty:
                break
            except ValueError:
                break
        self.flow_id_queue.close()
        self.flow_id_queue.join_thread()
        if self.plugin_proc and self.plugin_proc.is_alive():
            self.plugin_proc.join(5)
        if self.backend_proc and self.backend_proc.is_alive():
            self.backend_proc.join(5)
        # wait -> if self.plugin_proc.is_alive()
        #    self.plugin_proc.terminate()
        self.plugin_proc.close()
        self.backend_proc.close()
        unlock_file(self.pid_file)
        log.debug('cleanup done ... ')

    @staticmethod
    def reload_config():
        # Re-exec the config module so `config` reflects the file on disk.
        importlib.reload(scitag.config)

    def main(self):
        """Run plugin and backend as daemon processes; block until the
        plugin process exits."""
        # 1. create queue and process pool for backend
        # 2. create process or pool for plugin
        # 3. watch plugin and backend pools until they finish
        self.backend_proc = mp.Process(target=self.backend_mod.run,
                                       args=(self.flow_id_queue, self.term_event),
                                       daemon=True)
        self.plugin_proc = mp.Process(target=self.plugin_mod.run,
                                      args=(self.flow_id_queue, self.term_event),
                                      daemon=True)
        try:
            self.backend_proc.start()
            self.plugin_proc.start()
            if self.debug:
                signal.signal(signal.SIGINT, self.cleanup)
                signal.signal(signal.SIGTERM, self.cleanup)
            self.plugin_proc.join()
        except Exception as e:
            log.exception('Exception caught in main')
        log.debug('flowd terminated')
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,240
|
esnet/flowd
|
refs/heads/main
|
/scitag/__init__.py
|
import collections
# Package metadata, re-exported via the dunder names below and consumed by
# setup.py.
AUTHOR = "Marian Babik <Marian.Babik@cern.ch>, "
AUTHOR_EMAIL = "<net-wg-dev@cern.ch>"
COPYRIGHT = "Copyright (C) 2021"
VERSION = "0.0.1"
DATE = "13 Jul 2021"
__author__ = AUTHOR
__version__ = VERSION
__date__ = DATE


class FlowConfigException(Exception):
    """Raised by plugins/backends when the flowd configuration is invalid."""
    pass


# Flow Identifier
# flow-start
#    inputs: (protocol, src, src_port, dst, dst_port, experiment, activity)
# flow-end
#    inputs: (protocol, src, src_port, dst, dst_port, experiment, activity)
# flow-update (optional)
#    inputs: (protocol, src, src_port, dst, dst_port, experiment, activity)
# `state` carries the start/end/update tag; exp/act identify the experiment
# and activity the flow is marked with (act may be None — see netstat plugin).
FlowID = collections.namedtuple('FlowID', ['state', 'prot', 'src', 'src_port', 'dst', 'dst_port', 'exp', 'act'])
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,241
|
esnet/flowd
|
refs/heads/main
|
/scitag/backends/udp_firefly.py
|
import logging
import queue
import socket
import json
from scitag.config import config
import scitag.settings
log = logging.getLogger('scitag')
def run(flow_queue, term_event):
    """Backend worker: drain FlowIDs from *flow_queue* and send each one as
    a JSON UDP datagram ("firefly") until *term_event* is set.

    Destination host is the flow's own dst unless UDP_FIREFLY_DST overrides
    it in the configuration; port comes from settings.
    """
    # Bug fix: the original created (and leaked) a brand-new socket for
    # every single message. Create it once and close it on shutdown.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        while not term_event.is_set():
            try:
                flow_id = flow_queue.get(block=True, timeout=0.5)
            except queue.Empty:
                continue
            log.debug(flow_id)
            dst = flow_id.dst
            if 'UDP_FIREFLY_DST' in config.keys():
                dst = config['UDP_FIREFLY_DST']
            udp_flow_id = json.dumps(flow_id._asdict())
            sock.sendto(udp_flow_id.encode('utf-8'), (dst, scitag.settings.UDP_FIREFLY_PORT))
    finally:
        sock.close()
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,242
|
esnet/flowd
|
refs/heads/main
|
/scitag/plugins/np_api.py
|
import logging
import os
import sys
import select
import stat
import scitag
import scitag.settings
from scitag.config import config
log = logging.getLogger('scitag')
def init():
    """Create the named-pipe command channel if it does not already exist.

    Exits the process when the FIFO cannot be created.
    """
    log.debug('np_api init')
    # Already present and actually a FIFO: nothing to do.
    if os.path.exists(scitag.settings.NP_API_FILE) and stat.S_ISFIFO(os.stat(scitag.settings.NP_API_FILE).st_mode):
        return
    try:
        os.mkfifo(scitag.settings.NP_API_FILE, mode=0o666)
    except IOError as e:
        # Bug fix: the caught exception was discarded; include it so the
        # failure (permissions, missing directory, ...) is diagnosable.
        log.error('Unable to create command pipe {}: {}'.format(scitag.settings.NP_API_FILE, e))
        sys.exit(1)
def run(flow_queue, term_event):
    """Plugin worker: read whitespace-separated flow identifiers from the
    named pipe and push them onto *flow_queue* until *term_event* is set.

    Each input line must have exactly 8 fields:
    state prot src src_port dst dst_port exp act
    """
    # O_RDWR keeps the FIFO readable even with no writer attached;
    # O_NONBLOCK so the poll() timeout, not read(), governs responsiveness
    # to term_event.
    np_api_fd = os.open(scitag.settings.NP_API_FILE, os.O_RDWR | os.O_NONBLOCK)
    sp = select.poll()
    sp.register(np_api_fd, select.POLLIN | select.POLLPRI)
    while not term_event.is_set():
        try:
            tr = sp.poll(3)
            if not tr:
                continue
            np_content = os.read(np_api_fd, 65535)
        except IOError as e:
            log.exception('Failed to read command pipe {}'.format(scitag.settings.NP_API_FILE))
            term_event.wait(3)
            continue
        log.debug(np_content)
        flow_ids = np_content.decode('utf-8').splitlines()
        log.debug(flow_ids)
        for f_id in flow_ids:
            entry = f_id.strip().split(' ')
            if len(entry) != 8:
                log.error('Unable to parse flow identifier received {}'.format(entry))
                continue
            # todo: validate entries
            flow_id = scitag.FlowID(entry[0].strip(), entry[1].strip(), entry[2].strip(), entry[3].strip(),
                                    entry[4].strip(), entry[5].strip(), entry[6].strip(), entry[7].strip())
            log.debug(' --> {}'.format(flow_id))
            flow_queue.put(flow_id)
    # NOTE(review): the fd is never os.close()d before the pipe is removed.
    os.unlink(scitag.settings.NP_API_FILE)
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,243
|
esnet/flowd
|
refs/heads/main
|
/scitag/config.py
|
import os
import sys
import logging
from scitag import settings
__all__ = ['config']
log = logging.getLogger('scitag')

# Load the flowd configuration by executing the file at CONFIG_PATH into a
# plain dict; importing this module therefore has side effects and exits
# when the file is missing.
_bcf = settings.CONFIG_PATH
if not os.path.exists(_bcf):
    log.error("Config error {}".format(settings.CONFIG_PATH))
    sys.exit(1)
config = {}
if sys.version_info[0] == 2:
    # Python 2 only: execfile is a builtin there.
    execfile(_bcf, {}, config)
else:
    with open(_bcf) as f:
        code = compile(f.read(), os.path.basename(_bcf), 'exec')
        # NOTE(review): the config file runs as trusted Python code — it
        # must only be writable by root.
        exec(code, {}, config)
log.debug("loaded configuration: %s" % config)
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,244
|
esnet/flowd
|
refs/heads/main
|
/setup.py
|
from setuptools import setup
import scitag
# Distribution metadata; author/version are sourced from the scitag package
# itself so there is a single point of truth.
NAME = 'python-flowd'
VERSION = scitag.VERSION
DESCRIPTION = "Flow and Packet Marking Daemon"
LONG_DESCRIPTION = """
Flow and Packet Marking Service (www.scitag.org)
"""
AUTHOR = scitag.AUTHOR
AUTHOR_EMAIL = scitag.AUTHOR_EMAIL
LICENSE = "ASL 2.0"
PLATFORMS = "Any"
URL = "https://github.com/sci-tag/flowd"
CLASSIFIERS = [
    "Development Status :: 5 - Production/Stable",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: Unix",
    "Programming Language :: Python",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.8",
    "Topic :: Software Development :: Libraries :: Python Modules"
]
setup(name=NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      platforms=PLATFORMS,
      url=URL,
      classifiers=CLASSIFIERS,
      keywords='operations python network flow packet marking',
      packages=['scitag'],
      install_requires=[],
      # The service launcher script is installed system-wide.
      data_files=[
          ('/usr/sbin', ['sbin/flowd']),
      ]
      )
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,245
|
esnet/flowd
|
refs/heads/main
|
/scitag/plugins/netstat.py
|
import logging
import time
import psutil
import ipaddress
import scitag
from scitag.config import config
log = logging.getLogger('scitag')
def init():
    """Validate netstat plugin configuration.

    Raises scitag.FlowConfigException when EXPERIMENT is missing or any
    entry of NETSTAT_INTERNAL_NETWORKS is not a parseable network.
    """
    log.debug('init')
    if 'EXPERIMENT' not in config.keys():
        log.error('Experiment is required for netstat partial tagging')
        raise scitag.FlowConfigException('Experiment is required for netstat partial tagging')
    if 'NETSTAT_INTERNAL_NETWORKS' in config.keys():
        for net in config['NETSTAT_INTERNAL_NETWORKS']:
            try:
                # Parse only to validate; run() re-parses into real objects.
                ipaddress.ip_network(net)
            except ValueError as e:
                log.error('Unable to parse network {}, configuration error'.format(net))
                raise scitag.FlowConfigException('Unable to parse network {}'.format(net))
def __int_ip(ip, int_networks):
    """Return True when *ip* is private or inside one of *int_networks*.

    ip is an ipaddress.IPv4Address/IPv6Address; int_networks an iterable of
    ip_network objects. Membership is only tested between matching IP
    versions, since `ip in net` raises TypeError on mixed-version operands.
    """
    if ip.is_private:
        return True
    for net in int_networks:
        # isinstance instead of exact `type(x) is` so subclasses of the
        # ipaddress types are handled correctly.
        if isinstance(ip, ipaddress.IPv4Address) and isinstance(net, ipaddress.IPv4Network) \
                and ip in net:
            return True
        if isinstance(ip, ipaddress.IPv6Address) and isinstance(net, ipaddress.IPv6Network) \
                and ip in net:
            return True
    return False
def run(flow_queue, term_event):
    """Plugin worker: snapshot TCP connections via psutil every 60 seconds
    and emit FlowID 'start'/'end' events for connections whose destination
    is not internal/private. Activity (act) is always None here."""
    netstat_prev = set()
    int_networks = set()
    if 'NETSTAT_INTERNAL_NETWORKS' in config.keys():
        for net in config['NETSTAT_INTERNAL_NETWORKS']:
            int_networks.add(ipaddress.ip_network(net))
    while not term_event.is_set():
        netstat = set()
        try:
            netc = psutil.net_connections(kind='tcp')
        except Exception as e:
            log.exception('Exception caught while calling psutil')
            time.sleep(60)
            continue
        for entry in netc:
            if entry.status == 'LISTEN':
                continue
            prot = 'tcp'
            saddr = entry.laddr.ip
            sport = entry.laddr.port
            # NOTE(review): psutil reports raddr as an empty tuple for some
            # connection states — .ip would then raise AttributeError;
            # confirm only established-like states reach here.
            daddr = entry.raddr.ip
            dport = entry.raddr.port
            try:
                ipaddress.ip_address(saddr)
                ipaddress.ip_address(daddr)
            except ValueError:
                log.debug('Failed to parse IPs: {}/{}'.format(saddr, daddr))
                continue
            netstat.add((prot, saddr, sport, daddr, dport))
        log.debug(netstat)
        if netstat_prev:
            # Diff against the previous snapshot: new -> 'start', gone -> 'end'.
            new_connections = netstat - netstat_prev
            closed_connections = netstat_prev - netstat
            for c in new_connections:
                daddr = ipaddress.ip_address(c[3])
                if __int_ip(daddr, int_networks):
                    continue
                f_id = scitag.FlowID('start', *c, config['EXPERIMENT'], None)
                log.debug(' --> {}'.format(f_id))
                flow_queue.put(f_id)
            for c in closed_connections:
                daddr = ipaddress.ip_address(c[3])
                if __int_ip(daddr, int_networks):
                    continue
                f_id = scitag.FlowID('end', *c, config['EXPERIMENT'], None)
                log.debug(' --> {}'.format(f_id))
                flow_queue.put(f_id)
        else:
            # First pass: every currently open connection counts as 'start'.
            for c in netstat:
                daddr = ipaddress.ip_address(c[3])
                if __int_ip(daddr, int_networks):
                    continue
                f_id = scitag.FlowID('start', *c, config['EXPERIMENT'], None)
                log.debug(' --> {}'.format(f_id))
                flow_queue.put(f_id)
        netstat_prev = netstat
        term_event.wait(60)
|
{"/scitag/stun/services.py": ["/scitag/settings.py"], "/scitag/service.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/stun/services.py", "/scitag/config.py"], "/scitag/backends/udp_firefly.py": ["/scitag/config.py", "/scitag/settings.py"], "/scitag/plugins/np_api.py": ["/scitag/__init__.py", "/scitag/settings.py", "/scitag/config.py"], "/scitag/config.py": ["/scitag/__init__.py"], "/setup.py": ["/scitag/__init__.py"], "/scitag/plugins/netstat.py": ["/scitag/__init__.py", "/scitag/config.py"]}
|
34,246
|
zYeoman/mindwiki
|
refs/heads/master
|
/convert.py
|
# encoding:utf-8
'''
Convert
Convert markdown to kmjson(md2km) and kmjson to markdown(km2md)
Author: zYeoman(zhuangyw.thu#gmail.com)
Create: 2016-07-31
Modify: 2017-02-07
Version: 0.1.3
'''
import re
import json
# Front-matter template emitted ahead of converted markdown; the fields are
# filled from the kityminder JSON's top-level theme/template/version keys.
HEADER = u'''---
theme:{theme}
template:{template}
version:{version}
---
'''
def km2md(km):
    """Convert a kityminder JSON string into markdown with a metadata header."""
    tree = json.loads(km)
    meta = HEADER.format(**tree)
    body = u'\n'.join(_md_build(tree['root'], 1))
    return meta + body
def _md_build(node, level):
    """Render one kityminder node (and, recursively, its children) as a
    list of markdown lines; *level* is the heading depth."""
    lines = []
    empty_line = ''
    data = node['data']
    lines.append('#' * level + ' ' + data.get('text', 'Empty'))
    lines.append(empty_line)
    image = data.get('image')
    link = data.get('hyperlink')
    note = data.get('note')
    if link:
        lines.append(u'[{hyperlinkTitle}]({hyperlink})'.format(**data))
        lines.append(empty_line)
    if image:
        # NOTE(review): this formats an empty template, so the image is
        # silently dropped from the output — md2km parses ![title](url), so
        # the image markdown syntax here looks lost; confirm and restore.
        lines.append(u''.format(**data))
        lines.append(empty_line)
    if note:
        lines.append(note)
        lines.append(empty_line)
    # NOTE(review): sort() mutates the input tree and reorders siblings
    # alphabetically by their text — round-tripping loses original order.
    children = node.get('children', [])
    children.sort(key=lambda x: x['data']['text'])
    for child in children:
        lines += _md_build(child, level + 1)
    return lines
def md2km(md):
    """Convert markdown text back into a kityminder JSON string.

    Headings (#, ##, ...) become tree nodes; image/link lines under a
    heading become node attributes; other lines accumulate into the node's
    note. The '---' front matter becomes top-level keys of the result.
    """
    km = {"root": {}}
    meta_flag = 0          # counts '---' fences; value 1 == inside metadata
    code_flag = False      # True while inside a ``` fenced code block
    node_level = 0
    parent_node = []       # last node seen at each heading depth
    current_node = None
    for line in md.split('\n'):
        if re.match(r'^-{3,}$', line) is not None:
            meta_flag += 1
            continue
        if meta_flag == 1:
            # Metadata block: each line is 'key:value'.
            # NOTE(review): a line without ':' makes re.match return None
            # and this raises AttributeError — confirm inputs always match.
            groups = re.match('(.+):(.+)', line).groups()
            km[groups[0]] = groups[1]
            continue
        if re.match(r'^```\w*', line) is not None:
            code_flag = not code_flag
            continue
        level, content = re.match('^(#+)? ?(.*)$', line).groups()
        if code_flag or level is None or len(level) > node_level + 2:
            # Not a usable heading: attach the line to the current node as
            # image, hyperlink, or free-form note text.
            if current_node is not None:
                img = re.match(r'!\[(.*)\]\((https?.*)\)', line)
                link = re.match(r'\[(.*)\]\((https?.*)\)', line)
                if img is not None:
                    current_node['data']['image'] = img.groups()[1]
                    current_node['data']['imageTitle'] = img.groups()[0]
                elif link is not None:
                    current_node['data']['hyperlink'] = link.groups()[1]
                    current_node['data']['hyperlinkTitle'] = link.groups()[0]
                else:
                    current_node['data']['note'] += line + '\n'
            continue
        # A heading starts a new node at depth len(level) - 1.
        current_node = {"data": {"text": content, "note": ""}}
        node_level = len(level) - 1
        if node_level > 0:
            # Attach to the most recent node one level up.
            if parent_node[node_level - 1].get('children') is not None:
                parent_node[node_level - 1]['children'].append(current_node)
            else:
                parent_node[node_level - 1]['children'] = [current_node]
        if len(parent_node) < node_level + 1:
            parent_node.append(current_node)
        else:
            parent_node[node_level] = current_node
    _clean(parent_node[0])
    km['root'] = parent_node[0]
    return json.dumps(km)
def _clean(node):
    """Recursively drop whitespace-only 'note' fields; for real notes, trim
    the leading and the trailing characters accumulated during parsing."""
    note = node['data']['note']
    if re.match(r'^\s*$', note):
        del node['data']['note']
    else:
        node['data']['note'] = note[1:-2]
    for child in node.get('children', []):
        _clean(child)
|
{"/app.py": ["/convert.py"]}
|
34,247
|
zYeoman/mindwiki
|
refs/heads/master
|
/app.py
|
# encoding:utf-8
'''
MindWiki
A webset of wiki use mindmap.
Author: zYeoman(zhuangyw.thu#gmail.com)
Create: 2016-07-16
Modify: 2017-02-07
Version: 0.1.3
'''
import os
from flask import (Flask, render_template, request)
from flask_script import Manager
import convert
APP = Flask(__name__)
# Defaults: directory holding the markdown notes, and the site title; both
# can be overridden by config.py loaded below.
APP.config['CONTENT_DIR'] = 'notes'
APP.config['TITLE'] = 'wiki'
# NOTE(review): hard-coded session secret — move into config.py for any
# non-development deployment.
APP.secret_key = 'sdklafj'
try:
    APP.config.from_pyfile('config.py')
except IOError:
    print("Startup Failure: You need to place a "
          "config.py in your root directory.")
@APP.route('/', methods=['GET', 'POST'])
def home():
    '''
    Root of mindwiki.

    Delegates to display() with the fixed page name 'home'.
    '''
    return display('home')
@APP.route('/<path:url>', methods=['GET', 'POST'])
def display(url):
    '''
    Page of mindwiki, auto generate.

    GET renders the page template; with ?nofmt it instead returns the raw
    kityminder JSON for the markdown file backing *url*. POST saves the
    submitted kityminder JSON back to disk as markdown.
    '''
    filename = url.strip('/').split('/')[-1]
    path = os.path.join(APP.config['CONTENT_DIR'],
                        url.strip('/') + '.md')
    if os.path.exists(path):
        with open(path, 'rb') as file_read:
            content = file_read.read().decode('utf-8')
    else:
        # Missing page: seed it with a single title heading.
        content = u'# ' + filename
    if request.method == 'POST':
        folder = os.path.dirname(path)
        if not os.path.exists(folder):
            os.makedirs(folder)
        with open(path, 'wb') as file_write:
            markdown = convert.km2md(request.form.get('body')).encode('utf-8')
            file_write.write(markdown)
    if request.args.get('nofmt'):
        return convert.md2km(content)
    return render_template('page.html')
if __name__ == '__main__':
    # flask_script Manager supplies the runserver/shell command line.
    MANAGER = Manager(APP)
    MANAGER.run()
|
{"/app.py": ["/convert.py"]}
|
34,300
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/evalution_fullimage.py
|
# coding: utf-8
# In[17]:
import numpy as np
import scipy.misc
# Score a full-image change map against the ground truth and report
# false-alarm / missed-alarm / overall-error rates plus precision, recall
# and F-measure, both to the console and to evalution.txt.
path = 'E:/code/Mytry/data/evalution/'
gt_label = scipy.misc.imread(path + 'Szada_Scene1_gt_testregion.png')
out = scipy.misc.imread(path + 'Szada_Scene1.bmp')

f = open(path + 'evalution.txt', 'w')

F_A = []
M_A = []
O_E = []
Pr = []
Re = []
F_measure = []
# Kept for parity with the "after update" variant of this script; unused.
F_A_update = []
M_A_update = []
O_E_update = []
Pr_update = []
Re_update = []
F_measure_update = []

iter = 1
print(np.shape(gt_label))
print(np.shape(out))

for _ in range(iter):
    ind = out
    # Evaluate only the region covered by the prediction map.
    gt = gt_label[:ind.shape[0], :ind.shape[1]]
    positive = (gt == 255) | (gt == 1)    # ground-truth "changed" pixels
    predicted = (ind == 255)              # detector "changed" pixels
    a = float(np.count_nonzero(positive & predicted))     # true positives
    b = float(np.count_nonzero((gt == 0) & predicted))    # false positives
    c = float(np.count_nonzero(positive & (ind == 0)))    # false negatives
    # Everything not counted above (mostly true negatives).
    d = float(ind.shape[0] * ind.shape[1]) - a - b - c
    total = a + b + c + d
    F_A.append(b / total * 100)
    M_A.append(c / total * 100)
    O_E.append((b + c) / total * 100)
    if a != 0.0 or b != 0.0:
        pr = a / (a + b)
        Pr.append(pr * 100)
    else:
        Pr.append(100.0)
    if a != 0.0 or c != 0.0:
        re = a / (a + c)
        Re.append(re * 100)
    else:
        Re.append(100.0)
    if a != 0.0:
        F_measure.append(2 * pr * re / (pr + re) * 100)
    else:
        F_measure.append(0.0)

# Report: each metric goes to the file and to the console.
f.write('before update:\n')
for file_label, console_label, values in (
        ('F_A:\n', 'F_A:', F_A),
        ('M_A:\n', 'M_A', M_A),
        ('O_E:\n', 'O_E', O_E),
        ('Pr:\n', 'Pr:', Pr),
        ('Re:\n', 'Re', Re),
        ('F_measure:\n', 'F-measure', F_measure)):
    f.write(file_label)
    f.write(str(values))
    f.write('\n')
    print(console_label)
    print(values)
f.close()
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,301
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/knn_evalution.py
|
import numpy as np
import scipy.misc
def knn(image, win_size):
    """Majority-vote smoothing of a binary change map.

    For every pixel, counts "changed" (>0) vs "unchanged" values in the
    win_size x win_size neighbourhood (wrapping around the image edges) and
    sets the output pixel to 255 when changed >= unchanged, else 0.
    Returns the result cropped by 4 rows/columns at the bottom/right
    (matching the original 452x788 -> 448x784 behaviour).

    Fixes over the original:
    - image dimensions are taken from image.shape instead of being
      hard-coded to 452x788, so any 2-D map works;
    - votes are taken over the pristine input: the original aliased the
      output onto the input, so later pixels voted over already-overwritten
      neighbours and the caller's array was clobbered.
    """
    rows, cols = image.shape[:2]
    half = (win_size - 1) // 2
    out_image = np.zeros_like(image)
    for i in range(rows):
        for j in range(cols):
            change = 0
            unchange = 0
            for w in range(-half, half + 1):
                for h in range(-half, half + 1):
                    # Wrap-around neighbourhood, as in the original.
                    if image[(i + w) % rows, (j + h) % cols] > 0:
                        change += 1
                    else:
                        unchange += 1
            out_image[i, j] = 255 if change >= unchange else 0
    return out_image[0:rows - 4, 0:cols - 4]
# Post-process the raw change map with the neighbourhood majority vote,
# save the result, then score it against the ground truth (same metrics and
# report format as evalution_fullimage.py).
path = 'E:/code/Mytry/data/evalution/'
gt_label = scipy.misc.imread(path + 'Szada_Scene1_gt_testregion.png')
out = scipy.misc.imread(path + 'Szada_Scene1_for_knn.bmp')

ind = knn(out, 9)
scipy.misc.imsave(path + '/' + 'Szada_Scene1_knn.bmp', ind)

f = open(path + 'evalution_knn.txt', 'w')

F_A = []
M_A = []
O_E = []
Pr = []
Re = []
F_measure = []
# Kept for parity with the companion script; never filled here.
F_A_update = []
M_A_update = []
O_E_update = []
Pr_update = []
Re_update = []
F_measure_update = []

iter = 1
print(np.shape(gt_label))
print(np.shape(out))

for _ in range(iter):
    # Evaluate only the region covered by the (cropped) smoothed map.
    gt = gt_label[:ind.shape[0], :ind.shape[1]]
    positive = (gt == 255) | (gt == 1)    # ground-truth "changed" pixels
    predicted = (ind == 255)              # detector "changed" pixels
    a = float(np.count_nonzero(positive & predicted))     # true positives
    b = float(np.count_nonzero((gt == 0) & predicted))    # false positives
    c = float(np.count_nonzero(positive & (ind == 0)))    # false negatives
    # Everything not counted above (mostly true negatives).
    d = float(ind.shape[0] * ind.shape[1]) - a - b - c
    total = a + b + c + d
    F_A.append(b / total * 100)
    M_A.append(c / total * 100)
    O_E.append((b + c) / total * 100)
    if a != 0.0 or b != 0.0:
        pr = a / (a + b)
        Pr.append(pr * 100)
    else:
        Pr.append(100.0)
    if a != 0.0 or c != 0.0:
        re = a / (a + c)
        Re.append(re * 100)
    else:
        Re.append(100.0)
    if a != 0.0:
        F_measure.append(2 * pr * re / (pr + re) * 100)
    else:
        F_measure.append(0.0)

# Report: each metric goes to the file and to the console.
f.write('before update:\n')
for file_label, console_label, values in (
        ('F_A:\n', 'F_A:', F_A),
        ('M_A:\n', 'M_A', M_A),
        ('O_E:\n', 'O_E', O_E),
        ('Pr:\n', 'Pr:', Pr),
        ('Re:\n', 'Re', Re),
        ('F_measure:\n', 'F-measure', F_measure)):
    f.write(file_label)
    f.write(str(values))
    f.write('\n')
    print(console_label)
    print(values)
f.close()
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,302
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/inference.py
|
import tensorflow as tf
import numpy as np
class siamese:
# Create model
    def __init__(self):
        """Build the siamese graph: twin networks with shared weights and
        the noisy-OR change-detection loss."""
        # Two 112x112 RGB image batches, one per branch.
        self.x1 = tf.placeholder(tf.float32, [None, 112, 112, 3])
        self.x2 = tf.placeholder(tf.float32, [None, 112, 112, 3])
        with tf.variable_scope("siamese") as scope:
            self.o1 = self.network(self.x1)
            # Reuse the same variables so both branches share weights.
            scope.reuse_variables()
            self.o2 = self.network(self.x2)
        # Per-pixel change labels (1 = changed).
        self.y_ = tf.placeholder(tf.float32, [None, 112, 112])
        # Placeholder is immediately replaced by my_loss() with the computed
        # change-probability map.
        self.output = tf.placeholder(tf.float32, [112, 112, 1])
        self.loss = self.my_loss()
def network(self, x):
# 还要加归一化层bn层
#bx=tf.reshape(x,[-1,28,28,1])
l1 = self.mycnn_layer(x,[3,3,3,64],[1,1,1,1], "l1")
active1=tf.nn.relu(l1)
l2 = self.mycnn_layer(active1,[3,3,64,64],[1,1,1,1], "l2")
active2 = tf.nn.relu(l2)
l3 = self.mycnn_layer(active2,[5,5,64,64],[1,1,1,1], "l3")
active3 = tf.nn.relu(l3)
l4 = self.mycnn_layer(active3, [5, 5, 64, 32], [1, 1, 1, 1], "l4")
active4 = tf.nn.relu(l4)
l5 = self.mycnn_layer(active4, [1, 1, 32, 16], [1, 1, 1, 1], "l5")
active5 = tf.nn.relu(l5)
l5_out=tf.nn.l2_normalize(active5,dim=3)
return l5_out
def fc_layer(self, bottom, n_weight, name):
assert len(bottom.get_shape()) == 2
n_prev_weight = bottom.get_shape()[1]
initer = tf.truncated_normal_initializer(stddev=0.01)
W = tf.get_variable(name+'W', dtype=tf.float32, shape=[n_prev_weight, n_weight], initializer=initer)
b = tf.get_variable(name+'b', dtype=tf.float32, initializer=tf.constant(0.01, shape=[n_weight], dtype=tf.float32))
fc = tf.nn.bias_add(tf.matmul(bottom, W), b)
return fc
def mycnn_layer(self, bottom, input,stride, myname):
kernel = tf.get_variable(name=myname+'W',shape=input, initializer=tf.random_normal_initializer(mean=0, stddev=1))
biases = tf.get_variable(name=myname+'b',shape=np.shape(kernel)[3], initializer=tf.random_normal_initializer(mean=0, stddev=1))
return tf.nn.bias_add(tf.nn.conv2d(bottom, kernel, stride, padding='SAME'), biases)
    def my_loss(self):
        """Weighted noisy-OR change-detection loss.

        Side effect: assigns self.output, the per-pixel change-probability
        map of shape (batch collapsed, 112, 112, 1).
        Returns the scalar loss = weighted miss term + false-alarm term.
        """
        # distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.o1, self.o2)),axis=3))
        # Per-channel squared difference between the two embeddings.
        distance = tf.square(tf.subtract(self.o1, self.o2))
        # softmax=tf.nn.softmax(logits=distance, dim=-1)
        # L2-normalise the squared differences across the channel axis.
        Normalize = tf.nn.l2_normalize(distance, dim=3)
        # Noisy-OR across channels: p = 1 - prod_c(1 - p_c).
        Noisy_or = tf.subtract(1.0, tf.reduce_prod(tf.subtract(1.0, Normalize), axis=3))
        tensor1 = tf.squeeze(Noisy_or)
        image_tensor = tf.expand_dims(tensor1, -1)
        self.output = image_tensor
        # NOTE(review): 9.0 and 0.7340 look like class-balance weights for
        # changed vs unchanged pixels — confirm how they were derived.
        changeloss = 9.0 * tf.reduce_sum(tf.multiply(self.y_, tf.subtract(1.0, Noisy_or)))
        unchangeloss = 0.7340 * tf.reduce_sum(tf.multiply(tf.subtract(1.0, self.y_), Noisy_or))
        # Earlier experiments (max-channel responses, margin losses, manual
        # per-pixel loops) were kept commented out in the original; see VCS
        # history for those variants.
        # self.output=tf.reshape(distance,[112,112,1])
        # unchange_w = tf.subtract(1.0, self.y_)
        # unchangeloss = tf.reduce_sum(tf.multiply(unchange_w,distance))
        # m = tf.constant(0.5, dtype=None, shape=(1,112,112), name='m')
        # changeloss = tf.reduce_sum(tf.multiply(self.y_,tf.maximum(0.0,tf.subtract(0.5,distance))))
        # changeloss = tf.reduce_sum(tf.multiply(self.y_, tf.subtract(1.0, tf.reduce_max(softmax, axis=3))))
        # changeloss = tf.reduce_sum(tf.multiply(self.y_, tf.subtract(1.0, tf.reduce_max(Normalize, axis=3))))
        loss = unchangeloss + changeloss
        return loss
def loss_with_spring(self):
    """Contrastive loss on embeddings: yi*d^2 + (1-yi)*max(0, C-d)^2, C = 10."""
    margin = 10.0
    labels_t = self.y_
    labels_f = tf.subtract(1.0, self.y_, name="1-yi")
    diff_sq = tf.pow(tf.subtract(self.o1, self.o2), 2)
    eucd2 = tf.reduce_sum(diff_sq, 1)
    # The epsilon keeps sqrt differentiable when the distance is zero.
    eucd = tf.sqrt(eucd2 + 1e-6, name="eucd")
    C = tf.constant(margin, name="C")
    # yi*||CNN(p1i)-CNN(p2i)||^2 + (1-yi)*max(0, C-||CNN(p1i)-CNN(p2i)||)^2
    pos = tf.multiply(labels_t, eucd2, name="yi_x_eucd2")
    neg = tf.multiply(labels_f, tf.pow(tf.maximum(tf.subtract(C, eucd), 0), 2),
                      name="Nyi_x_C-eucd_xx_2")
    losses = tf.add(pos, neg, name="losses")
    return tf.reduce_mean(losses, name="loss")

def loss_with_step(self):
    """Contrastive loss on the plain distance: yi*d + (1-yi)*max(0, C-d), C = 5."""
    margin = 5.0
    labels_t = self.y_
    labels_f = tf.subtract(1.0, self.y_, name="1-yi")
    diff_sq = tf.pow(tf.subtract(self.o1, self.o2), 2)
    eucd2 = tf.reduce_sum(diff_sq, 1)
    eucd = tf.sqrt(eucd2 + 1e-6, name="eucd")  # epsilon avoids NaN gradient at 0
    C = tf.constant(margin, name="C")
    pos = tf.multiply(labels_t, eucd, name="y_x_eucd")
    neg = tf.multiply(labels_f, tf.maximum(0.0, tf.subtract(C, eucd)), name="Ny_C-eucd")
    losses = tf.add(pos, neg, name="losses")
    return tf.reduce_mean(losses, name="loss")
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,303
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/changename.py
|
f=open(r'E:/MILdata/train.txt','r')
line= f.readlines()
for i in range(len(line)):
if 'E:/code/Mytry/data' in line[i]:
line[i]=line[i].replace('E:/code/Mytry/data','E:/MILdata')
open(r'E:/MILdata/train.txt','w').writelines(line)
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,304
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/testdataset.py
|
import scipy.misc
import numpy as np
class DataProvide():
    """Sequential (non-shuffled) batch provider for the change-detection test set.

    Reads a listing file where each line holds three whitespace-separated
    paths: first-epoch image, second-epoch image, ground-truth label.
    """
    def __init__(self):
        self.path=[]
        self.image = []          # raw lines of the listing file
        self.data_dir = []       # first-epoch image paths
        self.pairdata_dir = []   # second-epoch (paired) image paths
        self.label_dir = []      # ground-truth label paths
        self.batch_size = 1
        self.current_step = 0    # index of the next batch to serve
        self.load_dataset()
    """
    Load set of images in a directory.
    This will automatically allocate a
    random 20% of the images as a test set
    data_dir: path to directory containing images
    """
    def load_dataset(self):
        """Parse the hard-coded listing file into the three path lists."""
        f=open(r'E:/code/Mytry/data/test_Szada_Scene1.txt', 'r')
        self.image=f.readlines()
        num=len(self.image)
        # np.random.shuffle(self.image)
        for i in range(num):
            self.data_dir.append(self.image[i].split()[0])
            self.pairdata_dir.append(self.image[i].split()[1])
            self.label_dir.append(self.image[i].split()[2])
        f.close()
    def next_batch(self):
        """Load and return the next (data, pairdata, label) image batch.

        Wraps back to the start once all full batches have been served.
        """
        steps = len(self.image) // self.batch_size
        if (self.current_step > steps - 1):
            self.current_step = 0
            # np.random.shuffle(self.image)
        data_batch = []
        label_batch = []
        pairdata_batch=[]
        data_path_batch = self.data_dir[self.current_step * self.batch_size: (self.current_step + 1) * self.batch_size]
        pairdata_path_batch = self.pairdata_dir[self.current_step * self.batch_size: (self.current_step + 1) * self.batch_size]
        label_path_batch = self.label_dir[self.current_step * self.batch_size: (self.current_step + 1) * self.batch_size]
        for i in range(len(data_path_batch)):
            img_path = data_path_batch[i]
            pairimg_path = pairdata_path_batch[i]
            label_path = label_path_batch[i]
            # NOTE(review): scipy.misc.imread was removed in modern SciPy —
            # this relies on the old pinned version; confirm the environment.
            img = scipy.misc.imread(img_path)
            pairimg=scipy.misc.imread(pairimg_path)
            label = scipy.misc.imread(label_path)
            data_batch.append(img)
            pairdata_batch.append(pairimg)
            label_batch.append(label)
        self.current_step += 1
        #data_batch=np.reshape(data_batch,112*112*3)
        #pairdata_batch = np.reshape(pairdata_batch, 112 * 112 * 3)
        #label_batch = np.reshape(label_batch, 112 * 112 * 1 )
        #print(np.shape(data_batch))
        #print(np.shape(pairdata_batch))
        #print(np.shape(label_batch))
        return data_batch, pairdata_batch,label_batch
'''
for i in range(len(data_path_batch)):
img_path = data_path_batch[i]
pairimg_path = pairdata_path_batch[i]
label_path = label_path_batch[i]
self.current_step += 1
return img_path, pairimg_path,label_path
data =DataProvide()
a,b,c=data.next_batch()
print(a)
print(b)
print(c)
a1,b1,c1=data.next_batch()
print(a1)
print(b1)
print(c1)
'''
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,305
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/imgpinjie.py
|
# Stitch 112x112 output tiles back into full scene change maps.
import os
import cv2
import numpy as np
import json
import re
import scipy.misc
img_width = 112
img_height = 112
#path = r'/workspace/zmychange/dataset/tiszadob_hist_specify/outmap3/0.44'
path = r'E:\code\Mytry\data\output'
img_dir_firsts = os.listdir(path)
outmaplist = []
beoutmaplist = []
# Collect the tile files belonging to this scene.
for img_dir_first in img_dir_firsts:
    # if 'updataoutmap_' in img_dir_first:
    if 'Tiszadob' in img_dir_first:
        outmaplist.append(img_dir_first)
    # if 'beforeoutmap_' in img_dir_first:
    #     beoutmaplist.append(img_dir_first)
# Canvas for a 6x9 grid of 112x112 tiles.
fullimage = np.zeros([6*112,9*112],np.uint8)
# befullimage = np.zeros([6*112,9*112],np.uint8)
print (fullimage.shape)
num = 0
# Tiles are numbered row-major: Tiszadob<num>.bmp fills the grid left to
# right, top to bottom.
for i in range(0,fullimage.shape[0],112):
    for j in range(0,fullimage.shape[1],112):
        # index = outmaplist.index('updataoutmap_' + str(num) + '.jpg')
        index = outmaplist.index('Tiszadob' + str(num) + '.bmp')
        #print index
        fullimage[i:i + img_height , j:j + img_width] = scipy.misc.imread(path+'/'+outmaplist[index])
        # beindex = beoutmaplist.index('beforeoutmap_' + str(num) + '.jpg')
        # # print index
        # befullimage[i:i + img_height, j:j + img_width] = scipy.misc.imread(path + '/' + beoutmaplist[beindex])
        num = num+1
# sub_path = '/workspace/zmychange/dataset/Szada_hist_specify1/outmapfull1/resnet_deeplab10_weightall_triplet2b_30000'
# NOTE(review): sub_path is not a raw string — the backslash pairs here
# happen to be harmless, but r'...' would be safer; verify on change.
sub_path = 'E:\code\Mytry\data\output'
if not os.path.exists(sub_path):
    os.mkdir(sub_path)
# sub_path2 = '/workspace/zmychange/dataset/Szada_hist_specify1/outmapfull1/resnet_deeplab10_weightall_triplet2b_30000/0.485'
#
# if not os.path.exists(sub_path2):
#     os.mkdir(sub_path2)
# scipy.misc.imsave(sub_path2+'/'+'updataSza_1.bmp',fullimage)
# scipy.misc.imsave(sub_path2+'/'+'beforeSza_1.bmp',befullimage)
# Crop away the zero padding added when the scene was tiled.
fullim1_y = fullimage[0:448,0:784]
fullim2_for_knn=fullimage[0:452,0:788]
# befullim1_y = befullimage[0:640,0:952]
print (fullim1_y.shape)
# print (befullim1_y.shape)
# scipy.misc.imsave(sub_path2+'/'+'updataSza_1_fulloutmap.bmp',fullim1_y)
# scipy.misc.imsave(sub_path2+'/'+'beforeSza_1_fulloutmap.bmp',befullim1_y)
scipy.misc.imsave(sub_path+'/'+'Szada_Scene1.bmp',fullim1_y)
scipy.misc.imsave(sub_path+'/'+'Szada_Scene1_for_knn.bmp',fullim2_for_knn)
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,306
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/imageclip.py
|
#coding: utf-8
# Cut every scene image in img_path_root into 112x112 tiles, zero-padding
# partial border tiles up to the full tile size.
import os
import cv2
import numpy as np
import json
import re
import scipy.misc
# img_path_root = r'E:/dataseg/Tiszadob_hist_specify_3_ch_2_to_1/'
# img_path_root = r'E:/dataseg/Tiszadob_GT/'
# img_sub = 'E:/dataseg/tiszadob_hist_specify112_gt/'
# img_sub = 'E:/dataseg/tiszadob_hist_specify112/'
img_path_root = r'E:/dataseg/Szada_Scene1/'
img_sub = 'E:/dataseg/Szada_Scene1_seg/'
img_dir_firsts = os.listdir(img_path_root)
#print(img_dir_firsts)
img_width = 112
img_height = 112
for img_dir_first in img_dir_firsts:
    # NOTE(review): str.strip('.bmp') strips any of the characters
    # '.', 'b', 'm', 'p' from both ends, not the literal '.bmp' suffix —
    # fine for these names but would corrupt e.g. 'map.bmp'; verify.
    img_dir_first = img_dir_first.strip().strip('.bmp')
    print(img_dir_first)
    i = re.sub('\D','',img_dir_first)[0]  # first digit in the name = scene number
    print(i)
    #print(type(img_dir_first))
    #print('Scene'+str(i))
    # img_sub_path = img_sub + 'tiszadob_Scene' + str(i)
    # #img_sub_path = img_sub + 'Szada_Scene' + str(i)
    # if not os.path.exists(img_sub_path):
    #     os.mkdir(img_sub_path)
    img_np = scipy.misc.imread(os.path.join(img_path_root,img_dir_first+'.bmp'))
    print (img_np.shape)
    #img_np = img_np[0:630,0:945]
    #scipy.misc.imsave(img_sub + img_dir_first + '.bmp', img_np)
    #print(type(img_np))
    img_shape = np.shape(img_np)
    print(img_shape)
    img_num = 0
    # Slide a 112x112 window with stride 112; skip windows starting beyond
    # the usable area and zero-pad partial tiles at the border.
    for h in range(0,img_shape[0],112):
        for w in range(0,img_shape[1],112):
            if h > 630 or w >940:
                continue
            sub_img = img_np[h:min(h + img_height, img_shape[0]), w:min(w + img_width, img_shape[1])]
            print(sub_img.shape)
            if sub_img.shape[0] != img_height or sub_img.shape[1] != img_width:
                # Pad bottom/right (channel axis padded by 0) up to 112x112.
                sub_img = np.lib.pad(sub_img, ((0, img_height - int(sub_img.shape[0])), (0, img_width - int(sub_img.shape[1])), (0, 0)), 'constant',
                                     constant_values=0)
                # sub_img = np.lib.pad(sub_img, (
                #     (0, img_height - int(sub_img.shape[0])), (0, img_width - int(sub_img.shape[1]))), 'constant',
                #     constant_values=0)
            print(sub_img.shape)
            # scipy.misc.imsave(img_sub_path+'/'+img_dir_first+'_'+'clip'+str(img_num)+'.bmp',sub_img)
            scipy.misc.imsave(img_sub + '/' + img_dir_first + '_' + 'clip' + str(img_num) + '.bmp', sub_img)
            #f1 = open(img_sub_path+'/'+'index.txt','a')
            #s=img_dir_first+'_'+'clip'+str(img_num)
            #f1.write(s+':'+'['+str(h)+','+str(w)+']'+'\n')
            img_num = img_num+1
    print (img_num)
    #print(img_sub_path)
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,307
|
xuquanfu/My-Graduation-Design
|
refs/heads/master
|
/run.py
|
""" Siamese implementation using Tensorflow with MNIST example.
This siamese network embeds a 28x28 image (a point in 784D)
into a point in 2D.
By Youngwook Paul Kwon (young at berkeley.edu)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import system things
from tensorflow.examples.tutorials.mnist import input_data # for data
import tensorflow as tf
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt;
import scipy.misc
#import helpers
import inference
import visualize
import dataset
import testdataset
# prepare data and tf.session
#mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
data=dataset.DataProvide()
testdata=testdataset.DataProvide()
sess = tf.InteractiveSession()
# setup siamese network
siamese = inference.siamese();
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.005
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,1000, 0.90, staircase=True)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(siamese.loss,global_step=global_step)
saver = tf.train.Saver()
tf.global_variables_initializer().run()
# if you just want to load a previously trainmodel?
new = True
# model_ckpt = './model.ckpt'
# if os.path.isfile(model_ckpt):
input_var = None
while input_var not in ['yes', 'no']:
input_var = input("We found model.ckpt file. Do you want to load it [yes/no]?")
if input_var == 'yes':
new = False
# start training
if new:
for step in range(500001):
'''
batch_x1, batch_y1 = mnist.train.next_batch(128)
batch_x2, batch_y2 = mnist.train.next_batch(128)
batch_y = (batch_y1 == batch_y2).astype('float')
_, loss_v = sess.run([train_step, siamese.loss], feed_dict={
siamese.x1: batch_x1,
siamese.x2: batch_x2,
siamese.y_: batch_y})
'''
batch_x1,batch_x2,batch_y=data.next_batch()
_, loss_v,output_v,y_v = sess.run([train_step, siamese.loss,siamese.output,siamese.y_], feed_dict={
siamese.x1: batch_x1,
siamese.x2: batch_x2,
siamese.y_: batch_y})
# print(y_v)
# cv2.imshow('output',output_v)
# cv2.waitKey(0)
if np.isnan(loss_v):
print('Model diverged with loss = NaN')
quit()
if step % 50 == 0:
print(output_v)
print ('step %d: loss %.3f' % (step, loss_v))
if step % 1000 == 0 and step > 0:
saver.save(sess, './model.ckpt')
else:
saver.restore(sess, './model100000.ckpt')
for step in range(54):
batch_x1,batch_x2,batch_y=testdata.next_batch()
loss_v,output_v= sess.run([siamese.loss,siamese.output], feed_dict={
siamese.x1: batch_x1,
siamese.x2: batch_x2,
siamese.y_: batch_y})
if np.isnan(loss_v):
print('Model diverged with loss = NaN')
quit()
print ('step %d: loss %.3f' % (step, loss_v))
print(output_v)
output = np.zeros([112, 112], np.uint8)
for i in range(112):
for j in range(112):
if output_v[i,j]>0.7:
output[i, j] = 255
else:
output[i,j]= 0
# print(tf.shape(output_v))
# print(output_v)
cv2.imwrite('E:\code\Mytry\data\output\Tiszadob'+str(step)+'.bmp',output)
# cv2.imshow('output',output_v)
# cv2.imshow('output2', output)
# scipy.misc.imsave('E:\code\Mytry\data\output\Tiszadob'+str(step)+'.bmp',output_v)
# print(tf.shape(y_v))
# tensor1 = tf.squeeze(y_v)
# image_tensor = tf.placeholder(tf.float32, [112, 112, 1])
# image_tensor = tf.expand_dims(tensor1, -1)
# print(tf.shape(image_tensor))
# cv2.imshow('y', image_tensor.eval() )
# cv2.waitKey(0)
# embed = siamese.o1.eval({siamese.x1: mnist.test.images})
# embed.tofile('embed.txt')
#
# # visualize result
# x_test = mnist.test.images.reshape([-1, 28, 28])
# visualize.visualize(embed, x_test)
|
{"/run.py": ["/inference.py", "/testdataset.py"]}
|
34,312
|
tanliyon/chatbot
|
refs/heads/master
|
/eval.py
|
#!/usr/bin/env python
# coding: utf-8
# Interactive evaluation driver: loads/trains the chatbot model and then
# chats with the user on stdin (see evaluateInput below).
import warnings
import torch
from torch import nn
warnings.filterwarnings(action='ignore')
from autocorrect import Correcter
from chatbot import trainBot, normalizeString, indexesFromSentence
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
correct = Correcter()  # spelling autocorrector applied to user input
# Default word tokens
PAD_token = 0  # Used for padding short sentences
SOS_token = 1  # Start-of-sentence token
EOS_token = 2  # End-of-sentence token
# Hyperparameters
attn_model = 'dot'
load_checkpoint = 6000   # checkpoint iteration to resume from
max_length = 20          # maximum sentence length in words
min_count = 3            # words rarer than this are trimmed from the vocab
model_name = "lionheart"
dropout = 0.1
batch_size = 64
clip = 50                # gradient-clipping threshold
teacher_forcing_ratio = 0.5
learning_rate = 0.0001
decoder_learning_ratio = 5
n_iteration = 2000
print_every = 100
save_every = 2000
# Build (or resume) the model; returns the trained encoder/decoder and vocab.
encoder, decoder, voc = trainBot(attn_model, load_checkpoint, max_length, min_count, model_name, dropout,
                                 batch_size, clip, teacher_forcing_ratio,
                                 learning_rate, decoder_learning_ratio, n_iteration, print_every, save_every)
# In[ ]:
def evaluate(encoder, decoder, searcher, voc, sentence, max_length):
    """Greedy-decode a reply for one input sentence; returns the decoded words."""
    # Sentence -> a batch (size 1) of word-index sequences.
    index_batch = [indexesFromSentence(voc, sentence)]
    seq_lengths = torch.tensor([len(seq) for seq in index_batch])
    # Time-major (max_len, 1) input, as the models expect.
    input_batch = torch.LongTensor(index_batch).transpose(0, 1).to(device)
    seq_lengths = seq_lengths.to(device)
    # Run the greedy searcher; scores are discarded here.
    tokens, _ = searcher(input_batch, seq_lengths, max_length)
    # Indexes -> words.
    return [voc.index2word[token.item()] for token in tokens]
def evaluateInput(encoder, decoder, searcher, voc):
    """Interactive REPL: read a line, autocorrect/normalize it, print the bot's reply.

    Exits on 'q' or 'quit'. Out-of-vocabulary words raise KeyError inside
    evaluate(); that is caught and echoed back as a question.
    """
    input_sentence = ''
    while(1):
        try:
            # Get input sentence
            input_sentence = input('> ')
            # Check if it is quit case
            if input_sentence == 'q' or input_sentence == 'quit':
                print("Goodbye!")
                break
            # TODO: Increment autocorrect file count
            output = []
            # Autocorrect spelling; keep the correction only if it changed something.
            correct_input_sentence = correct(input_sentence.lower())
            if correct_input_sentence != input_sentence.lower():
                input_sentence = correct_input_sentence
            # Normalize sentence
            input_sentence = normalizeString(input_sentence)
            # Evaluate sentence
            output_words = evaluate(encoder, decoder, searcher, voc, input_sentence, max_length)
            # Format and print response sentence (stop at the first EOS token).
            for word in output_words:
                if word == "i":
                    word = word.capitalize()
                if word != 'EOS':
                    output.append(word)
                else:
                    break
            # Capitalize the reply and attach trailing punctuation without a space.
            output[0] = output[0].capitalize()
            if output[-1] == "." or output[-1] == "!" or output[-1] == "?":
                print('Bot:', ' '.join(output[:-1]) + output[-1])
            else:
                print('Bot:', ' '.join(output) + '.')
        except KeyError as key:
            key = str(key).strip('\'') # Strip the starting and ending quotation mark
            print(f"Bot: {key}?")
# In[ ]:
class GreedySearchDecoder(nn.Module):
    """Greedy decoding wrapper: repeatedly feeds the decoder its own argmax token."""

    def __init__(self, encoder, decoder):
        super(GreedySearchDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, input_seq, input_length, max_length):
        # Encode the full input sequence once.
        enc_outputs, enc_hidden = self.encoder(input_seq, input_length)
        # NOTE(review): the original reads the module-level `decoder`, not
        # self.decoder, for n_layers — preserved as-is.
        dec_hidden = enc_hidden[:decoder.n_layers]
        # Decoding starts from a single SOS token.
        dec_input = torch.ones(1, 1, device=device, dtype=torch.long) * SOS_token
        token_seq = torch.zeros([0], device=device, dtype=torch.long)
        score_seq = torch.zeros([0], device=device)
        # One greedy step per output position.
        for _ in range(max_length):
            dec_output, dec_hidden = self.decoder(dec_input, dec_hidden, enc_outputs)
            best_score, best_token = torch.max(dec_output, dim=1)
            token_seq = torch.cat((token_seq, best_token), dim=0)
            score_seq = torch.cat((score_seq, best_score), dim=0)
            # The chosen token (with a time dimension) becomes the next input.
            dec_input = torch.unsqueeze(best_token, 0)
        return token_seq, score_seq
# In[ ]:
# Set dropout layers to eval mode (disables dropout during chatting)
encoder.eval()
decoder.eval()
# Initialize search module
searcher = GreedySearchDecoder(encoder, decoder)
# Begin chatting (uncomment and run the following line to begin)
evaluateInput(encoder, decoder, searcher, voc)
# In[ ]:
# In[ ]:
|
{"/eval.py": ["/autocorrect.py", "/chatbot.py"]}
|
34,313
|
tanliyon/chatbot
|
refs/heads/master
|
/chatbot.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import random
import re
import os
import unicodedata
from io import open
import itertools
from gensim.models import KeyedVectors
from parse import parse_file
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")

corpus_name = "cornell movie-dialogs corpus"
corpus = os.path.join("data", corpus_name)
save_dir = os.path.join("data", "save")
datafile = os.path.join(corpus, "formatted_movie_lines.txt")
# BUG FIX: the original `if not datafile:` tested the path *string*, which
# is always non-empty, so parse_file() could never run. Test whether the
# formatted corpus file actually exists instead.
if not os.path.isfile(datafile):
    parse_file(datafile, corpus)

# Default word tokens
PAD_token = 0  # Used for padding short sentences
SOS_token = 1  # Start-of-sentence token
EOS_token = 2  # End-of-sentence token

# Relatively fixed parameters
hidden_size = 300       # also the embedding dimension (see EncoderRNN)
encoder_n_layers = 2
decoder_n_layers = 2
class Voc:
    """Vocabulary: bidirectional word<->index mapping with frequency counts.

    Indexes 0-2 are reserved for the PAD/SOS/EOS tokens.
    """
    def __init__(self, name):
        self.name = name
        self.trimmed = False  # trim() may only run once
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count SOS, EOS, PAD

    def addSentence(self, sentence):
        """Register every space-separated word of `sentence`."""
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        """Add `word` to the vocabulary, or bump its count if already known."""
        if word not in self.word2index:
            self.word2index[word] = self.num_words
            self.word2count[word] = 1
            self.index2word[self.num_words] = word
            self.num_words += 1
        else:
            self.word2count[word] += 1

    def updateDict(self, dict_old):
        """Re-register words that are missing from an older vocabulary snapshot.

        BUG FIX: the original assigned to a bare `word2index`, raising
        NameError on first use; it now writes to self.word2index.
        NOTE(review): re-indexing words already present in self (and
        resetting their counts) looks unusual — confirm against the
        checkpoint-loading logic that calls this.
        """
        for word in self.word2index:
            if word not in dict_old["word2index"]:
                self.word2index[word] = self.num_words
                self.word2count[word] = 1
                self.index2word[self.num_words] = word
                self.num_words += 1

    # Remove words below a certain count threshold
    def trim(self, min_count):
        """Drop words seen fewer than `min_count` times; idempotent."""
        if self.trimmed:
            return
        self.trimmed = True
        keep_words = [k for k, v in self.word2count.items() if v >= min_count]
        print('Keep_words {} / {} = {:.4f}'.format(
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))
        # Reinitialize dictionaries
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count default tokens
        for word in keep_words:
            self.addWord(word)
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip diacritics: NFD-decompose and drop combining marks (category 'Mn')."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')


# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
    """Lowercase/trim, detach . ! ? as separate tokens, drop other symbols."""
    text = unicodeToAscii(s.lower().strip())
    text = re.sub(r"([.!?])", r" \1", text)       # pad sentence punctuation
    text = re.sub(r"[^a-zA-Z.!?']+", r" ", text)  # keep letters, apostrophe, .!?
    text = re.sub(r"\s+", r" ", text).strip()     # collapse runs of whitespace
    return text
# Read query/response pairs and return a voc object
def readVocs(datafile, corpus_name):
    """Read tab-separated query/response lines from datafile.

    Returns (empty Voc named after the corpus, list of normalized
    [query, response] pairs).
    """
    print("Reading lines...")
    # Read the file and split into lines
    lines = open(datafile, encoding='utf-8'). read().strip().split('\n')
    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
    voc = Voc(corpus_name)
    return voc, pairs
# Returns True iff both sentences in a pair 'p' are under the MAX_LENGTH threshold
def filterPair(p, max_length):
    """True when both sentences of pair `p` have fewer than max_length tokens."""
    # split(' ') (not split()) matches how sentences were normalized upstream;
    # the last word is reserved for the EOS token downstream.
    query_len = len(p[0].split(' '))
    reply_len = len(p[1].split(' '))
    return query_len < max_length and reply_len < max_length


# Filter pairs using filterPair condition
def filterPairs(pairs, max_length):
    """Keep only the pairs accepted by filterPair."""
    accepted = []
    for pair in pairs:
        if filterPair(pair, max_length):
            accepted.append(pair)
    return accepted
# Using the functions defined above, return a populated voc object and pairs list
def loadPrepareData(corpus, corpus_name, datafile, save_dir, max_length):
    """Read datafile, length-filter the pairs, and populate a Voc from them."""
    print("Start preparing training data ...")
    voc, pairs = readVocs(datafile, corpus_name)
    print("Read {!s} sentence pairs".format(len(pairs)))
    pairs = filterPairs(pairs, max_length)
    print("Trimmed to {!s} sentence pairs".format(len(pairs)))
    print("Counting words...")
    for pair in pairs:
        voc.addSentence(pair[0])
        voc.addSentence(pair[1])
    print("Counted words:", voc.num_words)
    return voc, pairs


def trimRareWords(voc, pairs, MIN_COUNT):
    """Trim rare words from voc, then drop any pair containing a trimmed word."""
    # Trim words used under the MIN_COUNT from the voc
    voc.trim(MIN_COUNT)

    def _all_known(sentence):
        # True when every word of the sentence survived the trim.
        return all(word in voc.word2index for word in sentence.split(' '))

    # Keep only pairs whose both sides consist of surviving words.
    keep_pairs = [pair for pair in pairs if _all_known(pair[0]) and _all_known(pair[1])]
    print("Trimmed from {} pairs to {}, {:.4f} of total".format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))
    return keep_pairs
def indexesFromSentence(voc, sentence):
    """Map a sentence to its word-index list, terminated by the EOS token."""
    indexes = [voc.word2index[word] for word in sentence.split(' ')]
    indexes.append(EOS_token)
    return indexes


def zeroPadding(l, fillvalue=PAD_token):
    """Transpose a batch of index lists to time-major order, padding short ones."""
    transposed = itertools.zip_longest(*l, fillvalue=fillvalue)
    return list(transposed)


def binaryMatrix(l, value=PAD_token):
    """0/1 mask over `l`: 0 where the entry is the PAD token, else 1.

    NOTE(review): the comparison uses the module-level PAD_token, not the
    `value` parameter — preserved as-is to keep behavior identical.
    """
    mask_rows = []
    for seq in l:
        mask_rows.append([0 if token == PAD_token else 1 for token in seq])
    return mask_rows
# Returns padded input sequence tensor and lengths
def inputVar(l, voc):
    """Index, pad and tensorize input sentences; returns (padVar, lengths)."""
    batch_indexes = [indexesFromSentence(voc, sentence) for sentence in l]
    lengths = torch.tensor([len(seq) for seq in batch_indexes])
    padVar = torch.LongTensor(zeroPadding(batch_indexes))
    return padVar, lengths


# Returns padded target sequence tensor, padding mask, and max target length
def outputVar(l, voc):
    """Index, pad and tensorize target sentences; returns (padVar, mask, max_target_len)."""
    batch_indexes = [indexesFromSentence(voc, sentence) for sentence in l]
    max_target_len = max(len(seq) for seq in batch_indexes)
    padded = zeroPadding(batch_indexes)
    mask = torch.BoolTensor(binaryMatrix(padded))
    padVar = torch.LongTensor(padded)
    return padVar, mask, max_target_len


# Returns all items for a given batch of pairs
def batch2TrainData(voc, pair_batch):
    """Sort a pair batch by input length (longest first, as the packed GRU
    input requires) and tensorize both sides."""
    pair_batch.sort(key=lambda x: len(x[0].split(" ")), reverse=True)
    input_batch = [pair[0] for pair in pair_batch]
    output_batch = [pair[1] for pair in pair_batch]
    inp, lengths = inputVar(input_batch, voc)
    output, mask, max_target_len = outputVar(output_batch, voc)
    return inp, lengths, output, mask, max_target_len
class EncoderRNN(nn.Module):
    """Bidirectional GRU encoder; returns summed directional outputs and the final hidden state."""

    def __init__(self, hidden_size, embedding, n_layers=1, dropout=0):
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.embedding = embedding
        # input_size == hidden_size because the word embeddings are
        # hidden_size-dimensional.
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers,
                          dropout=(0 if n_layers == 1 else dropout), bidirectional=True)

    def forward(self, input_seq, input_lengths, hidden=None):
        embedded = self.embedding(input_seq)
        # Pack so the GRU skips padded positions.
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
        gru_out, hidden = self.gru(packed, hidden)
        unpacked, _ = nn.utils.rnn.pad_packed_sequence(gru_out)
        # Merge the two directions by summing their feature halves.
        summed = unpacked[:, :, :self.hidden_size] + unpacked[:, :, self.hidden_size:]
        return summed, hidden


# Luong attention layer
class Attn(nn.Module):
    """Luong attention ('dot', 'general' or 'concat') producing softmax weights."""

    def __init__(self, method, hidden_size):
        super(Attn, self).__init__()
        self.method = method
        if self.method not in ['dot', 'general', 'concat']:
            raise ValueError(self.method, "is not an appropriate attention method.")
        self.hidden_size = hidden_size
        if self.method == 'general':
            self.attn = nn.Linear(self.hidden_size, hidden_size)
        elif self.method == 'concat':
            self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
            self.v = nn.Parameter(torch.FloatTensor(hidden_size))

    def dot_score(self, hidden, encoder_output):
        return torch.sum(hidden * encoder_output, dim=2)

    def general_score(self, hidden, encoder_output):
        return torch.sum(hidden * self.attn(encoder_output), dim=2)

    def concat_score(self, hidden, encoder_output):
        expanded = hidden.expand(encoder_output.size(0), -1, -1)
        energy = self.attn(torch.cat((expanded, encoder_output), 2)).tanh()
        return torch.sum(self.v * energy, dim=2)

    def forward(self, hidden, encoder_outputs):
        # Score every encoder position against the current decoder state.
        if self.method == 'dot':
            attn_energies = self.dot_score(hidden, encoder_outputs)
        elif self.method == 'general':
            attn_energies = self.general_score(hidden, encoder_outputs)
        else:  # 'concat' (validated in __init__)
            attn_energies = self.concat_score(hidden, encoder_outputs)
        # (max_length, batch) -> (batch, max_length)
        attn_energies = attn_energies.t()
        # Softmax-normalized weights with an extra dim for bmm.
        return F.softmax(attn_energies, dim=1).unsqueeze(1)
class LuongAttnDecoderRNN(nn.Module):
    """Single-step GRU decoder with Luong attention over the encoder outputs."""

    def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):
        super(LuongAttnDecoderRNN, self).__init__()
        # Keep for reference
        self.attn_model = attn_model
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = dropout
        # Layers (registration order preserved from the original).
        self.embedding = embedding
        self.embedding_dropout = nn.Dropout(dropout)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers,
                          dropout=(0 if n_layers == 1 else dropout))
        self.concat = nn.Linear(hidden_size * 2, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.attn = Attn(attn_model, hidden_size)

    def forward(self, input_step, last_hidden, encoder_outputs):
        # One word (time step) at a time: embed with dropout, run the GRU.
        embedded = self.embedding_dropout(self.embedding(input_step))
        rnn_output, hidden = self.gru(embedded, last_hidden)
        # Attention weights from the current GRU output.
        attn_weights = self.attn(rnn_output, encoder_outputs)
        # Weighted sum of encoder outputs -> Luong context vector.
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1)).squeeze(1)
        rnn_output = rnn_output.squeeze(0)
        # Luong eq. 5: combine context and GRU output.
        combined = torch.tanh(self.concat(torch.cat((rnn_output, context), 1)))
        # Luong eq. 6: project to vocabulary and normalize.
        output = F.softmax(self.out(combined), dim=1)
        return output, hidden


def maskNLLLoss(inp, target, mask):
    """Masked negative log-likelihood for one decoder step.

    Returns (mean loss over unmasked positions on `device`, number of
    unmasked positions as a Python int).
    """
    nTotal = mask.sum()
    picked = torch.gather(inp, 1, target.view(-1, 1)).squeeze(1)
    crossEntropy = -torch.log(picked)
    loss = crossEntropy.masked_select(mask).mean().to(device)
    return loss, nTotal.item()
def train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding,
          encoder_optimizer, decoder_optimizer, batch_size, clip, max_length, teacher_forcing_ratio):
    """Run one optimization step on a single batch.

    Decodes step by step (teacher-forced with probability
    teacher_forcing_ratio), accumulates masked NLL, backprops, clips
    gradients to `clip`, steps both optimizers, and returns the average
    loss per unmasked token. Uses the module-level `device` and SOS_token.
    """
    # Zero gradients
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    # Set device options
    input_variable = input_variable.to(device)
    lengths = lengths.to(device)
    target_variable = target_variable.to(device)
    mask = mask.to(device)
    # Initialize variables
    loss = 0
    print_losses = []
    n_totals = 0
    # Forward pass through encoder
    encoder_outputs, encoder_hidden = encoder(input_variable, lengths)
    # Create initial decoder input (start with SOS tokens for each sentence)
    decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])
    decoder_input = decoder_input.to(device)
    # Set initial decoder hidden state to the encoder's final hidden state
    decoder_hidden = encoder_hidden[:decoder.n_layers]
    # Determine if we are using teacher forcing this iteration
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    # Forward batch of sequences through decoder one time step at a time
    if use_teacher_forcing:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # Teacher forcing: next input is current target
            decoder_input = target_variable[t].view(1, -1)
            # Calculate and accumulate loss
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    else:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # No teacher forcing: next input is decoder's own current output
            _, topi = decoder_output.topk(1)
            decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
            decoder_input = decoder_input.to(device)
            # Calculate and accumulate loss
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    # Perform backpropagation
    loss.backward()
    # Clip gradients: gradients are modified in place
    _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)
    # Adjust model weights
    encoder_optimizer.step()
    decoder_optimizer.step()
    # Average loss per unmasked token (nTotal-weighted).
    return sum(print_losses) / n_totals
def trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
               embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size,
               print_every, save_every, clip, corpus_name, loadFilename, max_length, teacher_forcing_ratio, checkpoint):
    """Sample random training batches and run `n_iteration` training steps.

    Logs the average loss every `print_every` iterations and writes a full
    checkpoint (models, optimizers, vocab dict, embedding state) every
    `save_every` iterations.  When resuming (`loadFilename` truthy), the
    saved iteration count offsets only the checkpoint file names — the loop
    still runs `n_iteration` fresh iterations.

    NOTE(review): the checkpoint directory name reads the module-level
    global `hidden_size`, not a parameter — confirm it matches the models.
    """
    # Load batches for each iteration
    training_batches = [batch2TrainData(voc, [random.choice(pairs) for _ in range(batch_size)])
                      for _ in range(n_iteration)]
    # Initializations
    print('Initializing ...')
    start_iteration = 0
    print_loss = 0
    if loadFilename:
        start_iteration = checkpoint['iteration']
    # Training loop
    print("Training...")
    for iteration in range(1, n_iteration + 1):
        training_batch = training_batches[iteration - 1]
        # Extract fields from batch
        input_variable, lengths, target_variable, mask, max_target_len = training_batch
        # Run a training iteration with batch
        loss = train(input_variable, lengths, target_variable, mask, max_target_len, encoder,
                     decoder, embedding, encoder_optimizer, decoder_optimizer, batch_size, clip, max_length, teacher_forcing_ratio)
        print_loss += loss
        # Print progress
        if iteration % print_every == 0:
            print_loss_avg = print_loss / print_every
            print("Iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(iteration, iteration / n_iteration * 100, print_loss_avg))
            print_loss = 0
        # Save checkpoint
        if (iteration % save_every == 0):
            directory = os.path.join(save_dir, model_name, corpus_name, '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size))
            if not os.path.exists(directory):
                os.makedirs(directory)
            # 'loss' below is the last single-batch loss, not a running average.
            torch.save({
                'iteration': iteration+start_iteration,
                'en': encoder.state_dict(),
                'de': decoder.state_dict(),
                'en_opt': encoder_optimizer.state_dict(),
                'de_opt': decoder_optimizer.state_dict(),
                'loss': loss,
                'voc_dict': voc.__dict__,
                'embedding': embedding.state_dict()
            }, os.path.join(directory, '{}_{}.tar'.format(iteration+start_iteration, 'checkpoint')))
def trainBot(attn_model, load_checkpoint, max_length, min_count, model_name, dropout, batch_size, clip,
             teacher_forcing_ratio, learning_rate, decoder_learning_ratio, n_iteration, print_every, save_every):
    """Build encoder/decoder (optionally from a checkpoint) and run training.

    Relies on module-level globals: corpus, corpus_name, datafile, save_dir,
    encoder_n_layers, decoder_n_layers, hidden_size, device.

    Returns:
        (encoder, decoder, voc) after training completes.
    """
    # Clear GPU cache
    torch.cuda.empty_cache()
    # Load/Assemble voc and pairs
    voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir, max_length)
    pairs = trimRareWords(voc, pairs, min_count)
    if load_checkpoint:
        loadFilename = os.path.join(save_dir, model_name, corpus_name,
                                    '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
                                    '{}_checkpoint.tar'.format(load_checkpoint))
    else:
        loadFilename = None
        checkpoint = None
    # Load model if a loadFilename is provided
    if loadFilename:
        # If loading on same machine the model was trained on
        checkpoint = torch.load(loadFilename)
        encoder_sd = checkpoint['en']
        decoder_sd = checkpoint['de']
        encoder_optimizer_sd = checkpoint['en_opt']
        decoder_optimizer_sd = checkpoint['de_opt']
        embedding_sd = checkpoint['embedding']
        # NOTE(review): checkpoint['voc_dict'] used to be read into an unused
        # local and never applied to `voc`; restoring the vocabulary is still
        # TODO (see the commented-out updateDict call below).
        # voc.updateDict(dict_old)
    print('Building encoder and decoder ...')
    # Initialize word embeddings
    if loadFilename:
        embedding = nn.Embedding(100000, 300) # Google word2vec has 3 million embeddings, each has a 300 dim vector
        embedding.load_state_dict(embedding_sd)
    else:
        embedding_model = KeyedVectors.load_word2vec_format('data\GoogleNews-vectors-negative300.bin.gz', binary=True, limit=100000)
        embedding_weights = torch.FloatTensor(embedding_model.vectors)
        embedding = nn.Embedding.from_pretrained(embedding_weights)
        # Removed a debug `print(embedding("the"))`: nn.Embedding expects a
        # LongTensor of indices, so calling it with a str raised TypeError.
    # Initialize encoder & decoder models
    # NOTE(review): embedding rows are indexed by the word2vec vocabulary while
    # the decoder output size is voc.num_words — confirm the two index spaces
    # are aligned upstream.
    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
    if loadFilename:
        encoder.load_state_dict(encoder_sd)
        decoder.load_state_dict(decoder_sd)
    # Use appropriate device
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    print('Models built and ready to go!')
    # Ensure dropout layers are in train mode
    encoder.train()
    decoder.train()
    # Initialize optimizers
    print('Building optimizers ...')
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
    if loadFilename:
        encoder_optimizer.load_state_dict(encoder_optimizer_sd)
        decoder_optimizer.load_state_dict(decoder_optimizer_sd)
    # If you have cuda, configure cuda to call
    for state in encoder_optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                state[k] = v.cuda()
    for state in decoder_optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                state[k] = v.cuda()
    # Run training iterations
    print("Starting Training!")
    trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
               embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size,
               print_every, save_every, clip, corpus_name, loadFilename, max_length, teacher_forcing_ratio, checkpoint)
    print("Training Ended!")
    return encoder, decoder, voc
# Module-level scratch code (notebook-cell residue): load a truncated copy of
# the Google News word2vec vectors and wrap them in an nn.Embedding.
embedding_model = KeyedVectors.load_word2vec_format('data\GoogleNews-vectors-negative300.bin.gz', binary=True, limit=100000)
embedding_weights = torch.FloatTensor(embedding_model.vectors)
embedding = nn.Embedding.from_pretrained(embedding_weights)
# input_tensor = torch.LongTensor([[1,2],[1,4]])
# tensor = embedding(input_tensor)
# print(tensor)
# print(word)
# print(tensor.shape)
# Vector lookup sanity check; the result is discarded.
embedding_model["dog"]
# embedding_model.similar_by_vector(tensor, topn=10)
|
{"/eval.py": ["/autocorrect.py", "/chatbot.py"]}
|
34,314
|
tanliyon/chatbot
|
refs/heads/master
|
/autocorrect.py
|
#!/usr/bin/env python
# coding: utf-8
import re
import os
from itertools import chain
# Path to the word table ("<word> <value>" per line).  NOTE(review): the
# backslashes make this Windows-specific; confirm before running elsewhere.
data_path = os.path.join(os.getcwd(), "data\en_words\words.txt")
# Regex matching a word-like token, per supported language.
word_regexes = {
    'en': r'[A-Za-z]+'
}
# Alphabet used to generate replacement/insertion typo candidates.
alphabets = {
    'en': 'abcdefghijklmnopqrstuvwxyz',
}
def load_data(file):
    """Load a "<word> <count>" word list into a dict mapping word -> int count.

    Each line must hold a word and an integer value separated by a space
    (extra space-separated fields are ignored).  Values are converted to
    int so callers can compare them numerically — the original kept them as
    strings, which made Correcter's `v > threshold` comparison raise
    TypeError in Python 3.

    Raises:
        ValueError: if a value field is not an integer.
        IndexError: if a line has no space-separated value.
    """
    words = {}
    # `with` guarantees the handle is closed even if a line is malformed.
    with open(file, 'r') as f:
        for line in f:
            pairs = line.strip('\n').split(" ")
            words[pairs[0]] = int(pairs[1])
    return words
class Correcter:
    """Spelling corrector backed by the word table at module-level `data_path`.

    `threshold` optionally drops entries whose value is <= threshold.
    """
    def __init__(self, threshold=0, lang='en'):
        self.threshold = threshold
        self.nlp_data = load_data(data_path)
        self.lang = lang
        if threshold > 0:
            print(f'Original number of words: {len(self.nlp_data)}')
            # int() guards against string-valued table entries: comparing a
            # str against the int threshold raises TypeError in Python 3.
            self.nlp_data = {k: v for k, v in self.nlp_data.items()
                             if int(v) > threshold}
            print(f'After applying threshold: {len(self.nlp_data)}')
    def existing(self, words):
        """{'the', 'teh'} => {'the'} — keep only words present in the table."""
        return set(word for word in words
                   if word in self.nlp_data)
    def autocorrect_word(self, word):
        """Most likely correction for everything up to a double typo."""
        w = Word(word, self.lang)
        # existing() only iterates its argument, so pass generators directly.
        candidates = (self.existing([word]) or
                      self.existing(w.typos()) or
                      self.existing(w.double_typos()) or
                      [word])
        # .get avoids a KeyError when we fall back to the unknown word itself
        # (min applies the key even to a single candidate).
        # NOTE(review): selection uses min(), i.e. the SMALLEST table value
        # wins — correct if values are ranks, backwards if they are counts.
        return min(candidates, key=lambda k: self.nlp_data.get(k, 0))
    def autocorrect_sentence(self, sentence):
        """Autocorrect every word-like token of `sentence`."""
        return re.sub(word_regexes[self.lang],
                      lambda match: self.autocorrect_word(match.group(0)),
                      sentence)
    def __call__(self, sentence):
        return self.autocorrect_sentence(sentence)
class Word(object):
    """Generates candidate corrections (edit-distance-1 and -2 variants)."""

    def __init__(self, word, lang='en'):
        """Precompute every (prefix, suffix) split of *word*.

        'the' => (('', 'the'), ('t', 'he'), ('th', 'e'), ('the', ''))
        """
        self.word = word
        self.alphabet = alphabets[lang]
        self.slices = tuple(
            (word[:cut], word[cut:]) for cut in range(len(word) + 1)
        )

    def _deletes(self):
        """'the' -> 'th' (one character removed)."""
        return (self.concat(head, tail[1:])
                for head, tail in self.slices[:-1])

    def _transposes(self):
        """'the' -> 'teh' (two adjacent characters swapped)."""
        return (self.concat(head, reversed(tail[:2]), tail[2:])
                for head, tail in self.slices[:-2])

    def _replaces(self):
        """'the' -> 'tge' (one character substituted)."""
        return (self.concat(head, letter, tail[1:])
                for head, tail in self.slices[:-1]
                for letter in self.alphabet)

    def _inserts(self):
        """'the' -> 'thwe' (one character added)."""
        return (self.concat(head, letter, tail)
                for head, tail in self.slices
                for letter in self.alphabet)

    def concat(self, *parts):
        """Join parts that may be strings or character iterables.

        reversed('th'), 'e' => 'hte'
        """
        try:
            return ''.join(parts)
        except TypeError:
            return ''.join(chain.from_iterable(parts))

    def typos(self):
        """Yield every letter combination one typo away from the word."""
        for candidate in chain(self._deletes(), self._transposes(),
                               self._replaces(), self._inserts()):
            yield candidate

    def double_typos(self):
        """Letter combinations two typos away from the word."""
        return (second for first in self.typos()
                for second in Word(first).typos())
# Module self-test: build a default English corrector and exercise it once.
correct = Correcter(lang='en')
correct("th")
# In[ ]:
|
{"/eval.py": ["/autocorrect.py", "/chatbot.py"]}
|
34,388
|
0x21/ShodanoptikBaskulerScanner
|
refs/heads/master
|
/apiexample.py
|
import api

# Shodan API credentials and search query; fill these in before running.
api_key = ":D"
query = "your Shodan search Query"
try:
    liste = api.shodanapi(api_key, query)
    # Print every discovered address, then the total count.
    for i in liste:
        print(i)
    print(len(liste))
except Exception as exc:
    # Narrowed from a bare `except: pass`, which silently swallowed every
    # failure (including api.shodanapi returning None on a bad key) and
    # also ate KeyboardInterrupt.
    print("Shodan lookup failed: {}".format(exc))
|
{"/apiexample.py": ["/api.py"]}
|
34,389
|
0x21/ShodanoptikBaskulerScanner
|
refs/heads/master
|
/api.py
|
import requests
import json
def Remove(duplicate):
    """Return a copy of *duplicate* with later duplicates dropped.

    Order of first occurrence is preserved.  dict.fromkeys keeps insertion
    order and gives O(n) behaviour instead of the original quadratic
    list-membership scan.  Assumes hashable items (callers pass IP strings).
    """
    return list(dict.fromkeys(duplicate))
def shodanapi(api_key, query):
    """Query the Shodan search API and return unique matching IP addresses.

    Pages through every result page (Shodan serves up to 100 matches per
    page).  Returns None when the first request is rejected (e.g. bad API
    key), mirroring the original behaviour.
    """
    api_url = "https://api.shodan.io/shodan/host/search?key="+api_key+"&query="+query
    liste = []
    req = requests.get(api_url)
    if req.status_code == 200:
        x = json.loads(req.text)
        j = x['total']
        page = int(j/100)+2
        for i in range(1, page):
            pageurl = api_url+"&page="+str(i)
            req = requests.get(pageurl)
            x = json.loads(req.text)
            # Iterate the matches that are actually present.  The original
            # indexed range(0, 99) — silently dropping the 100th result of
            # every page — and relied on a bare except to stop on short pages.
            for match in x.get('matches', []):
                liste.append(match['ip_str'])
        liste1 = Remove(liste)
        print(str(len(liste1))+" ip addresses found")
        return liste1
    else:
        print('Please check your Api Key')
|
{"/apiexample.py": ["/api.py"]}
|
34,395
|
mmmds/WirelessDiscoverCrackScan
|
refs/heads/master
|
/wdcs/ap.py
|
from wdcs.basicutils import Bssid
from wdcs.logger import Logger
from wdcs.timeutils import TimeUtils
class AP_Device(object):
    """Access point record as read from a local database row.

    Row layout (by index): 0 bssid, 1 essid, 2 power, 3 channel, 4 enc,
    5 first_seen, 6 last_seen, 7 psk, 8 hs4 (4-way handshake), 9 pmkid,
    10 wps, 11 last_attack, 12 status.
    """
    def __init__(self, rows, new=False):
        self.bssid = rows[0]
        self.essid = rows[1]
        try:
            self.power = int(rows[2])
        except ValueError:
            # NOTE(review): on bad input self.power is never assigned, so a
            # later access raises AttributeError — confirm rows[2] is always
            # numeric in the DB.
            Logger.log("Invalid power {} for AP {}".format(rows[2], self.bssid))
            pass
        self.channel = rows[3]
        self.enc = rows[4]
        self.first_seen = rows[5]
        self.last_seen = rows[6]
        self.psk = rows[7]
        self.hs4 = rows[8]
        self.pmkid = rows[9]
        self.wps = rows[10]
        # Normalise NULL columns to empty strings.
        self.last_attack = rows[11] if rows[11] is not None else ""
        self.new = new
        self.status = rows[12] if rows[12] is not None else ""
    def __lt__(self, other):
        # Ordering used by display sort; "-1" entries sort last.
        # NOTE(review): power is int here but str after merge() copies it from
        # an Available_AP_Device — the "-1" comparisons only match the str
        # form; confirm the intended type.
        if other.power == "-1":
            return True
        if self.power == "-1":
            return False
        return self.power < other.power
    def __eq__(self, other):
        # Equality solely by power, to pair with the ordering above.
        return self.power == other.power
    def merge(self, ap_device):
        """Refresh live fields from a freshly-scanned device.

        `ap_device` is an Available_AP_Device (it exposes .last and .privacy,
        which this class does not).
        """
        self.power = ap_device.power
        self.channel = ap_device.channel
        self.last_seen = ap_device.last
        self.wps = ap_device.wps
        self.new = ap_device.new
        self.enc = ap_device.privacy
    def is_no_stations(self):
        """True when the last attack recorded no associated clients."""
        return self.status == AP_Status.NO_STATIONS
    def __str__(self):
        return "AP device bssid={}, essid={}, channel={}, priv={}{}".format(self.bssid, self.essid, self.channel,
                                                                self.enc, "(WPS)" if self.wps else "")
class Available_AP_Device(object):
    """Access point parsed from one airodump-ng CSV row.

    CSV columns used: 0 BSSID, 1 first-seen, 2 last-seen, 3 channel,
    5 privacy, 6 cipher, 7 auth, 8 power, 13 ESSID.
    `wps_bssids` is the set of BSSIDs known to advertise WPS.
    """
    def __init__(self, csv, wps_bssids):
        self.bssid = Bssid.normalize_bssid(csv[0])
        self.first = TimeUtils.normalize_airodump_time(csv[1].strip())
        self.last = TimeUtils.normalize_airodump_time(csv[2].strip())
        self.channel = csv[3].strip()
        self.privacy = csv[5].strip()
        self.cipher = csv[6].strip()
        self.auth = csv[7].strip()
        self.power = csv[8].strip()  # kept as a string, e.g. "-1" when unknown
        self.essid = csv[13].strip()
        self.wps = self.bssid in wps_bssids
        self.new = False
    def __str__(self):
        return "AP device bssid={}, essid={}, channel={}, priv={}{}".format(self.bssid, self.essid, self.channel,
                                                                self.privacy, "(WPS)" if self.wps else "")
class Available_Station_Device(object):
    """Client station parsed from one airodump-ng CSV row.

    CSV columns used: 0 station MAC, 1 first-seen, 2 last-seen,
    5 associated AP BSSID, 6 probed ESSID.
    """
    def __init__(self, csv):
        self.mac = Bssid.normalize_bssid(csv[0])
        self.first = TimeUtils.normalize_airodump_time(csv[1].strip())
        self.last = TimeUtils.normalize_airodump_time(csv[2].strip())
        self.bssid = Bssid.normalize_bssid(csv[5])
        self.essid = csv[6].strip()
    def __str__(self):
        return "Station device mac={}, bssid={}, essid={}".format(self.mac, self.bssid, self.essid)
class AP_Status:
    # Marker stored in the DB status column when an attack found no clients.
    NO_STATIONS = "NO_STATIONS"
|
{"/wdcs/ap.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs/timeutils.py"], "/wdcs/wdcs.py": ["/wdcs/config.py", "/wdcs/scan.py", "/wdcs/database.py", "/wdcs/discover.py", "/wdcs/timeutils.py", "/wdcs/crack.py", "/wdcs/hashcat.py"], "/wdcs/basicutils.py": ["/wdcs/process.py", "/wdcs/logger.py"], "/wdcs/discover.py": ["/wdcs/ap.py", "/wdcs/crack.py", "/wdcs/basicutils.py"], "/wdcs.py": ["/wdcs/wdcs.py"], "/wdcs/database.py": ["/wdcs/ap.py"], "/wdcs/config.py": ["/wdcs/basicutils.py", "/wdcs/logger.py"], "/wdcs/scan.py": ["/wdcs/crack.py", "/wdcs/logger.py", "/wdcs/process.py", "/wdcs.py"], "/wdcs/process.py": ["/wdcs/logger.py", "/wdcs.py"], "/wdcs/hashcat.py": ["/wdcs.py", "/wdcs/logger.py"], "/wdcs/crack.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs.py"]}
|
34,396
|
mmmds/WirelessDiscoverCrackScan
|
refs/heads/master
|
/wdcs/timeutils.py
|
import datetime
class TimeUtils(object):
    """Helpers around the "%Y-%m-%d %H:%M" timestamp format used by the app."""
    @classmethod
    def now(cls):
        """Current local time as a datetime."""
        return datetime.datetime.now()
    @classmethod
    def now_str(cls):
        """Current local time formatted as 'YYYY-MM-DD HH:MM'."""
        return cls.now().strftime("%Y-%m-%d %H:%M")
    @classmethod
    def parse(cls, str):
        """Parse a 'YYYY-MM-DD HH:MM' string back into a datetime."""
        return datetime.datetime.strptime(str, "%Y-%m-%d %H:%M")
    @classmethod
    def calc_minutes_diff(cls, dt1, dt2):
        """Whole minutes elapsed from dt2 to dt1.

        Uses total_seconds(): the original read `.seconds`, which discards
        whole days, so e.g. a 25-hour gap was reported as 60 minutes —
        breaking the "attacked recently" threshold check.
        """
        return int((dt1 - dt2).total_seconds() / 60)
    @classmethod
    def normalize_airodump_time(cls, str):
        """Trim a timestamp's seconds field: '... 10:00:33' -> '... 10:00'."""
        return ":".join(str.split(":")[:2])
|
{"/wdcs/ap.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs/timeutils.py"], "/wdcs/wdcs.py": ["/wdcs/config.py", "/wdcs/scan.py", "/wdcs/database.py", "/wdcs/discover.py", "/wdcs/timeutils.py", "/wdcs/crack.py", "/wdcs/hashcat.py"], "/wdcs/basicutils.py": ["/wdcs/process.py", "/wdcs/logger.py"], "/wdcs/discover.py": ["/wdcs/ap.py", "/wdcs/crack.py", "/wdcs/basicutils.py"], "/wdcs.py": ["/wdcs/wdcs.py"], "/wdcs/database.py": ["/wdcs/ap.py"], "/wdcs/config.py": ["/wdcs/basicutils.py", "/wdcs/logger.py"], "/wdcs/scan.py": ["/wdcs/crack.py", "/wdcs/logger.py", "/wdcs/process.py", "/wdcs.py"], "/wdcs/process.py": ["/wdcs/logger.py", "/wdcs.py"], "/wdcs/hashcat.py": ["/wdcs.py", "/wdcs/logger.py"], "/wdcs/crack.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs.py"]}
|
34,397
|
mmmds/WirelessDiscoverCrackScan
|
refs/heads/master
|
/wdcs/wdcs.py
|
#!/usr/bin/python3
from wdcs.config import Config
from wdcs.scan import *
from wdcs.database import *
from wdcs.discover import Discover
from wdcs.timeutils import TimeUtils
from wdcs.crack import *
from wdcs.hashcat import Hashcat
import itertools
import sys
import os
class StatusEnum:
    """Pair of flags telling the main loop what to do on the next cycle."""

    def __init__(self, scan, attack):
        # scan: run the discovery phase; attack: run the attack phase.
        self.scan, self.attack = scan, attack
class Status:
    # Predefined (scan, attack) flag combinations used by the main loop.
    ALL = StatusEnum(True, True)          # scan, then attack the selection
    NONE = StatusEnum(False, False)       # idle cycle
    SCAN_ONLY = StatusEnum(True, False)
    ATTACK_ONLY = StatusEnum(False, True)
class WDCS(object):
    """Top-level application object: wires together scanning, cracking,
    persistence and reporting, and drives the interactive/auto main loop."""
    def __init__(self):
        # Composition root — every collaborator is constructed exactly once.
        self.file_manager = FileManager()
        self.db = Database(self.file_manager)
        self.config = Config(self.file_manager)
        self.hashcat = Hashcat(self.config)
        self.crack_utils = WpaCrackUtils(self.file_manager, self.db, self.hashcat)
        self.scanner = Discover(self.config, self.db, self.crack_utils, self.file_manager)
        self.wps_cracker = WpsCracker(self.config, self.file_manager, self.db)
        self.wpa_pmkid_cracker = WpaPmkidCracker(self.file_manager, self.crack_utils, self.config)
        self.wpa_handshake_cracker = WpaHandshakeCracker(self.config, self.crack_utils, self.file_manager)
        self.nmap = Nmap(self.config, self.db, self.file_manager)
    def get_display_merged_with_available(self, available_devices):
        """Merge freshly-seen APs into their stored DB records, sorted by power.

        NOTE(review): `converted` is never populated and `i` is unused, so APs
        that are visible but absent from the DB are dropped here — looks like
        unfinished merge logic; confirm against Discover/Database.
        """
        available_bssids = [d.bssid for d in available_devices]
        display_devices = self.db.get_display_devices(available_bssids)
        converted = []
        i = len(display_devices)
        for av in available_devices:
            dds = [d for d in display_devices if d.bssid == av.bssid]
            if len(dds) > 0:
                dds[0].merge(av)
        display_devices.extend(converted)
        display_devices.sort()
        return display_devices
    def print_discovered_devices(self, available, devices):
        """Pretty-print the device table, sizing columns to their content."""
        if len(devices) > 0:
            essid_length = str(max([len(d.essid) for d in devices]))
            enc_length = str(max([len(d.enc) for d in devices]))
            power_length = str(max([len(str(d.power)) for d in devices]))
            line_format = "{:<3} {:<17} {:<" + essid_length + "} {:<" + power_length + "}  {:<2} {:<" + enc_length + "} {:<3}  {:<16}  {:<16}  {:<16}  {:<3} {:<3} {:<3} {:<5} {}"
            print(line_format.format("NUM", "BSSID", "ESSID", "PWR", "CH", "ENC", "WPS", "FIRST SEEN", "LAST SEEN", "LAST ATTACK", "NEW", "PSK", "4HS", "PMKID", "CLIENTS..."))
            num = 0
            for d in devices:
                num += 1
                # Stations currently associated with this AP (by BSSID match).
                stations = "[{}]".format(",".join([ Bssid.make_colon_bssid(st.mac) for st in available["station"] if st.bssid == d.bssid ]))
                print(line_format.replace("<", ">").format(num, Bssid.make_colon_bssid(d.bssid), d.essid, d.power, d.channel, d.enc, d.wps, d.first_seen, d.last_seen, d.last_attack, d.new, d.psk, d.hs4, d.pmkid, stations))
    def export_nmap(self, outdir):
        """Dump stored nmap scan outputs into one file per host/scan type."""
        if os.path.exists(outdir):
            Logger.log("Directory exists!")
            return
        os.mkdir(outdir)
        rows = self.db.get_nmap_results()
        for row in rows:
            content = row[1]
            # Infer the scan type from the nmap command line embedded in the
            # first 250 characters of the stored output.
            scan_type = "TCP"
            if "-sSVC" in content[0:250]:
                scan_type = "TCP-V"
            elif "-sU" in content[0:250]:
                scan_type = "UDP"
            filename = "{}/nmap_{}_{}".format(outdir, row[0], scan_type)
            with open(filename, "w") as f:
                f.write(content)
            Logger.log("Written to {}".format(filename))
    def prepare_essid_dict(self, essids, outdir):
        """Write a candidate-password wordlist derived from the given ESSIDs.

        Variants: split on '-', '_' and ' '; lower/upper case; numeric
        suffixes 0-20 and 2000-2021.
        """
        all = set(essids)
        all.update(list(itertools.chain(*[e.split("-") for e in essids])))
        all.update(list(itertools.chain(*[e.split("_") for e in essids])))
        all.update(list(itertools.chain(*[e.split(" ") for e in essids])))
        all.update([e.lower() for e in all])
        all.update([e.upper() for e in all])
        temp = set()
        for a in all:
            for i in range(0,21):
                temp.add("{}{}".format(a,i))
            for i in range(2000,2022):
                temp.add("{}{}".format(a,i))
        all.update(temp)
        # NOTE(review): `time` is assumed to come from the star imports at the
        # top of the module — confirm.
        filename = outdir+"/dict_{}.txt".format(str(time.time()).split(".")[0])
        with open(filename, "w") as f:
            for a in all:
                f.write(a)
                f.write("\n")
    def export(self, outdir):
        """Export handshakes/PMKIDs, cracked PSKs and nmap results to outdir."""
        if os.path.exists(outdir):
            Logger.log("Directory exists!")
            return
        os.mkdir(outdir)
        self.export_pmkid_4hs(outdir)
        self.export_psk(outdir)
        self.export_nmap(outdir + "/nmap")
    def export_pmkid_4hs(self, outdir):
        """Write captured PMKIDs (.16800) / handshakes (.2500) per BSSID.

        File suffixes match hashcat hash-mode numbers.
        NOTE(review): if a row has neither pmkid nor hs, the trailing `pass`
        leaves suffix/data/option undefined and the next line raises
        NameError — presumably get_4hs_and_pmkid only returns rows with one
        of them; confirm.
        """
        rows = self.db.get_4hs_and_pmkid()
        essids = [ r[3] for r in rows ]
        self.prepare_essid_dict(essids, outdir)
        for row in rows:
            bssid = row[0]
            pmkid = row[1]
            hs = row[2]
            if pmkid:
                suffix = "16800"
                data = pmkid
                option = "w"        # PMKID is stored as text
            elif hs:
                suffix = "2500"
                data = hs
                option = "wb"       # handshake is stored as binary
            else:
                pass
            filename = outdir+"/{}.{}".format(bssid, suffix)
            with open(filename, option) as f:
                f.write(data)
            Logger.log("Written to {}".format(filename))
    def export_psk(self, outdir):
        """Write all cracked PSKs as tab-separated rows to psk.csv."""
        psks = self.db.select_all_psk()
        psk_data = "\n".join(["{}\t{}\t{}".format(r[0], r[1], r[2]) for r in psks]) + "\n"
        pskfile = outdir + "/psk.csv"
        with open(pskfile, "w") as f:
            f.write(psk_data)
        Logger.log("Exported PSK:\n" + psk_data)
    def add_psk(self, essid, psk):
        """Manually record (or overwrite) the PSK for a known ESSID."""
        if self.db.check_essid_exists(essid):
            old_psk = self.db.select_psk_for_essid(essid)
            if len(old_psk) > 0:
                Logger.log("Changing psk for {}. From {} to {}".format(essid, old_psk, psk))
            else:
                Logger.log("Adding psk {} to {}".format(psk, essid))
            self.db.update_psk_for_essid(essid, psk)
    def run_nmap(self, ap):
        """Run an nmap scan against the (cracked or open) AP's network."""
        Logger.log("running nmap for ({})...".format(ap))
        self.nmap.scan(ap)
    def prepare_available(self):
        """Fresh per-cycle container for discovered APs and stations."""
        available = {"ap": [], "station": []}
        return available
    def is_ap_suitable_for_auto(self, d, available):
        """Eligibility for auto mode: visible power, whitelisted, crackable
        (or already cracked/open), and either never attacked, attacked long
        enough ago, or previously station-less but now showing clients."""
        return d.power != "-1" and self.config.is_bssid_legal(d.bssid) \
               and (self.config.crack_in_auto_mode or (d.psk or d.enc == "OPN")) \
               and (len(d.last_attack) == 0 or
                    (TimeUtils.calc_minutes_diff(TimeUtils.now(), TimeUtils.parse(d.last_attack)) > self.config.auto_scan_device_threshold_minutes)
                    or (d.is_no_stations() and len(self.get_stations_for_ap(d, available)) > 0))
    def get_stations_for_ap(self, ap, available):
        """MACs of stations associated to (or probing for) the given AP."""
        stations = [s.mac for s in available["station"] if
                    s.bssid == ap.bssid or s.essid == ap.essid]
        return stations
    def print_all(self):
        """Print every AP stored in the database (no live station info)."""
        display_devices = self.db.get_display_devices()
        stations = {"station": []} # TODO
        self.print_discovered_devices(stations, display_devices)
    def start(self, auto=False):
        """Main loop: scan, display, pick a target (user or auto), attack.

        In auto mode the first suitable AP is attacked each cycle; otherwise
        the user selects by number, continues scanning, or quits.
        """
        Interface.setup(self.config)
        status = Status.ALL
        discover_sleep = self.config.scan_default_sleep_seconds
        devices = None
        while True:
            try:
                if status.scan:
                    available = self.prepare_available()
                    # Randomise MAC before every scan cycle.
                    Interface.change_mac(self.config.iface)
                    pcap = self.scanner.discover_networks(available, discover_sleep)
                    devices = self.get_display_merged_with_available(available["ap"])
                    if pcap:
                        # Harvest handshakes/PMKIDs captured incidentally.
                        self.scanner.select_accidentally_found_hs_and_pmkid(pcap, devices)
                    if not auto:
                        os.system("reset")
                        self.print_discovered_devices(available, devices)
                if auto:
                    # Pick the first attack-worthy AP, if any.
                    index = 0
                    for d in devices:
                        if self.is_ap_suitable_for_auto(d, available):
                            break
                        index += 1
                    if 0 <= index < len(devices):
                        status = Status.ALL
                    else:
                        status = Status.SCAN_ONLY
                else:
                    command = input("Enter number to proceed attack, 'c' or 'c SECONDS' to continue scan (-1 is inf), 'q' to quit\n")
                    if command == "c":
                        status = Status.SCAN_ONLY
                    elif command.startswith("c"):
                        try:
                            if len(command.split(" ")) == 2:
                                discover_sleep = int(command.split(" ")[1])
                                status = Status.SCAN_ONLY
                        except:
                            pass
                    elif command == "q":
                        break
                    else:
                        try:
                            index = int(command) - 1
                            if index < 0 or index >= len(devices):
                                raise ValueError()
                            status = Status.ALL
                        except ValueError:
                            status = Status.NONE
                if status.attack:
                    ap = devices[index]
                    Logger.log("Selected {}".format(ap))
                    if not self.config.is_bssid_legal(ap.bssid):
                        status = Status.NONE
                        Logger.log("{} is not whitelisted".format(ap.bssid))
                        continue
                    ap.last_attack = self.db.update_last_attack(ap.bssid)
                    try:
                        if auto and not self.config.crack_in_auto_mode:
                            Logger.log("Skipping cracking")
                            pass
                        elif not (ap.psk or ap.hs4 or ap.pmkid):
                            Logger.log("Cracking...")
                            stations = self.get_stations_for_ap(ap, available)
                            Logger.log("{} wps={}, clients={}".format(str(ap), str(ap.wps), str(stations)))
                            # WPS attacks need clients; try pixie-dust first.
                            if ap.wps and not ap.is_no_stations():
                                if not self.config.skip_wps_pixie:
                                    self.wps_cracker.crack(ap, pixie=True)
                                if not self.config.skip_wps_bruteforce:
                                    self.wps_cracker.crack(ap, pixie=False)
                            if "WPA" in ap.enc:
                                if not (ap.hs4 or ap.pmkid):
                                    if not self.config.skip_pmkid:
                                        self.wpa_pmkid_cracker.crack_wpa_pmkid(ap)
                                    if not self.config.skip_4hs and not ap.is_no_stations():
                                        self.wpa_handshake_cracker.crack_wpa_handshake(ap, stations, True)
                                else:
                                    Logger.log("Already have handshake/pmkid")
                                    ap.last_attack = self.db.update_last_attack(ap.bssid)
                        elif ap.enc == "OPN":
                            Logger.log("Open network. Nothing to crack")
                        else:
                            Logger.log("Already have PSK")
                    except CrackSuccess:
                        Logger.log("Crack success!")
                        ap.last_attack = self.db.update_last_attack(ap.bssid)
                    except NoStations:
                        Logger.log("No stations") # do not update last attack
                        ap.last_attack = self.db.update_last_attack(ap.bssid, AP_Status.NO_STATIONS)
                    # Once the network is accessible, port-scan it.
                    if ap.psk or ap.enc == "OPN":
                        ap.last_attack = self.db.update_last_attack(ap.bssid)
                        self.run_nmap(ap)
            except KeyboardInterrupt as e:
                raise e
            except:
                # NOTE(review): `traceback` is assumed to come from the star
                # imports at the top of the module — confirm.
                Logger.log("Something went wrong! {}".format(traceback.format_exc()))
|
{"/wdcs/ap.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs/timeutils.py"], "/wdcs/wdcs.py": ["/wdcs/config.py", "/wdcs/scan.py", "/wdcs/database.py", "/wdcs/discover.py", "/wdcs/timeutils.py", "/wdcs/crack.py", "/wdcs/hashcat.py"], "/wdcs/basicutils.py": ["/wdcs/process.py", "/wdcs/logger.py"], "/wdcs/discover.py": ["/wdcs/ap.py", "/wdcs/crack.py", "/wdcs/basicutils.py"], "/wdcs.py": ["/wdcs/wdcs.py"], "/wdcs/database.py": ["/wdcs/ap.py"], "/wdcs/config.py": ["/wdcs/basicutils.py", "/wdcs/logger.py"], "/wdcs/scan.py": ["/wdcs/crack.py", "/wdcs/logger.py", "/wdcs/process.py", "/wdcs.py"], "/wdcs/process.py": ["/wdcs/logger.py", "/wdcs.py"], "/wdcs/hashcat.py": ["/wdcs.py", "/wdcs/logger.py"], "/wdcs/crack.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs.py"]}
|
34,398
|
mmmds/WirelessDiscoverCrackScan
|
refs/heads/master
|
/additional/generate_8char_pin_dict.py
|
# Generate every 8-digit numeric PIN (00000000..99999999) into 8.txt,
# one per line, logging progress every 1000 combinations.
import itertools

with open("8.txt", "w") as out:
    written = 0
    for digits in itertools.product("0123456789", repeat=8):
        written += 1
        # Periodic progress indicator; the full run emits 10**8 lines.
        if written % 1000 == 0:
            print(str(written))
        out.write("".join(digits) + "\n")
|
{"/wdcs/ap.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs/timeutils.py"], "/wdcs/wdcs.py": ["/wdcs/config.py", "/wdcs/scan.py", "/wdcs/database.py", "/wdcs/discover.py", "/wdcs/timeutils.py", "/wdcs/crack.py", "/wdcs/hashcat.py"], "/wdcs/basicutils.py": ["/wdcs/process.py", "/wdcs/logger.py"], "/wdcs/discover.py": ["/wdcs/ap.py", "/wdcs/crack.py", "/wdcs/basicutils.py"], "/wdcs.py": ["/wdcs/wdcs.py"], "/wdcs/database.py": ["/wdcs/ap.py"], "/wdcs/config.py": ["/wdcs/basicutils.py", "/wdcs/logger.py"], "/wdcs/scan.py": ["/wdcs/crack.py", "/wdcs/logger.py", "/wdcs/process.py", "/wdcs.py"], "/wdcs/process.py": ["/wdcs/logger.py", "/wdcs.py"], "/wdcs/hashcat.py": ["/wdcs.py", "/wdcs/logger.py"], "/wdcs/crack.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs.py"]}
|
34,399
|
mmmds/WirelessDiscoverCrackScan
|
refs/heads/master
|
/wdcs/basicutils.py
|
import datetime
from wdcs.process import *
import re
from wdcs.logger import Logger
class Interface(object):
    """Wireless-interface management: monitor mode and MAC randomisation."""
    @classmethod
    def check_monitor(cls, wlan_device):
        """Return True when `iwconfig` reports the device in Monitor mode."""
        p = Process.start_process_pipe(["iwconfig", wlan_device])
        o = p.communicate()[0].decode("utf-8")
        mon = re.findall("Mode:Monitor", o)
        return len(mon) > 0
    @classmethod
    def change_mac(cls, wlan_device):
        """Randomise the device MAC via macchanger (interface bounced down/up)."""
        Logger.log("changing MAC...")
        Process.start_process_pipe(["ifconfig", wlan_device, "down"]).wait()
        # The second-to-last output line holds macchanger's new-MAC summary.
        out = Process.start_process_pipe_stdout(["macchanger", "-r", wlan_device]).communicate()[0].decode("utf-8").split("\n")[-2]
        Logger.log(out)
        Process.start_process_pipe(["ifconfig", wlan_device, "up"]).wait()
    @classmethod
    def setup(cls, config):
        """Put config.iface in monitor mode if needed, then randomise its MAC."""
        Logger.log("Checking monitor mode...")
        if Interface.check_monitor(config.iface):
            Logger.log("already in monitor")
        else:
            Logger.log("start monitor mode")
            # NOTE(review): these three calls are not .wait()ed — presumably
            # fast enough in practice; confirm ordering is reliable.
            Process.start_process_pipe(["ifconfig", config.iface, "down"])
            Process.start_process_pipe(["iwconfig", config.iface, "mode", "monitor"])
            Process.start_process_pipe(["ifconfig", config.iface, "up"])
            if Interface.check_monitor(config.iface):
                Logger.log("successfully switched to monitor")
            else:
                Logger.log("failed in switching to monitor")
        Interface.change_mac(config.iface)
class Bssid(object):
    """Conversions between colon-separated and bare 12-hex-digit BSSIDs."""

    @classmethod
    def normalize_bssid(cls, bssid):
        """'aa:bb:cc:dd:ee:ff' (with optional whitespace) -> 'AABBCCDDEEFF'."""
        return bssid.strip().replace(":", "").upper()

    @classmethod
    def make_colon_bssid(cls, b):
        """'AABBCCDDEEFF' -> 'AA:BB:CC:DD:EE:FF'."""
        octets = (b[pos:pos + 2] for pos in range(0, 12, 2))
        return ":".join(octets)

    @classmethod
    def is_bssid(cls, bssid):
        """True when the value has the bare 12-character BSSID length."""
        return len(bssid) == 12
class FileManager(object):
    """Resolves file paths inside the per-user application directory."""

    def __init__(self):
        # All application state lives under ~/.wdcs.
        self.directory = os.getenv("HOME") + "/.wdcs"
        if not os.path.exists(self.directory):
            os.mkdir(self.directory)
            Logger.log("Creating app directory")
        elif os.path.isfile(self.directory):
            # A regular file is squatting on the directory path.
            pass #error
        elif os.path.isdir(self.directory):
            Logger.log("App directory already exists")

    def filepath(self, filename):
        """Absolute path of *filename* inside the app directory."""
        return "/".join([self.directory, filename])

    def filename(self, filepath):
        """Final path component of *filepath*."""
        return filepath.rsplit("/", 1)[-1]
|
{"/wdcs/ap.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs/timeutils.py"], "/wdcs/wdcs.py": ["/wdcs/config.py", "/wdcs/scan.py", "/wdcs/database.py", "/wdcs/discover.py", "/wdcs/timeutils.py", "/wdcs/crack.py", "/wdcs/hashcat.py"], "/wdcs/basicutils.py": ["/wdcs/process.py", "/wdcs/logger.py"], "/wdcs/discover.py": ["/wdcs/ap.py", "/wdcs/crack.py", "/wdcs/basicutils.py"], "/wdcs.py": ["/wdcs/wdcs.py"], "/wdcs/database.py": ["/wdcs/ap.py"], "/wdcs/config.py": ["/wdcs/basicutils.py", "/wdcs/logger.py"], "/wdcs/scan.py": ["/wdcs/crack.py", "/wdcs/logger.py", "/wdcs/process.py", "/wdcs.py"], "/wdcs/process.py": ["/wdcs/logger.py", "/wdcs.py"], "/wdcs/hashcat.py": ["/wdcs.py", "/wdcs/logger.py"], "/wdcs/crack.py": ["/wdcs/basicutils.py", "/wdcs/logger.py", "/wdcs.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.